aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap1
-rw-r--r--Documentation/RCU/stallwarn.txt16
-rw-r--r--Documentation/RCU/trace.txt32
-rw-r--r--Documentation/devicetree/bindings/mfd/max77686.txt14
-rw-r--r--Documentation/devicetree/bindings/regulator/da9211.txt7
-rw-r--r--Documentation/devicetree/bindings/regulator/isl9305.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/mt6397-regulator.txt217
-rw-r--r--Documentation/devicetree/bindings/regulator/pfuze100.txt94
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt16
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sirf.txt41
-rw-r--r--Documentation/devicetree/bindings/spi/spi-st-ssc.txt40
-rw-r--r--Documentation/hwmon/ina2xx23
-rw-r--r--Documentation/memory-barriers.txt46
-rw-r--r--Documentation/networking/netlink_mmap.txt13
-rw-r--r--MAINTAINERS11
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/compressed/head.S39
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi4
-rw-r--r--arch/arm/kernel/entry-v7m.S2
-rw-r--r--arch/arm/kvm/Kconfig1
-rw-r--r--arch/arm/mm/Kconfig1
-rw-r--r--arch/arm/mm/context.c26
-rw-r--r--arch/arm/mm/dma-mapping.c3
-rw-r--r--arch/arm64/kvm/Kconfig1
-rw-r--r--arch/mips/Kconfig23
-rw-r--r--arch/mips/boot/elf2ecoff.c64
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/configs/malta_defconfig16
-rw-r--r--arch/mips/include/asm/fpu.h43
-rw-r--r--arch/mips/include/asm/fw/arc/hinv.h6
-rw-r--r--arch/mips/include/asm/mips-cm.h4
-rw-r--r--arch/mips/include/asm/mipsregs.h15
-rw-r--r--arch/mips/include/asm/syscall.h8
-rw-r--r--arch/mips/include/asm/thread_info.h1
-rw-r--r--arch/mips/include/uapi/asm/unistd.h15
-rw-r--r--arch/mips/jz4740/irq.c3
-rw-r--r--arch/mips/kernel/elf.c8
-rw-r--r--arch/mips/kernel/irq_cpu.c4
-rw-r--r--arch/mips/kernel/process.c36
-rw-r--r--arch/mips/kernel/ptrace.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/smp-cmp.c4
-rw-r--r--arch/mips/kernel/smp-mt.c3
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/mm/tlb-r4k.c2
-rw-r--r--arch/mn10300/include/asm/cacheflush.h7
-rw-r--r--arch/nios2/mm/fault.c8
-rw-r--r--arch/powerpc/include/asm/cacheflush.h7
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/s390/include/asm/cacheflush.h4
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/sparc/include/asm/cacheflush_64.h5
-rw-r--r--arch/tile/kvm/Kconfig1
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c2
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/pci/common.c16
-rw-r--r--arch/x86/pci/intel_mid_pci.c1
-rw-r--r--block/blk-mq-sysfs.c25
-rw-r--r--block/blk-mq.c23
-rw-r--r--block/blk-mq.h2
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--drivers/acpi/acpi_lpss.c35
-rw-r--r--drivers/base/regmap/internal.h10
-rw-r--r--drivers/base/regmap/regmap-ac97.c4
-rw-r--r--drivers/base/regmap/regmap-i2c.c46
-rw-r--r--drivers/base/regmap/regmap.c7
-rw-r--r--drivers/char/random.c8
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/devfreq/Kconfig1
-rw-r--r--drivers/gpio/gpio-mcp23s08.c17
-rw-r--r--drivers/gpio/gpio-omap.c39
-rw-r--r--drivers/gpio/gpiolib-sysfs.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c6
-rw-r--r--drivers/hwmon/Kconfig5
-rw-r--r--drivers/hwmon/abx500.c6
-rw-r--r--drivers/hwmon/ad7314.c5
-rw-r--r--drivers/hwmon/adc128d818.c3
-rw-r--r--drivers/hwmon/ads7828.c102
-rw-r--r--drivers/hwmon/ina2xx.c334
-rw-r--r--drivers/hwmon/jc42.c15
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/tmp102.c15
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c137
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c27
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c49
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c239
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c22
-rw-r--r--drivers/irqchip/irq-mips-gic.c27
-rw-r--r--drivers/isdn/hardware/eicon/message.c2
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/bitmap.c13
-rw-r--r--drivers/md/raid5.c5
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/caif/caif_hsi.c1
-rw-r--r--drivers/net/ethernet/amd/Kconfig4
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c27
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c26
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1
-rw-r--r--drivers/net/hyperv/netvsc.c11
-rw-r--r--drivers/net/macvtap.c16
-rw-r--r--drivers/net/ppp/ppp_deflate.c2
-rw-r--r--drivers/net/tun.c25
-rw-r--r--drivers/net/usb/sr9700.c36
-rw-r--r--drivers/net/usb/sr9700.h66
-rw-r--r--drivers/net/virtio_net.c24
-rw-r--r--drivers/net/vxlan.c10
-rw-r--r--drivers/net/wan/Kconfig6
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/pci/host/pcie-designware.c3
-rw-r--r--drivers/pci/quirks.c40
-rw-r--r--drivers/regulator/Kconfig17
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/axp20x-regulator.c93
-rw-r--r--drivers/regulator/core.c375
-rw-r--r--drivers/regulator/da9211-regulator.c16
-rw-r--r--drivers/regulator/fan53555.c4
-rw-r--r--drivers/regulator/internal.h2
-rw-r--r--drivers/regulator/isl9305.c6
-rw-r--r--drivers/regulator/lp872x.c24
-rw-r--r--drivers/regulator/max14577.c62
-rw-r--r--drivers/regulator/max77686.c70
-rw-r--r--drivers/regulator/max77843.c227
-rw-r--r--drivers/regulator/max8649.c4
-rw-r--r--drivers/regulator/mt6397-regulator.c332
-rw-r--r--drivers/regulator/of_regulator.c11
-rw-r--r--drivers/regulator/pfuze100-regulator.c134
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c15
-rw-r--r--drivers/regulator/rk808-regulator.c6
-rw-r--r--drivers/regulator/rt5033-regulator.c8
-rw-r--r--drivers/regulator/tps65023-regulator.c6
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c3
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/spi/Kconfig24
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-atmel.c12
-rw-r--r--drivers/spi/spi-au1550.c4
-rw-r--r--drivers/spi/spi-bcm2835.c4
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-bitbang.c4
-rw-r--r--drivers/spi/spi-butterfly.c4
-rw-r--r--drivers/spi/spi-coldfire-qspi.c5
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dln2.c881
-rw-r--r--drivers/spi/spi-dw-mid.c15
-rw-r--r--drivers/spi/spi-dw-pci.c38
-rw-r--r--drivers/spi/spi-dw.c9
-rw-r--r--drivers/spi/spi-falcon.c12
-rw-r--r--drivers/spi/spi-fsl-cpm.c9
-rw-r--r--drivers/spi/spi-fsl-dspi.c163
-rw-r--r--drivers/spi/spi-fsl-lib.c16
-rw-r--r--drivers/spi/spi-fsl-lib.h4
-rw-r--r--drivers/spi/spi-gpio.c8
-rw-r--r--drivers/spi/spi-img-spfi.c49
-rw-r--r--drivers/spi/spi-imx.c32
-rw-r--r--drivers/spi/spi-lm70llp.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c2
-rw-r--r--drivers/spi/spi-mxs.c5
-rw-r--r--drivers/spi/spi-omap-100k.c5
-rw-r--r--drivers/spi/spi-omap-uwire.c4
-rw-r--r--drivers/spi/spi-omap2-mcspi.c5
-rw-r--r--drivers/spi/spi-orion.c88
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c17
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c34
-rw-r--r--drivers/spi/spi-pxa2xx.c207
-rw-r--r--drivers/spi/spi-pxa2xx.h34
-rw-r--r--drivers/spi/spi-qup.c11
-rw-r--r--drivers/spi/spi-rockchip.c6
-rw-r--r--drivers/spi/spi-rspi.c5
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sc18is602.c4
-rw-r--r--drivers/spi/spi-sh-hspi.c5
-rw-r--r--drivers/spi/spi-sh-msiof.c91
-rw-r--r--drivers/spi/spi-sh.c5
-rw-r--r--drivers/spi/spi-sirf.c1
-rw-r--r--drivers/spi/spi-st-ssc4.c504
-rw-r--r--drivers/spi/spi-ti-qspi.c14
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-xilinx.c298
-rw-r--r--drivers/spi/spi.c120
-rw-r--r--drivers/spi/spidev.c125
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--fs/aio.c7
-rw-r--r--fs/btrfs/Kconfig1
-rw-r--r--fs/btrfs/tree-log.c1
-rw-r--r--fs/cifs/cifs_debug.c6
-rw-r--r--fs/cifs/file.c6
-rw-r--r--fs/cifs/smbencrypt.c2
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/nilfs2/segment.c44
-rw-r--r--fs/nilfs2/segment.h5
-rw-r--r--fs/notify/Kconfig1
-rw-r--r--fs/quota/Kconfig1
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/if_vlan.h60
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/linux/pxa2xx_ssp.h1
-rw-r--r--include/linux/rculist.h16
-rw-r--r--include/linux/rcupdate.h13
-rw-r--r--include/linux/rcutiny.h45
-rw-r--r--include/linux/rcutree.h11
-rw-r--r--include/linux/regmap.h2
-rw-r--r--include/linux/regulator/da9211.h2
-rw-r--r--include/linux/regulator/driver.h13
-rw-r--r--include/linux/regulator/machine.h13
-rw-r--r--include/linux/regulator/mt6397-regulator.h49
-rw-r--r--include/linux/regulator/pfuze100.h14
-rw-r--r--include/linux/spi/at86rf230.h4
-rw-r--r--include/linux/spi/l4f00242t03.h4
-rw-r--r--include/linux/spi/lms283gf05.h4
-rw-r--r--include/linux/spi/mxs-spi.h4
-rw-r--r--include/linux/spi/pxa2xx_spi.h5
-rw-r--r--include/linux/spi/rspi.h5
-rw-r--r--include/linux/spi/sh_hspi.h4
-rw-r--r--include/linux/spi/sh_msiof.h2
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spi/tle62x0.h4
-rw-r--r--include/linux/spi/tsc2005.h5
-rw-r--r--include/linux/srcu.h14
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/linux/wait.h1
-rw-r--r--include/net/flow_keys.h6
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ipv6.h7
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/sch_generic.h13
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/rdma/ib_verbs.h5
-rw-r--r--include/sound/ak4113.h2
-rw-r--r--include/sound/ak4114.h2
-rw-r--r--include/sound/soc.h1
-rw-r--r--include/trace/events/tlb.h4
-rw-r--r--include/uapi/rdma/ib_user_verbs.h27
-rw-r--r--init/Kconfig18
-rw-r--r--kernel/cpu.c56
-rw-r--r--kernel/notifier.c3
-rw-r--r--kernel/power/Kconfig1
-rw-r--r--kernel/rcu/Makefile3
-rw-r--r--kernel/rcu/rcu.h6
-rw-r--r--kernel/rcu/rcutorture.c66
-rw-r--r--kernel/rcu/srcu.c2
-rw-r--r--kernel/rcu/tiny.c113
-rw-r--r--kernel/rcu/tiny_plugin.h9
-rw-r--r--kernel/rcu/tree.c355
-rw-r--r--kernel/rcu/tree.h62
-rw-r--r--kernel/rcu/tree_plugin.h271
-rw-r--r--kernel/rcu/tree_trace.c8
-rw-r--r--kernel/sched/core.c36
-rw-r--r--kernel/sched/deadline.c3
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/smpboot.c2
-rw-r--r--kernel/softirq.c3
-rw-r--r--kernel/time/hrtimer.c2
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/checksum.c12
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/nommu.c1
-rw-r--r--mm/pagewalk.c5
-rw-r--r--mm/shmem.c2
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c29
-rw-r--r--net/caif/chnl_net.c1
-rw-r--r--net/core/dev.c37
-rw-r--r--net/core/rtnetlink.c6
-rw-r--r--net/ipv4/ip_output.c29
-rw-r--r--net/ipv4/route.c3
-rw-r--r--net/ipv4/tcp_bic.c2
-rw-r--r--net/ipv4/tcp_cong.c32
-rw-r--r--net/ipv4/tcp_cubic.c39
-rw-r--r--net/ipv4/tcp_ipv4.c37
-rw-r--r--net/ipv4/tcp_scalable.c3
-rw-r--r--net/ipv4/tcp_veno.c2
-rw-r--r--net/ipv4/tcp_yeah.c2
-rw-r--r--net/ipv6/ip6_gre.c4
-rw-r--r--net/ipv6/ip6_output.c14
-rw-r--r--net/ipv6/output_core.c41
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/ipv6/udp_offload.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c33
-rw-r--r--net/netfilter/nf_tables_api.c28
-rw-r--r--net/netfilter/nft_masq.c26
-rw-r--r--net/netfilter/nft_nat.c40
-rw-r--r--net/netfilter/nft_redir.c25
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/rds/sysctl.c4
-rw-r--r--net/sched/cls_api.c7
-rw-r--r--net/sched/sch_fq.c10
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--security/tomoyo/Kconfig1
-rw-r--r--sound/i2c/other/ak4113.c17
-rw-r--r--sound/i2c/other/ak4114.c18
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c24
-rw-r--r--sound/soc/codecs/rt5640.c1
-rw-r--r--sound/soc/codecs/sgtl5000.c13
-rw-r--r--sound/soc/codecs/tlv320aic3x.c2
-rw-r--r--sound/soc/codecs/wm8731.c2
-rw-r--r--sound/soc/codecs/wm9705.c16
-rw-r--r--sound/soc/codecs/wm9712.c12
-rw-r--r--sound/soc/codecs/wm9713.c12
-rw-r--r--sound/soc/intel/sst-haswell-ipc.c4
-rw-r--r--sound/soc/intel/sst/sst_acpi.c2
-rw-r--r--sound/soc/soc-ac97.c36
-rw-r--r--tools/lib/lockdep/.gitignore1
-rw-r--r--tools/lib/lockdep/Makefile2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh18
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh9
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh20
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh2
343 files changed, 6248 insertions, 2962 deletions
diff --git a/.mailmap b/.mailmap
index d357e1bd2a43..0d971cfb0772 100644
--- a/.mailmap
+++ b/.mailmap
@@ -73,6 +73,7 @@ Juha Yrjola <juha.yrjola@nokia.com>
73Juha Yrjola <juha.yrjola@solidboot.com> 73Juha Yrjola <juha.yrjola@solidboot.com>
74Kay Sievers <kay.sievers@vrfy.org> 74Kay Sievers <kay.sievers@vrfy.org>
75Kenneth W Chen <kenneth.w.chen@intel.com> 75Kenneth W Chen <kenneth.w.chen@intel.com>
76Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
76Koushik <raghavendra.koushik@neterion.com> 77Koushik <raghavendra.koushik@neterion.com>
77Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 78Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
78Leonid I Ananiev <leonid.i.ananiev@intel.com> 79Leonid I Ananiev <leonid.i.ananiev@intel.com>
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index ed186a902d31..b57c0c1cdac6 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -15,7 +15,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
15 21 seconds. 15 21 seconds.
16 16
17 This configuration parameter may be changed at runtime via the 17 This configuration parameter may be changed at runtime via the
18 /sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however 18 /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout, however
19 this parameter is checked only at the beginning of a cycle. 19 this parameter is checked only at the beginning of a cycle.
20 So if you are 10 seconds into a 40-second stall, setting this 20 So if you are 10 seconds into a 40-second stall, setting this
21 sysfs parameter to (say) five will shorten the timeout for the 21 sysfs parameter to (say) five will shorten the timeout for the
@@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and
152"D" indicates that dyntick-idle processing is enabled ("." is printed 152"D" indicates that dyntick-idle processing is enabled ("." is printed
153otherwise, for example, if disabled via the "nohz=" kernel boot parameter). 153otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
154 154
155If the relevant grace-period kthread has been unable to run prior to
156the stall warning, the following additional line is printed:
157
158 rcu_preempt kthread starved for 2023 jiffies!
159
160Starving the grace-period kthreads of CPU time can of course result in
161RCU CPU stall warnings even when all CPUs and tasks have passed through
162the required quiescent states.
163
155 164
156Multiple Warnings From One Stall 165Multiple Warnings From One Stall
157 166
@@ -187,6 +196,11 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
187 behavior, you might need to replace some of the cond_resched() 196 behavior, you might need to replace some of the cond_resched()
188 calls with calls to cond_resched_rcu_qs(). 197 calls with calls to cond_resched_rcu_qs().
189 198
199o Anything that prevents RCU's grace-period kthreads from running.
200 This can result in the "All QSes seen" console-log message.
201 This message will include information on when the kthread last
202 ran and how often it should be expected to run.
203
190o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might 204o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
191 happen to preempt a low-priority task in the middle of an RCU 205 happen to preempt a low-priority task in the middle of an RCU
192 read-side critical section. This is especially damaging if 206 read-side critical section. This is especially damaging if
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index b63b9bb3bc0c..08651da15448 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -56,14 +56,14 @@ rcuboost:
56 56
57The output of "cat rcu/rcu_preempt/rcudata" looks as follows: 57The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
58 58
59 0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716 59 0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
60 1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982 60 1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
61 2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458 61 2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
62 3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622 62 3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
63 4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521 63 4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
64 5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698 64 5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
65 6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353 65 6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
66 7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969 66 7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
67 67
68This file has one line per CPU, or eight for this 8-CPU system. 68This file has one line per CPU, or eight for this 8-CPU system.
69The fields are as follows: 69The fields are as follows:
@@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this
188Kernels compiled with CONFIG_RCU_BOOST=y display the following from 188Kernels compiled with CONFIG_RCU_BOOST=y display the following from
189/debug/rcu/rcu_preempt/rcudata: 189/debug/rcu/rcu_preempt/rcudata:
190 190
191 0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871 191 0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
192 1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485 192 1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
193 2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490 193 2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
194 3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290 194 3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
195 4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114 195 4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
196 5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722 196 5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
197 6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811 197 6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
198 7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042 198 7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
199 199
200This is similar to the output discussed above, but contains the following 200This is similar to the output discussed above, but contains the following
201additional fields: 201additional fields:
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 75fdfaf41831..e39f0bc1f55e 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -39,6 +39,12 @@ to get matched with their hardware counterparts as follow:
39 -BUCKn : 1-4. 39 -BUCKn : 1-4.
40 Use standard regulator bindings for it ('regulator-off-in-suspend'). 40 Use standard regulator bindings for it ('regulator-off-in-suspend').
41 41
42 LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
43 control. To turn this feature on this property must be added to the regulator
44 sub-node:
45 - maxim,ena-gpios : one GPIO specifier enable control (the gpio
46 flags are actually ignored and always
47 ACTIVE_HIGH is used)
42 48
43Example: 49Example:
44 50
@@ -65,4 +71,12 @@ Example:
65 regulator-always-on; 71 regulator-always-on;
66 regulator-boot-on; 72 regulator-boot-on;
67 }; 73 };
74
75 buck9_reg {
76 regulator-compatible = "BUCK9";
77 regulator-name = "CAM_ISP_CORE_1.2V";
78 regulator-min-microvolt = <1000000>;
79 regulator-max-microvolt = <1200000>;
80 maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
81 };
68 } 82 }
diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt
index 240019a82f9a..eb618907c7de 100644
--- a/Documentation/devicetree/bindings/regulator/da9211.txt
+++ b/Documentation/devicetree/bindings/regulator/da9211.txt
@@ -11,6 +11,7 @@ Required properties:
11 BUCKA and BUCKB. 11 BUCKA and BUCKB.
12 12
13Optional properties: 13Optional properties:
14- enable-gpios: platform gpio for control of BUCKA/BUCKB.
14- Any optional property defined in regulator.txt 15- Any optional property defined in regulator.txt
15 16
16Example 1) DA9211 17Example 1) DA9211
@@ -27,6 +28,7 @@ Example 1) DA9211
27 regulator-max-microvolt = <1570000>; 28 regulator-max-microvolt = <1570000>;
28 regulator-min-microamp = <2000000>; 29 regulator-min-microamp = <2000000>;
29 regulator-max-microamp = <5000000>; 30 regulator-max-microamp = <5000000>;
31 enable-gpios = <&gpio 27 0>;
30 }; 32 };
31 BUCKB { 33 BUCKB {
32 regulator-name = "VBUCKB"; 34 regulator-name = "VBUCKB";
@@ -34,11 +36,12 @@ Example 1) DA9211
34 regulator-max-microvolt = <1570000>; 36 regulator-max-microvolt = <1570000>;
35 regulator-min-microamp = <2000000>; 37 regulator-min-microamp = <2000000>;
36 regulator-max-microamp = <5000000>; 38 regulator-max-microamp = <5000000>;
39 enable-gpios = <&gpio 17 0>;
37 }; 40 };
38 }; 41 };
39 }; 42 };
40 43
41Example 2) DA92113 44Example 2) DA9213
42 pmic: da9213@68 { 45 pmic: da9213@68 {
43 compatible = "dlg,da9213"; 46 compatible = "dlg,da9213";
44 reg = <0x68>; 47 reg = <0x68>;
@@ -51,6 +54,7 @@ Example 2) DA92113
51 regulator-max-microvolt = <1570000>; 54 regulator-max-microvolt = <1570000>;
52 regulator-min-microamp = <3000000>; 55 regulator-min-microamp = <3000000>;
53 regulator-max-microamp = <6000000>; 56 regulator-max-microamp = <6000000>;
57 enable-gpios = <&gpio 27 0>;
54 }; 58 };
55 BUCKB { 59 BUCKB {
56 regulator-name = "VBUCKB"; 60 regulator-name = "VBUCKB";
@@ -58,6 +62,7 @@ Example 2) DA92113
58 regulator-max-microvolt = <1570000>; 62 regulator-max-microvolt = <1570000>;
59 regulator-min-microamp = <3000000>; 63 regulator-min-microamp = <3000000>;
60 regulator-max-microamp = <6000000>; 64 regulator-max-microamp = <6000000>;
65 enable-gpios = <&gpio 17 0>;
61 }; 66 };
62 }; 67 };
63 }; 68 };
diff --git a/Documentation/devicetree/bindings/regulator/isl9305.txt b/Documentation/devicetree/bindings/regulator/isl9305.txt
index a626fc1bbf0d..d6e7c9ec9413 100644
--- a/Documentation/devicetree/bindings/regulator/isl9305.txt
+++ b/Documentation/devicetree/bindings/regulator/isl9305.txt
@@ -2,7 +2,7 @@ Intersil ISL9305/ISL9305H voltage regulator
2 2
3Required properties: 3Required properties:
4 4
5- compatible: "isl,isl9305" or "isl,isl9305h" 5- compatible: "isil,isl9305" or "isil,isl9305h"
6- reg: I2C slave address, usually 0x68. 6- reg: I2C slave address, usually 0x68.
7- regulators: A node that houses a sub-node for each regulator within the 7- regulators: A node that houses a sub-node for each regulator within the
8 device. Each sub-node is identified using the node's name, with valid 8 device. Each sub-node is identified using the node's name, with valid
@@ -19,7 +19,7 @@ Optional properties:
19Example 19Example
20 20
21 pmic: isl9305@68 { 21 pmic: isl9305@68 {
22 compatible = "isl,isl9305"; 22 compatible = "isil,isl9305";
23 reg = <0x68>; 23 reg = <0x68>;
24 24
25 VINDCD1-supply = <&system_power>; 25 VINDCD1-supply = <&system_power>;
diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
new file mode 100644
index 000000000000..a42b1d6e9863
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
@@ -0,0 +1,217 @@
1Mediatek MT6397 Regulator Driver
2
3Required properties:
4- compatible: "mediatek,mt6397-regulator"
5- mt6397regulator: List of regulators provided by this controller. It is named
6 according to its regulator type, buck_<name> and ldo_<name>.
7 The definition for each of these nodes is defined using the standard binding
8 for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
9
10The valid names for regulators are::
11BUCK:
12 buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu,
13 buck_vdrm, buck_vio18
14LDO:
15 ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch,
16 ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6,
17 ldo_vibr
18
19Example:
20 pmic {
21 compatible = "mediatek,mt6397";
22
23 mt6397regulator: mt6397regulator {
24 compatible = "mediatek,mt6397-regulator";
25
26 mt6397_vpca15_reg: buck_vpca15 {
27 regulator-compatible = "buck_vpca15";
28 regulator-name = "vpca15";
29 regulator-min-microvolt = < 850000>;
30 regulator-max-microvolt = <1350000>;
31 regulator-ramp-delay = <12500>;
32 regulator-enable-ramp-delay = <200>;
33 };
34
35 mt6397_vpca7_reg: buck_vpca7 {
36 regulator-compatible = "buck_vpca7";
37 regulator-name = "vpca7";
38 regulator-min-microvolt = < 850000>;
39 regulator-max-microvolt = <1350000>;
40 regulator-ramp-delay = <12500>;
41 regulator-enable-ramp-delay = <115>;
42 };
43
44 mt6397_vsramca15_reg: buck_vsramca15 {
45 regulator-compatible = "buck_vsramca15";
46 regulator-name = "vsramca15";
47 regulator-min-microvolt = < 850000>;
48 regulator-max-microvolt = <1350000>;
49 regulator-ramp-delay = <12500>;
50 regulator-enable-ramp-delay = <115>;
51
52 };
53
54 mt6397_vsramca7_reg: buck_vsramca7 {
55 regulator-compatible = "buck_vsramca7";
56 regulator-name = "vsramca7";
57 regulator-min-microvolt = < 850000>;
58 regulator-max-microvolt = <1350000>;
59 regulator-ramp-delay = <12500>;
60 regulator-enable-ramp-delay = <115>;
61
62 };
63
64 mt6397_vcore_reg: buck_vcore {
65 regulator-compatible = "buck_vcore";
66 regulator-name = "vcore";
67 regulator-min-microvolt = < 850000>;
68 regulator-max-microvolt = <1350000>;
69 regulator-ramp-delay = <12500>;
70 regulator-enable-ramp-delay = <115>;
71 };
72
73 mt6397_vgpu_reg: buck_vgpu {
74 regulator-compatible = "buck_vgpu";
75 regulator-name = "vgpu";
76 regulator-min-microvolt = < 700000>;
77 regulator-max-microvolt = <1350000>;
78 regulator-ramp-delay = <12500>;
79 regulator-enable-ramp-delay = <115>;
80 };
81
82 mt6397_vdrm_reg: buck_vdrm {
83 regulator-compatible = "buck_vdrm";
84 regulator-name = "vdrm";
85 regulator-min-microvolt = < 800000>;
86 regulator-max-microvolt = <1400000>;
87 regulator-ramp-delay = <12500>;
88 regulator-enable-ramp-delay = <500>;
89 };
90
91 mt6397_vio18_reg: buck_vio18 {
92 regulator-compatible = "buck_vio18";
93 regulator-name = "vio18";
94 regulator-min-microvolt = <1500000>;
95 regulator-max-microvolt = <2120000>;
96 regulator-ramp-delay = <12500>;
97 regulator-enable-ramp-delay = <500>;
98 };
99
100 mt6397_vtcxo_reg: ldo_vtcxo {
101 regulator-compatible = "ldo_vtcxo";
102 regulator-name = "vtcxo";
103 regulator-min-microvolt = <2800000>;
104 regulator-max-microvolt = <2800000>;
105 regulator-enable-ramp-delay = <90>;
106 };
107
108 mt6397_va28_reg: ldo_va28 {
109 regulator-compatible = "ldo_va28";
110 regulator-name = "va28";
111 /* fixed output 2.8 V */
112 regulator-enable-ramp-delay = <218>;
113 };
114
115 mt6397_vcama_reg: ldo_vcama {
116 regulator-compatible = "ldo_vcama";
117 regulator-name = "vcama";
118 regulator-min-microvolt = <1500000>;
119 regulator-max-microvolt = <2800000>;
120 regulator-enable-ramp-delay = <218>;
121 };
122
123 mt6397_vio28_reg: ldo_vio28 {
124 regulator-compatible = "ldo_vio28";
125 regulator-name = "vio28";
126 /* fixed output 2.8 V */
127 regulator-enable-ramp-delay = <240>;
128 };
129
130 mt6397_usb_reg: ldo_vusb {
131 regulator-compatible = "ldo_vusb";
132 regulator-name = "vusb";
133 /* fixed output 3.3 V */
134 regulator-enable-ramp-delay = <218>;
135 };
136
137 mt6397_vmc_reg: ldo_vmc {
138 regulator-compatible = "ldo_vmc";
139 regulator-name = "vmc";
140 regulator-min-microvolt = <1800000>;
141 regulator-max-microvolt = <3300000>;
142 regulator-enable-ramp-delay = <218>;
143 };
144
145 mt6397_vmch_reg: ldo_vmch {
146 regulator-compatible = "ldo_vmch";
147 regulator-name = "vmch";
148 regulator-min-microvolt = <3000000>;
149 regulator-max-microvolt = <3300000>;
150 regulator-enable-ramp-delay = <218>;
151 };
152
153 mt6397_vemc_3v3_reg: ldo_vemc3v3 {
154 regulator-compatible = "ldo_vemc3v3";
155 regulator-name = "vemc_3v3";
156 regulator-min-microvolt = <3000000>;
157 regulator-max-microvolt = <3300000>;
158 regulator-enable-ramp-delay = <218>;
159 };
160
161 mt6397_vgp1_reg: ldo_vgp1 {
162 regulator-compatible = "ldo_vgp1";
163 regulator-name = "vcamd";
164 regulator-min-microvolt = <1220000>;
165 regulator-max-microvolt = <3300000>;
166 regulator-enable-ramp-delay = <240>;
167 };
168
169 mt6397_vgp2_reg: ldo_vgp2 {
170 egulator-compatible = "ldo_vgp2";
171 regulator-name = "vcamio";
172 regulator-min-microvolt = <1000000>;
173 regulator-max-microvolt = <3300000>;
174 regulator-enable-ramp-delay = <218>;
175 };
176
177 mt6397_vgp3_reg: ldo_vgp3 {
178 regulator-compatible = "ldo_vgp3";
179 regulator-name = "vcamaf";
180 regulator-min-microvolt = <1200000>;
181 regulator-max-microvolt = <3300000>;
182 regulator-enable-ramp-delay = <218>;
183 };
184
185 mt6397_vgp4_reg: ldo_vgp4 {
186 regulator-compatible = "ldo_vgp4";
187 regulator-name = "vgp4";
188 regulator-min-microvolt = <1200000>;
189 regulator-max-microvolt = <3300000>;
190 regulator-enable-ramp-delay = <218>;
191 };
192
193 mt6397_vgp5_reg: ldo_vgp5 {
194 regulator-compatible = "ldo_vgp5";
195 regulator-name = "vgp5";
196 regulator-min-microvolt = <1200000>;
197 regulator-max-microvolt = <3000000>;
198 regulator-enable-ramp-delay = <218>;
199 };
200
201 mt6397_vgp6_reg: ldo_vgp6 {
202 regulator-compatible = "ldo_vgp6";
203 regulator-name = "vgp6";
204 regulator-min-microvolt = <1200000>;
205 regulator-max-microvolt = <3300000>;
206 regulator-enable-ramp-delay = <218>;
207 };
208
209 mt6397_vibr_reg: ldo_vibr {
210 regulator-compatible = "ldo_vibr";
211 regulator-name = "vibr";
212 regulator-min-microvolt = <1200000>;
213 regulator-max-microvolt = <3300000>;
214 regulator-enable-ramp-delay = <218>;
215 };
216 };
217 };
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index 34ef5d16d0f1..9b40db88f637 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -1,7 +1,7 @@
1PFUZE100 family of regulators 1PFUZE100 family of regulators
2 2
3Required properties: 3Required properties:
4- compatible: "fsl,pfuze100" or "fsl,pfuze200" 4- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000"
5- reg: I2C slave address 5- reg: I2C slave address
6 6
7Required child node: 7Required child node:
@@ -14,6 +14,8 @@ Required child node:
14 sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6 14 sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
15 --PFUZE200 15 --PFUZE200
16 sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6 16 sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6
17 --PFUZE3000
18 sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4
17 19
18Each regulator is defined using the standard binding for regulators. 20Each regulator is defined using the standard binding for regulators.
19 21
@@ -205,3 +207,93 @@ Example 2: PFUZE200
205 }; 207 };
206 }; 208 };
207 }; 209 };
210
211Example 3: PFUZE3000
212
213 pmic: pfuze3000@08 {
214 compatible = "fsl,pfuze3000";
215 reg = <0x08>;
216
217 regulators {
218 sw1a_reg: sw1a {
219 regulator-min-microvolt = <700000>;
220 regulator-max-microvolt = <1475000>;
221 regulator-boot-on;
222 regulator-always-on;
223 regulator-ramp-delay = <6250>;
224 };
225 /* use sw1c_reg to align with pfuze100/pfuze200 */
226 sw1c_reg: sw1b {
227 regulator-min-microvolt = <700000>;
228 regulator-max-microvolt = <1475000>;
229 regulator-boot-on;
230 regulator-always-on;
231 regulator-ramp-delay = <6250>;
232 };
233
234 sw2_reg: sw2 {
235 regulator-min-microvolt = <2500000>;
236 regulator-max-microvolt = <3300000>;
237 regulator-boot-on;
238 regulator-always-on;
239 };
240
241 sw3a_reg: sw3 {
242 regulator-min-microvolt = <900000>;
243 regulator-max-microvolt = <1650000>;
244 regulator-boot-on;
245 regulator-always-on;
246 };
247
248 swbst_reg: swbst {
249 regulator-min-microvolt = <5000000>;
250 regulator-max-microvolt = <5150000>;
251 };
252
253 snvs_reg: vsnvs {
254 regulator-min-microvolt = <1000000>;
255 regulator-max-microvolt = <3000000>;
256 regulator-boot-on;
257 regulator-always-on;
258 };
259
260 vref_reg: vrefddr {
261 regulator-boot-on;
262 regulator-always-on;
263 };
264
265 vgen1_reg: vldo1 {
266 regulator-min-microvolt = <1800000>;
267 regulator-max-microvolt = <3300000>;
268 regulator-always-on;
269 };
270
271 vgen2_reg: vldo2 {
272 regulator-min-microvolt = <800000>;
273 regulator-max-microvolt = <1550000>;
274 };
275
276 vgen3_reg: vccsd {
277 regulator-min-microvolt = <2850000>;
278 regulator-max-microvolt = <3300000>;
279 regulator-always-on;
280 };
281
282 vgen4_reg: v33 {
283 regulator-min-microvolt = <2850000>;
284 regulator-max-microvolt = <3300000>;
285 };
286
287 vgen5_reg: vldo3 {
288 regulator-min-microvolt = <1800000>;
289 regulator-max-microvolt = <3300000>;
290 regulator-always-on;
291 };
292
293 vgen6_reg: vldo4 {
294 regulator-min-microvolt = <1800000>;
295 regulator-max-microvolt = <3300000>;
296 regulator-always-on;
297 };
298 };
299 };
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index d11c3721e7cd..4c388bb2f0a2 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -30,6 +30,22 @@ Optional properties:
30 specifiers, one for transmission, and one for 30 specifiers, one for transmission, and one for
31 reception. 31 reception.
32- dma-names : Must contain a list of two DMA names, "tx" and "rx". 32- dma-names : Must contain a list of two DMA names, "tx" and "rx".
33- renesas,dtdl : delay sync signal (setup) in transmit mode.
34 Must contain one of the following values:
35 0 (no bit delay)
36 50 (0.5-clock-cycle delay)
37 100 (1-clock-cycle delay)
38 150 (1.5-clock-cycle delay)
39 200 (2-clock-cycle delay)
40
41- renesas,syncdl : delay sync signal (hold) in transmit mode.
42 Must contain one of the following values:
43 0 (no bit delay)
44 50 (0.5-clock-cycle delay)
45 100 (1-clock-cycle delay)
46 150 (1.5-clock-cycle delay)
47 200 (2-clock-cycle delay)
48 300 (3-clock-cycle delay)
33 49
34Optional properties, deprecated for soctype-specific bindings: 50Optional properties, deprecated for soctype-specific bindings:
35- renesas,tx-fifo-size : Overrides the default tx fifo size given in words 51- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt
new file mode 100644
index 000000000000..4c7adb8f777c
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sirf.txt
@@ -0,0 +1,41 @@
1* CSR SiRFprimaII Serial Peripheral Interface
2
3Required properties:
4- compatible : Should be "sirf,prima2-spi"
5- reg : Offset and length of the register set for the device
6- interrupts : Should contain SPI interrupt
7- resets: phandle to the reset controller asserting this device in
8 reset
9 See ../reset/reset.txt for details.
10- dmas : Must contain an entry for each entry in clock-names.
11 See ../dma/dma.txt for details.
12- dma-names : Must include the following entries:
13 - rx
14 - tx
15- clocks : Must contain an entry for each entry in clock-names.
16 See ../clocks/clock-bindings.txt for details.
17
18- #address-cells: Number of cells required to define a chip select
19 address on the SPI bus. Should be set to 1.
20- #size-cells: Should be zero.
21
22Optional properties:
23- spi-max-frequency: Specifies maximum SPI clock frequency,
24 Units - Hz. Definition as per
25 Documentation/devicetree/bindings/spi/spi-bus.txt
26- cs-gpios: should specify GPIOs used for chipselects.
27
28Example:
29
30spi0: spi@b00d0000 {
31 compatible = "sirf,prima2-spi";
32 reg = <0xb00d0000 0x10000>;
33 interrupts = <15>;
34 dmas = <&dmac1 9>,
35 <&dmac1 4>;
36 dma-names = "rx", "tx";
37 #address-cells = <1>;
38 #size-cells = <0>;
39 clocks = <&clks 19>;
40 resets = <&rstc 26>;
41};
diff --git a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
new file mode 100644
index 000000000000..fe54959ec957
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
@@ -0,0 +1,40 @@
1STMicroelectronics SSC (SPI) Controller
2---------------------------------------
3
4Required properties:
5- compatible : "st,comms-ssc4-spi"
6- reg : Offset and length of the device's register set
7- interrupts : The interrupt specifier
8- clock-names : Must contain "ssc"
9- clocks : Must contain an entry for each name in clock-names
10 See ../clk/*
11- pinctrl-names : Uses "default", can use "sleep" if provided
12 See ../pinctrl/pinctrl-binding.txt
13
14Optional properties:
15- cs-gpios : List of GPIO chip selects
16 See ../spi/spi-bus.txt
17
18Child nodes represent devices on the SPI bus
19 See ../spi/spi-bus.txt
20
21Example:
22 spi@9840000 {
23 compatible = "st,comms-ssc4-spi";
24 reg = <0x9840000 0x110>;
25 interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
26 clocks = <&clk_s_c0_flexgen CLK_EXT2F_A9>;
27 clock-names = "ssc";
28 pinctrl-0 = <&pinctrl_spi0_default>;
29 pinctrl-names = "default";
30 cs-gpios = <&pio17 5 0>;
31 #address-cells = <1>;
32 #size-cells = <0>;
33
34 st95hf@0{
35 compatible = "st,st95hf";
36 reg = <0>;
37 spi-max-frequency = <1000000>;
38 interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
39 };
40 };
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index 4223c2d3b508..cfd31d94c872 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -26,6 +26,12 @@ Supported chips:
26 Datasheet: Publicly available at the Texas Instruments website 26 Datasheet: Publicly available at the Texas Instruments website
27 http://www.ti.com/ 27 http://www.ti.com/
28 28
29 * Texas Instruments INA231
30 Prefix: 'ina231'
31 Addresses: I2C 0x40 - 0x4f
32 Datasheet: Publicly available at the Texas Instruments website
33 http://www.ti.com/
34
29Author: Lothar Felten <l-felten@ti.com> 35Author: Lothar Felten <l-felten@ti.com>
30 36
31Description 37Description
@@ -41,9 +47,18 @@ interface. The INA220 monitors both shunt drop and supply voltage.
41The INA226 is a current shunt and power monitor with an I2C interface. 47The INA226 is a current shunt and power monitor with an I2C interface.
42The INA226 monitors both a shunt voltage drop and bus supply voltage. 48The INA226 monitors both a shunt voltage drop and bus supply voltage.
43 49
44The INA230 is a high or low side current shunt and power monitor with an I2C 50INA230 and INA231 are high or low side current shunt and power monitors
45interface. The INA230 monitors both a shunt voltage drop and bus supply voltage. 51with an I2C interface. The chips monitor both a shunt voltage drop and
52bus supply voltage.
46 53
47The shunt value in micro-ohms can be set via platform data or device tree. 54The shunt value in micro-ohms can be set via platform data or device tree at
48Please refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings 55compile-time or via the shunt_resistor attribute in sysfs at run-time. Please
56refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
49if the device tree is used. 57if the device tree is used.
58
59Additionally ina226 supports update_interval attribute as described in
60Documentation/hwmon/sysfs-interface. Internally the interval is the sum of
61bus and shunt voltage conversion times multiplied by the averaging rate. We
62don't touch the conversion times and only modify the number of averages. The
63lower limit of the update_interval is 2 ms, the upper limit is 2253 ms.
64The actual programmed interval may vary from the desired value.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 70a09f8a0383..ca2387ef27ab 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -269,6 +269,50 @@ And there are a number of things that _must_ or _must_not_ be assumed:
269 STORE *(A + 4) = Y; STORE *A = X; 269 STORE *(A + 4) = Y; STORE *A = X;
270 STORE {*A, *(A + 4) } = {X, Y}; 270 STORE {*A, *(A + 4) } = {X, Y};
271 271
272And there are anti-guarantees:
273
274 (*) These guarantees do not apply to bitfields, because compilers often
275 generate code to modify these using non-atomic read-modify-write
276 sequences. Do not attempt to use bitfields to synchronize parallel
277 algorithms.
278
279 (*) Even in cases where bitfields are protected by locks, all fields
280 in a given bitfield must be protected by one lock. If two fields
281 in a given bitfield are protected by different locks, the compiler's
282 non-atomic read-modify-write sequences can cause an update to one
283 field to corrupt the value of an adjacent field.
284
285 (*) These guarantees apply only to properly aligned and sized scalar
286 variables. "Properly sized" currently means variables that are
287 the same size as "char", "short", "int" and "long". "Properly
288 aligned" means the natural alignment, thus no constraints for
289 "char", two-byte alignment for "short", four-byte alignment for
290 "int", and either four-byte or eight-byte alignment for "long",
291 on 32-bit and 64-bit systems, respectively. Note that these
292 guarantees were introduced into the C11 standard, so beware when
293 using older pre-C11 compilers (for example, gcc 4.6). The portion
294 of the standard containing this guarantee is Section 3.14, which
295 defines "memory location" as follows:
296
297 memory location
298 either an object of scalar type, or a maximal sequence
299 of adjacent bit-fields all having nonzero width
300
301 NOTE 1: Two threads of execution can update and access
302 separate memory locations without interfering with
303 each other.
304
305 NOTE 2: A bit-field and an adjacent non-bit-field member
306 are in separate memory locations. The same applies
307 to two bit-fields, if one is declared inside a nested
308 structure declaration and the other is not, or if the two
309 are separated by a zero-length bit-field declaration,
310 or if they are separated by a non-bit-field member
311 declaration. It is not safe to concurrently update two
312 bit-fields in the same structure if all members declared
313 between them are also bit-fields, no matter what the
314 sizes of those intervening bit-fields happen to be.
315
272 316
273========================= 317=========================
274WHAT ARE MEMORY BARRIERS? 318WHAT ARE MEMORY BARRIERS?
@@ -750,7 +794,7 @@ In summary:
750 However, they do -not- guarantee any other sort of ordering: 794 However, they do -not- guarantee any other sort of ordering:
751 Not prior loads against later loads, nor prior stores against 795 Not prior loads against later loads, nor prior stores against
752 later anything. If you need these other forms of ordering, 796 later anything. If you need these other forms of ordering,
753 use smb_rmb(), smp_wmb(), or, in the case of prior stores and 797 use smp_rmb(), smp_wmb(), or, in the case of prior stores and
754 later loads, smp_mb(). 798 later loads, smp_mb().
755 799
756 (*) If both legs of the "if" statement begin with identical stores 800 (*) If both legs of the "if" statement begin with identical stores
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index c6af4bac5aa8..54f10478e8e3 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -199,16 +199,9 @@ frame header.
199TX limitations 199TX limitations
200-------------- 200--------------
201 201
202Kernel processing usually involves validation of the message received by 202As of Jan 2015 the message is always copied from the ring frame to an
203user-space, then processing its contents. The kernel must assure that 203allocated buffer due to unresolved security concerns.
204userspace is not able to modify the message contents after they have been 204See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
205validated. In order to do so, the message is copied from the ring frame
206to an allocated buffer if either of these conditions is false:
207
208- only a single mapping of the ring exists
209- the file descriptor is not shared between processes
210
211This means that for threaded programs, the kernel will fall back to copying.
212 205
213Example 206Example
214------- 207-------
diff --git a/MAINTAINERS b/MAINTAINERS
index aaa039dee999..d66a97dd3a12 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4953,6 +4953,16 @@ F: Documentation/input/multi-touch-protocol.txt
4953F: drivers/input/input-mt.c 4953F: drivers/input/input-mt.c
4954K: \b(ABS|SYN)_MT_ 4954K: \b(ABS|SYN)_MT_
4955 4955
4956INTEL ASoC BDW/HSW DRIVERS
4957M: Jie Yang <yang.jie@linux.intel.com>
4958L: alsa-devel@alsa-project.org
4959S: Supported
4960F: sound/soc/intel/sst-haswell*
4961F: sound/soc/intel/sst-dsp*
4962F: sound/soc/intel/sst-firmware.c
4963F: sound/soc/intel/broadwell.c
4964F: sound/soc/intel/haswell.c
4965
4956INTEL C600 SERIES SAS CONTROLLER DRIVER 4966INTEL C600 SERIES SAS CONTROLLER DRIVER
4957M: Intel SCU Linux support <intel-linux-scu@intel.com> 4967M: Intel SCU Linux support <intel-linux-scu@intel.com>
4958M: Artur Paszkiewicz <artur.paszkiewicz@intel.com> 4968M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
@@ -9241,7 +9251,6 @@ F: drivers/net/ethernet/dlink/sundance.c
9241 9251
9242SUPERH 9252SUPERH
9243L: linux-sh@vger.kernel.org 9253L: linux-sh@vger.kernel.org
9244W: http://www.linux-sh.org
9245Q: http://patchwork.kernel.org/project/linux-sh/list/ 9254Q: http://patchwork.kernel.org/project/linux-sh/list/
9246S: Orphan 9255S: Orphan
9247F: Documentation/sh/ 9256F: Documentation/sh/
diff --git a/Makefile b/Makefile
index c8e17c05f916..b15036b1890c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 19 2PATCHLEVEL = 19
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Diseased Newt 5NAME = Diseased Newt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 68be9017593d..132c70e2d2f1 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -263,16 +263,37 @@ restart: adr r0, LC0
263 * OK... Let's do some funky business here. 263 * OK... Let's do some funky business here.
264 * If we do have a DTB appended to zImage, and we do have 264 * If we do have a DTB appended to zImage, and we do have
265 * an ATAG list around, we want the later to be translated 265 * an ATAG list around, we want the later to be translated
266 * and folded into the former here. To be on the safe side, 266 * and folded into the former here. No GOT fixup has occurred
267 * let's temporarily move the stack away into the malloc 267 * yet, but none of the code we're about to call uses any
268 * area. No GOT fixup has occurred yet, but none of the 268 * global variable.
269 * code we're about to call uses any global variable.
270 */ 269 */
271 add sp, sp, #0x10000 270
271 /* Get the initial DTB size */
272 ldr r5, [r6, #4]
273#ifndef __ARMEB__
274 /* convert to little endian */
275 eor r1, r5, r5, ror #16
276 bic r1, r1, #0x00ff0000
277 mov r5, r5, ror #8
278 eor r5, r5, r1, lsr #8
279#endif
280 /* 50% DTB growth should be good enough */
281 add r5, r5, r5, lsr #1
282 /* preserve 64-bit alignment */
283 add r5, r5, #7
284 bic r5, r5, #7
285 /* clamp to 32KB min and 1MB max */
286 cmp r5, #(1 << 15)
287 movlo r5, #(1 << 15)
288 cmp r5, #(1 << 20)
289 movhi r5, #(1 << 20)
290 /* temporarily relocate the stack past the DTB work space */
291 add sp, sp, r5
292
272 stmfd sp!, {r0-r3, ip, lr} 293 stmfd sp!, {r0-r3, ip, lr}
273 mov r0, r8 294 mov r0, r8
274 mov r1, r6 295 mov r1, r6
275 sub r2, sp, r6 296 mov r2, r5
276 bl atags_to_fdt 297 bl atags_to_fdt
277 298
278 /* 299 /*
@@ -285,11 +306,11 @@ restart: adr r0, LC0
285 bic r0, r0, #1 306 bic r0, r0, #1
286 add r0, r0, #0x100 307 add r0, r0, #0x100
287 mov r1, r6 308 mov r1, r6
288 sub r2, sp, r6 309 mov r2, r5
289 bleq atags_to_fdt 310 bleq atags_to_fdt
290 311
291 ldmfd sp!, {r0-r3, ip, lr} 312 ldmfd sp!, {r0-r3, ip, lr}
292 sub sp, sp, #0x10000 313 sub sp, sp, r5
293#endif 314#endif
294 315
295 mov r8, r6 @ use the appended device tree 316 mov r8, r6 @ use the appended device tree
@@ -306,7 +327,7 @@ restart: adr r0, LC0
306 subs r1, r5, r1 327 subs r1, r5, r1
307 addhi r9, r9, r1 328 addhi r9, r9, r1
308 329
309 /* Get the dtb's size */ 330 /* Get the current DTB size */
310 ldr r5, [r6, #4] 331 ldr r5, [r6, #4]
311#ifndef __ARMEB__ 332#ifndef __ARMEB__
312 /* convert r5 (dtb size) to little endian */ 333 /* convert r5 (dtb size) to little endian */
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index b8168f1f8139..24ff27049ce0 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -368,7 +368,7 @@
368 }; 368 };
369 369
370 i2s1: i2s@13960000 { 370 i2s1: i2s@13960000 {
371 compatible = "samsung,s5pv210-i2s"; 371 compatible = "samsung,s3c6410-i2s";
372 reg = <0x13960000 0x100>; 372 reg = <0x13960000 0x100>;
373 clocks = <&clock CLK_I2S1>; 373 clocks = <&clock CLK_I2S1>;
374 clock-names = "iis"; 374 clock-names = "iis";
@@ -378,7 +378,7 @@
378 }; 378 };
379 379
380 i2s2: i2s@13970000 { 380 i2s2: i2s@13970000 {
381 compatible = "samsung,s5pv210-i2s"; 381 compatible = "samsung,s3c6410-i2s";
382 reg = <0x13970000 0x100>; 382 reg = <0x13970000 0x100>;
383 clocks = <&clock CLK_I2S2>; 383 clocks = <&clock CLK_I2S2>;
384 clock-names = "iis"; 384 clock-names = "iis";
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 2260f1855820..8944f4991c3c 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -22,10 +22,12 @@
22 22
23__invalid_entry: 23__invalid_entry:
24 v7m_exception_entry 24 v7m_exception_entry
25#ifdef CONFIG_PRINTK
25 adr r0, strerr 26 adr r0, strerr
26 mrs r1, ipsr 27 mrs r1, ipsr
27 mov r2, lr 28 mov r2, lr
28 bl printk 29 bl printk
30#endif
29 mov r0, sp 31 mov r0, sp
30 bl show_regs 32 bl show_regs
311: b 1b 331: b 1b
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8..3afee5f40f4f 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
23 select HAVE_KVM_CPU_RELAX_INTERCEPT 23 select HAVE_KVM_CPU_RELAX_INTERCEPT
24 select KVM_MMIO 24 select KVM_MMIO
25 select KVM_ARM_HOST 25 select KVM_ARM_HOST
26 select SRCU
26 depends on ARM_VIRT_EXT && ARM_LPAE 27 depends on ARM_VIRT_EXT && ARM_LPAE
27 ---help--- 28 ---help---
28 Support hosting virtualized guest machines. You will also 29 Support hosting virtualized guest machines. You will also
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 03823e784f63..c43c71455566 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
1012 1012
1013config ARM_KERNMEM_PERMS 1013config ARM_KERNMEM_PERMS
1014 bool "Restrict kernel memory permissions" 1014 bool "Restrict kernel memory permissions"
1015 depends on MMU
1015 help 1016 help
1016 If this is set, kernel memory other than kernel text (and rodata) 1017 If this is set, kernel memory other than kernel text (and rodata)
1017 will be made non-executable. The tradeoff is that each region is 1018 will be made non-executable. The tradeoff is that each region is
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 91892569710f..845769e41332 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
144 /* Update the list of reserved ASIDs and the ASID bitmap. */ 144 /* Update the list of reserved ASIDs and the ASID bitmap. */
145 bitmap_clear(asid_map, 0, NUM_USER_ASIDS); 145 bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
146 for_each_possible_cpu(i) { 146 for_each_possible_cpu(i) {
147 if (i == cpu) { 147 asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
148 asid = 0; 148 /*
149 } else { 149 * If this CPU has already been through a
150 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); 150 * rollover, but hasn't run another task in
151 /* 151 * the meantime, we must preserve its reserved
152 * If this CPU has already been through a 152 * ASID, as this is the only trace we have of
153 * rollover, but hasn't run another task in 153 * the process it is still running.
154 * the meantime, we must preserve its reserved 154 */
155 * ASID, as this is the only trace we have of 155 if (asid == 0)
156 * the process it is still running. 156 asid = per_cpu(reserved_asids, i);
157 */ 157 __set_bit(asid & ~ASID_MASK, asid_map);
158 if (asid == 0)
159 asid = per_cpu(reserved_asids, i);
160 __set_bit(asid & ~ASID_MASK, asid_map);
161 }
162 per_cpu(reserved_asids, i) = asid; 158 per_cpu(reserved_asids, i) = asid;
163 } 159 }
164 160
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index a673c7f7e208..903dba064a03 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2048,6 +2048,9 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
2048{ 2048{
2049 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 2049 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
2050 2050
2051 if (!mapping)
2052 return;
2053
2051 __arm_iommu_detach_device(dev); 2054 __arm_iommu_detach_device(dev);
2052 arm_iommu_release_mapping(mapping); 2055 arm_iommu_release_mapping(mapping);
2053} 2056}
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 8ba85e9ea388..b334084d3675 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
26 select KVM_ARM_HOST 26 select KVM_ARM_HOST
27 select KVM_ARM_VGIC 27 select KVM_ARM_VGIC
28 select KVM_ARM_TIMER 28 select KVM_ARM_TIMER
29 select SRCU
29 ---help--- 30 ---help---
30 Support hosting virtualized guest machines. 31 Support hosting virtualized guest machines.
31 32
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3289969ee423..843713c05b79 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2656,27 +2656,21 @@ config TRAD_SIGNALS
2656 bool 2656 bool
2657 2657
2658config MIPS32_COMPAT 2658config MIPS32_COMPAT
2659 bool "Kernel support for Linux/MIPS 32-bit binary compatibility" 2659 bool
2660 depends on 64BIT
2661 help
2662 Select this option if you want Linux/MIPS 32-bit binary
2663 compatibility. Since all software available for Linux/MIPS is
2664 currently 32-bit you should say Y here.
2665 2660
2666config COMPAT 2661config COMPAT
2667 bool 2662 bool
2668 depends on MIPS32_COMPAT
2669 select ARCH_WANT_OLD_COMPAT_IPC
2670 default y
2671 2663
2672config SYSVIPC_COMPAT 2664config SYSVIPC_COMPAT
2673 bool 2665 bool
2674 depends on COMPAT && SYSVIPC
2675 default y
2676 2666
2677config MIPS32_O32 2667config MIPS32_O32
2678 bool "Kernel support for o32 binaries" 2668 bool "Kernel support for o32 binaries"
2679 depends on MIPS32_COMPAT 2669 depends on 64BIT
2670 select ARCH_WANT_OLD_COMPAT_IPC
2671 select COMPAT
2672 select MIPS32_COMPAT
2673 select SYSVIPC_COMPAT if SYSVIPC
2680 help 2674 help
2681 Select this option if you want to run o32 binaries. These are pure 2675 Select this option if you want to run o32 binaries. These are pure
2682 32-bit binaries as used by the 32-bit Linux/MIPS port. Most of 2676 32-bit binaries as used by the 32-bit Linux/MIPS port. Most of
@@ -2686,7 +2680,10 @@ config MIPS32_O32
2686 2680
2687config MIPS32_N32 2681config MIPS32_N32
2688 bool "Kernel support for n32 binaries" 2682 bool "Kernel support for n32 binaries"
2689 depends on MIPS32_COMPAT 2683 depends on 64BIT
2684 select COMPAT
2685 select MIPS32_COMPAT
2686 select SYSVIPC_COMPAT if SYSVIPC
2690 help 2687 help
2691 Select this option if you want to run n32 binaries. These are 2688 Select this option if you want to run n32 binaries. These are
2692 64-bit binaries using 32-bit quantities for addressing and certain 2689 64-bit binaries using 32-bit quantities for addressing and certain
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 8585078ae50e..2a4c52e27f41 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -49,7 +49,8 @@
49/* 49/*
50 * Some extra ELF definitions 50 * Some extra ELF definitions
51 */ 51 */
52#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ 52#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
53#define PT_MIPS_ABIFLAGS 0x70000003 /* Records ABI related flags */
53 54
54/* -------------------------------------------------------------------- */ 55/* -------------------------------------------------------------------- */
55 56
@@ -349,39 +350,46 @@ int main(int argc, char *argv[])
349 350
350 for (i = 0; i < ex.e_phnum; i++) { 351 for (i = 0; i < ex.e_phnum; i++) {
351 /* Section types we can ignore... */ 352 /* Section types we can ignore... */
352 if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE || 353 switch (ph[i].p_type) {
353 ph[i].p_type == PT_PHDR 354 case PT_NULL:
354 || ph[i].p_type == PT_MIPS_REGINFO) 355 case PT_NOTE:
356 case PT_PHDR:
357 case PT_MIPS_REGINFO:
358 case PT_MIPS_ABIFLAGS:
355 continue; 359 continue;
356 /* Section types we can't handle... */
357 else if (ph[i].p_type != PT_LOAD) {
358 fprintf(stderr,
359 "Program header %d type %d can't be converted.\n",
360 ex.e_phnum, ph[i].p_type);
361 exit(1);
362 }
363 /* Writable (data) segment? */
364 if (ph[i].p_flags & PF_W) {
365 struct sect ndata, nbss;
366 360
367 ndata.vaddr = ph[i].p_vaddr; 361 case PT_LOAD:
368 ndata.len = ph[i].p_filesz; 362 /* Writable (data) segment? */
369 nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz; 363 if (ph[i].p_flags & PF_W) {
370 nbss.len = ph[i].p_memsz - ph[i].p_filesz; 364 struct sect ndata, nbss;
365
366 ndata.vaddr = ph[i].p_vaddr;
367 ndata.len = ph[i].p_filesz;
368 nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
369 nbss.len = ph[i].p_memsz - ph[i].p_filesz;
371 370
372 combine(&data, &ndata, 0); 371 combine(&data, &ndata, 0);
373 combine(&bss, &nbss, 1); 372 combine(&bss, &nbss, 1);
374 } else { 373 } else {
375 struct sect ntxt; 374 struct sect ntxt;
376 375
377 ntxt.vaddr = ph[i].p_vaddr; 376 ntxt.vaddr = ph[i].p_vaddr;
378 ntxt.len = ph[i].p_filesz; 377 ntxt.len = ph[i].p_filesz;
379 378
380 combine(&text, &ntxt, 0); 379 combine(&text, &ntxt, 0);
380 }
381 /* Remember the lowest segment start address. */
382 if (ph[i].p_vaddr < cur_vma)
383 cur_vma = ph[i].p_vaddr;
384 break;
385
386 default:
387 /* Section types we can't handle... */
388 fprintf(stderr,
389 "Program header %d type %d can't be converted.\n",
390 ex.e_phnum, ph[i].p_type);
391 exit(1);
381 } 392 }
382 /* Remember the lowest segment start address. */
383 if (ph[i].p_vaddr < cur_vma)
384 cur_vma = ph[i].p_vaddr;
385 } 393 }
386 394
387 /* Sections must be in order to be converted... */ 395 /* Sections must be in order to be converted... */
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index ecd903dd1c45..8b1eeffa12ed 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -240,9 +240,7 @@ static int octeon_cpu_disable(void)
240 240
241 set_cpu_online(cpu, false); 241 set_cpu_online(cpu, false);
242 cpu_clear(cpu, cpu_callin_map); 242 cpu_clear(cpu, cpu_callin_map);
243 local_irq_disable();
244 octeon_fixup_irqs(); 243 octeon_fixup_irqs();
245 local_irq_enable();
246 244
247 flush_cache_all(); 245 flush_cache_all();
248 local_flush_tlb_all(); 246 local_flush_tlb_all();
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index f57b96dcf7df..61a4460d67d3 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -132,7 +132,6 @@ CONFIG_IP_NF_MATCH_ECN=m
132CONFIG_IP_NF_MATCH_TTL=m 132CONFIG_IP_NF_MATCH_TTL=m
133CONFIG_IP_NF_FILTER=m 133CONFIG_IP_NF_FILTER=m
134CONFIG_IP_NF_TARGET_REJECT=m 134CONFIG_IP_NF_TARGET_REJECT=m
135CONFIG_IP_NF_TARGET_ULOG=m
136CONFIG_IP_NF_MANGLE=m 135CONFIG_IP_NF_MANGLE=m
137CONFIG_IP_NF_TARGET_CLUSTERIP=m 136CONFIG_IP_NF_TARGET_CLUSTERIP=m
138CONFIG_IP_NF_TARGET_ECN=m 137CONFIG_IP_NF_TARGET_ECN=m
@@ -175,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
175CONFIG_BRIDGE_EBT_REDIRECT=m 174CONFIG_BRIDGE_EBT_REDIRECT=m
176CONFIG_BRIDGE_EBT_SNAT=m 175CONFIG_BRIDGE_EBT_SNAT=m
177CONFIG_BRIDGE_EBT_LOG=m 176CONFIG_BRIDGE_EBT_LOG=m
178CONFIG_BRIDGE_EBT_ULOG=m
179CONFIG_BRIDGE_EBT_NFLOG=m 177CONFIG_BRIDGE_EBT_NFLOG=m
180CONFIG_IP_SCTP=m 178CONFIG_IP_SCTP=m
181CONFIG_BRIDGE=m 179CONFIG_BRIDGE=m
@@ -220,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m
220CONFIG_NET_CLS_IND=y 218CONFIG_NET_CLS_IND=y
221CONFIG_CFG80211=m 219CONFIG_CFG80211=m
222CONFIG_MAC80211=m 220CONFIG_MAC80211=m
223CONFIG_MAC80211_RC_PID=y
224CONFIG_MAC80211_RC_DEFAULT_PID=y
225CONFIG_MAC80211_MESH=y 221CONFIG_MAC80211_MESH=y
226CONFIG_RFKILL=m 222CONFIG_RFKILL=m
227CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 223CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -248,19 +244,13 @@ CONFIG_ATA_OVER_ETH=m
248CONFIG_IDE=y 244CONFIG_IDE=y
249CONFIG_BLK_DEV_IDECD=y 245CONFIG_BLK_DEV_IDECD=y
250CONFIG_IDE_GENERIC=y 246CONFIG_IDE_GENERIC=y
251CONFIG_BLK_DEV_GENERIC=y
252CONFIG_BLK_DEV_PIIX=y
253CONFIG_BLK_DEV_IT8213=m
254CONFIG_BLK_DEV_TC86C001=m
255CONFIG_RAID_ATTRS=m 247CONFIG_RAID_ATTRS=m
256CONFIG_SCSI=m 248CONFIG_BLK_DEV_SD=y
257CONFIG_BLK_DEV_SD=m
258CONFIG_CHR_DEV_ST=m 249CONFIG_CHR_DEV_ST=m
259CONFIG_CHR_DEV_OSST=m 250CONFIG_CHR_DEV_OSST=m
260CONFIG_BLK_DEV_SR=m 251CONFIG_BLK_DEV_SR=m
261CONFIG_BLK_DEV_SR_VENDOR=y 252CONFIG_BLK_DEV_SR_VENDOR=y
262CONFIG_CHR_DEV_SG=m 253CONFIG_CHR_DEV_SG=m
263CONFIG_SCSI_MULTI_LUN=y
264CONFIG_SCSI_CONSTANTS=y 254CONFIG_SCSI_CONSTANTS=y
265CONFIG_SCSI_LOGGING=y 255CONFIG_SCSI_LOGGING=y
266CONFIG_SCSI_SCAN_ASYNC=y 256CONFIG_SCSI_SCAN_ASYNC=y
@@ -273,6 +263,8 @@ CONFIG_SCSI_AACRAID=m
273CONFIG_SCSI_AIC7XXX=m 263CONFIG_SCSI_AIC7XXX=m
274CONFIG_AIC7XXX_RESET_DELAY_MS=15000 264CONFIG_AIC7XXX_RESET_DELAY_MS=15000
275# CONFIG_AIC7XXX_DEBUG_ENABLE is not set 265# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
266CONFIG_ATA=y
267CONFIG_ATA_PIIX=y
276CONFIG_MD=y 268CONFIG_MD=y
277CONFIG_BLK_DEV_MD=m 269CONFIG_BLK_DEV_MD=m
278CONFIG_MD_LINEAR=m 270CONFIG_MD_LINEAR=m
@@ -340,6 +332,7 @@ CONFIG_UIO=m
340CONFIG_UIO_CIF=m 332CONFIG_UIO_CIF=m
341CONFIG_EXT2_FS=y 333CONFIG_EXT2_FS=y
342CONFIG_EXT3_FS=y 334CONFIG_EXT3_FS=y
335CONFIG_EXT4_FS=y
343CONFIG_REISERFS_FS=m 336CONFIG_REISERFS_FS=m
344CONFIG_REISERFS_PROC_INFO=y 337CONFIG_REISERFS_PROC_INFO=y
345CONFIG_REISERFS_FS_XATTR=y 338CONFIG_REISERFS_FS_XATTR=y
@@ -441,4 +434,3 @@ CONFIG_CRYPTO_SERPENT=m
441CONFIG_CRYPTO_TEA=m 434CONFIG_CRYPTO_TEA=m
442CONFIG_CRYPTO_TWOFISH=m 435CONFIG_CRYPTO_TWOFISH=m
443# CONFIG_CRYPTO_ANSI_CPRNG is not set 436# CONFIG_CRYPTO_ANSI_CPRNG is not set
444CONFIG_CRC16=m
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 994d21939676..affebb78f5d6 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -64,7 +64,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
64 return SIGFPE; 64 return SIGFPE;
65 65
66 /* set FRE */ 66 /* set FRE */
67 write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE); 67 set_c0_config5(MIPS_CONF5_FRE);
68 goto fr_common; 68 goto fr_common;
69 69
70 case FPU_64BIT: 70 case FPU_64BIT:
@@ -74,8 +74,10 @@ static inline int __enable_fpu(enum fpu_mode mode)
74#endif 74#endif
75 /* fall through */ 75 /* fall through */
76 case FPU_32BIT: 76 case FPU_32BIT:
77 /* clear FRE */ 77 if (cpu_has_fre) {
78 write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE); 78 /* clear FRE */
79 clear_c0_config5(MIPS_CONF5_FRE);
80 }
79fr_common: 81fr_common:
80 /* set CU1 & change FR appropriately */ 82 /* set CU1 & change FR appropriately */
81 fr = (int)mode & FPU_FR_MASK; 83 fr = (int)mode & FPU_FR_MASK;
@@ -182,25 +184,32 @@ static inline int init_fpu(void)
182 int ret = 0; 184 int ret = 0;
183 185
184 if (cpu_has_fpu) { 186 if (cpu_has_fpu) {
187 unsigned int config5;
188
185 ret = __own_fpu(); 189 ret = __own_fpu();
186 if (!ret) { 190 if (ret)
187 unsigned int config5 = read_c0_config5(); 191 return ret;
188
189 /*
190 * Ensure FRE is clear whilst running _init_fpu, since
191 * single precision FP instructions are used. If FRE
192 * was set then we'll just end up initialising all 32
193 * 64b registers.
194 */
195 write_c0_config5(config5 & ~MIPS_CONF5_FRE);
196 enable_fpu_hazard();
197 192
193 if (!cpu_has_fre) {
198 _init_fpu(); 194 _init_fpu();
199 195
200 /* Restore FRE */ 196 return 0;
201 write_c0_config5(config5);
202 enable_fpu_hazard();
203 } 197 }
198
199 /*
200 * Ensure FRE is clear whilst running _init_fpu, since
201 * single precision FP instructions are used. If FRE
202 * was set then we'll just end up initialising all 32
203 * 64b registers.
204 */
205 config5 = clear_c0_config5(MIPS_CONF5_FRE);
206 enable_fpu_hazard();
207
208 _init_fpu();
209
210 /* Restore FRE */
211 write_c0_config5(config5);
212 enable_fpu_hazard();
204 } else 213 } else
205 fpu_emulator_init_fpu(); 214 fpu_emulator_init_fpu();
206 215
diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h
index f8d37d1df5de..9fac64a26353 100644
--- a/arch/mips/include/asm/fw/arc/hinv.h
+++ b/arch/mips/include/asm/fw/arc/hinv.h
@@ -119,7 +119,7 @@ union key_u {
119#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */ 119#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */
120#endif 120#endif
121 121
122typedef struct component { 122typedef struct {
123 CONFIGCLASS Class; 123 CONFIGCLASS Class;
124 CONFIGTYPE Type; 124 CONFIGTYPE Type;
125 IDENTIFIERFLAG Flags; 125 IDENTIFIERFLAG Flags;
@@ -140,7 +140,7 @@ struct cfgdata {
140}; 140};
141 141
142/* System ID */ 142/* System ID */
143typedef struct systemid { 143typedef struct {
144 CHAR VendorId[8]; 144 CHAR VendorId[8];
145 CHAR ProductId[8]; 145 CHAR ProductId[8];
146} SYSTEMID; 146} SYSTEMID;
@@ -166,7 +166,7 @@ typedef enum memorytype {
166#endif /* _NT_PROM */ 166#endif /* _NT_PROM */
167} MEMORYTYPE; 167} MEMORYTYPE;
168 168
169typedef struct memorydescriptor { 169typedef struct {
170 MEMORYTYPE Type; 170 MEMORYTYPE Type;
171 LONG BasePage; 171 LONG BasePage;
172 LONG PageCount; 172 LONG PageCount;
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b95a827d763e..59c0901bdd84 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -89,9 +89,9 @@ static inline bool mips_cm_has_l2sync(void)
89 89
90/* Macros to ease the creation of register access functions */ 90/* Macros to ease the creation of register access functions */
91#define BUILD_CM_R_(name, off) \ 91#define BUILD_CM_R_(name, off) \
92static inline u32 *addr_gcr_##name(void) \ 92static inline u32 __iomem *addr_gcr_##name(void) \
93{ \ 93{ \
94 return (u32 *)(mips_cm_base + (off)); \ 94 return (u32 __iomem *)(mips_cm_base + (off)); \
95} \ 95} \
96 \ 96 \
97static inline u32 read_gcr_##name(void) \ 97static inline u32 read_gcr_##name(void) \
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 5e4aef304b02..5b720d8c2745 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1386,12 +1386,27 @@ do { \
1386 __res; \ 1386 __res; \
1387}) 1387})
1388 1388
1389#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \
1390do { \
1391 __asm__ __volatile__( \
1392 " .set push \n" \
1393 " .set reorder \n" \
1394 " "STR(gas_hardfloat)" \n" \
1395 " ctc1 %0,"STR(dest)" \n" \
1396 " .set pop \n" \
1397 : : "r" (val)); \
1398} while (0)
1399
1389#ifdef GAS_HAS_SET_HARDFLOAT 1400#ifdef GAS_HAS_SET_HARDFLOAT
1390#define read_32bit_cp1_register(source) \ 1401#define read_32bit_cp1_register(source) \
1391 _read_32bit_cp1_register(source, .set hardfloat) 1402 _read_32bit_cp1_register(source, .set hardfloat)
1403#define write_32bit_cp1_register(dest, val) \
1404 _write_32bit_cp1_register(dest, val, .set hardfloat)
1392#else 1405#else
1393#define read_32bit_cp1_register(source) \ 1406#define read_32bit_cp1_register(source) \
1394 _read_32bit_cp1_register(source, ) 1407 _read_32bit_cp1_register(source, )
1408#define write_32bit_cp1_register(dest, val) \
1409 _write_32bit_cp1_register(dest, val, )
1395#endif 1410#endif
1396 1411
1397#ifdef HAVE_AS_DSP 1412#ifdef HAVE_AS_DSP
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index bb7963753730..6499d93ae68d 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -29,13 +29,7 @@
29static inline long syscall_get_nr(struct task_struct *task, 29static inline long syscall_get_nr(struct task_struct *task,
30 struct pt_regs *regs) 30 struct pt_regs *regs)
31{ 31{
32 /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ 32 return current_thread_info()->syscall;
33 if ((config_enabled(CONFIG_32BIT) ||
34 test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
35 (regs->regs[2] == __NR_syscall))
36 return regs->regs[4];
37 else
38 return regs->regs[2];
39} 33}
40 34
41static inline unsigned long mips_get_syscall_arg(unsigned long *arg, 35static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 99eea59604e9..e4440f92b366 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -36,6 +36,7 @@ struct thread_info {
36 */ 36 */
37 struct restart_block restart_block; 37 struct restart_block restart_block;
38 struct pt_regs *regs; 38 struct pt_regs *regs;
39 long syscall; /* syscall number */
39}; 40};
40 41
41/* 42/*
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index d001bb1ad177..c03088f9f514 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -376,16 +376,17 @@
376#define __NR_getrandom (__NR_Linux + 353) 376#define __NR_getrandom (__NR_Linux + 353)
377#define __NR_memfd_create (__NR_Linux + 354) 377#define __NR_memfd_create (__NR_Linux + 354)
378#define __NR_bpf (__NR_Linux + 355) 378#define __NR_bpf (__NR_Linux + 355)
379#define __NR_execveat (__NR_Linux + 356)
379 380
380/* 381/*
381 * Offset of the last Linux o32 flavoured syscall 382 * Offset of the last Linux o32 flavoured syscall
382 */ 383 */
383#define __NR_Linux_syscalls 355 384#define __NR_Linux_syscalls 356
384 385
385#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 386#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
386 387
387#define __NR_O32_Linux 4000 388#define __NR_O32_Linux 4000
388#define __NR_O32_Linux_syscalls 355 389#define __NR_O32_Linux_syscalls 356
389 390
390#if _MIPS_SIM == _MIPS_SIM_ABI64 391#if _MIPS_SIM == _MIPS_SIM_ABI64
391 392
@@ -709,16 +710,17 @@
709#define __NR_getrandom (__NR_Linux + 313) 710#define __NR_getrandom (__NR_Linux + 313)
710#define __NR_memfd_create (__NR_Linux + 314) 711#define __NR_memfd_create (__NR_Linux + 314)
711#define __NR_bpf (__NR_Linux + 315) 712#define __NR_bpf (__NR_Linux + 315)
713#define __NR_execveat (__NR_Linux + 316)
712 714
713/* 715/*
714 * Offset of the last Linux 64-bit flavoured syscall 716 * Offset of the last Linux 64-bit flavoured syscall
715 */ 717 */
716#define __NR_Linux_syscalls 315 718#define __NR_Linux_syscalls 316
717 719
718#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 720#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
719 721
720#define __NR_64_Linux 5000 722#define __NR_64_Linux 5000
721#define __NR_64_Linux_syscalls 315 723#define __NR_64_Linux_syscalls 316
722 724
723#if _MIPS_SIM == _MIPS_SIM_NABI32 725#if _MIPS_SIM == _MIPS_SIM_NABI32
724 726
@@ -1046,15 +1048,16 @@
1046#define __NR_getrandom (__NR_Linux + 317) 1048#define __NR_getrandom (__NR_Linux + 317)
1047#define __NR_memfd_create (__NR_Linux + 318) 1049#define __NR_memfd_create (__NR_Linux + 318)
1048#define __NR_bpf (__NR_Linux + 319) 1050#define __NR_bpf (__NR_Linux + 319)
1051#define __NR_execveat (__NR_Linux + 320)
1049 1052
1050/* 1053/*
1051 * Offset of the last N32 flavoured syscall 1054 * Offset of the last N32 flavoured syscall
1052 */ 1055 */
1053#define __NR_Linux_syscalls 319 1056#define __NR_Linux_syscalls 320
1054 1057
1055#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1058#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1056 1059
1057#define __NR_N32_Linux 6000 1060#define __NR_N32_Linux 6000
1058#define __NR_N32_Linux_syscalls 319 1061#define __NR_N32_Linux_syscalls 320
1059 1062
1060#endif /* _UAPI_ASM_UNISTD_H */ 1063#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c
index 2531da1d3add..97206b3deb97 100644
--- a/arch/mips/jz4740/irq.c
+++ b/arch/mips/jz4740/irq.c
@@ -30,6 +30,9 @@
30#include <asm/irq_cpu.h> 30#include <asm/irq_cpu.h>
31 31
32#include <asm/mach-jz4740/base.h> 32#include <asm/mach-jz4740/base.h>
33#include <asm/mach-jz4740/irq.h>
34
35#include "irq.h"
33 36
34static void __iomem *jz_intc_base; 37static void __iomem *jz_intc_base;
35 38
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index c92b15df6893..a5b5b56485c1 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -19,8 +19,8 @@ enum {
19int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, 19int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
20 bool is_interp, struct arch_elf_state *state) 20 bool is_interp, struct arch_elf_state *state)
21{ 21{
22 struct elfhdr *ehdr = _ehdr; 22 struct elf32_hdr *ehdr = _ehdr;
23 struct elf_phdr *phdr = _phdr; 23 struct elf32_phdr *phdr = _phdr;
24 struct mips_elf_abiflags_v0 abiflags; 24 struct mips_elf_abiflags_v0 abiflags;
25 int ret; 25 int ret;
26 26
@@ -48,7 +48,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
48 return 0; 48 return 0;
49} 49}
50 50
51static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) 51static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
52{ 52{
53 /* If the ABI requirement is provided, simply return that */ 53 /* If the ABI requirement is provided, simply return that */
54 if (in_abi != -1) 54 if (in_abi != -1)
@@ -65,7 +65,7 @@ static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
65int arch_check_elf(void *_ehdr, bool has_interpreter, 65int arch_check_elf(void *_ehdr, bool has_interpreter,
66 struct arch_elf_state *state) 66 struct arch_elf_state *state)
67{ 67{
68 struct elfhdr *ehdr = _ehdr; 68 struct elf32_hdr *ehdr = _ehdr;
69 unsigned fp_abi, interp_fp_abi, abi0, abi1; 69 unsigned fp_abi, interp_fp_abi, abi0, abi1;
70 70
71 /* Ignore non-O32 binaries */ 71 /* Ignore non-O32 binaries */
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 590c2c980fd3..6eb7a3f515fc 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -57,6 +57,8 @@ static struct irq_chip mips_cpu_irq_controller = {
57 .irq_mask_ack = mask_mips_irq, 57 .irq_mask_ack = mask_mips_irq,
58 .irq_unmask = unmask_mips_irq, 58 .irq_unmask = unmask_mips_irq,
59 .irq_eoi = unmask_mips_irq, 59 .irq_eoi = unmask_mips_irq,
60 .irq_disable = mask_mips_irq,
61 .irq_enable = unmask_mips_irq,
60}; 62};
61 63
62/* 64/*
@@ -93,6 +95,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
93 .irq_mask_ack = mips_mt_cpu_irq_ack, 95 .irq_mask_ack = mips_mt_cpu_irq_ack,
94 .irq_unmask = unmask_mips_irq, 96 .irq_unmask = unmask_mips_irq,
95 .irq_eoi = unmask_mips_irq, 97 .irq_eoi = unmask_mips_irq,
98 .irq_disable = mask_mips_irq,
99 .irq_enable = unmask_mips_irq,
96}; 100};
97 101
98asmlinkage void __weak plat_irq_dispatch(void) 102asmlinkage void __weak plat_irq_dispatch(void)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eb76434828e8..85bff5d513e5 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -82,6 +82,30 @@ void flush_thread(void)
82{ 82{
83} 83}
84 84
85int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
86{
87 /*
88 * Save any process state which is live in hardware registers to the
89 * parent context prior to duplication. This prevents the new child
90 * state becoming stale if the parent is preempted before copy_thread()
91 * gets a chance to save the parent's live hardware registers to the
92 * child context.
93 */
94 preempt_disable();
95
96 if (is_msa_enabled())
97 save_msa(current);
98 else if (is_fpu_owner())
99 _save_fp(current);
100
101 save_dsp(current);
102
103 preempt_enable();
104
105 *dst = *src;
106 return 0;
107}
108
85int copy_thread(unsigned long clone_flags, unsigned long usp, 109int copy_thread(unsigned long clone_flags, unsigned long usp,
86 unsigned long arg, struct task_struct *p) 110 unsigned long arg, struct task_struct *p)
87{ 111{
@@ -92,18 +116,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
92 116
93 childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; 117 childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
94 118
95 preempt_disable();
96
97 if (is_msa_enabled())
98 save_msa(p);
99 else if (is_fpu_owner())
100 save_fp(p);
101
102 if (cpu_has_dsp)
103 save_dsp(p);
104
105 preempt_enable();
106
107 /* set up new TSS. */ 119 /* set up new TSS. */
108 childregs = (struct pt_regs *) childksp - 1; 120 childregs = (struct pt_regs *) childksp - 1;
109 /* Put the stack after the struct pt_regs. */ 121 /* Put the stack after the struct pt_regs. */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9d1487d83293..510452812594 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -770,6 +770,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
770 long ret = 0; 770 long ret = 0;
771 user_exit(); 771 user_exit();
772 772
773 current_thread_info()->syscall = syscall;
774
773 if (secure_computing() == -1) 775 if (secure_computing() == -1)
774 return -1; 776 return -1;
775 777
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 00cad1005a16..6e8de80bb446 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -181,6 +181,7 @@ illegal_syscall:
181 sll t1, t0, 2 181 sll t1, t0, 2
182 beqz v0, einval 182 beqz v0, einval
183 lw t2, sys_call_table(t1) # syscall routine 183 lw t2, sys_call_table(t1) # syscall routine
184 sw a0, PT_R2(sp) # call routine directly on restart
184 185
185 /* Some syscalls like execve get their arguments from struct pt_regs 186 /* Some syscalls like execve get their arguments from struct pt_regs
186 and claim zero arguments in the syscall table. Thus we have to 187 and claim zero arguments in the syscall table. Thus we have to
@@ -580,3 +581,4 @@ EXPORT(sys_call_table)
580 PTR sys_getrandom 581 PTR sys_getrandom
581 PTR sys_memfd_create 582 PTR sys_memfd_create
582 PTR sys_bpf /* 4355 */ 583 PTR sys_bpf /* 4355 */
584 PTR sys_execveat
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 5251565e344b..ad4d44635c76 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -435,4 +435,5 @@ EXPORT(sys_call_table)
435 PTR sys_getrandom 435 PTR sys_getrandom
436 PTR sys_memfd_create 436 PTR sys_memfd_create
437 PTR sys_bpf /* 5315 */ 437 PTR sys_bpf /* 5315 */
438 PTR sys_execveat
438 .size sys_call_table,.-sys_call_table 439 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 77e74398b828..446cc654da56 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -428,4 +428,5 @@ EXPORT(sysn32_call_table)
428 PTR sys_getrandom 428 PTR sys_getrandom
429 PTR sys_memfd_create 429 PTR sys_memfd_create
430 PTR sys_bpf 430 PTR sys_bpf
431 PTR compat_sys_execveat /* 6320 */
431 .size sysn32_call_table,.-sysn32_call_table 432 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6f8db9f728e8..d07b210fbeff 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -186,6 +186,7 @@ LEAF(sys32_syscall)
186 dsll t1, t0, 3 186 dsll t1, t0, 3
187 beqz v0, einval 187 beqz v0, einval
188 ld t2, sys32_call_table(t1) # syscall routine 188 ld t2, sys32_call_table(t1) # syscall routine
189 sd a0, PT_R2(sp) # call routine directly on restart
189 190
190 move a0, a1 # shift argument registers 191 move a0, a1 # shift argument registers
191 move a1, a2 192 move a1, a2
@@ -565,4 +566,5 @@ EXPORT(sys32_call_table)
565 PTR sys_getrandom 566 PTR sys_getrandom
566 PTR sys_memfd_create 567 PTR sys_memfd_create
567 PTR sys_bpf /* 4355 */ 568 PTR sys_bpf /* 4355 */
569 PTR compat_sys_execveat
568 .size sys32_call_table,.-sys32_call_table 570 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 1e0a93c5a3e7..e36a859af666 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -44,8 +44,8 @@ static void cmp_init_secondary(void)
44 struct cpuinfo_mips *c __maybe_unused = &current_cpu_data; 44 struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
45 45
46 /* Assume GIC is present */ 46 /* Assume GIC is present */
47 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | 47 change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
48 STATUSF_IP7); 48 STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
49 49
50 /* Enable per-cpu interrupts: platform specific */ 50 /* Enable per-cpu interrupts: platform specific */
51 51
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index ad86951b73bd..17ea705f6c40 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -161,7 +161,8 @@ static void vsmp_init_secondary(void)
161#ifdef CONFIG_MIPS_GIC 161#ifdef CONFIG_MIPS_GIC
162 /* This is Malta specific: IPI,performance and timer interrupts */ 162 /* This is Malta specific: IPI,performance and timer interrupts */
163 if (gic_present) 163 if (gic_present)
164 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 164 change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
165 STATUSF_IP4 | STATUSF_IP5 |
165 STATUSF_IP6 | STATUSF_IP7); 166 STATUSF_IP6 | STATUSF_IP7);
166 else 167 else
167#endif 168#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c94c4e92e17d..1c0d8c50b7e1 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -123,10 +123,10 @@ asmlinkage void start_secondary(void)
123 unsigned int cpu; 123 unsigned int cpu;
124 124
125 cpu_probe(); 125 cpu_probe();
126 cpu_report();
127 per_cpu_trap_init(false); 126 per_cpu_trap_init(false);
128 mips_clockevent_init(); 127 mips_clockevent_init();
129 mp_ops->init_secondary(); 128 mp_ops->init_secondary();
129 cpu_report();
130 130
131 /* 131 /*
132 * XXX parity protection should be folded in here when it's converted 132 * XXX parity protection should be folded in here when it's converted
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ad3d2031c327..c3b41e24c05a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1231,7 +1231,8 @@ static int enable_restore_fp_context(int msa)
1231 1231
1232 /* Restore the scalar FP control & status register */ 1232 /* Restore the scalar FP control & status register */
1233 if (!was_fpu_owner) 1233 if (!was_fpu_owner)
1234 asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31)); 1234 write_32bit_cp1_register(CP1_STATUS,
1235 current->thread.fpu.fcr31);
1235 } 1236 }
1236 1237
1237out: 1238out:
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 30e334e823bd..2ae12825529f 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
20 select PREEMPT_NOTIFIERS 20 select PREEMPT_NOTIFIERS
21 select ANON_INODES 21 select ANON_INODES
22 select KVM_MMIO 22 select KVM_MMIO
23 select SRCU
23 ---help--- 24 ---help---
24 Support for hosting Guest kernels. 25 Support for hosting Guest kernels.
25 Currently supported on MIPS32 processors. 26 Currently supported on MIPS32 processors.
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index e90b2e899291..30639a6e9b8c 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -489,6 +489,8 @@ static void r4k_tlb_configure(void)
489#ifdef CONFIG_64BIT 489#ifdef CONFIG_64BIT
490 pg |= PG_ELPA; 490 pg |= PG_ELPA;
491#endif 491#endif
492 if (cpu_has_rixiex)
493 pg |= PG_IEC;
492 write_c0_pagegrain(pg); 494 write_c0_pagegrain(pg);
493 } 495 }
494 496
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index faed90240ded..6d6df839948f 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -159,13 +159,6 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
159#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 159#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
160 memcpy(dst, src, len) 160 memcpy(dst, src, len)
161 161
162/*
163 * Internal debugging function
164 */
165#ifdef CONFIG_DEBUG_PAGEALLOC
166extern void kernel_map_pages(struct page *page, int numpages, int enable);
167#endif
168
169#endif /* __ASSEMBLY__ */ 162#endif /* __ASSEMBLY__ */
170 163
171#endif /* _ASM_CACHEFLUSH_H */ 164#endif /* _ASM_CACHEFLUSH_H */
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 34429d5a0ccd..d194c0427b26 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -159,9 +159,11 @@ bad_area:
159bad_area_nosemaphore: 159bad_area_nosemaphore:
160 /* User mode accesses just cause a SIGSEGV */ 160 /* User mode accesses just cause a SIGSEGV */
161 if (user_mode(regs)) { 161 if (user_mode(regs)) {
162 pr_alert("%s: unhandled page fault (%d) at 0x%08lx, " 162 if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
163 "cause %ld\n", current->comm, SIGSEGV, address, cause); 163 pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
164 show_regs(regs); 164 "cause %ld\n", current->comm, SIGSEGV, address, cause);
165 show_regs(regs);
166 }
165 _exception(SIGSEGV, regs, code, address); 167 _exception(SIGSEGV, regs, code, address);
166 return; 168 return;
167 } 169 }
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 5b9312220e84..30b35fff2dea 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -60,13 +60,6 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
60#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 60#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
61 memcpy(dst, src, len) 61 memcpy(dst, src, len)
62 62
63
64
65#ifdef CONFIG_DEBUG_PAGEALLOC
66/* internal debugging function */
67void kernel_map_pages(struct page *page, int numpages, int enable);
68#endif
69
70#endif /* __KERNEL__ */ 63#endif /* __KERNEL__ */
71 64
72#endif /* _ASM_POWERPC_CACHEFLUSH_H */ 65#endif /* _ASM_POWERPC_CACHEFLUSH_H */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f5769f19ae25..11850f310fb4 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
21 select PREEMPT_NOTIFIERS 21 select PREEMPT_NOTIFIERS
22 select ANON_INODES 22 select ANON_INODES
23 select HAVE_KVM_EVENTFD 23 select HAVE_KVM_EVENTFD
24 select SRCU
24 25
25config KVM_BOOK3S_HANDLER 26config KVM_BOOK3S_HANDLER
26 bool 27 bool
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 3e20383d0921..58fae7d098cf 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -4,10 +4,6 @@
4/* Caches aren't brain-dead on the s390. */ 4/* Caches aren't brain-dead on the s390. */
5#include <asm-generic/cacheflush.h> 5#include <asm-generic/cacheflush.h>
6 6
7#ifdef CONFIG_DEBUG_PAGEALLOC
8void kernel_map_pages(struct page *page, int numpages, int enable);
9#endif
10
11int set_memory_ro(unsigned long addr, int numpages); 7int set_memory_ro(unsigned long addr, int numpages);
12int set_memory_rw(unsigned long addr, int numpages); 8int set_memory_rw(unsigned long addr, int numpages);
13int set_memory_nx(unsigned long addr, int numpages); 9int set_memory_nx(unsigned long addr, int numpages);
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 646db9c467d1..5fce52cf0e57 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
28 select HAVE_KVM_IRQCHIP 28 select HAVE_KVM_IRQCHIP
29 select HAVE_KVM_IRQFD 29 select HAVE_KVM_IRQFD
30 select HAVE_KVM_IRQ_ROUTING 30 select HAVE_KVM_IRQ_ROUTING
31 select SRCU
31 ---help--- 32 ---help---
32 Support hosting paravirtualized guest machines using the SIE 33 Support hosting paravirtualized guest machines using the SIE
33 virtualization capability on the mainframe. This should work 34 virtualization capability on the mainframe. This should work
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index 38965379e350..68513c41e10d 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -74,11 +74,6 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
74#define flush_cache_vmap(start, end) do { } while (0) 74#define flush_cache_vmap(start, end) do { } while (0)
75#define flush_cache_vunmap(start, end) do { } while (0) 75#define flush_cache_vunmap(start, end) do { } while (0)
76 76
77#ifdef CONFIG_DEBUG_PAGEALLOC
78/* internal debugging function */
79void kernel_map_pages(struct page *page, int numpages, int enable);
80#endif
81
82#endif /* !__ASSEMBLY__ */ 77#endif /* !__ASSEMBLY__ */
83 78
84#endif /* _SPARC64_CACHEFLUSH_H */ 79#endif /* _SPARC64_CACHEFLUSH_H */
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index 2298cb1daff7..1e968f7550dc 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
21 depends on HAVE_KVM && MODULES 21 depends on HAVE_KVM && MODULES
22 select PREEMPT_NOTIFIERS 22 select PREEMPT_NOTIFIERS
23 select ANON_INODES 23 select ANON_INODES
24 select SRCU
24 ---help--- 25 ---help---
25 Support hosting paravirtualized guest machines. 26 Support hosting paravirtualized guest machines.
26 27
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0dc9d0144a27..85588a891c06 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -138,6 +138,7 @@ config X86
138 select HAVE_ACPI_APEI_NMI if ACPI 138 select HAVE_ACPI_APEI_NMI if ACPI
139 select ACPI_LEGACY_TABLES_LOOKUP if ACPI 139 select ACPI_LEGACY_TABLES_LOOKUP if ACPI
140 select X86_FEATURE_NAMES if PROC_FS 140 select X86_FEATURE_NAMES if PROC_FS
141 select SRCU
141 142
142config INSTRUCTION_DECODER 143config INSTRUCTION_DECODER
143 def_bool y 144 def_bool y
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 15c29096136b..36a83617eb21 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -552,7 +552,7 @@ static int __init microcode_init(void)
552 int error; 552 int error;
553 553
554 if (paravirt_enabled() || dis_ucode_ldr) 554 if (paravirt_enabled() || dis_ucode_ldr)
555 return 0; 555 return -EINVAL;
556 556
557 if (c->x86_vendor == X86_VENDOR_INTEL) 557 if (c->x86_vendor == X86_VENDOR_INTEL)
558 microcode_ops = init_intel_microcode(); 558 microcode_ops = init_intel_microcode();
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f9d16ff56c6b..7dc7ba577ecd 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -40,6 +40,7 @@ config KVM
40 select HAVE_KVM_MSI 40 select HAVE_KVM_MSI
41 select HAVE_KVM_CPU_RELAX_INTERCEPT 41 select HAVE_KVM_CPU_RELAX_INTERCEPT
42 select KVM_VFIO 42 select KVM_VFIO
43 select SRCU
43 ---help--- 44 ---help---
44 Support hosting fully virtualized guest machines using hardware 45 Support hosting fully virtualized guest machines using hardware
45 virtualization extensions. You will need a fairly recent 46 virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7b20bccf3648..2fb384724ebb 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
448 DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"), 448 DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
449 }, 449 },
450 }, 450 },
451 {
452 .callback = set_scan_all,
453 .ident = "Stratus/NEC ftServer",
454 .matches = {
455 DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
456 DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
457 },
458 },
459 {
460 .callback = set_scan_all,
461 .ident = "Stratus/NEC ftServer",
462 .matches = {
463 DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
464 DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
465 },
466 },
451 {} 467 {}
452}; 468};
453 469
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 44b9271580b5..852aa4c92da0 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -293,7 +293,6 @@ static void mrst_power_off_unused_dev(struct pci_dev *dev)
293DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev); 293DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
294DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev); 294DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
295DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev); 295DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
296DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev);
297DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev); 296DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
298 297
299/* 298/*
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 6774a0e69867..1630a20d5dcf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -15,26 +15,6 @@
15 15
16static void blk_mq_sysfs_release(struct kobject *kobj) 16static void blk_mq_sysfs_release(struct kobject *kobj)
17{ 17{
18 struct request_queue *q;
19
20 q = container_of(kobj, struct request_queue, mq_kobj);
21 free_percpu(q->queue_ctx);
22}
23
24static void blk_mq_ctx_release(struct kobject *kobj)
25{
26 struct blk_mq_ctx *ctx;
27
28 ctx = container_of(kobj, struct blk_mq_ctx, kobj);
29 kobject_put(&ctx->queue->mq_kobj);
30}
31
32static void blk_mq_hctx_release(struct kobject *kobj)
33{
34 struct blk_mq_hw_ctx *hctx;
35
36 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
37 kfree(hctx);
38} 18}
39 19
40struct blk_mq_ctx_sysfs_entry { 20struct blk_mq_ctx_sysfs_entry {
@@ -338,13 +318,13 @@ static struct kobj_type blk_mq_ktype = {
338static struct kobj_type blk_mq_ctx_ktype = { 318static struct kobj_type blk_mq_ctx_ktype = {
339 .sysfs_ops = &blk_mq_sysfs_ops, 319 .sysfs_ops = &blk_mq_sysfs_ops,
340 .default_attrs = default_ctx_attrs, 320 .default_attrs = default_ctx_attrs,
341 .release = blk_mq_ctx_release, 321 .release = blk_mq_sysfs_release,
342}; 322};
343 323
344static struct kobj_type blk_mq_hw_ktype = { 324static struct kobj_type blk_mq_hw_ktype = {
345 .sysfs_ops = &blk_mq_hw_sysfs_ops, 325 .sysfs_ops = &blk_mq_hw_sysfs_ops,
346 .default_attrs = default_hw_ctx_attrs, 326 .default_attrs = default_hw_ctx_attrs,
347 .release = blk_mq_hctx_release, 327 .release = blk_mq_sysfs_release,
348}; 328};
349 329
350static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) 330static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -375,7 +355,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
375 return ret; 355 return ret;
376 356
377 hctx_for_each_ctx(hctx, ctx, i) { 357 hctx_for_each_ctx(hctx, ctx, i) {
378 kobject_get(&q->mq_kobj);
379 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); 358 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
380 if (ret) 359 if (ret)
381 break; 360 break;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9ee3b87c4498..2390c5541e71 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1867,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1867 mutex_unlock(&set->tag_list_lock); 1867 mutex_unlock(&set->tag_list_lock);
1868} 1868}
1869 1869
1870/*
1871 * It is the actual release handler for mq, but we do it from
1872 * request queue's release handler for avoiding use-after-free
1873 * and headache because q->mq_kobj shouldn't have been introduced,
1874 * but we can't group ctx/kctx kobj without it.
1875 */
1876void blk_mq_release(struct request_queue *q)
1877{
1878 struct blk_mq_hw_ctx *hctx;
1879 unsigned int i;
1880
1881 /* hctx kobj stays in hctx */
1882 queue_for_each_hw_ctx(q, hctx, i)
1883 kfree(hctx);
1884
1885 kfree(q->queue_hw_ctx);
1886
1887 /* ctx kobj stays in queue_ctx */
1888 free_percpu(q->queue_ctx);
1889}
1890
1870struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 1891struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1871{ 1892{
1872 struct blk_mq_hw_ctx **hctxs; 1893 struct blk_mq_hw_ctx **hctxs;
@@ -2000,10 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
2000 2021
2001 percpu_ref_exit(&q->mq_usage_counter); 2022 percpu_ref_exit(&q->mq_usage_counter);
2002 2023
2003 kfree(q->queue_hw_ctx);
2004 kfree(q->mq_map); 2024 kfree(q->mq_map);
2005 2025
2006 q->queue_hw_ctx = NULL;
2007 q->mq_map = NULL; 2026 q->mq_map = NULL;
2008 2027
2009 mutex_lock(&all_q_mutex); 2028 mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 4f4f943c22c3..6a48c4c0d8a2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
62 62
63extern void blk_mq_rq_timed_out(struct request *req, bool reserved); 63extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
64 64
65void blk_mq_release(struct request_queue *q);
66
65/* 67/*
66 * Basic implementation of sparser bitmap, allowing the user to spread 68 * Basic implementation of sparser bitmap, allowing the user to spread
67 * the bits over more cachelines. 69 * the bits over more cachelines.
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 935ea2aa0730..faaf36ade7eb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
517 517
518 if (!q->mq_ops) 518 if (!q->mq_ops)
519 blk_free_flush_queue(q->fq); 519 blk_free_flush_queue(q->fq);
520 else
521 blk_mq_release(q);
520 522
521 blk_trace_shutdown(q); 523 blk_trace_shutdown(q);
522 524
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 4f3febf8a589..e75737fd7eef 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * ACPI support for Intel Lynxpoint LPSS. 2 * ACPI support for Intel Lynxpoint LPSS.
3 * 3 *
4 * Copyright (C) 2013, 2014, Intel Corporation 4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> 5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Rafael J. Wysocki <rafael.j.wysocki@intel.com> 6 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
7 * 7 *
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
60#define LPSS_CLK_DIVIDER BIT(2) 60#define LPSS_CLK_DIVIDER BIT(2)
61#define LPSS_LTR BIT(3) 61#define LPSS_LTR BIT(3)
62#define LPSS_SAVE_CTX BIT(4) 62#define LPSS_SAVE_CTX BIT(4)
63#define LPSS_DEV_PROXY BIT(5)
64#define LPSS_PROXY_REQ BIT(6)
65 63
66struct lpss_private_data; 64struct lpss_private_data;
67 65
@@ -72,10 +70,8 @@ struct lpss_device_desc {
72 void (*setup)(struct lpss_private_data *pdata); 70 void (*setup)(struct lpss_private_data *pdata);
73}; 71};
74 72
75static struct device *proxy_device;
76
77static struct lpss_device_desc lpss_dma_desc = { 73static struct lpss_device_desc lpss_dma_desc = {
78 .flags = LPSS_CLK | LPSS_PROXY_REQ, 74 .flags = LPSS_CLK,
79}; 75};
80 76
81struct lpss_private_data { 77struct lpss_private_data {
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
150}; 146};
151 147
152static struct lpss_device_desc byt_uart_dev_desc = { 148static struct lpss_device_desc byt_uart_dev_desc = {
153 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | 149 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
154 LPSS_DEV_PROXY,
155 .prv_offset = 0x800, 150 .prv_offset = 0x800,
156 .setup = lpss_uart_setup, 151 .setup = lpss_uart_setup,
157}; 152};
158 153
159static struct lpss_device_desc byt_spi_dev_desc = { 154static struct lpss_device_desc byt_spi_dev_desc = {
160 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | 155 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
161 LPSS_DEV_PROXY,
162 .prv_offset = 0x400, 156 .prv_offset = 0x400,
163}; 157};
164 158
165static struct lpss_device_desc byt_sdio_dev_desc = { 159static struct lpss_device_desc byt_sdio_dev_desc = {
166 .flags = LPSS_CLK | LPSS_DEV_PROXY, 160 .flags = LPSS_CLK,
167}; 161};
168 162
169static struct lpss_device_desc byt_i2c_dev_desc = { 163static struct lpss_device_desc byt_i2c_dev_desc = {
170 .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY, 164 .flags = LPSS_CLK | LPSS_SAVE_CTX,
171 .prv_offset = 0x800, 165 .prv_offset = 0x800,
172 .setup = byt_i2c_setup, 166 .setup = byt_i2c_setup,
173}; 167};
@@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
374 adev->driver_data = pdata; 368 adev->driver_data = pdata;
375 pdev = acpi_create_platform_device(adev); 369 pdev = acpi_create_platform_device(adev);
376 if (!IS_ERR_OR_NULL(pdev)) { 370 if (!IS_ERR_OR_NULL(pdev)) {
377 if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
378 proxy_device = &pdev->dev;
379 return 1; 371 return 1;
380 } 372 }
381 373
@@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
600 if (pdata->dev_desc->flags & LPSS_SAVE_CTX) 592 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
601 acpi_lpss_save_ctx(dev, pdata); 593 acpi_lpss_save_ctx(dev, pdata);
602 594
603 ret = acpi_dev_runtime_suspend(dev); 595 return acpi_dev_runtime_suspend(dev);
604 if (ret)
605 return ret;
606
607 if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
608 return pm_runtime_put_sync_suspend(proxy_device);
609
610 return 0;
611} 596}
612 597
613static int acpi_lpss_runtime_resume(struct device *dev) 598static int acpi_lpss_runtime_resume(struct device *dev)
@@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
615 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 600 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
616 int ret; 601 int ret;
617 602
618 if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
619 ret = pm_runtime_get_sync(proxy_device);
620 if (ret)
621 return ret;
622 }
623
624 ret = acpi_dev_runtime_resume(dev); 603 ret = acpi_dev_runtime_resume(dev);
625 if (ret) 604 if (ret)
626 return ret; 605 return ret;
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 0da5865df5b1..beb8b27d4621 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -51,9 +51,11 @@ struct regmap_async {
51struct regmap { 51struct regmap {
52 union { 52 union {
53 struct mutex mutex; 53 struct mutex mutex;
54 spinlock_t spinlock; 54 struct {
55 spinlock_t spinlock;
56 unsigned long spinlock_flags;
57 };
55 }; 58 };
56 unsigned long spinlock_flags;
57 regmap_lock lock; 59 regmap_lock lock;
58 regmap_unlock unlock; 60 regmap_unlock unlock;
59 void *lock_arg; /* This is passed to lock/unlock functions */ 61 void *lock_arg; /* This is passed to lock/unlock functions */
@@ -233,6 +235,10 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
233 235
234void regmap_async_complete_cb(struct regmap_async *async, int ret); 236void regmap_async_complete_cb(struct regmap_async *async, int ret);
235 237
238enum regmap_endian regmap_get_val_endian(struct device *dev,
239 const struct regmap_bus *bus,
240 const struct regmap_config *config);
241
236extern struct regcache_ops regcache_rbtree_ops; 242extern struct regcache_ops regcache_rbtree_ops;
237extern struct regcache_ops regcache_lzo_ops; 243extern struct regcache_ops regcache_lzo_ops;
238extern struct regcache_ops regcache_flat_ops; 244extern struct regcache_ops regcache_flat_ops;
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index e4c45d2299c1..8d304e2a943d 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -74,8 +74,8 @@ static int regmap_ac97_reg_write(void *context, unsigned int reg,
74} 74}
75 75
76static const struct regmap_bus ac97_regmap_bus = { 76static const struct regmap_bus ac97_regmap_bus = {
77 .reg_write = regmap_ac97_reg_write, 77 .reg_write = regmap_ac97_reg_write,
78 .reg_read = regmap_ac97_reg_read, 78 .reg_read = regmap_ac97_reg_read,
79}; 79};
80 80
81/** 81/**
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 053150a7f9f2..4b76e33110a2 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -14,6 +14,7 @@
14#include <linux/i2c.h> 14#include <linux/i2c.h>
15#include <linux/module.h> 15#include <linux/module.h>
16 16
17#include "internal.h"
17 18
18static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, 19static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
19 unsigned int *val) 20 unsigned int *val)
@@ -87,6 +88,42 @@ static struct regmap_bus regmap_smbus_word = {
87 .reg_read = regmap_smbus_word_reg_read, 88 .reg_read = regmap_smbus_word_reg_read,
88}; 89};
89 90
91static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
92 unsigned int *val)
93{
94 struct device *dev = context;
95 struct i2c_client *i2c = to_i2c_client(dev);
96 int ret;
97
98 if (reg > 0xff)
99 return -EINVAL;
100
101 ret = i2c_smbus_read_word_swapped(i2c, reg);
102 if (ret < 0)
103 return ret;
104
105 *val = ret;
106
107 return 0;
108}
109
110static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
111 unsigned int val)
112{
113 struct device *dev = context;
114 struct i2c_client *i2c = to_i2c_client(dev);
115
116 if (val > 0xffff || reg > 0xff)
117 return -EINVAL;
118
119 return i2c_smbus_write_word_swapped(i2c, reg, val);
120}
121
122static struct regmap_bus regmap_smbus_word_swapped = {
123 .reg_write = regmap_smbus_word_write_swapped,
124 .reg_read = regmap_smbus_word_read_swapped,
125};
126
90static int regmap_i2c_write(void *context, const void *data, size_t count) 127static int regmap_i2c_write(void *context, const void *data, size_t count)
91{ 128{
92 struct device *dev = context; 129 struct device *dev = context;
@@ -180,7 +217,14 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
180 else if (config->val_bits == 16 && config->reg_bits == 8 && 217 else if (config->val_bits == 16 && config->reg_bits == 8 &&
181 i2c_check_functionality(i2c->adapter, 218 i2c_check_functionality(i2c->adapter,
182 I2C_FUNC_SMBUS_WORD_DATA)) 219 I2C_FUNC_SMBUS_WORD_DATA))
183 return &regmap_smbus_word; 220 switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
221 case REGMAP_ENDIAN_LITTLE:
222 return &regmap_smbus_word;
223 case REGMAP_ENDIAN_BIG:
224 return &regmap_smbus_word_swapped;
225 default: /* everything else is not supported */
226 break;
227 }
184 else if (config->val_bits == 8 && config->reg_bits == 8 && 228 else if (config->val_bits == 8 && config->reg_bits == 8 &&
185 i2c_check_functionality(i2c->adapter, 229 i2c_check_functionality(i2c->adapter,
186 I2C_FUNC_SMBUS_BYTE_DATA)) 230 I2C_FUNC_SMBUS_BYTE_DATA))
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d2f8a818d200..f99b098ddabf 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -473,9 +473,9 @@ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
473 return REGMAP_ENDIAN_BIG; 473 return REGMAP_ENDIAN_BIG;
474} 474}
475 475
476static enum regmap_endian regmap_get_val_endian(struct device *dev, 476enum regmap_endian regmap_get_val_endian(struct device *dev,
477 const struct regmap_bus *bus, 477 const struct regmap_bus *bus,
478 const struct regmap_config *config) 478 const struct regmap_config *config)
479{ 479{
480 struct device_node *np; 480 struct device_node *np;
481 enum regmap_endian endian; 481 enum regmap_endian endian;
@@ -513,6 +513,7 @@ static enum regmap_endian regmap_get_val_endian(struct device *dev,
513 /* Use this if no other value was found */ 513 /* Use this if no other value was found */
514 return REGMAP_ENDIAN_BIG; 514 return REGMAP_ENDIAN_BIG;
515} 515}
516EXPORT_SYMBOL_GPL(regmap_get_val_endian);
516 517
517/** 518/**
518 * regmap_init(): Initialise register map 519 * regmap_init(): Initialise register map
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 04645c09fe5e..9cd6968e2f92 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f)
569 __u32 c = f->pool[2], d = f->pool[3]; 569 __u32 c = f->pool[2], d = f->pool[3];
570 570
571 a += b; c += d; 571 a += b; c += d;
572 b = rol32(a, 6); d = rol32(c, 27); 572 b = rol32(b, 6); d = rol32(d, 27);
573 d ^= a; b ^= c; 573 d ^= a; b ^= c;
574 574
575 a += b; c += d; 575 a += b; c += d;
576 b = rol32(a, 16); d = rol32(c, 14); 576 b = rol32(b, 16); d = rol32(d, 14);
577 d ^= a; b ^= c; 577 d ^= a; b ^= c;
578 578
579 a += b; c += d; 579 a += b; c += d;
580 b = rol32(a, 6); d = rol32(c, 27); 580 b = rol32(b, 6); d = rol32(d, 27);
581 d ^= a; b ^= c; 581 d ^= a; b ^= c;
582 582
583 a += b; c += d; 583 a += b; c += d;
584 b = rol32(a, 16); d = rol32(c, 14); 584 b = rol32(b, 16); d = rol32(d, 14);
585 d ^= a; b ^= c; 585 d ^= a; b ^= c;
586 586
587 f->pool[0] = a; f->pool[1] = b; 587 f->pool[0] = a; f->pool[1] = b;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 3f44f292d066..91f86131bb7a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -13,6 +13,7 @@ config COMMON_CLK
13 bool 13 bool
14 select HAVE_CLK_PREPARE 14 select HAVE_CLK_PREPARE
15 select CLKDEV_LOOKUP 15 select CLKDEV_LOOKUP
16 select SRCU
16 ---help--- 17 ---help---
17 The common clock framework is a single definition of struct 18 The common clock framework is a single definition of struct
18 clk, useful across many platforms, as well as an 19 clk, useful across many platforms, as well as an
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 29b2ef5a68b9..a171fef2c2b6 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling"
2 2
3config CPU_FREQ 3config CPU_FREQ
4 bool "CPU Frequency scaling" 4 bool "CPU Frequency scaling"
5 select SRCU
5 help 6 help
6 CPU Frequency scaling allows you to change the clock speed of 7 CPU Frequency scaling allows you to change the clock speed of
7 CPUs on the fly. This is a nice method to save power, because 8 CPUs on the fly. This is a nice method to save power, because
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index faf4e70c42e0..3891f6781298 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,5 +1,6 @@
1menuconfig PM_DEVFREQ 1menuconfig PM_DEVFREQ
2 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" 2 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
3 select SRCU
3 help 4 help
4 A device may have a list of frequencies and voltages available. 5 A device may have a list of frequencies and voltages available.
5 devfreq, a generic DVFS framework can be registered for a device 6 devfreq, a generic DVFS framework can be registered for a device
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index da9c316059bc..eea5d7e578c9 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
801 client->irq = irq_of_parse_and_map(client->dev.of_node, 0); 801 client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
802 } else { 802 } else {
803 pdata = dev_get_platdata(&client->dev); 803 pdata = dev_get_platdata(&client->dev);
804 if (!pdata || !gpio_is_valid(pdata->base)) { 804 if (!pdata) {
805 dev_dbg(&client->dev, "invalid platform data\n"); 805 pdata = devm_kzalloc(&client->dev,
806 return -EINVAL; 806 sizeof(struct mcp23s08_platform_data),
807 GFP_KERNEL);
808 pdata->base = -1;
807 } 809 }
808 } 810 }
809 811
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
924 } else { 926 } else {
925 type = spi_get_device_id(spi)->driver_data; 927 type = spi_get_device_id(spi)->driver_data;
926 pdata = dev_get_platdata(&spi->dev); 928 pdata = dev_get_platdata(&spi->dev);
927 if (!pdata || !gpio_is_valid(pdata->base)) { 929 if (!pdata) {
928 dev_dbg(&spi->dev, 930 pdata = devm_kzalloc(&spi->dev,
929 "invalid or missing platform data\n"); 931 sizeof(struct mcp23s08_platform_data),
930 return -EINVAL; 932 GFP_KERNEL);
933 pdata->base = -1;
931 } 934 }
932 935
933 for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { 936 for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 30646cfe0efa..f476ae2eb0b3 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -88,6 +88,8 @@ struct gpio_bank {
88#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) 88#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
89#define LINE_USED(line, offset) (line & (BIT(offset))) 89#define LINE_USED(line, offset) (line & (BIT(offset)))
90 90
91static void omap_gpio_unmask_irq(struct irq_data *d);
92
91static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 93static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
92{ 94{
93 return bank->chip.base + gpio_irq; 95 return bank->chip.base + gpio_irq;
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
477 return readl_relaxed(reg) & mask; 479 return readl_relaxed(reg) & mask;
478} 480}
479 481
482static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
483 unsigned offset)
484{
485 if (!LINE_USED(bank->mod_usage, offset)) {
486 omap_enable_gpio_module(bank, offset);
487 omap_set_gpio_direction(bank, offset, 1);
488 }
489 bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
490}
491
480static int omap_gpio_irq_type(struct irq_data *d, unsigned type) 492static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
481{ 493{
482 struct gpio_bank *bank = omap_irq_data_get_bank(d); 494 struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
506 spin_lock_irqsave(&bank->lock, flags); 518 spin_lock_irqsave(&bank->lock, flags);
507 offset = GPIO_INDEX(bank, gpio); 519 offset = GPIO_INDEX(bank, gpio);
508 retval = omap_set_gpio_triggering(bank, offset, type); 520 retval = omap_set_gpio_triggering(bank, offset, type);
509 if (!LINE_USED(bank->mod_usage, offset)) { 521 omap_gpio_init_irq(bank, gpio, offset);
510 omap_enable_gpio_module(bank, offset); 522 if (!omap_gpio_is_input(bank, BIT(offset))) {
511 omap_set_gpio_direction(bank, offset, 1);
512 } else if (!omap_gpio_is_input(bank, BIT(offset))) {
513 spin_unlock_irqrestore(&bank->lock, flags); 523 spin_unlock_irqrestore(&bank->lock, flags);
514 return -EINVAL; 524 return -EINVAL;
515 } 525 }
516
517 bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
518 spin_unlock_irqrestore(&bank->lock, flags); 526 spin_unlock_irqrestore(&bank->lock, flags);
519 527
520 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 528 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -792,6 +800,24 @@ exit:
792 pm_runtime_put(bank->dev); 800 pm_runtime_put(bank->dev);
793} 801}
794 802
803static unsigned int omap_gpio_irq_startup(struct irq_data *d)
804{
805 struct gpio_bank *bank = omap_irq_data_get_bank(d);
806 unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
807 unsigned long flags;
808 unsigned offset = GPIO_INDEX(bank, gpio);
809
810 if (!BANK_USED(bank))
811 pm_runtime_get_sync(bank->dev);
812
813 spin_lock_irqsave(&bank->lock, flags);
814 omap_gpio_init_irq(bank, gpio, offset);
815 spin_unlock_irqrestore(&bank->lock, flags);
816 omap_gpio_unmask_irq(d);
817
818 return 0;
819}
820
795static void omap_gpio_irq_shutdown(struct irq_data *d) 821static void omap_gpio_irq_shutdown(struct irq_data *d)
796{ 822{
797 struct gpio_bank *bank = omap_irq_data_get_bank(d); 823 struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
1181 if (!irqc) 1207 if (!irqc)
1182 return -ENOMEM; 1208 return -ENOMEM;
1183 1209
1210 irqc->irq_startup = omap_gpio_irq_startup,
1184 irqc->irq_shutdown = omap_gpio_irq_shutdown, 1211 irqc->irq_shutdown = omap_gpio_irq_shutdown,
1185 irqc->irq_ack = omap_gpio_ack_irq, 1212 irqc->irq_ack = omap_gpio_ack_irq,
1186 irqc->irq_mask = omap_gpio_mask_irq, 1213 irqc->irq_mask = omap_gpio_mask_irq,
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index f62aa115d79a..7722ed53bd65 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
648 if (tdev != NULL) { 648 if (tdev != NULL) {
649 status = sysfs_create_link(&dev->kobj, &tdev->kobj, 649 status = sysfs_create_link(&dev->kobj, &tdev->kobj,
650 name); 650 name);
651 put_device(tdev);
651 } else { 652 } else {
652 status = -ENODEV; 653 status = -ENODEV;
653 } 654 }
@@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
695 } 696 }
696 697
697 status = sysfs_set_active_low(desc, dev, value); 698 status = sysfs_set_active_low(desc, dev, value);
698 699 put_device(dev);
699unlock: 700unlock:
700 mutex_unlock(&sysfs_lock); 701 mutex_unlock(&sysfs_lock);
701 702
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 0d8694f015c1..0fd592799d58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -822,7 +822,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
822 * Unconditionally decrement this counter, regardless of the queue's 822 * Unconditionally decrement this counter, regardless of the queue's
823 * type. 823 * type.
824 */ 824 */
825 dqm->total_queue_count++; 825 dqm->total_queue_count--;
826 pr_debug("Total of %d queues are accountable so far\n", 826 pr_debug("Total of %d queues are accountable so far\n",
827 dqm->total_queue_count); 827 dqm->total_queue_count);
828 mutex_unlock(&dqm->lock); 828 mutex_unlock(&dqm->lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index a8be6df85347..1c385c23dd0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -95,10 +95,10 @@ static int __init kfd_module_init(void)
95 } 95 }
96 96
97 /* Verify module parameters */ 97 /* Verify module parameters */
98 if ((max_num_of_queues_per_device < 0) || 98 if ((max_num_of_queues_per_device < 1) ||
99 (max_num_of_queues_per_device > 99 (max_num_of_queues_per_device >
100 KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { 100 KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
101 pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); 101 pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
102 return -1; 102 return -1;
103 } 103 }
104 104
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index f37cf5efe642..2fda1927bff7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -315,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
315 BUG_ON(!pqm); 315 BUG_ON(!pqm);
316 316
317 pqn = get_queue_by_qid(pqm, qid); 317 pqn = get_queue_by_qid(pqm, qid);
318 BUG_ON(!pqn); 318 if (!pqn) {
319 pr_debug("amdkfd: No queue %d exists for update operation\n",
320 qid);
321 return -EFAULT;
322 }
319 323
320 pqn->q->properties.queue_address = p->queue_address; 324 pqn->q->properties.queue_address = p->queue_address;
321 pqn->q->properties.queue_size = p->queue_size; 325 pqn->q->properties.queue_size = p->queue_size;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index c2a1cba1e984..b9140032962d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -16,9 +16,12 @@
16#include "cirrus_drv.h" 16#include "cirrus_drv.h"
17 17
18int cirrus_modeset = -1; 18int cirrus_modeset = -1;
19int cirrus_bpp = 24;
19 20
20MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); 21MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
21module_param_named(modeset, cirrus_modeset, int, 0400); 22module_param_named(modeset, cirrus_modeset, int, 0400);
23MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
24module_param_named(bpp, cirrus_bpp, int, 0400);
22 25
23/* 26/*
24 * This is the generic driver code. This binds the driver to the drm core, 27 * This is the generic driver code. This binds the driver to the drm core,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 693a4565c4ff..705061537a27 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
262 262
263int cirrus_bo_push_sysram(struct cirrus_bo *bo); 263int cirrus_bo_push_sysram(struct cirrus_bo *bo);
264int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); 264int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
265
266extern int cirrus_bpp;
267
265#endif /* __CIRRUS_DRV_H__ */ 268#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 4c2d68e9102d..e4b976658087 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
320 const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */ 320 const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
321 const int max_size = cdev->mc.vram_size; 321 const int max_size = cdev->mc.vram_size;
322 322
323 if (bpp > cirrus_bpp)
324 return false;
323 if (bpp > 32) 325 if (bpp > 32)
324 return false; 326 return false;
325 327
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 99d4a74ffeaf..61385f2298bf 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
501 int count; 501 int count;
502 502
503 /* Just add a static list of modes */ 503 /* Just add a static list of modes */
504 count = drm_add_modes_noedid(connector, 1280, 1024); 504 if (cirrus_bpp <= 24) {
505 drm_set_preferred_mode(connector, 1024, 768); 505 count = drm_add_modes_noedid(connector, 1280, 1024);
506 drm_set_preferred_mode(connector, 1024, 768);
507 } else {
508 count = drm_add_modes_noedid(connector, 800, 600);
509 drm_set_preferred_mode(connector, 800, 600);
510 }
506 return count; 511 return count;
507} 512}
508 513
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 9e7f23dd14bd..87d5fb21cb61 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -34,7 +34,8 @@
34 34
35static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, 35static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
36 uint64_t saddr, uint64_t daddr, 36 uint64_t saddr, uint64_t daddr,
37 int flag, int n) 37 int flag, int n,
38 struct reservation_object *resv)
38{ 39{
39 unsigned long start_jiffies; 40 unsigned long start_jiffies;
40 unsigned long end_jiffies; 41 unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
47 case RADEON_BENCHMARK_COPY_DMA: 48 case RADEON_BENCHMARK_COPY_DMA:
48 fence = radeon_copy_dma(rdev, saddr, daddr, 49 fence = radeon_copy_dma(rdev, saddr, daddr,
49 size / RADEON_GPU_PAGE_SIZE, 50 size / RADEON_GPU_PAGE_SIZE,
50 NULL); 51 resv);
51 break; 52 break;
52 case RADEON_BENCHMARK_COPY_BLIT: 53 case RADEON_BENCHMARK_COPY_BLIT:
53 fence = radeon_copy_blit(rdev, saddr, daddr, 54 fence = radeon_copy_blit(rdev, saddr, daddr,
54 size / RADEON_GPU_PAGE_SIZE, 55 size / RADEON_GPU_PAGE_SIZE,
55 NULL); 56 resv);
56 break; 57 break;
57 default: 58 default:
58 DRM_ERROR("Unknown copy method\n"); 59 DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
120 121
121 if (rdev->asic->copy.dma) { 122 if (rdev->asic->copy.dma) {
122 time = radeon_benchmark_do_move(rdev, size, saddr, daddr, 123 time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
123 RADEON_BENCHMARK_COPY_DMA, n); 124 RADEON_BENCHMARK_COPY_DMA, n,
125 dobj->tbo.resv);
124 if (time < 0) 126 if (time < 0)
125 goto out_cleanup; 127 goto out_cleanup;
126 if (time > 0) 128 if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
130 132
131 if (rdev->asic->copy.blit) { 133 if (rdev->asic->copy.blit) {
132 time = radeon_benchmark_do_move(rdev, size, saddr, daddr, 134 time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
133 RADEON_BENCHMARK_COPY_BLIT, n); 135 RADEON_BENCHMARK_COPY_BLIT, n,
136 dobj->tbo.resv);
134 if (time < 0) 137 if (time < 0)
135 goto out_cleanup; 138 goto out_cleanup;
136 if (time > 0) 139 if (time > 0)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 102116902a07..913fafa597ad 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
960 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && 960 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
961 pll->flags & RADEON_PLL_USE_REF_DIV) 961 pll->flags & RADEON_PLL_USE_REF_DIV)
962 ref_div_max = pll->reference_div; 962 ref_div_max = pll->reference_div;
963 else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
964 /* fix for problems on RS880 */
965 ref_div_max = min(pll->max_ref_div, 7u);
963 else 966 else
964 ref_div_max = pll->max_ref_div; 967 ref_div_max = pll->max_ref_div;
965 968
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d0b4f7d1140d..ac3c1310b953 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
146 struct radeon_bo_va *bo_va; 146 struct radeon_bo_va *bo_va;
147 int r; 147 int r;
148 148
149 if (rdev->family < CHIP_CAYMAN) { 149 if ((rdev->family < CHIP_CAYMAN) ||
150 (!rdev->accel_working)) {
150 return 0; 151 return 0;
151 } 152 }
152 153
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
176 struct radeon_bo_va *bo_va; 177 struct radeon_bo_va *bo_va;
177 int r; 178 int r;
178 179
179 if (rdev->family < CHIP_CAYMAN) { 180 if ((rdev->family < CHIP_CAYMAN) ||
181 (!rdev->accel_working)) {
180 return; 182 return;
181 } 183 }
182 184
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3cf9c1fa6475..686411e4e4f6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
605 return -ENOMEM; 605 return -ENOMEM;
606 } 606 }
607 607
608 vm = &fpriv->vm;
609 r = radeon_vm_init(rdev, vm);
610 if (r) {
611 kfree(fpriv);
612 return r;
613 }
614
615 if (rdev->accel_working) { 608 if (rdev->accel_working) {
609 vm = &fpriv->vm;
610 r = radeon_vm_init(rdev, vm);
611 if (r) {
612 kfree(fpriv);
613 return r;
614 }
615
616 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 616 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
617 if (r) { 617 if (r) {
618 radeon_vm_fini(rdev, vm); 618 radeon_vm_fini(rdev, vm);
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
668 radeon_vm_bo_rmv(rdev, vm->ib_bo_va); 668 radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
669 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 669 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
670 } 670 }
671 radeon_vm_fini(rdev, vm);
671 } 672 }
672 673
673 radeon_vm_fini(rdev, vm);
674 kfree(fpriv); 674 kfree(fpriv);
675 file_priv->driver_priv = NULL; 675 file_priv->driver_priv = NULL;
676 } 676 }
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 07b506b41008..791818165c76 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
119 if (ring == R600_RING_TYPE_DMA_INDEX) 119 if (ring == R600_RING_TYPE_DMA_INDEX)
120 fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, 120 fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
121 size / RADEON_GPU_PAGE_SIZE, 121 size / RADEON_GPU_PAGE_SIZE,
122 NULL); 122 vram_obj->tbo.resv);
123 else 123 else
124 fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, 124 fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
125 size / RADEON_GPU_PAGE_SIZE, 125 size / RADEON_GPU_PAGE_SIZE,
126 NULL); 126 vram_obj->tbo.resv);
127 if (IS_ERR(fence)) { 127 if (IS_ERR(fence)) {
128 DRM_ERROR("Failed GTT->VRAM copy %d\n", i); 128 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
129 r = PTR_ERR(fence); 129 r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
170 if (ring == R600_RING_TYPE_DMA_INDEX) 170 if (ring == R600_RING_TYPE_DMA_INDEX)
171 fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, 171 fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
172 size / RADEON_GPU_PAGE_SIZE, 172 size / RADEON_GPU_PAGE_SIZE,
173 NULL); 173 vram_obj->tbo.resv);
174 else 174 else
175 fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, 175 fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
176 size / RADEON_GPU_PAGE_SIZE, 176 size / RADEON_GPU_PAGE_SIZE,
177 NULL); 177 vram_obj->tbo.resv);
178 if (IS_ERR(fence)) { 178 if (IS_ERR(fence)) {
179 DRM_ERROR("Failed VRAM->GTT copy %d\n", i); 179 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
180 r = PTR_ERR(fence); 180 r = PTR_ERR(fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 06d2246d07f1..2a5a4a9e772d 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -743,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
743 */ 743 */
744 744
745 /* NI is optimized for 256KB fragments, SI and newer for 64KB */ 745 /* NI is optimized for 256KB fragments, SI and newer for 64KB */
746 uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? 746 uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
747 (rdev->family == CHIP_ARUBA)) ?
747 R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; 748 R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
748 uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80; 749 uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
750 (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
749 751
750 uint64_t frag_start = ALIGN(pe_start, frag_align); 752 uint64_t frag_start = ALIGN(pe_start, frag_align);
751 uint64_t frag_end = pe_end & ~(frag_align - 1); 753 uint64_t frag_end = pe_end & ~(frag_align - 1);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index a7de26d1ac80..d931cbbed240 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1389,6 +1389,7 @@ config SENSORS_ADS1015
1389config SENSORS_ADS7828 1389config SENSORS_ADS7828
1390 tristate "Texas Instruments ADS7828 and compatibles" 1390 tristate "Texas Instruments ADS7828 and compatibles"
1391 depends on I2C 1391 depends on I2C
1392 select REGMAP_I2C
1392 help 1393 help
1393 If you say yes here you get support for Texas Instruments ADS7828 and 1394 If you say yes here you get support for Texas Instruments ADS7828 and
1394 ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while 1395 ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while
@@ -1430,8 +1431,8 @@ config SENSORS_INA2XX
1430 tristate "Texas Instruments INA219 and compatibles" 1431 tristate "Texas Instruments INA219 and compatibles"
1431 depends on I2C 1432 depends on I2C
1432 help 1433 help
1433 If you say yes here you get support for INA219, INA220, INA226, and 1434 If you say yes here you get support for INA219, INA220, INA226,
1434 INA230 power monitor chips. 1435 INA230, and INA231 power monitor chips.
1435 1436
1436 The INA2xx driver is configured for the default configuration of 1437 The INA2xx driver is configured for the default configuration of
1437 the part as described in the datasheet. 1438 the part as described in the datasheet.
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 13875968c844..6cb89c0ebab6 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev,
221 struct abx500_temp *data = dev_get_drvdata(dev); 221 struct abx500_temp *data = dev_get_drvdata(dev);
222 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 222 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
223 223
224 return sprintf(buf, "%ld\n", data->min[attr->index]); 224 return sprintf(buf, "%lu\n", data->min[attr->index]);
225} 225}
226 226
227static ssize_t show_max(struct device *dev, 227static ssize_t show_max(struct device *dev,
@@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev,
230 struct abx500_temp *data = dev_get_drvdata(dev); 230 struct abx500_temp *data = dev_get_drvdata(dev);
231 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 231 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
232 232
233 return sprintf(buf, "%ld\n", data->max[attr->index]); 233 return sprintf(buf, "%lu\n", data->max[attr->index]);
234} 234}
235 235
236static ssize_t show_max_hyst(struct device *dev, 236static ssize_t show_max_hyst(struct device *dev,
@@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev,
239 struct abx500_temp *data = dev_get_drvdata(dev); 239 struct abx500_temp *data = dev_get_drvdata(dev);
240 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 240 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
241 241
242 return sprintf(buf, "%ld\n", data->max_hyst[attr->index]); 242 return sprintf(buf, "%lu\n", data->max_hyst[attr->index]);
243} 243}
244 244
245static ssize_t show_min_alarm(struct device *dev, 245static ssize_t show_min_alarm(struct device *dev,
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index f4f9b219bf16..11955467fc0f 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/hwmon.h> 17#include <linux/hwmon.h>
18#include <linux/hwmon-sysfs.h> 18#include <linux/hwmon-sysfs.h>
19#include <linux/bitops.h>
19 20
20/* 21/*
21 * AD7314 temperature masks 22 * AD7314 temperature masks
@@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
67 switch (spi_get_device_id(chip->spi_dev)->driver_data) { 68 switch (spi_get_device_id(chip->spi_dev)->driver_data) {
68 case ad7314: 69 case ad7314:
69 data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; 70 data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
70 data = (data << 6) >> 6; 71 data = sign_extend32(data, 9);
71 72
72 return sprintf(buf, "%d\n", 250 * data); 73 return sprintf(buf, "%d\n", 250 * data);
73 case adt7301: 74 case adt7301:
@@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
78 * register. 1lsb - 31.25 milli degrees centigrade 79 * register. 1lsb - 31.25 milli degrees centigrade
79 */ 80 */
80 data = ret & ADT7301_TEMP_MASK; 81 data = ret & ADT7301_TEMP_MASK;
81 data = (data << 2) >> 2; 82 data = sign_extend32(data, 13);
82 83
83 return sprintf(buf, "%d\n", 84 return sprintf(buf, "%d\n",
84 DIV_ROUND_CLOSEST(data * 3125, 100)); 85 DIV_ROUND_CLOSEST(data * 3125, 100));
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 0625e50d7a6e..ad2b47e40345 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -27,6 +27,7 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/regulator/consumer.h> 28#include <linux/regulator/consumer.h>
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/bitops.h>
30 31
31/* Addresses to scan 32/* Addresses to scan
32 * The chip also supports addresses 0x35..0x37. Don't scan those addresses 33 * The chip also supports addresses 0x35..0x37. Don't scan those addresses
@@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev,
189 if (IS_ERR(data)) 190 if (IS_ERR(data))
190 return PTR_ERR(data); 191 return PTR_ERR(data);
191 192
192 temp = (data->temp[index] << 7) >> 7; /* sign extend */ 193 temp = sign_extend32(data->temp[index], 8);
193 return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */ 194 return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */
194} 195}
195 196
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index a622d40eec17..bce4e9ff21bf 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -30,14 +30,12 @@
30#include <linux/hwmon-sysfs.h> 30#include <linux/hwmon-sysfs.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/jiffies.h>
34#include <linux/module.h> 33#include <linux/module.h>
35#include <linux/mutex.h>
36#include <linux/platform_data/ads7828.h> 34#include <linux/platform_data/ads7828.h>
35#include <linux/regmap.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38 37
39/* The ADS7828 registers */ 38/* The ADS7828 registers */
40#define ADS7828_NCH 8 /* 8 channels supported */
41#define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */ 39#define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */
42#define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */ 40#define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */
43#define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */ 41#define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */
@@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 };
50 48
51/* Client specific data */ 49/* Client specific data */
52struct ads7828_data { 50struct ads7828_data {
53 struct i2c_client *client; 51 struct regmap *regmap;
54 struct mutex update_lock; /* Mutex protecting updates */
55 unsigned long last_updated; /* Last updated time (in jiffies) */
56 u16 adc_input[ADS7828_NCH]; /* ADS7828_NCH samples */
57 bool valid; /* Validity flag */
58 bool diff_input; /* Differential input */
59 bool ext_vref; /* External voltage reference */
60 unsigned int vref_mv; /* voltage reference value */
61 u8 cmd_byte; /* Command byte without channel bits */ 52 u8 cmd_byte; /* Command byte without channel bits */
62 unsigned int lsb_resol; /* Resolution of the ADC sample LSB */ 53 unsigned int lsb_resol; /* Resolution of the ADC sample LSB */
63 s32 (*read_channel)(const struct i2c_client *client, u8 command);
64}; 54};
65 55
66/* Command byte C2,C1,C0 - see datasheet */ 56/* Command byte C2,C1,C0 - see datasheet */
@@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch)
69 return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4); 59 return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
70} 60}
71 61
72/* Update data for the device (all 8 channels) */
73static struct ads7828_data *ads7828_update_device(struct device *dev)
74{
75 struct ads7828_data *data = dev_get_drvdata(dev);
76 struct i2c_client *client = data->client;
77
78 mutex_lock(&data->update_lock);
79
80 if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
81 || !data->valid) {
82 unsigned int ch;
83 dev_dbg(&client->dev, "Starting ads7828 update\n");
84
85 for (ch = 0; ch < ADS7828_NCH; ch++) {
86 u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch);
87 data->adc_input[ch] = data->read_channel(client, cmd);
88 }
89 data->last_updated = jiffies;
90 data->valid = true;
91 }
92
93 mutex_unlock(&data->update_lock);
94
95 return data;
96}
97
98/* sysfs callback function */ 62/* sysfs callback function */
99static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da, 63static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da,
100 char *buf) 64 char *buf)
101{ 65{
102 struct sensor_device_attribute *attr = to_sensor_dev_attr(da); 66 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
103 struct ads7828_data *data = ads7828_update_device(dev); 67 struct ads7828_data *data = dev_get_drvdata(dev);
104 unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] * 68 u8 cmd = ads7828_cmd_byte(data->cmd_byte, attr->index);
105 data->lsb_resol, 1000); 69 unsigned int regval;
70 int err;
106 71
107 return sprintf(buf, "%d\n", value); 72 err = regmap_read(data->regmap, cmd, &regval);
73 if (err < 0)
74 return err;
75
76 return sprintf(buf, "%d\n",
77 DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000));
108} 78}
109 79
110static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0); 80static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0);
@@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = {
130 100
131ATTRIBUTE_GROUPS(ads7828); 101ATTRIBUTE_GROUPS(ads7828);
132 102
103static const struct regmap_config ads2828_regmap_config = {
104 .reg_bits = 8,
105 .val_bits = 16,
106};
107
108static const struct regmap_config ads2830_regmap_config = {
109 .reg_bits = 8,
110 .val_bits = 8,
111};
112
133static int ads7828_probe(struct i2c_client *client, 113static int ads7828_probe(struct i2c_client *client,
134 const struct i2c_device_id *id) 114 const struct i2c_device_id *id)
135{ 115{
@@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client,
137 struct ads7828_platform_data *pdata = dev_get_platdata(dev); 117 struct ads7828_platform_data *pdata = dev_get_platdata(dev);
138 struct ads7828_data *data; 118 struct ads7828_data *data;
139 struct device *hwmon_dev; 119 struct device *hwmon_dev;
120 unsigned int vref_mv = ADS7828_INT_VREF_MV;
121 bool diff_input = false;
122 bool ext_vref = false;
140 123
141 data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL); 124 data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
142 if (!data) 125 if (!data)
143 return -ENOMEM; 126 return -ENOMEM;
144 127
145 if (pdata) { 128 if (pdata) {
146 data->diff_input = pdata->diff_input; 129 diff_input = pdata->diff_input;
147 data->ext_vref = pdata->ext_vref; 130 ext_vref = pdata->ext_vref;
148 if (data->ext_vref) 131 if (ext_vref && pdata->vref_mv)
149 data->vref_mv = pdata->vref_mv; 132 vref_mv = pdata->vref_mv;
150 } 133 }
151 134
152 /* Bound Vref with min/max values if it was provided */ 135 /* Bound Vref with min/max values */
153 if (data->vref_mv) 136 vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN,
154 data->vref_mv = clamp_val(data->vref_mv, 137 ADS7828_EXT_VREF_MV_MAX);
155 ADS7828_EXT_VREF_MV_MIN,
156 ADS7828_EXT_VREF_MV_MAX);
157 else
158 data->vref_mv = ADS7828_INT_VREF_MV;
159 138
160 /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */ 139 /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */
161 if (id->driver_data == ads7828) { 140 if (id->driver_data == ads7828) {
162 data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096); 141 data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096);
163 data->read_channel = i2c_smbus_read_word_swapped; 142 data->regmap = devm_regmap_init_i2c(client,
143 &ads2828_regmap_config);
164 } else { 144 } else {
165 data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256); 145 data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256);
166 data->read_channel = i2c_smbus_read_byte_data; 146 data->regmap = devm_regmap_init_i2c(client,
147 &ads2830_regmap_config);
167 } 148 }
168 149
169 data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; 150 data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
170 if (!data->diff_input) 151 if (!diff_input)
171 data->cmd_byte |= ADS7828_CMD_SD_SE; 152 data->cmd_byte |= ADS7828_CMD_SD_SE;
172 153
173 data->client = client;
174 mutex_init(&data->update_lock);
175
176 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, 154 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
177 data, 155 data,
178 ads7828_groups); 156 ads7828_groups);
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e01feba909c3..d1542b7d4bc3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -35,6 +35,7 @@
35#include <linux/hwmon-sysfs.h> 35#include <linux/hwmon-sysfs.h>
36#include <linux/jiffies.h> 36#include <linux/jiffies.h>
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/delay.h>
38 39
39#include <linux/platform_data/ina2xx.h> 40#include <linux/platform_data/ina2xx.h>
40 41
@@ -51,7 +52,6 @@
51#define INA226_ALERT_LIMIT 0x07 52#define INA226_ALERT_LIMIT 0x07
52#define INA226_DIE_ID 0xFF 53#define INA226_DIE_ID 0xFF
53 54
54
55/* register count */ 55/* register count */
56#define INA219_REGISTERS 6 56#define INA219_REGISTERS 6
57#define INA226_REGISTERS 8 57#define INA226_REGISTERS 8
@@ -64,6 +64,24 @@
64 64
65/* worst case is 68.10 ms (~14.6Hz, ina219) */ 65/* worst case is 68.10 ms (~14.6Hz, ina219) */
66#define INA2XX_CONVERSION_RATE 15 66#define INA2XX_CONVERSION_RATE 15
67#define INA2XX_MAX_DELAY 69 /* worst case delay in ms */
68
69#define INA2XX_RSHUNT_DEFAULT 10000
70
71/* bit mask for reading the averaging setting in the configuration register */
72#define INA226_AVG_RD_MASK 0x0E00
73
74#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9)
75#define INA226_SHIFT_AVG(val) ((val) << 9)
76
77/* common attrs, ina226 attrs and NULL */
78#define INA2XX_MAX_ATTRIBUTE_GROUPS 3
79
80/*
81 * Both bus voltage and shunt voltage conversion times for ina226 are set
82 * to 0b0100 on POR, which translates to 2200 microseconds in total.
83 */
84#define INA226_TOTAL_CONV_TIME_DEFAULT 2200
67 85
68enum ina2xx_ids { ina219, ina226 }; 86enum ina2xx_ids { ina219, ina226 };
69 87
@@ -81,11 +99,16 @@ struct ina2xx_data {
81 struct i2c_client *client; 99 struct i2c_client *client;
82 const struct ina2xx_config *config; 100 const struct ina2xx_config *config;
83 101
102 long rshunt;
103 u16 curr_config;
104
84 struct mutex update_lock; 105 struct mutex update_lock;
85 bool valid; 106 bool valid;
86 unsigned long last_updated; 107 unsigned long last_updated;
108 int update_interval; /* in jiffies */
87 109
88 int kind; 110 int kind;
111 const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
89 u16 regs[INA2XX_MAX_REGISTERS]; 112 u16 regs[INA2XX_MAX_REGISTERS];
90}; 113};
91 114
@@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = {
110 }, 133 },
111}; 134};
112 135
113static struct ina2xx_data *ina2xx_update_device(struct device *dev) 136/*
137 * Available averaging rates for ina226. The indices correspond with
138 * the bit values expected by the chip (according to the ina226 datasheet,
139 * table 3 AVG bit settings, found at
140 * http://www.ti.com/lit/ds/symlink/ina226.pdf.
141 */
142static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
143
144static int ina226_avg_bits(int avg)
145{
146 int i;
147
148 /* Get the closest average from the tab. */
149 for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) {
150 if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
151 break;
152 }
153
154 return i; /* Return 0b0111 for values greater than 1024. */
155}
156
157static int ina226_reg_to_interval(u16 config)
158{
159 int avg = ina226_avg_tab[INA226_READ_AVG(config)];
160
161 /*
162 * Multiply the total conversion time by the number of averages.
163 * Return the result in milliseconds.
164 */
165 return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
166}
167
168static u16 ina226_interval_to_reg(int interval, u16 config)
169{
170 int avg, avg_bits;
171
172 avg = DIV_ROUND_CLOSEST(interval * 1000,
173 INA226_TOTAL_CONV_TIME_DEFAULT);
174 avg_bits = ina226_avg_bits(avg);
175
176 return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
177}
178
179static void ina226_set_update_interval(struct ina2xx_data *data)
180{
181 int ms;
182
183 ms = ina226_reg_to_interval(data->curr_config);
184 data->update_interval = msecs_to_jiffies(ms);
185}
186
187static int ina2xx_calibrate(struct ina2xx_data *data)
188{
189 u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
190 data->rshunt);
191
192 return i2c_smbus_write_word_swapped(data->client,
193 INA2XX_CALIBRATION, val);
194}
195
196/*
197 * Initialize the configuration and calibration registers.
198 */
199static int ina2xx_init(struct ina2xx_data *data)
114{ 200{
115 struct ina2xx_data *data = dev_get_drvdata(dev);
116 struct i2c_client *client = data->client; 201 struct i2c_client *client = data->client;
117 struct ina2xx_data *ret = data; 202 int ret;
118 203
119 mutex_lock(&data->update_lock); 204 /* device configuration */
205 ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
206 data->curr_config);
207 if (ret < 0)
208 return ret;
120 209
121 if (time_after(jiffies, data->last_updated + 210 /*
122 HZ / INA2XX_CONVERSION_RATE) || !data->valid) { 211 * Set current LSB to 1mA, shunt is in uOhms
212 * (equation 13 in datasheet).
213 */
214 return ina2xx_calibrate(data);
215}
123 216
124 int i; 217static int ina2xx_do_update(struct device *dev)
218{
219 struct ina2xx_data *data = dev_get_drvdata(dev);
220 struct i2c_client *client = data->client;
221 int i, rv, retry;
125 222
126 dev_dbg(&client->dev, "Starting ina2xx update\n"); 223 dev_dbg(&client->dev, "Starting ina2xx update\n");
127 224
225 for (retry = 5; retry; retry--) {
128 /* Read all registers */ 226 /* Read all registers */
129 for (i = 0; i < data->config->registers; i++) { 227 for (i = 0; i < data->config->registers; i++) {
130 int rv = i2c_smbus_read_word_swapped(client, i); 228 rv = i2c_smbus_read_word_swapped(client, i);
131 if (rv < 0) { 229 if (rv < 0)
132 ret = ERR_PTR(rv); 230 return rv;
133 goto abort;
134 }
135 data->regs[i] = rv; 231 data->regs[i] = rv;
136 } 232 }
233
234 /*
235 * If the current value in the calibration register is 0, the
236 * power and current registers will also remain at 0. In case
237 * the chip has been reset let's check the calibration
238 * register and reinitialize if needed.
239 */
240 if (data->regs[INA2XX_CALIBRATION] == 0) {
241 dev_warn(dev, "chip not calibrated, reinitializing\n");
242
243 rv = ina2xx_init(data);
244 if (rv < 0)
245 return rv;
246
247 /*
248 * Let's make sure the power and current registers
249 * have been updated before trying again.
250 */
251 msleep(INA2XX_MAX_DELAY);
252 continue;
253 }
254
137 data->last_updated = jiffies; 255 data->last_updated = jiffies;
138 data->valid = 1; 256 data->valid = 1;
257
258 return 0;
139 } 259 }
140abort: 260
261 /*
262 * If we're here then although all write operations succeeded, the
263 * chip still returns 0 in the calibration register. Nothing more we
264 * can do here.
265 */
266 dev_err(dev, "unable to reinitialize the chip\n");
267 return -ENODEV;
268}
269
270static struct ina2xx_data *ina2xx_update_device(struct device *dev)
271{
272 struct ina2xx_data *data = dev_get_drvdata(dev);
273 struct ina2xx_data *ret = data;
274 unsigned long after;
275 int rv;
276
277 mutex_lock(&data->update_lock);
278
279 after = data->last_updated + data->update_interval;
280 if (time_after(jiffies, after) || !data->valid) {
281 rv = ina2xx_do_update(dev);
282 if (rv < 0)
283 ret = ERR_PTR(rv);
284 }
285
141 mutex_unlock(&data->update_lock); 286 mutex_unlock(&data->update_lock);
142 return ret; 287 return ret;
143} 288}
@@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
164 /* signed register, LSB=1mA (selected), in mA */ 309 /* signed register, LSB=1mA (selected), in mA */
165 val = (s16)data->regs[reg]; 310 val = (s16)data->regs[reg];
166 break; 311 break;
312 case INA2XX_CALIBRATION:
313 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
314 data->regs[reg]);
315 break;
167 default: 316 default:
168 /* programmer goofed */ 317 /* programmer goofed */
169 WARN_ON_ONCE(1); 318 WARN_ON_ONCE(1);
@@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev,
187 ina2xx_get_value(data, attr->index)); 336 ina2xx_get_value(data, attr->index));
188} 337}
189 338
339static ssize_t ina2xx_set_shunt(struct device *dev,
340 struct device_attribute *da,
341 const char *buf, size_t count)
342{
343 struct ina2xx_data *data = ina2xx_update_device(dev);
344 unsigned long val;
345 int status;
346
347 if (IS_ERR(data))
348 return PTR_ERR(data);
349
350 status = kstrtoul(buf, 10, &val);
351 if (status < 0)
352 return status;
353
354 if (val == 0 ||
355 /* Values greater than the calibration factor make no sense. */
356 val > data->config->calibration_factor)
357 return -EINVAL;
358
359 mutex_lock(&data->update_lock);
360 data->rshunt = val;
361 status = ina2xx_calibrate(data);
362 mutex_unlock(&data->update_lock);
363 if (status < 0)
364 return status;
365
366 return count;
367}
368
369static ssize_t ina226_set_interval(struct device *dev,
370 struct device_attribute *da,
371 const char *buf, size_t count)
372{
373 struct ina2xx_data *data = dev_get_drvdata(dev);
374 unsigned long val;
375 int status;
376
377 status = kstrtoul(buf, 10, &val);
378 if (status < 0)
379 return status;
380
381 if (val > INT_MAX || val == 0)
382 return -EINVAL;
383
384 mutex_lock(&data->update_lock);
385 data->curr_config = ina226_interval_to_reg(val,
386 data->regs[INA2XX_CONFIG]);
387 status = i2c_smbus_write_word_swapped(data->client,
388 INA2XX_CONFIG,
389 data->curr_config);
390
391 ina226_set_update_interval(data);
392 /* Make sure the next access re-reads all registers. */
393 data->valid = 0;
394 mutex_unlock(&data->update_lock);
395 if (status < 0)
396 return status;
397
398 return count;
399}
400
401static ssize_t ina226_show_interval(struct device *dev,
402 struct device_attribute *da, char *buf)
403{
404 struct ina2xx_data *data = ina2xx_update_device(dev);
405
406 if (IS_ERR(data))
407 return PTR_ERR(data);
408
409 /*
410 * We don't use data->update_interval here as we want to display
411 * the actual interval used by the chip and jiffies_to_msecs()
412 * doesn't seem to be accurate enough.
413 */
414 return snprintf(buf, PAGE_SIZE, "%d\n",
415 ina226_reg_to_interval(data->regs[INA2XX_CONFIG]));
416}
417
190/* shunt voltage */ 418/* shunt voltage */
191static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL, 419static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL,
192 INA2XX_SHUNT_VOLTAGE); 420 INA2XX_SHUNT_VOLTAGE);
@@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL,
203static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, 431static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
204 INA2XX_POWER); 432 INA2XX_POWER);
205 433
434/* shunt resistance */
435static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
436 ina2xx_show_value, ina2xx_set_shunt,
437 INA2XX_CALIBRATION);
438
439/* update interval (ina226 only) */
440static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
441 ina226_show_interval, ina226_set_interval, 0);
442
206/* pointers to created device attributes */ 443/* pointers to created device attributes */
207static struct attribute *ina2xx_attrs[] = { 444static struct attribute *ina2xx_attrs[] = {
208 &sensor_dev_attr_in0_input.dev_attr.attr, 445 &sensor_dev_attr_in0_input.dev_attr.attr,
209 &sensor_dev_attr_in1_input.dev_attr.attr, 446 &sensor_dev_attr_in1_input.dev_attr.attr,
210 &sensor_dev_attr_curr1_input.dev_attr.attr, 447 &sensor_dev_attr_curr1_input.dev_attr.attr,
211 &sensor_dev_attr_power1_input.dev_attr.attr, 448 &sensor_dev_attr_power1_input.dev_attr.attr,
449 &sensor_dev_attr_shunt_resistor.dev_attr.attr,
212 NULL, 450 NULL,
213}; 451};
214ATTRIBUTE_GROUPS(ina2xx); 452
453static const struct attribute_group ina2xx_group = {
454 .attrs = ina2xx_attrs,
455};
456
457static struct attribute *ina226_attrs[] = {
458 &sensor_dev_attr_update_interval.dev_attr.attr,
459 NULL,
460};
461
462static const struct attribute_group ina226_group = {
463 .attrs = ina226_attrs,
464};
215 465
216static int ina2xx_probe(struct i2c_client *client, 466static int ina2xx_probe(struct i2c_client *client,
217 const struct i2c_device_id *id) 467 const struct i2c_device_id *id)
@@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client,
221 struct device *dev = &client->dev; 471 struct device *dev = &client->dev;
222 struct ina2xx_data *data; 472 struct ina2xx_data *data;
223 struct device *hwmon_dev; 473 struct device *hwmon_dev;
224 long shunt = 10000; /* default shunt value 10mOhms */
225 u32 val; 474 u32 val;
226 int ret; 475 int ret, group = 0;
227 476
228 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) 477 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
229 return -ENODEV; 478 return -ENODEV;
@@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client,
234 483
235 if (dev_get_platdata(dev)) { 484 if (dev_get_platdata(dev)) {
236 pdata = dev_get_platdata(dev); 485 pdata = dev_get_platdata(dev);
237 shunt = pdata->shunt_uohms; 486 data->rshunt = pdata->shunt_uohms;
238 } else if (!of_property_read_u32(dev->of_node, 487 } else if (!of_property_read_u32(dev->of_node,
239 "shunt-resistor", &val)) { 488 "shunt-resistor", &val)) {
240 shunt = val; 489 data->rshunt = val;
490 } else {
491 data->rshunt = INA2XX_RSHUNT_DEFAULT;
241 } 492 }
242 493
243 if (shunt <= 0)
244 return -ENODEV;
245
246 /* set the device type */ 494 /* set the device type */
247 data->kind = id->driver_data; 495 data->kind = id->driver_data;
248 data->config = &ina2xx_config[data->kind]; 496 data->config = &ina2xx_config[data->kind];
249 497 data->curr_config = data->config->config_default;
250 /* device configuration */ 498 data->client = client;
251 ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
252 data->config->config_default);
253 if (ret < 0) {
254 dev_err(dev,
255 "error writing to the config register: %d", ret);
256 return -ENODEV;
257 }
258 499
259 /* 500 /*
260 * Set current LSB to 1mA, shunt is in uOhms 501 * Ina226 has a variable update_interval. For ina219 we
261 * (equation 13 in datasheet). 502 * use a constant value.
262 */ 503 */
263 ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION, 504 if (data->kind == ina226)
264 data->config->calibration_factor / shunt); 505 ina226_set_update_interval(data);
506 else
507 data->update_interval = HZ / INA2XX_CONVERSION_RATE;
508
509 if (data->rshunt <= 0 ||
510 data->rshunt > data->config->calibration_factor)
511 return -ENODEV;
512
513 ret = ina2xx_init(data);
265 if (ret < 0) { 514 if (ret < 0) {
266 dev_err(dev, 515 dev_err(dev, "error configuring the device: %d\n", ret);
267 "error writing to the calibration register: %d", ret);
268 return -ENODEV; 516 return -ENODEV;
269 } 517 }
270 518
271 data->client = client;
272 mutex_init(&data->update_lock); 519 mutex_init(&data->update_lock);
273 520
521 data->groups[group++] = &ina2xx_group;
522 if (data->kind == ina226)
523 data->groups[group++] = &ina226_group;
524
274 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, 525 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
275 data, ina2xx_groups); 526 data, data->groups);
276 if (IS_ERR(hwmon_dev)) 527 if (IS_ERR(hwmon_dev))
277 return PTR_ERR(hwmon_dev); 528 return PTR_ERR(hwmon_dev);
278 529
279 dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", 530 dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
280 id->name, shunt); 531 id->name, data->rshunt);
281 532
282 return 0; 533 return 0;
283} 534}
@@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = {
287 { "ina220", ina219 }, 538 { "ina220", ina219 },
288 { "ina226", ina226 }, 539 { "ina226", ina226 },
289 { "ina230", ina226 }, 540 { "ina230", ina226 },
541 { "ina231", ina226 },
290 { } 542 { }
291}; 543};
292MODULE_DEVICE_TABLE(i2c, ina2xx_id); 544MODULE_DEVICE_TABLE(i2c, ina2xx_id);
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 388f8bcd898e..996bdfd5cf25 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -201,7 +201,7 @@ struct jc42_data {
201#define JC42_TEMP_MIN 0 201#define JC42_TEMP_MIN 0
202#define JC42_TEMP_MAX 125000 202#define JC42_TEMP_MAX 125000
203 203
204static u16 jc42_temp_to_reg(int temp, bool extended) 204static u16 jc42_temp_to_reg(long temp, bool extended)
205{ 205{
206 int ntemp = clamp_val(temp, 206 int ntemp = clamp_val(temp,
207 extended ? JC42_TEMP_MIN_EXTENDED : 207 extended ? JC42_TEMP_MIN_EXTENDED :
@@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended)
213 213
214static int jc42_temp_from_reg(s16 reg) 214static int jc42_temp_from_reg(s16 reg)
215{ 215{
216 reg &= 0x1fff; 216 reg = sign_extend32(reg, 12);
217
218 /* sign extend register */
219 if (reg & 0x1000)
220 reg |= 0xf000;
221 217
222 /* convert from 0.0625 to 0.001 resolution */ 218 /* convert from 0.0625 to 0.001 resolution */
223 return reg * 125 / 2; 219 return reg * 125 / 2;
@@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
308 const char *buf, size_t count) 304 const char *buf, size_t count)
309{ 305{
310 struct jc42_data *data = dev_get_drvdata(dev); 306 struct jc42_data *data = dev_get_drvdata(dev);
311 unsigned long val; 307 long val;
312 int diff, hyst; 308 int diff, hyst;
313 int err; 309 int err;
314 int ret = count; 310 int ret = count;
315 311
316 if (kstrtoul(buf, 10, &val) < 0) 312 if (kstrtol(buf, 10, &val) < 0)
317 return -EINVAL; 313 return -EINVAL;
318 314
315 val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED :
316 JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX);
319 diff = jc42_temp_from_reg(data->temp[t_crit]) - val; 317 diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
318
320 hyst = 0; 319 hyst = 0;
321 if (diff > 0) { 320 if (diff > 0) {
322 if (diff < 2250) 321 if (diff < 2250)
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index ec5678289e4a..55765790907b 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
779 return reg != REG_BANK && reg <= 0x20; 779 return reg != REG_BANK && reg <= 0x20;
780} 780}
781 781
782static struct regmap_config nct7802_regmap_config = { 782static const struct regmap_config nct7802_regmap_config = {
783 .reg_bits = 8, 783 .reg_bits = 8,
784 .val_bits = 8, 784 .val_bits = 8,
785 .cache_type = REGCACHE_RBTREE, 785 .cache_type = REGCACHE_RBTREE,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index ba9f478f64ee..9da2735f1424 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client)
253 return 0; 253 return 0;
254} 254}
255 255
256#ifdef CONFIG_PM 256#ifdef CONFIG_PM_SLEEP
257static int tmp102_suspend(struct device *dev) 257static int tmp102_suspend(struct device *dev)
258{ 258{
259 struct i2c_client *client = to_i2c_client(dev); 259 struct i2c_client *client = to_i2c_client(dev);
@@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev)
279 config &= ~TMP102_CONF_SD; 279 config &= ~TMP102_CONF_SD;
280 return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config); 280 return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
281} 281}
282
283static const struct dev_pm_ops tmp102_dev_pm_ops = {
284 .suspend = tmp102_suspend,
285 .resume = tmp102_resume,
286};
287
288#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
289#else
290#define TMP102_DEV_PM_OPS NULL
291#endif /* CONFIG_PM */ 282#endif /* CONFIG_PM */
292 283
284static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
285
293static const struct i2c_device_id tmp102_id[] = { 286static const struct i2c_device_id tmp102_id[] = {
294 { "tmp102", 0 }, 287 { "tmp102", 0 },
295 { } 288 { }
@@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id);
298 291
299static struct i2c_driver tmp102_driver = { 292static struct i2c_driver tmp102_driver = {
300 .driver.name = DRIVER_NAME, 293 .driver.name = DRIVER_NAME,
301 .driver.pm = TMP102_DEV_PM_OPS, 294 .driver.pm = &tmp102_dev_pm_ops,
302 .probe = tmp102_probe, 295 .probe = tmp102_probe,
303 .remove = tmp102_remove, 296 .remove = tmp102_remove,
304 .id_table = tmp102_id, 297 .id_table = tmp102_id,
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b716b0815644..643c08a025a5 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
258 258
259IB_UVERBS_DECLARE_EX_CMD(create_flow); 259IB_UVERBS_DECLARE_EX_CMD(create_flow);
260IB_UVERBS_DECLARE_EX_CMD(destroy_flow); 260IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
261IB_UVERBS_DECLARE_EX_CMD(query_device);
262 261
263#endif /* UVERBS_H */ 262#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 532d8eba8b02..b7943ff16ed3 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -400,52 +400,6 @@ err:
400 return ret; 400 return ret;
401} 401}
402 402
403static void copy_query_dev_fields(struct ib_uverbs_file *file,
404 struct ib_uverbs_query_device_resp *resp,
405 struct ib_device_attr *attr)
406{
407 resp->fw_ver = attr->fw_ver;
408 resp->node_guid = file->device->ib_dev->node_guid;
409 resp->sys_image_guid = attr->sys_image_guid;
410 resp->max_mr_size = attr->max_mr_size;
411 resp->page_size_cap = attr->page_size_cap;
412 resp->vendor_id = attr->vendor_id;
413 resp->vendor_part_id = attr->vendor_part_id;
414 resp->hw_ver = attr->hw_ver;
415 resp->max_qp = attr->max_qp;
416 resp->max_qp_wr = attr->max_qp_wr;
417 resp->device_cap_flags = attr->device_cap_flags;
418 resp->max_sge = attr->max_sge;
419 resp->max_sge_rd = attr->max_sge_rd;
420 resp->max_cq = attr->max_cq;
421 resp->max_cqe = attr->max_cqe;
422 resp->max_mr = attr->max_mr;
423 resp->max_pd = attr->max_pd;
424 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
425 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
426 resp->max_res_rd_atom = attr->max_res_rd_atom;
427 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
428 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
429 resp->atomic_cap = attr->atomic_cap;
430 resp->max_ee = attr->max_ee;
431 resp->max_rdd = attr->max_rdd;
432 resp->max_mw = attr->max_mw;
433 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
434 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
435 resp->max_mcast_grp = attr->max_mcast_grp;
436 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
437 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
438 resp->max_ah = attr->max_ah;
439 resp->max_fmr = attr->max_fmr;
440 resp->max_map_per_fmr = attr->max_map_per_fmr;
441 resp->max_srq = attr->max_srq;
442 resp->max_srq_wr = attr->max_srq_wr;
443 resp->max_srq_sge = attr->max_srq_sge;
444 resp->max_pkeys = attr->max_pkeys;
445 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
446 resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
447}
448
449ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 403ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
450 const char __user *buf, 404 const char __user *buf,
451 int in_len, int out_len) 405 int in_len, int out_len)
@@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
466 return ret; 420 return ret;
467 421
468 memset(&resp, 0, sizeof resp); 422 memset(&resp, 0, sizeof resp);
469 copy_query_dev_fields(file, &resp, &attr); 423
424 resp.fw_ver = attr.fw_ver;
425 resp.node_guid = file->device->ib_dev->node_guid;
426 resp.sys_image_guid = attr.sys_image_guid;
427 resp.max_mr_size = attr.max_mr_size;
428 resp.page_size_cap = attr.page_size_cap;
429 resp.vendor_id = attr.vendor_id;
430 resp.vendor_part_id = attr.vendor_part_id;
431 resp.hw_ver = attr.hw_ver;
432 resp.max_qp = attr.max_qp;
433 resp.max_qp_wr = attr.max_qp_wr;
434 resp.device_cap_flags = attr.device_cap_flags;
435 resp.max_sge = attr.max_sge;
436 resp.max_sge_rd = attr.max_sge_rd;
437 resp.max_cq = attr.max_cq;
438 resp.max_cqe = attr.max_cqe;
439 resp.max_mr = attr.max_mr;
440 resp.max_pd = attr.max_pd;
441 resp.max_qp_rd_atom = attr.max_qp_rd_atom;
442 resp.max_ee_rd_atom = attr.max_ee_rd_atom;
443 resp.max_res_rd_atom = attr.max_res_rd_atom;
444 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
445 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
446 resp.atomic_cap = attr.atomic_cap;
447 resp.max_ee = attr.max_ee;
448 resp.max_rdd = attr.max_rdd;
449 resp.max_mw = attr.max_mw;
450 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
451 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
452 resp.max_mcast_grp = attr.max_mcast_grp;
453 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
454 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
455 resp.max_ah = attr.max_ah;
456 resp.max_fmr = attr.max_fmr;
457 resp.max_map_per_fmr = attr.max_map_per_fmr;
458 resp.max_srq = attr.max_srq;
459 resp.max_srq_wr = attr.max_srq_wr;
460 resp.max_srq_sge = attr.max_srq_sge;
461 resp.max_pkeys = attr.max_pkeys;
462 resp.local_ca_ack_delay = attr.local_ca_ack_delay;
463 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
470 464
471 if (copy_to_user((void __user *) (unsigned long) cmd.response, 465 if (copy_to_user((void __user *) (unsigned long) cmd.response,
472 &resp, sizeof resp)) 466 &resp, sizeof resp))
@@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3293 3287
3294 return ret ? ret : in_len; 3288 return ret ? ret : in_len;
3295} 3289}
3296
3297int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3298 struct ib_udata *ucore,
3299 struct ib_udata *uhw)
3300{
3301 struct ib_uverbs_ex_query_device_resp resp;
3302 struct ib_uverbs_ex_query_device cmd;
3303 struct ib_device_attr attr;
3304 struct ib_device *device;
3305 int err;
3306
3307 device = file->device->ib_dev;
3308 if (ucore->inlen < sizeof(cmd))
3309 return -EINVAL;
3310
3311 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3312 if (err)
3313 return err;
3314
3315 if (cmd.reserved)
3316 return -EINVAL;
3317
3318 err = device->query_device(device, &attr);
3319 if (err)
3320 return err;
3321
3322 memset(&resp, 0, sizeof(resp));
3323 copy_query_dev_fields(file, &resp.base, &attr);
3324 resp.comp_mask = 0;
3325
3326#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3327 if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
3328 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3329 resp.odp_caps.per_transport_caps.rc_odp_caps =
3330 attr.odp_caps.per_transport_caps.rc_odp_caps;
3331 resp.odp_caps.per_transport_caps.uc_odp_caps =
3332 attr.odp_caps.per_transport_caps.uc_odp_caps;
3333 resp.odp_caps.per_transport_caps.ud_odp_caps =
3334 attr.odp_caps.per_transport_caps.ud_odp_caps;
3335 resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
3336 }
3337#endif
3338
3339 err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
3340 if (err)
3341 return err;
3342
3343 return 0;
3344}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e6c23b9eab33..5db1a8cc388d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
123 struct ib_udata *uhw) = { 123 struct ib_udata *uhw) = {
124 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, 124 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
125 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, 125 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
126 [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device
127}; 126};
128 127
129static void ib_uverbs_add_one(struct ib_device *device); 128static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8a87404e9c76..03bf81211a54 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1331 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 1331 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
1332 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 1332 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
1333 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 1333 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
1334 dev->ib_dev.uverbs_ex_cmd_mask =
1335 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
1336 1334
1337 dev->ib_dev.query_device = mlx5_ib_query_device; 1335 dev->ib_dev.query_device = mlx5_ib_query_device;
1338 dev->ib_dev.query_port = mlx5_ib_query_port; 1336 dev->ib_dev.query_port = mlx5_ib_query_port;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 8ba80a6d3a46..d7562beb5423 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -98,15 +98,9 @@ enum {
98 98
99 IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ 99 IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
100 IPOIB_MCAST_FLAG_SENDONLY = 1, 100 IPOIB_MCAST_FLAG_SENDONLY = 1,
101 /* 101 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
102 * For IPOIB_MCAST_FLAG_BUSY
103 * When set, in flight join and mcast->mc is unreliable
104 * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or
105 * haven't started yet
106 * When clear and mcast->mc is valid pointer, join was successful
107 */
108 IPOIB_MCAST_FLAG_BUSY = 2,
109 IPOIB_MCAST_FLAG_ATTACHED = 3, 102 IPOIB_MCAST_FLAG_ATTACHED = 3,
103 IPOIB_MCAST_JOIN_STARTED = 4,
110 104
111 MAX_SEND_CQE = 16, 105 MAX_SEND_CQE = 16,
112 IPOIB_CM_COPYBREAK = 256, 106 IPOIB_CM_COPYBREAK = 256,
@@ -323,7 +317,6 @@ struct ipoib_dev_priv {
323 struct list_head multicast_list; 317 struct list_head multicast_list;
324 struct rb_root multicast_tree; 318 struct rb_root multicast_tree;
325 319
326 struct workqueue_struct *wq;
327 struct delayed_work mcast_task; 320 struct delayed_work mcast_task;
328 struct work_struct carrier_on_task; 321 struct work_struct carrier_on_task;
329 struct work_struct flush_light; 322 struct work_struct flush_light;
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
484void ipoib_pkey_event(struct work_struct *work); 477void ipoib_pkey_event(struct work_struct *work);
485void ipoib_ib_dev_cleanup(struct net_device *dev); 478void ipoib_ib_dev_cleanup(struct net_device *dev);
486 479
487int ipoib_ib_dev_open(struct net_device *dev); 480int ipoib_ib_dev_open(struct net_device *dev, int flush);
488int ipoib_ib_dev_up(struct net_device *dev); 481int ipoib_ib_dev_up(struct net_device *dev);
489int ipoib_ib_dev_down(struct net_device *dev); 482int ipoib_ib_dev_down(struct net_device *dev, int flush);
490int ipoib_ib_dev_stop(struct net_device *dev); 483int ipoib_ib_dev_stop(struct net_device *dev, int flush);
491void ipoib_pkey_dev_check_presence(struct net_device *dev); 484void ipoib_pkey_dev_check_presence(struct net_device *dev);
492 485
493int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 486int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
499 492
500void ipoib_mcast_restart_task(struct work_struct *work); 493void ipoib_mcast_restart_task(struct work_struct *work);
501int ipoib_mcast_start_thread(struct net_device *dev); 494int ipoib_mcast_start_thread(struct net_device *dev);
502int ipoib_mcast_stop_thread(struct net_device *dev); 495int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
503 496
504void ipoib_mcast_dev_down(struct net_device *dev); 497void ipoib_mcast_dev_down(struct net_device *dev);
505void ipoib_mcast_dev_flush(struct net_device *dev); 498void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959adb6c7d..933efcea0d03 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
474 } 474 }
475 475
476 spin_lock_irq(&priv->lock); 476 spin_lock_irq(&priv->lock);
477 queue_delayed_work(priv->wq, 477 queue_delayed_work(ipoib_workqueue,
478 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); 478 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
479 /* Add this entry to passive ids list head, but do not re-add it 479 /* Add this entry to passive ids list head, but do not re-add it
480 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ 480 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
576 spin_lock_irqsave(&priv->lock, flags); 576 spin_lock_irqsave(&priv->lock, flags);
577 list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); 577 list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
578 ipoib_cm_start_rx_drain(priv); 578 ipoib_cm_start_rx_drain(priv);
579 queue_work(priv->wq, &priv->cm.rx_reap_task); 579 queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
580 spin_unlock_irqrestore(&priv->lock, flags); 580 spin_unlock_irqrestore(&priv->lock, flags);
581 } else 581 } else
582 ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", 582 ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
603 spin_lock_irqsave(&priv->lock, flags); 603 spin_lock_irqsave(&priv->lock, flags);
604 list_move(&p->list, &priv->cm.rx_reap_list); 604 list_move(&p->list, &priv->cm.rx_reap_list);
605 spin_unlock_irqrestore(&priv->lock, flags); 605 spin_unlock_irqrestore(&priv->lock, flags);
606 queue_work(priv->wq, &priv->cm.rx_reap_task); 606 queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
607 } 607 }
608 return; 608 return;
609 } 609 }
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
827 827
828 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { 828 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
829 list_move(&tx->list, &priv->cm.reap_list); 829 list_move(&tx->list, &priv->cm.reap_list);
830 queue_work(priv->wq, &priv->cm.reap_task); 830 queue_work(ipoib_workqueue, &priv->cm.reap_task);
831 } 831 }
832 832
833 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); 833 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1255 1255
1256 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { 1256 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1257 list_move(&tx->list, &priv->cm.reap_list); 1257 list_move(&tx->list, &priv->cm.reap_list);
1258 queue_work(priv->wq, &priv->cm.reap_task); 1258 queue_work(ipoib_workqueue, &priv->cm.reap_task);
1259 } 1259 }
1260 1260
1261 spin_unlock_irqrestore(&priv->lock, flags); 1261 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
1284 tx->dev = dev; 1284 tx->dev = dev;
1285 list_add(&tx->list, &priv->cm.start_list); 1285 list_add(&tx->list, &priv->cm.start_list);
1286 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); 1286 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
1287 queue_work(priv->wq, &priv->cm.start_task); 1287 queue_work(ipoib_workqueue, &priv->cm.start_task);
1288 return tx; 1288 return tx;
1289} 1289}
1290 1290
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
1295 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { 1295 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1296 spin_lock_irqsave(&priv->lock, flags); 1296 spin_lock_irqsave(&priv->lock, flags);
1297 list_move(&tx->list, &priv->cm.reap_list); 1297 list_move(&tx->list, &priv->cm.reap_list);
1298 queue_work(priv->wq, &priv->cm.reap_task); 1298 queue_work(ipoib_workqueue, &priv->cm.reap_task);
1299 ipoib_dbg(priv, "Reap connection for gid %pI6\n", 1299 ipoib_dbg(priv, "Reap connection for gid %pI6\n",
1300 tx->neigh->daddr + 4); 1300 tx->neigh->daddr + 4);
1301 tx->neigh = NULL; 1301 tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
1417 1417
1418 skb_queue_tail(&priv->cm.skb_queue, skb); 1418 skb_queue_tail(&priv->cm.skb_queue, skb);
1419 if (e) 1419 if (e)
1420 queue_work(priv->wq, &priv->cm.skb_task); 1420 queue_work(ipoib_workqueue, &priv->cm.skb_task);
1421} 1421}
1422 1422
1423static void ipoib_cm_rx_reap(struct work_struct *work) 1423static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
1450 } 1450 }
1451 1451
1452 if (!list_empty(&priv->cm.passive_ids)) 1452 if (!list_empty(&priv->cm.passive_ids))
1453 queue_delayed_work(priv->wq, 1453 queue_delayed_work(ipoib_workqueue,
1454 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); 1454 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
1455 spin_unlock_irq(&priv->lock); 1455 spin_unlock_irq(&priv->lock);
1456} 1456}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fe65abb5150c..72626c348174 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
655 __ipoib_reap_ah(dev); 655 __ipoib_reap_ah(dev);
656 656
657 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) 657 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
658 queue_delayed_work(priv->wq, &priv->ah_reap_task, 658 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
659 round_jiffies_relative(HZ)); 659 round_jiffies_relative(HZ));
660} 660}
661 661
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
664 drain_tx_cq((struct net_device *)ctx); 664 drain_tx_cq((struct net_device *)ctx);
665} 665}
666 666
667int ipoib_ib_dev_open(struct net_device *dev) 667int ipoib_ib_dev_open(struct net_device *dev, int flush)
668{ 668{
669 struct ipoib_dev_priv *priv = netdev_priv(dev); 669 struct ipoib_dev_priv *priv = netdev_priv(dev);
670 int ret; 670 int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
696 } 696 }
697 697
698 clear_bit(IPOIB_STOP_REAPER, &priv->flags); 698 clear_bit(IPOIB_STOP_REAPER, &priv->flags);
699 queue_delayed_work(priv->wq, &priv->ah_reap_task, 699 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
700 round_jiffies_relative(HZ)); 700 round_jiffies_relative(HZ));
701 701
702 if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) 702 if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
706dev_stop: 706dev_stop:
707 if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) 707 if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
708 napi_enable(&priv->napi); 708 napi_enable(&priv->napi);
709 ipoib_ib_dev_stop(dev); 709 ipoib_ib_dev_stop(dev, flush);
710 return -1; 710 return -1;
711} 711}
712 712
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
738 return ipoib_mcast_start_thread(dev); 738 return ipoib_mcast_start_thread(dev);
739} 739}
740 740
741int ipoib_ib_dev_down(struct net_device *dev) 741int ipoib_ib_dev_down(struct net_device *dev, int flush)
742{ 742{
743 struct ipoib_dev_priv *priv = netdev_priv(dev); 743 struct ipoib_dev_priv *priv = netdev_priv(dev);
744 744
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
747 clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); 747 clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
748 netif_carrier_off(dev); 748 netif_carrier_off(dev);
749 749
750 ipoib_mcast_stop_thread(dev); 750 ipoib_mcast_stop_thread(dev, flush);
751 ipoib_mcast_dev_flush(dev); 751 ipoib_mcast_dev_flush(dev);
752 752
753 ipoib_flush_paths(dev); 753 ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
807 local_bh_enable(); 807 local_bh_enable();
808} 808}
809 809
810int ipoib_ib_dev_stop(struct net_device *dev) 810int ipoib_ib_dev_stop(struct net_device *dev, int flush)
811{ 811{
812 struct ipoib_dev_priv *priv = netdev_priv(dev); 812 struct ipoib_dev_priv *priv = netdev_priv(dev);
813 struct ib_qp_attr qp_attr; 813 struct ib_qp_attr qp_attr;
@@ -880,7 +880,8 @@ timeout:
880 /* Wait for all AHs to be reaped */ 880 /* Wait for all AHs to be reaped */
881 set_bit(IPOIB_STOP_REAPER, &priv->flags); 881 set_bit(IPOIB_STOP_REAPER, &priv->flags);
882 cancel_delayed_work(&priv->ah_reap_task); 882 cancel_delayed_work(&priv->ah_reap_task);
883 flush_workqueue(priv->wq); 883 if (flush)
884 flush_workqueue(ipoib_workqueue);
884 885
885 begin = jiffies; 886 begin = jiffies;
886 887
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
917 (unsigned long) dev); 918 (unsigned long) dev);
918 919
919 if (dev->flags & IFF_UP) { 920 if (dev->flags & IFF_UP) {
920 if (ipoib_ib_dev_open(dev)) { 921 if (ipoib_ib_dev_open(dev, 1)) {
921 ipoib_transport_dev_cleanup(dev); 922 ipoib_transport_dev_cleanup(dev);
922 return -ENODEV; 923 return -ENODEV;
923 } 924 }
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1039 } 1040 }
1040 1041
1041 if (level >= IPOIB_FLUSH_NORMAL) 1042 if (level >= IPOIB_FLUSH_NORMAL)
1042 ipoib_ib_dev_down(dev); 1043 ipoib_ib_dev_down(dev, 0);
1043 1044
1044 if (level == IPOIB_FLUSH_HEAVY) { 1045 if (level == IPOIB_FLUSH_HEAVY) {
1045 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) 1046 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
1046 ipoib_ib_dev_stop(dev); 1047 ipoib_ib_dev_stop(dev, 0);
1047 if (ipoib_ib_dev_open(dev) != 0) 1048 if (ipoib_ib_dev_open(dev, 0) != 0)
1048 return; 1049 return;
1049 if (netif_queue_stopped(dev)) 1050 if (netif_queue_stopped(dev))
1050 netif_start_queue(dev); 1051 netif_start_queue(dev);
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
1096 */ 1097 */
1097 ipoib_flush_paths(dev); 1098 ipoib_flush_paths(dev);
1098 1099
1099 ipoib_mcast_stop_thread(dev); 1100 ipoib_mcast_stop_thread(dev, 1);
1100 ipoib_mcast_dev_flush(dev); 1101 ipoib_mcast_dev_flush(dev);
1101 1102
1102 ipoib_transport_dev_cleanup(dev); 1103 ipoib_transport_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6bad17d4d588..58b5aa3b6f2d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
108 108
109 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 109 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
110 110
111 if (ipoib_ib_dev_open(dev)) { 111 if (ipoib_ib_dev_open(dev, 1)) {
112 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 112 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
113 return 0; 113 return 0;
114 goto err_disable; 114 goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
139 return 0; 139 return 0;
140 140
141err_stop: 141err_stop:
142 ipoib_ib_dev_stop(dev); 142 ipoib_ib_dev_stop(dev, 1);
143 143
144err_disable: 144err_disable:
145 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 145 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
157 157
158 netif_stop_queue(dev); 158 netif_stop_queue(dev);
159 159
160 ipoib_ib_dev_down(dev); 160 ipoib_ib_dev_down(dev, 1);
161 ipoib_ib_dev_stop(dev); 161 ipoib_ib_dev_stop(dev, 0);
162 162
163 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 163 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
164 struct ipoib_dev_priv *cpriv; 164 struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
839 return; 839 return;
840 } 840 }
841 841
842 queue_work(priv->wq, &priv->restart_task); 842 queue_work(ipoib_workqueue, &priv->restart_task);
843} 843}
844 844
845static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) 845static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
954 __ipoib_reap_neigh(priv); 954 __ipoib_reap_neigh(priv);
955 955
956 if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 956 if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
957 queue_delayed_work(priv->wq, &priv->neigh_reap_task, 957 queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
958 arp_tbl.gc_interval); 958 arp_tbl.gc_interval);
959} 959}
960 960
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
1133 1133
1134 /* start garbage collection */ 1134 /* start garbage collection */
1135 clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1135 clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1136 queue_delayed_work(priv->wq, &priv->neigh_reap_task, 1136 queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
1137 arp_tbl.gc_interval); 1137 arp_tbl.gc_interval);
1138 1138
1139 return 0; 1139 return 0;
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
1262{ 1262{
1263 struct ipoib_dev_priv *priv = netdev_priv(dev); 1263 struct ipoib_dev_priv *priv = netdev_priv(dev);
1264 1264
1265 if (ipoib_neigh_hash_init(priv) < 0)
1266 goto out;
1265 /* Allocate RX/TX "rings" to hold queued skbs */ 1267 /* Allocate RX/TX "rings" to hold queued skbs */
1266 priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, 1268 priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
1267 GFP_KERNEL); 1269 GFP_KERNEL);
1268 if (!priv->rx_ring) { 1270 if (!priv->rx_ring) {
1269 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", 1271 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
1270 ca->name, ipoib_recvq_size); 1272 ca->name, ipoib_recvq_size);
1271 goto out; 1273 goto out_neigh_hash_cleanup;
1272 } 1274 }
1273 1275
1274 priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); 1276 priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
1283 if (ipoib_ib_dev_init(dev, ca, port)) 1285 if (ipoib_ib_dev_init(dev, ca, port))
1284 goto out_tx_ring_cleanup; 1286 goto out_tx_ring_cleanup;
1285 1287
1286 /*
1287 * Must be after ipoib_ib_dev_init so we can allocate a per
1288 * device wq there and use it here
1289 */
1290 if (ipoib_neigh_hash_init(priv) < 0)
1291 goto out_dev_uninit;
1292
1293 return 0; 1288 return 0;
1294 1289
1295out_dev_uninit:
1296 ipoib_ib_dev_cleanup(dev);
1297
1298out_tx_ring_cleanup: 1290out_tx_ring_cleanup:
1299 vfree(priv->tx_ring); 1291 vfree(priv->tx_ring);
1300 1292
1301out_rx_ring_cleanup: 1293out_rx_ring_cleanup:
1302 kfree(priv->rx_ring); 1294 kfree(priv->rx_ring);
1303 1295
1296out_neigh_hash_cleanup:
1297 ipoib_neigh_hash_uninit(dev);
1304out: 1298out:
1305 return -ENOMEM; 1299 return -ENOMEM;
1306} 1300}
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
1323 } 1317 }
1324 unregister_netdevice_many(&head); 1318 unregister_netdevice_many(&head);
1325 1319
1326 /*
1327 * Must be before ipoib_ib_dev_cleanup or we delete an in use
1328 * work queue
1329 */
1330 ipoib_neigh_hash_uninit(dev);
1331
1332 ipoib_ib_dev_cleanup(dev); 1320 ipoib_ib_dev_cleanup(dev);
1333 1321
1334 kfree(priv->rx_ring); 1322 kfree(priv->rx_ring);
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
1336 1324
1337 priv->rx_ring = NULL; 1325 priv->rx_ring = NULL;
1338 priv->tx_ring = NULL; 1326 priv->tx_ring = NULL;
1327
1328 ipoib_neigh_hash_uninit(dev);
1339} 1329}
1340 1330
1341static const struct header_ops ipoib_header_ops = { 1331static const struct header_ops ipoib_header_ops = {
@@ -1646,7 +1636,7 @@ register_failed:
1646 /* Stop GC if started before flush */ 1636 /* Stop GC if started before flush */
1647 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1637 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1648 cancel_delayed_work(&priv->neigh_reap_task); 1638 cancel_delayed_work(&priv->neigh_reap_task);
1649 flush_workqueue(priv->wq); 1639 flush_workqueue(ipoib_workqueue);
1650 1640
1651event_failed: 1641event_failed:
1652 ipoib_dev_cleanup(priv->dev); 1642 ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device)
1717 /* Stop GC */ 1707 /* Stop GC */
1718 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1708 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1719 cancel_delayed_work(&priv->neigh_reap_task); 1709 cancel_delayed_work(&priv->neigh_reap_task);
1720 flush_workqueue(priv->wq); 1710 flush_workqueue(ipoib_workqueue);
1721 1711
1722 unregister_netdev(priv->dev); 1712 unregister_netdev(priv->dev);
1723 free_netdev(priv->dev); 1713 free_netdev(priv->dev);
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void)
1758 * unregister_netdev() and linkwatch_event take the rtnl lock, 1748 * unregister_netdev() and linkwatch_event take the rtnl lock,
1759 * so flush_scheduled_work() can deadlock during device 1749 * so flush_scheduled_work() can deadlock during device
1760 * removal. 1750 * removal.
1761 *
1762 * In addition, bringing one device up and another down at the
1763 * same time can deadlock a single workqueue, so we have this
1764 * global fallback workqueue, but we also attempt to open a
1765 * per device workqueue each time we bring an interface up
1766 */ 1751 */
1767 ipoib_workqueue = create_singlethread_workqueue("ipoib_flush"); 1752 ipoib_workqueue = create_singlethread_workqueue("ipoib");
1768 if (!ipoib_workqueue) { 1753 if (!ipoib_workqueue) {
1769 ret = -ENOMEM; 1754 ret = -ENOMEM;
1770 goto err_fs; 1755 goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index bc50dd0d0e4d..ffb83b5f7e80 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
190 spin_unlock_irq(&priv->lock); 190 spin_unlock_irq(&priv->lock);
191 priv->tx_wr.wr.ud.remote_qkey = priv->qkey; 191 priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
192 set_qkey = 1; 192 set_qkey = 1;
193
194 if (!ipoib_cm_admin_enabled(dev)) {
195 rtnl_lock();
196 dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
197 rtnl_unlock();
198 }
193 } 199 }
194 200
195 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { 201 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status,
271 struct ipoib_mcast *mcast = multicast->context; 277 struct ipoib_mcast *mcast = multicast->context;
272 struct net_device *dev = mcast->dev; 278 struct net_device *dev = mcast->dev;
273 279
274 /*
275 * We have to take the mutex to force mcast_sendonly_join to
276 * return from ib_sa_multicast_join and set mcast->mc to a
277 * valid value. Otherwise we were racing with ourselves in
278 * that we might fail here, but get a valid return from
279 * ib_sa_multicast_join after we had cleared mcast->mc here,
280 * resulting in mis-matched joins and leaves and a deadlock
281 */
282 mutex_lock(&mcast_mutex);
283
284 /* We trap for port events ourselves. */ 280 /* We trap for port events ourselves. */
285 if (status == -ENETRESET) 281 if (status == -ENETRESET)
286 goto out; 282 return 0;
287 283
288 if (!status) 284 if (!status)
289 status = ipoib_mcast_join_finish(mcast, &multicast->rec); 285 status = ipoib_mcast_join_finish(mcast, &multicast->rec);
290 286
291 if (status) { 287 if (status) {
292 if (mcast->logcount++ < 20) 288 if (mcast->logcount++ < 20)
293 ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast " 289 ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
294 "join failed for %pI6, status %d\n",
295 mcast->mcmember.mgid.raw, status); 290 mcast->mcmember.mgid.raw, status);
296 291
297 /* Flush out any queued packets */ 292 /* Flush out any queued packets */
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status,
301 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); 296 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
302 } 297 }
303 netif_tx_unlock_bh(dev); 298 netif_tx_unlock_bh(dev);
299
300 /* Clear the busy flag so we try again */
301 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
302 &mcast->flags);
304 } 303 }
305out:
306 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
307 if (status)
308 mcast->mc = NULL;
309 complete(&mcast->done);
310 if (status == -ENETRESET)
311 status = 0;
312 mutex_unlock(&mcast_mutex);
313 return status; 304 return status;
314} 305}
315 306
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
327 int ret = 0; 318 int ret = 0;
328 319
329 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { 320 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
330 ipoib_dbg_mcast(priv, "device shutting down, no sendonly " 321 ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
331 "multicast joins\n");
332 return -ENODEV; 322 return -ENODEV;
333 } 323 }
334 324
335 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { 325 if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
336 ipoib_dbg_mcast(priv, "multicast entry busy, skipping " 326 ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
337 "sendonly join\n");
338 return -EBUSY; 327 return -EBUSY;
339 } 328 }
340 329
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
342 rec.port_gid = priv->local_gid; 331 rec.port_gid = priv->local_gid;
343 rec.pkey = cpu_to_be16(priv->pkey); 332 rec.pkey = cpu_to_be16(priv->pkey);
344 333
345 mutex_lock(&mcast_mutex);
346 init_completion(&mcast->done);
347 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
348 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, 334 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
349 priv->port, &rec, 335 priv->port, &rec,
350 IB_SA_MCMEMBER_REC_MGID | 336 IB_SA_MCMEMBER_REC_MGID |
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
357 if (IS_ERR(mcast->mc)) { 343 if (IS_ERR(mcast->mc)) {
358 ret = PTR_ERR(mcast->mc); 344 ret = PTR_ERR(mcast->mc);
359 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 345 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
360 complete(&mcast->done); 346 ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
361 ipoib_warn(priv, "ib_sa_join_multicast for sendonly join " 347 ret);
362 "failed (ret = %d)\n", ret);
363 } else { 348 } else {
364 ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting " 349 ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
365 "sendonly join\n", mcast->mcmember.mgid.raw); 350 mcast->mcmember.mgid.raw);
366 } 351 }
367 mutex_unlock(&mcast_mutex);
368 352
369 return ret; 353 return ret;
370} 354}
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
375 carrier_on_task); 359 carrier_on_task);
376 struct ib_port_attr attr; 360 struct ib_port_attr attr;
377 361
362 /*
363 * Take rtnl_lock to avoid racing with ipoib_stop() and
364 * turning the carrier back on while a device is being
365 * removed.
366 */
378 if (ib_query_port(priv->ca, priv->port, &attr) || 367 if (ib_query_port(priv->ca, priv->port, &attr) ||
379 attr.state != IB_PORT_ACTIVE) { 368 attr.state != IB_PORT_ACTIVE) {
380 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); 369 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
381 return; 370 return;
382 } 371 }
383 372
384 /* 373 rtnl_lock();
385 * Take rtnl_lock to avoid racing with ipoib_stop() and
386 * turning the carrier back on while a device is being
387 * removed. However, ipoib_stop() will attempt to flush
388 * the workqueue while holding the rtnl lock, so loop
389 * on trylock until either we get the lock or we see
390 * FLAG_ADMIN_UP go away as that signals that we are bailing
391 * and can safely ignore the carrier on work.
392 */
393 while (!rtnl_trylock()) {
394 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
395 return;
396 else
397 msleep(20);
398 }
399 if (!ipoib_cm_admin_enabled(priv->dev))
400 dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
401 netif_carrier_on(priv->dev); 374 netif_carrier_on(priv->dev);
402 rtnl_unlock(); 375 rtnl_unlock();
403} 376}
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status,
412 ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", 385 ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
413 mcast->mcmember.mgid.raw, status); 386 mcast->mcmember.mgid.raw, status);
414 387
415 /*
416 * We have to take the mutex to force mcast_join to
417 * return from ib_sa_multicast_join and set mcast->mc to a
418 * valid value. Otherwise we were racing with ourselves in
419 * that we might fail here, but get a valid return from
420 * ib_sa_multicast_join after we had cleared mcast->mc here,
421 * resulting in mis-matched joins and leaves and a deadlock
422 */
423 mutex_lock(&mcast_mutex);
424
425 /* We trap for port events ourselves. */ 388 /* We trap for port events ourselves. */
426 if (status == -ENETRESET) 389 if (status == -ENETRESET) {
390 status = 0;
427 goto out; 391 goto out;
392 }
428 393
429 if (!status) 394 if (!status)
430 status = ipoib_mcast_join_finish(mcast, &multicast->rec); 395 status = ipoib_mcast_join_finish(mcast, &multicast->rec);
431 396
432 if (!status) { 397 if (!status) {
433 mcast->backoff = 1; 398 mcast->backoff = 1;
399 mutex_lock(&mcast_mutex);
434 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 400 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
435 queue_delayed_work(priv->wq, &priv->mcast_task, 0); 401 queue_delayed_work(ipoib_workqueue,
402 &priv->mcast_task, 0);
403 mutex_unlock(&mcast_mutex);
436 404
437 /* 405 /*
438 * Defer carrier on work to priv->wq to avoid a 406 * Defer carrier on work to ipoib_workqueue to avoid a
439 * deadlock on rtnl_lock here. 407 * deadlock on rtnl_lock here.
440 */ 408 */
441 if (mcast == priv->broadcast) 409 if (mcast == priv->broadcast)
442 queue_work(priv->wq, &priv->carrier_on_task); 410 queue_work(ipoib_workqueue, &priv->carrier_on_task);
443 } else {
444 if (mcast->logcount++ < 20) {
445 if (status == -ETIMEDOUT || status == -EAGAIN) {
446 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
447 mcast->mcmember.mgid.raw, status);
448 } else {
449 ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
450 mcast->mcmember.mgid.raw, status);
451 }
452 }
453 411
454 mcast->backoff *= 2; 412 status = 0;
455 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 413 goto out;
456 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
457 } 414 }
458out: 415
416 if (mcast->logcount++ < 20) {
417 if (status == -ETIMEDOUT || status == -EAGAIN) {
418 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
419 mcast->mcmember.mgid.raw, status);
420 } else {
421 ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
422 mcast->mcmember.mgid.raw, status);
423 }
424 }
425
426 mcast->backoff *= 2;
427 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
428 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
429
430 /* Clear the busy flag so we try again */
431 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
432
433 mutex_lock(&mcast_mutex);
459 spin_lock_irq(&priv->lock); 434 spin_lock_irq(&priv->lock);
460 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 435 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
461 if (status) 436 queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
462 mcast->mc = NULL;
463 complete(&mcast->done);
464 if (status == -ENETRESET)
465 status = 0;
466 if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
467 queue_delayed_work(priv->wq, &priv->mcast_task,
468 mcast->backoff * HZ); 437 mcast->backoff * HZ);
469 spin_unlock_irq(&priv->lock); 438 spin_unlock_irq(&priv->lock);
470 mutex_unlock(&mcast_mutex); 439 mutex_unlock(&mcast_mutex);
471 440out:
441 complete(&mcast->done);
472 return status; 442 return status;
473} 443}
474 444
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
517 rec.hop_limit = priv->broadcast->mcmember.hop_limit; 487 rec.hop_limit = priv->broadcast->mcmember.hop_limit;
518 } 488 }
519 489
520 mutex_lock(&mcast_mutex);
521 init_completion(&mcast->done);
522 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 490 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
491 init_completion(&mcast->done);
492 set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
493
523 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, 494 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
524 &rec, comp_mask, GFP_KERNEL, 495 &rec, comp_mask, GFP_KERNEL,
525 ipoib_mcast_join_complete, mcast); 496 ipoib_mcast_join_complete, mcast);
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
533 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 504 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
534 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 505 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
535 506
507 mutex_lock(&mcast_mutex);
536 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 508 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
537 queue_delayed_work(priv->wq, &priv->mcast_task, 509 queue_delayed_work(ipoib_workqueue,
510 &priv->mcast_task,
538 mcast->backoff * HZ); 511 mcast->backoff * HZ);
512 mutex_unlock(&mcast_mutex);
539 } 513 }
540 mutex_unlock(&mcast_mutex);
541} 514}
542 515
543void ipoib_mcast_join_task(struct work_struct *work) 516void ipoib_mcast_join_task(struct work_struct *work)
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
574 ipoib_warn(priv, "failed to allocate broadcast group\n"); 547 ipoib_warn(priv, "failed to allocate broadcast group\n");
575 mutex_lock(&mcast_mutex); 548 mutex_lock(&mcast_mutex);
576 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 549 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
577 queue_delayed_work(priv->wq, &priv->mcast_task, 550 queue_delayed_work(ipoib_workqueue,
578 HZ); 551 &priv->mcast_task, HZ);
579 mutex_unlock(&mcast_mutex); 552 mutex_unlock(&mcast_mutex);
580 return; 553 return;
581 } 554 }
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
590 } 563 }
591 564
592 if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { 565 if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
593 if (IS_ERR_OR_NULL(priv->broadcast->mc) && 566 if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
594 !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
595 ipoib_mcast_join(dev, priv->broadcast, 0); 567 ipoib_mcast_join(dev, priv->broadcast, 0);
596 return; 568 return;
597 } 569 }
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
599 while (1) { 571 while (1) {
600 struct ipoib_mcast *mcast = NULL; 572 struct ipoib_mcast *mcast = NULL;
601 573
602 /*
603 * Need the mutex so our flags are consistent, need the
604 * priv->lock so we don't race with list removals in either
605 * mcast_dev_flush or mcast_restart_task
606 */
607 mutex_lock(&mcast_mutex);
608 spin_lock_irq(&priv->lock); 574 spin_lock_irq(&priv->lock);
609 list_for_each_entry(mcast, &priv->multicast_list, list) { 575 list_for_each_entry(mcast, &priv->multicast_list, list) {
610 if (IS_ERR_OR_NULL(mcast->mc) && 576 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
611 !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && 577 && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
612 !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { 578 && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
613 /* Found the next unjoined group */ 579 /* Found the next unjoined group */
614 break; 580 break;
615 } 581 }
616 } 582 }
617 spin_unlock_irq(&priv->lock); 583 spin_unlock_irq(&priv->lock);
618 mutex_unlock(&mcast_mutex);
619 584
620 if (&mcast->list == &priv->multicast_list) { 585 if (&mcast->list == &priv->multicast_list) {
621 /* All done */ 586 /* All done */
622 break; 587 break;
623 } 588 }
624 589
625 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 590 ipoib_mcast_join(dev, mcast, 1);
626 ipoib_mcast_sendonly_join(mcast);
627 else
628 ipoib_mcast_join(dev, mcast, 1);
629 return; 591 return;
630 } 592 }
631 593
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
642 604
643 mutex_lock(&mcast_mutex); 605 mutex_lock(&mcast_mutex);
644 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 606 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
645 queue_delayed_work(priv->wq, &priv->mcast_task, 0); 607 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
646 mutex_unlock(&mcast_mutex); 608 mutex_unlock(&mcast_mutex);
647 609
648 return 0; 610 return 0;
649} 611}
650 612
651int ipoib_mcast_stop_thread(struct net_device *dev) 613int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
652{ 614{
653 struct ipoib_dev_priv *priv = netdev_priv(dev); 615 struct ipoib_dev_priv *priv = netdev_priv(dev);
654 616
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
659 cancel_delayed_work(&priv->mcast_task); 621 cancel_delayed_work(&priv->mcast_task);
660 mutex_unlock(&mcast_mutex); 622 mutex_unlock(&mcast_mutex);
661 623
662 flush_workqueue(priv->wq); 624 if (flush)
625 flush_workqueue(ipoib_workqueue);
663 626
664 return 0; 627 return 0;
665} 628}
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
670 int ret = 0; 633 int ret = 0;
671 634
672 if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 635 if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
673 ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
674
675 if (!IS_ERR_OR_NULL(mcast->mc))
676 ib_sa_free_multicast(mcast->mc); 636 ib_sa_free_multicast(mcast->mc);
677 637
678 if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { 638 if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
725 memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); 685 memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
726 __ipoib_mcast_add(dev, mcast); 686 __ipoib_mcast_add(dev, mcast);
727 list_add_tail(&mcast->list, &priv->multicast_list); 687 list_add_tail(&mcast->list, &priv->multicast_list);
728 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
729 queue_delayed_work(priv->wq, &priv->mcast_task, 0);
730 } 688 }
731 689
732 if (!mcast->ah) { 690 if (!mcast->ah) {
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
740 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 698 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
741 ipoib_dbg_mcast(priv, "no address vector, " 699 ipoib_dbg_mcast(priv, "no address vector, "
742 "but multicast join already started\n"); 700 "but multicast join already started\n");
701 else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
702 ipoib_mcast_sendonly_join(mcast);
743 703
744 /* 704 /*
745 * If lookup completes between here and out:, don't 705 * If lookup completes between here and out:, don't
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
799 759
800 spin_unlock_irqrestore(&priv->lock, flags); 760 spin_unlock_irqrestore(&priv->lock, flags);
801 761
802 /* 762 /* seperate between the wait to the leave*/
803 * make sure the in-flight joins have finished before we attempt
804 * to leave
805 */
806 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) 763 list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
807 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 764 if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
808 wait_for_completion(&mcast->done); 765 wait_for_completion(&mcast->done);
809 766
810 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { 767 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
837 794
838 ipoib_dbg_mcast(priv, "restarting multicast task\n"); 795 ipoib_dbg_mcast(priv, "restarting multicast task\n");
839 796
797 ipoib_mcast_stop_thread(dev, 0);
798
840 local_irq_save(flags); 799 local_irq_save(flags);
841 netif_addr_lock(dev); 800 netif_addr_lock(dev);
842 spin_lock(&priv->lock); 801 spin_lock(&priv->lock);
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
921 netif_addr_unlock(dev); 880 netif_addr_unlock(dev);
922 local_irq_restore(flags); 881 local_irq_restore(flags);
923 882
924 /* 883 /* We have to cancel outside of the spinlock */
925 * make sure the in-flight joins have finished before we attempt
926 * to leave
927 */
928 list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
929 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
930 wait_for_completion(&mcast->done);
931
932 /*
933 * We have to cancel outside of the spinlock, but we have to
934 * take the rtnl lock or else we race with the removal of
935 * entries from the remove list in mcast_dev_flush as part
936 * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
937 * to signal that we have hit this particular race, and we
938 * return since we know we don't need to do anything else
939 * anyway.
940 */
941 while (!rtnl_trylock()) {
942 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
943 return;
944 else
945 msleep(20);
946 }
947 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { 884 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
948 ipoib_mcast_leave(mcast->dev, mcast); 885 ipoib_mcast_leave(mcast->dev, mcast);
949 ipoib_mcast_free(mcast); 886 ipoib_mcast_free(mcast);
950 } 887 }
951 /* 888
952 * Restart our join task if needed 889 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
953 */ 890 ipoib_mcast_start_thread(dev);
954 ipoib_mcast_start_thread(dev);
955 rtnl_unlock();
956} 891}
957 892
958#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 893#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b72a753eb41d..c56d5d44c53b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
145 int ret, size; 145 int ret, size;
146 int i; 146 int i;
147 147
148 /*
149 * the various IPoIB tasks assume they will never race against
150 * themselves, so always use a single thread workqueue
151 */
152 priv->wq = create_singlethread_workqueue("ipoib_wq");
153 if (!priv->wq) {
154 printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
155 return -ENODEV;
156 }
157
158 priv->pd = ib_alloc_pd(priv->ca); 148 priv->pd = ib_alloc_pd(priv->ca);
159 if (IS_ERR(priv->pd)) { 149 if (IS_ERR(priv->pd)) {
160 printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name); 150 printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
161 goto out_free_wq; 151 return -ENODEV;
162 } 152 }
163 153
164 priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); 154 priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
252 242
253out_free_pd: 243out_free_pd:
254 ib_dealloc_pd(priv->pd); 244 ib_dealloc_pd(priv->pd);
255
256out_free_wq:
257 destroy_workqueue(priv->wq);
258 priv->wq = NULL;
259 return -ENODEV; 245 return -ENODEV;
260} 246}
261 247
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
284 270
285 if (ib_dealloc_pd(priv->pd)) 271 if (ib_dealloc_pd(priv->pd))
286 ipoib_warn(priv, "ib_dealloc_pd failed\n"); 272 ipoib_warn(priv, "ib_dealloc_pd failed\n");
287
288 if (priv->wq) {
289 flush_workqueue(priv->wq);
290 destroy_workqueue(priv->wq);
291 priv->wq = NULL;
292 }
293} 273}
294 274
295void ipoib_event(struct ib_event_handler *handler, 275void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 2b0468e3df6a..56b96c63dc4b 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -37,6 +37,7 @@ static struct irq_domain *gic_irq_domain;
37static int gic_shared_intrs; 37static int gic_shared_intrs;
38static int gic_vpes; 38static int gic_vpes;
39static unsigned int gic_cpu_pin; 39static unsigned int gic_cpu_pin;
40static unsigned int timer_cpu_pin;
40static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; 41static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
41 42
42static void __gic_irq_dispatch(void); 43static void __gic_irq_dispatch(void);
@@ -616,6 +617,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
616 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val); 617 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
617 break; 618 break;
618 case GIC_LOCAL_INT_TIMER: 619 case GIC_LOCAL_INT_TIMER:
620 /* CONFIG_MIPS_CMP workaround (see __gic_init) */
621 val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
619 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val); 622 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
620 break; 623 break;
621 case GIC_LOCAL_INT_PERFCTR: 624 case GIC_LOCAL_INT_PERFCTR:
@@ -713,12 +716,36 @@ static void __init __gic_init(unsigned long gic_base_addr,
713 if (cpu_has_veic) { 716 if (cpu_has_veic) {
714 /* Always use vector 1 in EIC mode */ 717 /* Always use vector 1 in EIC mode */
715 gic_cpu_pin = 0; 718 gic_cpu_pin = 0;
719 timer_cpu_pin = gic_cpu_pin;
716 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, 720 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
717 __gic_irq_dispatch); 721 __gic_irq_dispatch);
718 } else { 722 } else {
719 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; 723 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
720 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, 724 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
721 gic_irq_dispatch); 725 gic_irq_dispatch);
726 /*
727 * With the CMP implementation of SMP (deprecated), other CPUs
728 * are started by the bootloader and put into a timer based
729 * waiting poll loop. We must not re-route those CPU's local
730 * timer interrupts as the wait instruction will never finish,
731 * so just handle whatever CPU interrupt it is routed to by
732 * default.
733 *
734 * This workaround should be removed when CMP support is
735 * dropped.
736 */
737 if (IS_ENABLED(CONFIG_MIPS_CMP) &&
738 gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
739 timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
740 GIC_VPE_TIMER_MAP)) &
741 GIC_MAP_MSK;
742 irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
743 GIC_CPU_PIN_OFFSET +
744 timer_cpu_pin,
745 gic_irq_dispatch);
746 } else {
747 timer_cpu_pin = gic_cpu_pin;
748 }
722 } 749 }
723 750
724 gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + 751 gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 0b380603a578..d7c286656a25 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
1474 add_ai(plci, &parms[5]); 1474 add_ai(plci, &parms[5]);
1475 sig_req(plci, REJECT, 0); 1475 sig_req(plci, REJECT, 0);
1476 } 1476 }
1477 else if (Reject == 1 || Reject > 9) 1477 else if (Reject == 1 || Reject >= 9)
1478 { 1478 {
1479 add_ai(plci, &parms[5]); 1479 add_ai(plci, &parms[5]);
1480 sig_req(plci, HANGUP, 0); 1480 sig_req(plci, HANGUP, 0);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 5bdedf6df153..c355a226a024 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -5,6 +5,7 @@
5menuconfig MD 5menuconfig MD
6 bool "Multiple devices driver support (RAID and LVM)" 6 bool "Multiple devices driver support (RAID and LVM)"
7 depends on BLOCK 7 depends on BLOCK
8 select SRCU
8 help 9 help
9 Support multiple physical spindles through a single logical device. 10 Support multiple physical spindles through a single logical device.
10 Required for RAID and logical volume management. 11 Required for RAID and logical volume management.
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index da3604e73e8a..1695ee5f3ffc 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -72,6 +72,19 @@ __acquires(bitmap->lock)
72 /* this page has not been allocated yet */ 72 /* this page has not been allocated yet */
73 73
74 spin_unlock_irq(&bitmap->lock); 74 spin_unlock_irq(&bitmap->lock);
75 /* It is possible that this is being called inside a
76 * prepare_to_wait/finish_wait loop from raid5c:make_request().
77 * In general it is not permitted to sleep in that context as it
78 * can cause the loop to spin freely.
79 * That doesn't apply here as we can only reach this point
80 * once with any loop.
81 * When this function completes, either bp[page].map or
82 * bp[page].hijacked. In either case, this function will
83 * abort before getting to this point again. So there is
84 * no risk of a free-spin, and so it is safe to assert
85 * that sleeping here is allowed.
86 */
87 sched_annotate_sleep();
75 mappage = kzalloc(PAGE_SIZE, GFP_NOIO); 88 mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
76 spin_lock_irq(&bitmap->lock); 89 spin_lock_irq(&bitmap->lock);
77 90
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c1b0d52bfcb0..b98765f6f77f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf,
3195 (unsigned long long)sh->sector, 3195 (unsigned long long)sh->sector,
3196 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 3196 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
3197 } 3197 }
3198
3199 if (rcw > disks && rmw > disks &&
3200 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3201 set_bit(STRIPE_DELAYED, &sh->state);
3202
3198 /* now if nothing is locked, and if we have enough data, 3203 /* now if nothing is locked, and if we have enough data,
3199 * we can start a write request 3204 * we can start a write request
3200 */ 3205 */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d6607ee9c855..84673ebcf428 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC
197 197
198config NETPOLL 198config NETPOLL
199 def_bool NETCONSOLE 199 def_bool NETCONSOLE
200 select SRCU
200 201
201config NET_POLL_CONTROLLER 202config NET_POLL_CONTROLLER
202 def_bool NETPOLL 203 def_bool NETPOLL
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 5e40a8b68cbe..b3b922adc0e4 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1415 1415
1416 cfhsi = netdev_priv(dev); 1416 cfhsi = netdev_priv(dev);
1417 cfhsi_netlink_parms(data, cfhsi); 1417 cfhsi_netlink_parms(data, cfhsi);
1418 dev_net_set(cfhsi->ndev, src_net);
1419 1418
1420 get_ops = symbol_get(cfhsi_get_ops); 1419 get_ops = symbol_get(cfhsi_get_ops);
1421 if (!get_ops) { 1420 if (!get_ops) {
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 7a5e4aa5415e..77f1f6048ddd 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -45,7 +45,7 @@ config AMD8111_ETH
45 45
46config LANCE 46config LANCE
47 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 47 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
48 depends on ISA && ISA_DMA_API 48 depends on ISA && ISA_DMA_API && !ARM
49 ---help--- 49 ---help---
50 If you have a network (Ethernet) card of this type, say Y and read 50 If you have a network (Ethernet) card of this type, say Y and read
51 the Ethernet-HOWTO, available from 51 the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
142 142
143config NI65 143config NI65
144 tristate "NI6510 support" 144 tristate "NI6510 support"
145 depends on ISA && ISA_DMA_API 145 depends on ISA && ISA_DMA_API && !ARM
146 ---help--- 146 ---help---
147 If you have a network (Ethernet) card of this type, say Y and read 147 If you have a network (Ethernet) card of this type, say Y and read
148 the Ethernet-HOWTO, available from 148 the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 5b22764ba88d..27245efe9f50 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
952 do { 952 do {
953 /* WARNING: MACE_IR is a READ/CLEAR port! */ 953 /* WARNING: MACE_IR is a READ/CLEAR port! */
954 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); 954 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
955 if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
956 return IRQ_NONE;
955 957
956 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); 958 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
957 959
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7bb5f07dbeef..e5ffb2ccb67d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
523 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); 523 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
524 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); 524 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
525 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); 525 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
526 hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
526 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); 527 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
527 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 528 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
528 HASHTBLSZ); 529 HASHTBLSZ);
@@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
552 break; 553 break;
553 } 554 }
554 555
555 /* The Queue and Channel counts are zero based so increment them 556 /* The Queue, Channel and TC counts are zero based so increment them
556 * to get the actual number 557 * to get the actual number
557 */ 558 */
558 hw_feat->rx_q_cnt++; 559 hw_feat->rx_q_cnt++;
559 hw_feat->tx_q_cnt++; 560 hw_feat->tx_q_cnt++;
560 hw_feat->rx_ch_cnt++; 561 hw_feat->rx_ch_cnt++;
561 hw_feat->tx_ch_cnt++; 562 hw_feat->tx_ch_cnt++;
563 hw_feat->tc_cnt++;
562 564
563 DBGPR("<--xgbe_get_all_hw_features\n"); 565 DBGPR("<--xgbe_get_all_hw_features\n");
564} 566}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 83a50280bb70..793f3b73eeff 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -369,6 +369,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
369 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) 369 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
370 break; 370 break;
371 371
372 /* read fpqnum field after dataaddr field */
373 dma_rmb();
372 if (is_rx_desc(raw_desc)) 374 if (is_rx_desc(raw_desc))
373 ret = xgene_enet_rx_frame(ring, raw_desc); 375 ret = xgene_enet_rx_frame(ring, raw_desc);
374 else 376 else
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 7403dff8f14a..905ac5f5d9a6 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -32,7 +32,8 @@ config CS89x0
32 will be called cs89x0. 32 will be called cs89x0.
33 33
34config CS89x0_PLATFORM 34config CS89x0_PLATFORM
35 bool "CS89x0 platform driver support" 35 bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
36 default !HAS_IOPORT_MAP
36 depends on CS89x0 37 depends on CS89x0
37 help 38 help
38 Say Y to compile the cs89x0 driver as a platform driver. This 39 Say Y to compile the cs89x0 driver as a platform driver. This
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3e1a9c1a67a9..fda12fb32ec7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1586 return -EBUSY; 1586 return -EBUSY;
1587 1587
1588 /* Fill regular entries */ 1588 /* Fill regular entries */
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); 1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
1590 i++) 1590 i++)
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1592 /* Fill the rest with fall-troughs */ 1592 /* Fill the rest with fall-troughs */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 63c807c9b21c..edea13b0ee85 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
1907 1907
1908static int igbvf_tso(struct igbvf_adapter *adapter, 1908static int igbvf_tso(struct igbvf_adapter *adapter,
1909 struct igbvf_ring *tx_ring, 1909 struct igbvf_ring *tx_ring,
1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
1911 __be16 protocol)
1911{ 1912{
1912 struct e1000_adv_tx_context_desc *context_desc; 1913 struct e1000_adv_tx_context_desc *context_desc;
1913 struct igbvf_buffer *buffer_info; 1914 struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1927 l4len = tcp_hdrlen(skb); 1928 l4len = tcp_hdrlen(skb);
1928 *hdr_len += l4len; 1929 *hdr_len += l4len;
1929 1930
1930 if (skb->protocol == htons(ETH_P_IP)) { 1931 if (protocol == htons(ETH_P_IP)) {
1931 struct iphdr *iph = ip_hdr(skb); 1932 struct iphdr *iph = ip_hdr(skb);
1932 iph->tot_len = 0; 1933 iph->tot_len = 0;
1933 iph->check = 0; 1934 iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1958 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1959 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1959 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 1960 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1960 1961
1961 if (skb->protocol == htons(ETH_P_IP)) 1962 if (protocol == htons(ETH_P_IP))
1962 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1963 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1963 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1964 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1964 1965
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1984 1985
1985static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, 1986static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
1986 struct igbvf_ring *tx_ring, 1987 struct igbvf_ring *tx_ring,
1987 struct sk_buff *skb, u32 tx_flags) 1988 struct sk_buff *skb, u32 tx_flags,
1989 __be16 protocol)
1988{ 1990{
1989 struct e1000_adv_tx_context_desc *context_desc; 1991 struct e1000_adv_tx_context_desc *context_desc;
1990 unsigned int i; 1992 unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2011 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 2013 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2012 2014
2013 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2015 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2014 switch (skb->protocol) { 2016 switch (protocol) {
2015 case htons(ETH_P_IP): 2017 case htons(ETH_P_IP):
2016 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 2018 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2017 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2019 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2211 u8 hdr_len = 0; 2213 u8 hdr_len = 0;
2212 int count = 0; 2214 int count = 0;
2213 int tso = 0; 2215 int tso = 0;
2216 __be16 protocol = vlan_get_protocol(skb);
2214 2217
2215 if (test_bit(__IGBVF_DOWN, &adapter->state)) { 2218 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2216 dev_kfree_skb_any(skb); 2219 dev_kfree_skb_any(skb);
@@ -2239,13 +2242,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2239 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); 2242 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2240 } 2243 }
2241 2244
2242 if (skb->protocol == htons(ETH_P_IP)) 2245 if (protocol == htons(ETH_P_IP))
2243 tx_flags |= IGBVF_TX_FLAGS_IPV4; 2246 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2244 2247
2245 first = tx_ring->next_to_use; 2248 first = tx_ring->next_to_use;
2246 2249
2247 tso = skb_is_gso(skb) ? 2250 tso = skb_is_gso(skb) ?
2248 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; 2251 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
2249 if (unlikely(tso < 0)) { 2252 if (unlikely(tso < 0)) {
2250 dev_kfree_skb_any(skb); 2253 dev_kfree_skb_any(skb);
2251 return NETDEV_TX_OK; 2254 return NETDEV_TX_OK;
@@ -2253,7 +2256,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2253 2256
2254 if (tso) 2257 if (tso)
2255 tx_flags |= IGBVF_TX_FLAGS_TSO; 2258 tx_flags |= IGBVF_TX_FLAGS_TSO;
2256 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && 2259 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
2257 (skb->ip_summed == CHECKSUM_PARTIAL)) 2260 (skb->ip_summed == CHECKSUM_PARTIAL))
2258 tx_flags |= IGBVF_TX_FLAGS_CSUM; 2261 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2259 2262
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2ed2c7de2304..67b02bde179e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7227 if (!vhdr) 7227 if (!vhdr)
7228 goto out_drop; 7228 goto out_drop;
7229 7229
7230 protocol = vhdr->h_vlan_encapsulated_proto;
7231 tx_flags |= ntohs(vhdr->h_vlan_TCI) << 7230 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7232 IXGBE_TX_FLAGS_VLAN_SHIFT; 7231 IXGBE_TX_FLAGS_VLAN_SHIFT;
7233 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 7232 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7234 } 7233 }
7234 protocol = vlan_get_protocol(skb);
7235 7235
7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7237 adapter->ptp_clock && 7237 adapter->ptp_clock &&
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 62a0d8e0f17d..38c7a0be8197 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3101 3101
3102 if (skb->protocol == htons(ETH_P_IP)) { 3102 if (first->protocol == htons(ETH_P_IP)) {
3103 struct iphdr *iph = ip_hdr(skb); 3103 struct iphdr *iph = ip_hdr(skb);
3104 iph->tot_len = 0; 3104 iph->tot_len = 0;
3105 iph->check = 0; 3105 iph->check = 0;
@@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3156 3156
3157 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3157 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3158 u8 l4_hdr = 0; 3158 u8 l4_hdr = 0;
3159 switch (skb->protocol) { 3159 switch (first->protocol) {
3160 case htons(ETH_P_IP): 3160 case htons(ETH_P_IP):
3161 vlan_macip_lens |= skb_network_header_len(skb); 3161 vlan_macip_lens |= skb_network_header_len(skb);
3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index bdd4eea2247c..210691c89b6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -235,7 +235,8 @@ do { \
235extern int mlx4_log_num_mgm_entry_size; 235extern int mlx4_log_num_mgm_entry_size;
236extern int log_mtts_per_seg; 236extern int log_mtts_per_seg;
237 237
238#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) 238#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
239 MLX4_MFUNC_MAX))
239#define ALL_SLAVES 0xff 240#define ALL_SLAVES 0xff
240 241
241struct mlx4_bitmap { 242struct mlx4_bitmap {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 18e5de72e9b4..4e1f58cf19ce 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -967,7 +967,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
967 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, 967 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
968 budget); 968 budget);
969 work_done = qlcnic_process_rcv_ring(sds_ring, budget); 969 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
970 if ((work_done < budget) && tx_complete) { 970
971 /* Check if we need a repoll */
972 if (!tx_complete)
973 work_done = budget;
974
975 if (work_done < budget) {
971 napi_complete(&sds_ring->napi); 976 napi_complete(&sds_ring->napi);
972 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 977 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
973 qlcnic_enable_sds_intr(adapter, sds_ring); 978 qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -992,6 +997,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
992 napi_complete(&tx_ring->napi); 997 napi_complete(&tx_ring->napi);
993 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 998 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
994 qlcnic_enable_tx_intr(adapter, tx_ring); 999 qlcnic_enable_tx_intr(adapter, tx_ring);
1000 } else {
1001 /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/
1002 work_done = budget;
995 } 1003 }
996 1004
997 return work_done; 1005 return work_done;
@@ -1950,7 +1958,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
1950 1958
1951 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1959 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1952 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1960 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1953 if ((work_done < budget) && tx_complete) { 1961
1962 /* Check if we need a repoll */
1963 if (!tx_complete)
1964 work_done = budget;
1965
1966 if (work_done < budget) {
1954 napi_complete(&sds_ring->napi); 1967 napi_complete(&sds_ring->napi);
1955 qlcnic_enable_sds_intr(adapter, sds_ring); 1968 qlcnic_enable_sds_intr(adapter, sds_ring);
1956 } 1969 }
@@ -1973,7 +1986,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1973 1986
1974 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1987 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1975 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1988 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1976 if ((work_done < budget) && tx_complete) { 1989
1990 /* Check if we need a repoll */
1991 if (!tx_complete)
1992 work_done = budget;
1993
1994 if (work_done < budget) {
1977 napi_complete(&sds_ring->napi); 1995 napi_complete(&sds_ring->napi);
1978 qlcnic_enable_sds_intr(adapter, sds_ring); 1996 qlcnic_enable_sds_intr(adapter, sds_ring);
1979 } 1997 }
@@ -1995,6 +2013,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
1995 napi_complete(&tx_ring->napi); 2013 napi_complete(&tx_ring->napi);
1996 if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) 2014 if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
1997 qlcnic_enable_tx_intr(adapter, tx_ring); 2015 qlcnic_enable_tx_intr(adapter, tx_ring);
2016 } else {
2017 /* need a repoll */
2018 work_done = budget;
1998 } 2019 }
1999 2020
2000 return work_done; 2021 return work_done;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 6c904a6cad2a..ef5aed3b1225 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
2351{ 2351{
2352 struct ql_adapter *qdev = netdev_priv(ndev); 2352 struct ql_adapter *qdev = netdev_priv(ndev);
2353 int status = 0; 2353 int status = 0;
2354 bool need_restart = netif_running(ndev);
2354 2355
2355 status = ql_adapter_down(qdev); 2356 if (need_restart) {
2356 if (status) { 2357 status = ql_adapter_down(qdev);
2357 netif_err(qdev, link, qdev->ndev, 2358 if (status) {
2358 "Failed to bring down the adapter\n"); 2359 netif_err(qdev, link, qdev->ndev,
2359 return status; 2360 "Failed to bring down the adapter\n");
2361 return status;
2362 }
2360 } 2363 }
2361 2364
2362 /* update the features with resent change */ 2365 /* update the features with resent change */
2363 ndev->features = features; 2366 ndev->features = features;
2364 2367
2365 status = ql_adapter_up(qdev); 2368 if (need_restart) {
2366 if (status) { 2369 status = ql_adapter_up(qdev);
2367 netif_err(qdev, link, qdev->ndev, 2370 if (status) {
2368 "Failed to bring up the adapter\n"); 2371 netif_err(qdev, link, qdev->ndev,
2369 return status; 2372 "Failed to bring up the adapter\n");
2373 return status;
2374 }
2370 } 2375 }
2376
2371 return status; 2377 return status;
2372} 2378}
2373 2379
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index d2835bf7b4fb..3699b98d5b2c 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1119,6 +1119,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
1119 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; 1119 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
1120 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; 1120 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
1121 } 1121 }
1122 nskb->queue_mapping = skb->queue_mapping;
1122 dev_kfree_skb(skb); 1123 dev_kfree_skb(skb);
1123 skb = nskb; 1124 skb = nskb;
1124 } 1125 }
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9f49c0129a78..7cd4eb38abfa 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device,
716 u64 req_id; 716 u64 req_id;
717 unsigned int section_index = NETVSC_INVALID_INDEX; 717 unsigned int section_index = NETVSC_INVALID_INDEX;
718 u32 msg_size = 0; 718 u32 msg_size = 0;
719 struct sk_buff *skb; 719 struct sk_buff *skb = NULL;
720 u16 q_idx = packet->q_idx; 720 u16 q_idx = packet->q_idx;
721 721
722 722
@@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device,
743 packet); 743 packet);
744 skb = (struct sk_buff *) 744 skb = (struct sk_buff *)
745 (unsigned long)packet->send_completion_tid; 745 (unsigned long)packet->send_completion_tid;
746 if (skb)
747 dev_kfree_skb_any(skb);
748 packet->page_buf_cnt = 0; 746 packet->page_buf_cnt = 0;
749 } 747 }
750 } 748 }
@@ -810,6 +808,13 @@ int netvsc_send(struct hv_device *device,
810 packet, ret); 808 packet, ret);
811 } 809 }
812 810
811 if (ret != 0) {
812 if (section_index != NETVSC_INVALID_INDEX)
813 netvsc_free_send_slot(net_device, section_index);
814 } else if (skb) {
815 dev_kfree_skb_any(skb);
816 }
817
813 return ret; 818 return ret;
814} 819}
815 820
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 7df221788cd4..919f4fccc322 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -17,7 +17,6 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/uio.h> 18#include <linux/uio.h>
19 19
20#include <net/ipv6.h>
21#include <net/net_namespace.h> 20#include <net/net_namespace.h>
22#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
23#include <net/sock.h> 22#include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
81static const struct proto_ops macvtap_socket_ops; 80static const struct proto_ops macvtap_socket_ops;
82 81
83#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 82#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
84 NETIF_F_TSO6) 83 NETIF_F_TSO6 | NETIF_F_UFO)
85#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 84#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
86#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 85#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
87 86
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
586 gso_type = SKB_GSO_TCPV6; 585 gso_type = SKB_GSO_TCPV6;
587 break; 586 break;
588 case VIRTIO_NET_HDR_GSO_UDP: 587 case VIRTIO_NET_HDR_GSO_UDP:
589 pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
590 current->comm);
591 gso_type = SKB_GSO_UDP; 588 gso_type = SKB_GSO_UDP;
592 if (skb->protocol == htons(ETH_P_IPV6))
593 ipv6_proxy_select_ident(skb);
594 break; 589 break;
595 default: 590 default:
596 return -EINVAL; 591 return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
636 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 631 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
637 else if (sinfo->gso_type & SKB_GSO_TCPV6) 632 else if (sinfo->gso_type & SKB_GSO_TCPV6)
638 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 633 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
634 else if (sinfo->gso_type & SKB_GSO_UDP)
635 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
639 else 636 else
640 BUG(); 637 BUG();
641 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 638 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
965 if (arg & TUN_F_TSO6) 962 if (arg & TUN_F_TSO6)
966 feature_mask |= NETIF_F_TSO6; 963 feature_mask |= NETIF_F_TSO6;
967 } 964 }
965
966 if (arg & TUN_F_UFO)
967 feature_mask |= NETIF_F_UFO;
968 } 968 }
969 969
970 /* tun/tap driver inverts the usage for TSO offloads, where 970 /* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
975 * When user space turns off TSO, we turn off GSO/LRO so that 975 * When user space turns off TSO, we turn off GSO/LRO so that
976 * user-space will not receive TSO frames. 976 * user-space will not receive TSO frames.
977 */ 977 */
978 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) 978 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
979 features |= RX_OFFLOADS; 979 features |= RX_OFFLOADS;
980 else 980 else
981 features &= ~RX_OFFLOADS; 981 features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1090 case TUNSETOFFLOAD: 1090 case TUNSETOFFLOAD:
1091 /* let the user check for future flags */ 1091 /* let the user check for future flags */
1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
1093 TUN_F_TSO_ECN)) 1093 TUN_F_TSO_ECN | TUN_F_UFO))
1094 return -EINVAL; 1094 return -EINVAL;
1095 1095
1096 rtnl_lock(); 1096 rtnl_lock();
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 602c625d95d5..b5edc7f96a39 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
246 /* 246 /*
247 * See if we managed to reduce the size of the packet. 247 * See if we managed to reduce the size of the packet.
248 */ 248 */
249 if (olen < isize) { 249 if (olen < isize && olen <= osize) {
250 state->stats.comp_bytes += olen; 250 state->stats.comp_bytes += olen;
251 state->stats.comp_packets++; 251 state->stats.comp_packets++;
252 } else { 252 } else {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8c8dc16839a7..10f9e4021b5a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -65,7 +65,6 @@
65#include <linux/nsproxy.h> 65#include <linux/nsproxy.h>
66#include <linux/virtio_net.h> 66#include <linux/virtio_net.h>
67#include <linux/rcupdate.h> 67#include <linux/rcupdate.h>
68#include <net/ipv6.h>
69#include <net/net_namespace.h> 68#include <net/net_namespace.h>
70#include <net/netns/generic.h> 69#include <net/netns/generic.h>
71#include <net/rtnetlink.h> 70#include <net/rtnetlink.h>
@@ -187,7 +186,7 @@ struct tun_struct {
187 struct net_device *dev; 186 struct net_device *dev;
188 netdev_features_t set_features; 187 netdev_features_t set_features;
189#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 188#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
190 NETIF_F_TSO6) 189 NETIF_F_TSO6|NETIF_F_UFO)
191 190
192 int vnet_hdr_sz; 191 int vnet_hdr_sz;
193 int sndbuf; 192 int sndbuf;
@@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1167 break; 1166 break;
1168 } 1167 }
1169 1168
1170 skb_reset_network_header(skb);
1171
1172 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 1169 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1173 pr_debug("GSO!\n"); 1170 pr_debug("GSO!\n");
1174 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 1171 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1179,20 +1176,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1179 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1176 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1180 break; 1177 break;
1181 case VIRTIO_NET_HDR_GSO_UDP: 1178 case VIRTIO_NET_HDR_GSO_UDP:
1182 {
1183 static bool warned;
1184
1185 if (!warned) {
1186 warned = true;
1187 netdev_warn(tun->dev,
1188 "%s: using disabled UFO feature; please fix this program\n",
1189 current->comm);
1190 }
1191 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1179 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1192 if (skb->protocol == htons(ETH_P_IPV6))
1193 ipv6_proxy_select_ident(skb);
1194 break; 1180 break;
1195 }
1196 default: 1181 default:
1197 tun->dev->stats.rx_frame_errors++; 1182 tun->dev->stats.rx_frame_errors++;
1198 kfree_skb(skb); 1183 kfree_skb(skb);
@@ -1221,6 +1206,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1221 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1206 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1222 } 1207 }
1223 1208
1209 skb_reset_network_header(skb);
1224 skb_probe_transport_header(skb, 0); 1210 skb_probe_transport_header(skb, 0);
1225 1211
1226 rxhash = skb_get_hash(skb); 1212 rxhash = skb_get_hash(skb);
@@ -1298,6 +1284,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1298 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1284 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1299 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1285 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1300 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1286 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1287 else if (sinfo->gso_type & SKB_GSO_UDP)
1288 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1301 else { 1289 else {
1302 pr_err("unexpected GSO type: " 1290 pr_err("unexpected GSO type: "
1303 "0x%x, gso_size %d, hdr_len %d\n", 1291 "0x%x, gso_size %d, hdr_len %d\n",
@@ -1746,6 +1734,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
1746 features |= NETIF_F_TSO6; 1734 features |= NETIF_F_TSO6;
1747 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 1735 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1748 } 1736 }
1737
1738 if (arg & TUN_F_UFO) {
1739 features |= NETIF_F_UFO;
1740 arg &= ~TUN_F_UFO;
1741 }
1749 } 1742 }
1750 1743
1751 /* This gives the user a way to test for new features in future by 1744 /* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 99b69af14274..4a1e9c489f1f 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
77 int ret; 77 int ret;
78 78
79 udelay(1); 79 udelay(1);
80 ret = sr_read_reg(dev, EPCR, &tmp); 80 ret = sr_read_reg(dev, SR_EPCR, &tmp);
81 if (ret < 0) 81 if (ret < 0)
82 return ret; 82 return ret;
83 83
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
98 98
99 mutex_lock(&dev->phy_mutex); 99 mutex_lock(&dev->phy_mutex);
100 100
101 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 101 sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
102 sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); 102 sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
103 103
104 ret = wait_phy_eeprom_ready(dev, phy); 104 ret = wait_phy_eeprom_ready(dev, phy);
105 if (ret < 0) 105 if (ret < 0)
106 goto out_unlock; 106 goto out_unlock;
107 107
108 sr_write_reg(dev, EPCR, 0x0); 108 sr_write_reg(dev, SR_EPCR, 0x0);
109 ret = sr_read(dev, EPDR, 2, value); 109 ret = sr_read(dev, SR_EPDR, 2, value);
110 110
111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", 111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
112 phy, reg, *value, ret); 112 phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
123 123
124 mutex_lock(&dev->phy_mutex); 124 mutex_lock(&dev->phy_mutex);
125 125
126 ret = sr_write(dev, EPDR, 2, &value); 126 ret = sr_write(dev, SR_EPDR, 2, &value);
127 if (ret < 0) 127 if (ret < 0)
128 goto out_unlock; 128 goto out_unlock;
129 129
130 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 130 sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
131 sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : 131 sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
132 (EPCR_WEP | EPCR_ERPRW)); 132 (EPCR_WEP | EPCR_ERPRW));
133 133
134 ret = wait_phy_eeprom_ready(dev, phy); 134 ret = wait_phy_eeprom_ready(dev, phy);
135 if (ret < 0) 135 if (ret < 0)
136 goto out_unlock; 136 goto out_unlock;
137 137
138 sr_write_reg(dev, EPCR, 0x0); 138 sr_write_reg(dev, SR_EPCR, 0x0);
139 139
140out_unlock: 140out_unlock:
141 mutex_unlock(&dev->phy_mutex); 141 mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
188 if (loc == MII_BMSR) { 188 if (loc == MII_BMSR) {
189 u8 value; 189 u8 value;
190 190
191 sr_read_reg(dev, NSR, &value); 191 sr_read_reg(dev, SR_NSR, &value);
192 if (value & NSR_LINKST) 192 if (value & NSR_LINKST)
193 rc = 1; 193 rc = 1;
194 } 194 }
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
228 int rc = 0; 228 int rc = 0;
229 229
230 /* Get the Link Status directly */ 230 /* Get the Link Status directly */
231 sr_read_reg(dev, NSR, &value); 231 sr_read_reg(dev, SR_NSR, &value);
232 if (value & NSR_LINKST) 232 if (value & NSR_LINKST)
233 rc = 1; 233 rc = 1;
234 234
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
281 } 281 }
282 } 282 }
283 283
284 sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); 284 sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
285 sr_write_reg_async(dev, RCR, rx_ctl); 285 sr_write_reg_async(dev, SR_RCR, rx_ctl);
286} 286}
287 287
288static int sr9700_set_mac_address(struct net_device *netdev, void *p) 288static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
297 } 297 }
298 298
299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
300 sr_write_async(dev, PAR, 6, netdev->dev_addr); 300 sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
301 301
302 return 0; 302 return 0;
303} 303}
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
340 mii->phy_id_mask = 0x1f; 340 mii->phy_id_mask = 0x1f;
341 mii->reg_num_mask = 0x1f; 341 mii->reg_num_mask = 0x1f;
342 342
343 sr_write_reg(dev, NCR, NCR_RST); 343 sr_write_reg(dev, SR_NCR, NCR_RST);
344 udelay(20); 344 udelay(20);
345 345
346 /* read MAC 346 /* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
348 * EEPROM automatically to PAR. In case there is no EEPROM externally, 348 * EEPROM automatically to PAR. In case there is no EEPROM externally,
349 * a default MAC address is stored in PAR for making chip work properly. 349 * a default MAC address is stored in PAR for making chip work properly.
350 */ 350 */
351 if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { 351 if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
352 netdev_err(netdev, "Error reading MAC address\n"); 352 netdev_err(netdev, "Error reading MAC address\n");
353 ret = -ENODEV; 353 ret = -ENODEV;
354 goto out; 354 goto out;
355 } 355 }
356 356
357 /* power up and reset phy */ 357 /* power up and reset phy */
358 sr_write_reg(dev, PRR, PRR_PHY_RST); 358 sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
359 /* at least 10ms, here 20ms for safe */ 359 /* at least 10ms, here 20ms for safe */
360 mdelay(20); 360 mdelay(20);
361 sr_write_reg(dev, PRR, 0); 361 sr_write_reg(dev, SR_PRR, 0);
362 /* at least 1ms, here 2ms for reading right register */ 362 /* at least 1ms, here 2ms for reading right register */
363 udelay(2 * 1000); 363 udelay(2 * 1000);
364 364
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
index fd687c575e74..258b030277e7 100644
--- a/drivers/net/usb/sr9700.h
+++ b/drivers/net/usb/sr9700.h
@@ -14,13 +14,13 @@
14/* sr9700 spec. register table on Linux platform */ 14/* sr9700 spec. register table on Linux platform */
15 15
16/* Network Control Reg */ 16/* Network Control Reg */
17#define NCR 0x00 17#define SR_NCR 0x00
18#define NCR_RST (1 << 0) 18#define NCR_RST (1 << 0)
19#define NCR_LBK (3 << 1) 19#define NCR_LBK (3 << 1)
20#define NCR_FDX (1 << 3) 20#define NCR_FDX (1 << 3)
21#define NCR_WAKEEN (1 << 6) 21#define NCR_WAKEEN (1 << 6)
22/* Network Status Reg */ 22/* Network Status Reg */
23#define NSR 0x01 23#define SR_NSR 0x01
24#define NSR_RXRDY (1 << 0) 24#define NSR_RXRDY (1 << 0)
25#define NSR_RXOV (1 << 1) 25#define NSR_RXOV (1 << 1)
26#define NSR_TX1END (1 << 2) 26#define NSR_TX1END (1 << 2)
@@ -30,7 +30,7 @@
30#define NSR_LINKST (1 << 6) 30#define NSR_LINKST (1 << 6)
31#define NSR_SPEED (1 << 7) 31#define NSR_SPEED (1 << 7)
32/* Tx Control Reg */ 32/* Tx Control Reg */
33#define TCR 0x02 33#define SR_TCR 0x02
34#define TCR_CRC_DIS (1 << 1) 34#define TCR_CRC_DIS (1 << 1)
35#define TCR_PAD_DIS (1 << 2) 35#define TCR_PAD_DIS (1 << 2)
36#define TCR_LC_CARE (1 << 3) 36#define TCR_LC_CARE (1 << 3)
@@ -38,7 +38,7 @@
38#define TCR_EXCECM (1 << 5) 38#define TCR_EXCECM (1 << 5)
39#define TCR_LF_EN (1 << 6) 39#define TCR_LF_EN (1 << 6)
40/* Tx Status Reg for Packet Index 1 */ 40/* Tx Status Reg for Packet Index 1 */
41#define TSR1 0x03 41#define SR_TSR1 0x03
42#define TSR1_EC (1 << 2) 42#define TSR1_EC (1 << 2)
43#define TSR1_COL (1 << 3) 43#define TSR1_COL (1 << 3)
44#define TSR1_LC (1 << 4) 44#define TSR1_LC (1 << 4)
@@ -46,7 +46,7 @@
46#define TSR1_LOC (1 << 6) 46#define TSR1_LOC (1 << 6)
47#define TSR1_TLF (1 << 7) 47#define TSR1_TLF (1 << 7)
48/* Tx Status Reg for Packet Index 2 */ 48/* Tx Status Reg for Packet Index 2 */
49#define TSR2 0x04 49#define SR_TSR2 0x04
50#define TSR2_EC (1 << 2) 50#define TSR2_EC (1 << 2)
51#define TSR2_COL (1 << 3) 51#define TSR2_COL (1 << 3)
52#define TSR2_LC (1 << 4) 52#define TSR2_LC (1 << 4)
@@ -54,7 +54,7 @@
54#define TSR2_LOC (1 << 6) 54#define TSR2_LOC (1 << 6)
55#define TSR2_TLF (1 << 7) 55#define TSR2_TLF (1 << 7)
56/* Rx Control Reg*/ 56/* Rx Control Reg*/
57#define RCR 0x05 57#define SR_RCR 0x05
58#define RCR_RXEN (1 << 0) 58#define RCR_RXEN (1 << 0)
59#define RCR_PRMSC (1 << 1) 59#define RCR_PRMSC (1 << 1)
60#define RCR_RUNT (1 << 2) 60#define RCR_RUNT (1 << 2)
@@ -62,87 +62,87 @@
62#define RCR_DIS_CRC (1 << 4) 62#define RCR_DIS_CRC (1 << 4)
63#define RCR_DIS_LONG (1 << 5) 63#define RCR_DIS_LONG (1 << 5)
64/* Rx Status Reg */ 64/* Rx Status Reg */
65#define RSR 0x06 65#define SR_RSR 0x06
66#define RSR_AE (1 << 2) 66#define RSR_AE (1 << 2)
67#define RSR_MF (1 << 6) 67#define RSR_MF (1 << 6)
68#define RSR_RF (1 << 7) 68#define RSR_RF (1 << 7)
69/* Rx Overflow Counter Reg */ 69/* Rx Overflow Counter Reg */
70#define ROCR 0x07 70#define SR_ROCR 0x07
71#define ROCR_ROC (0x7F << 0) 71#define ROCR_ROC (0x7F << 0)
72#define ROCR_RXFU (1 << 7) 72#define ROCR_RXFU (1 << 7)
73/* Back Pressure Threshold Reg */ 73/* Back Pressure Threshold Reg */
74#define BPTR 0x08 74#define SR_BPTR 0x08
75#define BPTR_JPT (0x0F << 0) 75#define BPTR_JPT (0x0F << 0)
76#define BPTR_BPHW (0x0F << 4) 76#define BPTR_BPHW (0x0F << 4)
77/* Flow Control Threshold Reg */ 77/* Flow Control Threshold Reg */
78#define FCTR 0x09 78#define SR_FCTR 0x09
79#define FCTR_LWOT (0x0F << 0) 79#define FCTR_LWOT (0x0F << 0)
80#define FCTR_HWOT (0x0F << 4) 80#define FCTR_HWOT (0x0F << 4)
81/* rx/tx Flow Control Reg */ 81/* rx/tx Flow Control Reg */
82#define FCR 0x0A 82#define SR_FCR 0x0A
83#define FCR_FLCE (1 << 0) 83#define FCR_FLCE (1 << 0)
84#define FCR_BKPA (1 << 4) 84#define FCR_BKPA (1 << 4)
85#define FCR_TXPEN (1 << 5) 85#define FCR_TXPEN (1 << 5)
86#define FCR_TXPF (1 << 6) 86#define FCR_TXPF (1 << 6)
87#define FCR_TXP0 (1 << 7) 87#define FCR_TXP0 (1 << 7)
88/* Eeprom & Phy Control Reg */ 88/* Eeprom & Phy Control Reg */
89#define EPCR 0x0B 89#define SR_EPCR 0x0B
90#define EPCR_ERRE (1 << 0) 90#define EPCR_ERRE (1 << 0)
91#define EPCR_ERPRW (1 << 1) 91#define EPCR_ERPRW (1 << 1)
92#define EPCR_ERPRR (1 << 2) 92#define EPCR_ERPRR (1 << 2)
93#define EPCR_EPOS (1 << 3) 93#define EPCR_EPOS (1 << 3)
94#define EPCR_WEP (1 << 4) 94#define EPCR_WEP (1 << 4)
95/* Eeprom & Phy Address Reg */ 95/* Eeprom & Phy Address Reg */
96#define EPAR 0x0C 96#define SR_EPAR 0x0C
97#define EPAR_EROA (0x3F << 0) 97#define EPAR_EROA (0x3F << 0)
98#define EPAR_PHY_ADR_MASK (0x03 << 6) 98#define EPAR_PHY_ADR_MASK (0x03 << 6)
99#define EPAR_PHY_ADR (0x01 << 6) 99#define EPAR_PHY_ADR (0x01 << 6)
100/* Eeprom & Phy Data Reg */ 100/* Eeprom & Phy Data Reg */
101#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ 101#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
102/* Wakeup Control Reg */ 102/* Wakeup Control Reg */
103#define WCR 0x0F 103#define SR_WCR 0x0F
104#define WCR_MAGICST (1 << 0) 104#define WCR_MAGICST (1 << 0)
105#define WCR_LINKST (1 << 2) 105#define WCR_LINKST (1 << 2)
106#define WCR_MAGICEN (1 << 3) 106#define WCR_MAGICEN (1 << 3)
107#define WCR_LINKEN (1 << 5) 107#define WCR_LINKEN (1 << 5)
108/* Physical Address Reg */ 108/* Physical Address Reg */
109#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ 109#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
110/* Multicast Address Reg */ 110/* Multicast Address Reg */
111#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ 111#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
112/* 0x1e unused */ 112/* 0x1e unused */
113/* Phy Reset Reg */ 113/* Phy Reset Reg */
114#define PRR 0x1F 114#define SR_PRR 0x1F
115#define PRR_PHY_RST (1 << 0) 115#define PRR_PHY_RST (1 << 0)
116/* Tx sdram Write Pointer Address Low */ 116/* Tx sdram Write Pointer Address Low */
117#define TWPAL 0x20 117#define SR_TWPAL 0x20
118/* Tx sdram Write Pointer Address High */ 118/* Tx sdram Write Pointer Address High */
119#define TWPAH 0x21 119#define SR_TWPAH 0x21
120/* Tx sdram Read Pointer Address Low */ 120/* Tx sdram Read Pointer Address Low */
121#define TRPAL 0x22 121#define SR_TRPAL 0x22
122/* Tx sdram Read Pointer Address High */ 122/* Tx sdram Read Pointer Address High */
123#define TRPAH 0x23 123#define SR_TRPAH 0x23
124/* Rx sdram Write Pointer Address Low */ 124/* Rx sdram Write Pointer Address Low */
125#define RWPAL 0x24 125#define SR_RWPAL 0x24
126/* Rx sdram Write Pointer Address High */ 126/* Rx sdram Write Pointer Address High */
127#define RWPAH 0x25 127#define SR_RWPAH 0x25
128/* Rx sdram Read Pointer Address Low */ 128/* Rx sdram Read Pointer Address Low */
129#define RRPAL 0x26 129#define SR_RRPAL 0x26
130/* Rx sdram Read Pointer Address High */ 130/* Rx sdram Read Pointer Address High */
131#define RRPAH 0x27 131#define SR_RRPAH 0x27
132/* Vendor ID register */ 132/* Vendor ID register */
133#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ 133#define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
134/* Product ID register */ 134/* Product ID register */
135#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ 135#define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
136/* CHIP Revision register */ 136/* CHIP Revision register */
137#define CHIPR 0x2C 137#define SR_CHIPR 0x2C
138/* 0x2D --> 0xEF unused */ 138/* 0x2D --> 0xEF unused */
139/* USB Device Address */ 139/* USB Device Address */
140#define USBDA 0xF0 140#define SR_USBDA 0xF0
141#define USBDA_USBFA (0x7F << 0) 141#define USBDA_USBFA (0x7F << 0)
142/* RX packet Counter Reg */ 142/* RX packet Counter Reg */
143#define RXC 0xF1 143#define SR_RXC 0xF1
144/* Tx packet Counter & USB Status Reg */ 144/* Tx packet Counter & USB Status Reg */
145#define TXC_USBS 0xF2 145#define SR_TXC_USBS 0xF2
146#define TXC_USBS_TXC0 (1 << 0) 146#define TXC_USBS_TXC0 (1 << 0)
147#define TXC_USBS_TXC1 (1 << 1) 147#define TXC_USBS_TXC1 (1 << 1)
148#define TXC_USBS_TXC2 (1 << 2) 148#define TXC_USBS_TXC2 (1 << 2)
@@ -150,7 +150,7 @@
150#define TXC_USBS_SUSFLAG (1 << 6) 150#define TXC_USBS_SUSFLAG (1 << 6)
151#define TXC_USBS_RXFAULT (1 << 7) 151#define TXC_USBS_RXFAULT (1 << 7)
152/* USB Control register */ 152/* USB Control register */
153#define USBC 0xF4 153#define SR_USBC 0xF4
154#define USBC_EP3NAK (1 << 4) 154#define USBC_EP3NAK (1 << 4)
155#define USBC_EP3ACK (1 << 5) 155#define USBC_EP3ACK (1 << 5)
156 156
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5ca97713bfb3..059fdf1bf5ee 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
491 break; 491 break;
492 case VIRTIO_NET_HDR_GSO_UDP: 492 case VIRTIO_NET_HDR_GSO_UDP:
493 {
494 static bool warned;
495
496 if (!warned) {
497 warned = true;
498 netdev_warn(dev,
499 "host using disabled UFO feature; please fix it\n");
500 }
501 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 493 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
502 break; 494 break;
503 }
504 case VIRTIO_NET_HDR_GSO_TCPV6: 495 case VIRTIO_NET_HDR_GSO_TCPV6:
505 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 496 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
506 break; 497 break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
888 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 879 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
889 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 880 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 881 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
882 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
883 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
891 else 884 else
892 BUG(); 885 BUG();
893 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 886 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1748,7 +1741,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1748 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1741 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1749 1742
1750 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1743 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1751 dev->hw_features |= NETIF_F_TSO 1744 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
1752 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 1745 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
1753 } 1746 }
1754 /* Individual feature bits: what can host handle? */ 1747 /* Individual feature bits: what can host handle? */
@@ -1758,9 +1751,11 @@ static int virtnet_probe(struct virtio_device *vdev)
1758 dev->hw_features |= NETIF_F_TSO6; 1751 dev->hw_features |= NETIF_F_TSO6;
1759 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 1752 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1760 dev->hw_features |= NETIF_F_TSO_ECN; 1753 dev->hw_features |= NETIF_F_TSO_ECN;
1754 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1755 dev->hw_features |= NETIF_F_UFO;
1761 1756
1762 if (gso) 1757 if (gso)
1763 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 1758 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1764 /* (!csum && gso) case will be fixed by register_netdev() */ 1759 /* (!csum && gso) case will be fixed by register_netdev() */
1765 } 1760 }
1766 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 1761 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1798,7 +1793,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1798 /* If we can receive ANY GSO packets, we must allocate large ones. */ 1793 /* If we can receive ANY GSO packets, we must allocate large ones. */
1799 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1794 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1800 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 1795 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1801 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) 1796 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
1797 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1802 vi->big_packets = true; 1798 vi->big_packets = true;
1803 1799
1804 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1800 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1994,9 +1990,9 @@ static struct virtio_device_id id_table[] = {
1994static unsigned int features[] = { 1990static unsigned int features[] = {
1995 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 1991 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1996 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 1992 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1997 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, 1993 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
1998 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 1994 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1999 VIRTIO_NET_F_GUEST_ECN, 1995 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
2000 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1996 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
2001 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1997 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
2002 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 1998 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7fbd89fbe107..a8c755dcab14 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2432,10 +2432,10 @@ static void vxlan_sock_work(struct work_struct *work)
2432 dev_put(vxlan->dev); 2432 dev_put(vxlan->dev);
2433} 2433}
2434 2434
2435static int vxlan_newlink(struct net *net, struct net_device *dev, 2435static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2436 struct nlattr *tb[], struct nlattr *data[]) 2436 struct nlattr *tb[], struct nlattr *data[])
2437{ 2437{
2438 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2438 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2439 struct vxlan_dev *vxlan = netdev_priv(dev); 2439 struct vxlan_dev *vxlan = netdev_priv(dev);
2440 struct vxlan_rdst *dst = &vxlan->default_dst; 2440 struct vxlan_rdst *dst = &vxlan->default_dst;
2441 __u32 vni; 2441 __u32 vni;
@@ -2445,7 +2445,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2445 if (!data[IFLA_VXLAN_ID]) 2445 if (!data[IFLA_VXLAN_ID])
2446 return -EINVAL; 2446 return -EINVAL;
2447 2447
2448 vxlan->net = dev_net(dev); 2448 vxlan->net = src_net;
2449 2449
2450 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2450 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2451 dst->remote_vni = vni; 2451 dst->remote_vni = vni;
@@ -2481,7 +2481,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2481 if (data[IFLA_VXLAN_LINK] && 2481 if (data[IFLA_VXLAN_LINK] &&
2482 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { 2482 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
2483 struct net_device *lowerdev 2483 struct net_device *lowerdev
2484 = __dev_get_by_index(net, dst->remote_ifindex); 2484 = __dev_get_by_index(src_net, dst->remote_ifindex);
2485 2485
2486 if (!lowerdev) { 2486 if (!lowerdev) {
2487 pr_info("ifindex %d does not exist\n", dst->remote_ifindex); 2487 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2557,7 +2557,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2557 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 2557 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2558 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 2558 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2559 2559
2560 if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, 2560 if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2561 vxlan->dst_port)) { 2561 vxlan->dst_port)) {
2562 pr_info("duplicate VNI %u\n", vni); 2562 pr_info("duplicate VNI %u\n", vni);
2563 return -EEXIST; 2563 return -EEXIST;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 94e234975c61..a2fdd15f285a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
25# There is no way to detect a comtrol sv11 - force it modular for now. 25# There is no way to detect a comtrol sv11 - force it modular for now.
26config HOSTESS_SV11 26config HOSTESS_SV11
27 tristate "Comtrol Hostess SV-11 support" 27 tristate "Comtrol Hostess SV-11 support"
28 depends on ISA && m && ISA_DMA_API && INET && HDLC 28 depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
29 help 29 help
30 Driver for Comtrol Hostess SV-11 network card which 30 Driver for Comtrol Hostess SV-11 network card which
31 operates on low speed synchronous serial links at up to 31 operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
37# The COSA/SRP driver has not been tested as non-modular yet. 37# The COSA/SRP driver has not been tested as non-modular yet.
38config COSA 38config COSA
39 tristate "COSA/SRP sync serial boards support" 39 tristate "COSA/SRP sync serial boards support"
40 depends on ISA && m && ISA_DMA_API && HDLC 40 depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
41 ---help--- 41 ---help---
42 Driver for COSA and SRP synchronous serial boards. 42 Driver for COSA and SRP synchronous serial boards.
43 43
@@ -87,7 +87,7 @@ config LANMEDIA
87# There is no way to detect a Sealevel board. Force it modular 87# There is no way to detect a Sealevel board. Force it modular
88config SEALEVEL_4021 88config SEALEVEL_4021
89 tristate "Sealevel Systems 4021 support" 89 tristate "Sealevel Systems 4021 support"
90 depends on ISA && m && ISA_DMA_API && INET && HDLC 90 depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
91 help 91 help
92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
93 93
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9259a732e8a4..037f74f0fcf6 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
578 goto err_rx_unbind; 578 goto err_rx_unbind;
579 } 579 }
580 queue->task = task; 580 queue->task = task;
581 get_task_struct(task);
581 582
582 task = kthread_create(xenvif_dealloc_kthread, 583 task = kthread_create(xenvif_dealloc_kthread,
583 (void *)queue, "%s-dealloc", queue->name); 584 (void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif)
634 635
635 if (queue->task) { 636 if (queue->task) {
636 kthread_stop(queue->task); 637 kthread_stop(queue->task);
638 put_task_struct(queue->task);
637 queue->task = NULL; 639 queue->task = NULL;
638 } 640 }
639 641
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 908e65e9b821..c8ce701a7efb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -2109,8 +2109,7 @@ int xenvif_kthread_guest_rx(void *data)
2109 */ 2109 */
2110 if (unlikely(vif->disabled && queue->id == 0)) { 2110 if (unlikely(vif->disabled && queue->id == 0)) {
2111 xenvif_carrier_off(vif); 2111 xenvif_carrier_off(vif);
2112 xenvif_rx_queue_purge(queue); 2112 break;
2113 continue;
2114 } 2113 }
2115 2114
2116 if (!skb_queue_empty(&queue->rx_queue)) 2115 if (!skb_queue_empty(&queue->rx_queue))
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index df781cdf13c1..17ca98657a28 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
283 struct msi_msg msg; 283 struct msi_msg msg;
284 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); 284 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
285 285
286 if (desc->msi_attrib.is_msix)
287 return -EINVAL;
288
286 irq = assign_irq(1, desc, &pos); 289 irq = assign_irq(1, desc, &pos);
287 if (irq < 0) 290 if (irq < 0)
288 return irq; 291 return irq;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e52356aa09b8..903d5078b5ed 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
324DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); 324DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
325DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); 325DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
326 326
327static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
328 const char *name)
329{
330 u32 region;
331 struct pci_bus_region bus_region;
332 struct resource *res = dev->resource + pos;
333
334 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
335
336 if (!region)
337 return;
338
339 res->name = pci_name(dev);
340 res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
341 res->flags |=
342 (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
343 region &= ~(size - 1);
344
345 /* Convert from PCI bus to resource space */
346 bus_region.start = region;
347 bus_region.end = region + size - 1;
348 pcibios_bus_to_resource(dev->bus, res, &bus_region);
349
350 dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
351 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
352}
353
327/* 354/*
328 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS 355 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
329 * ver. 1.33 20070103) don't set the correct ISA PCI region header info. 356 * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
330 * BAR0 should be 8 bytes; instead, it may be set to something like 8k 357 * BAR0 should be 8 bytes; instead, it may be set to something like 8k
331 * (which conflicts w/ BAR1's memory range). 358 * (which conflicts w/ BAR1's memory range).
359 *
360 * CS553x's ISA PCI BARs may also be read-only (ref:
361 * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
332 */ 362 */
333static void quirk_cs5536_vsa(struct pci_dev *dev) 363static void quirk_cs5536_vsa(struct pci_dev *dev)
334{ 364{
365 static char *name = "CS5536 ISA bridge";
366
335 if (pci_resource_len(dev, 0) != 8) { 367 if (pci_resource_len(dev, 0) != 8) {
336 struct resource *res = &dev->resource[0]; 368 quirk_io(dev, 0, 8, name); /* SMB */
337 res->end = res->start + 8 - 1; 369 quirk_io(dev, 1, 256, name); /* GPIO */
338 dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n"); 370 quirk_io(dev, 2, 64, name); /* MFGPT */
371 dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
372 name);
339 } 373 }
340} 374}
341DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); 375DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c3a60b57a865..a6f116aa5235 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -414,6 +414,14 @@ config REGULATOR_MAX77802
414 Exynos5420/Exynos5800 SoCs to control various voltages. 414 Exynos5420/Exynos5800 SoCs to control various voltages.
415 It includes support for control of voltage and ramp speed. 415 It includes support for control of voltage and ramp speed.
416 416
417config REGULATOR_MAX77843
418 tristate "Maxim 77843 regulator"
419 depends on MFD_MAX77843
420 help
421 This driver controls a Maxim 77843 regulator.
422 The regulator include two 'SAFEOUT' for USB(Universal Serial Bus)
423 This is suitable for Exynos5433 SoC chips.
424
417config REGULATOR_MC13XXX_CORE 425config REGULATOR_MC13XXX_CORE
418 tristate 426 tristate
419 427
@@ -433,6 +441,15 @@ config REGULATOR_MC13892
433 Say y here to support the regulators found on the Freescale MC13892 441 Say y here to support the regulators found on the Freescale MC13892
434 PMIC. 442 PMIC.
435 443
444config REGULATOR_MT6397
445 tristate "MediaTek MT6397 PMIC"
446 depends on MFD_MT6397
447 help
448 Say y here to select this option to enable the power regulator of
449 MediaTek MT6397 PMIC.
450 This driver supports the control of different power rails of device
451 through regulator interface.
452
436config REGULATOR_PALMAS 453config REGULATOR_PALMAS
437 tristate "TI Palmas PMIC Regulators" 454 tristate "TI Palmas PMIC Regulators"
438 depends on MFD_PALMAS 455 depends on MFD_PALMAS
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 1f28ebfc6f3a..2c4da15e1545 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -55,9 +55,11 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
55obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o 55obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
56obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o 56obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
57obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o 57obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
58obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
58obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o 59obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
59obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o 60obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
60obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o 61obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
62obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
61obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o 63obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
62obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o 64obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
63obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o 65obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f23d7e1f2ee7..e4331f5e5d7d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -32,11 +32,13 @@
32 32
33#define AXP20X_FREQ_DCDC_MASK 0x0f 33#define AXP20X_FREQ_DCDC_MASK 0x0f
34 34
35#define AXP20X_DESC_IO(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ 35#define AXP20X_DESC_IO(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
36 _emask, _enable_val, _disable_val) \ 36 _ereg, _emask, _enable_val, _disable_val) \
37 [AXP20X_##_id] = { \ 37 [AXP20X_##_id] = { \
38 .name = #_id, \ 38 .name = #_id, \
39 .supply_name = (_supply), \ 39 .supply_name = (_supply), \
40 .of_match = of_match_ptr(_match), \
41 .regulators_node = of_match_ptr("regulators"), \
40 .type = REGULATOR_VOLTAGE, \ 42 .type = REGULATOR_VOLTAGE, \
41 .id = AXP20X_##_id, \ 43 .id = AXP20X_##_id, \
42 .n_voltages = (((_max) - (_min)) / (_step) + 1), \ 44 .n_voltages = (((_max) - (_min)) / (_step) + 1), \
@@ -52,11 +54,13 @@
52 .ops = &axp20x_ops, \ 54 .ops = &axp20x_ops, \
53 } 55 }
54 56
55#define AXP20X_DESC(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ 57#define AXP20X_DESC(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
56 _emask) \ 58 _ereg, _emask) \
57 [AXP20X_##_id] = { \ 59 [AXP20X_##_id] = { \
58 .name = #_id, \ 60 .name = #_id, \
59 .supply_name = (_supply), \ 61 .supply_name = (_supply), \
62 .of_match = of_match_ptr(_match), \
63 .regulators_node = of_match_ptr("regulators"), \
60 .type = REGULATOR_VOLTAGE, \ 64 .type = REGULATOR_VOLTAGE, \
61 .id = AXP20X_##_id, \ 65 .id = AXP20X_##_id, \
62 .n_voltages = (((_max) - (_min)) / (_step) + 1), \ 66 .n_voltages = (((_max) - (_min)) / (_step) + 1), \
@@ -70,10 +74,12 @@
70 .ops = &axp20x_ops, \ 74 .ops = &axp20x_ops, \
71 } 75 }
72 76
73#define AXP20X_DESC_FIXED(_id, _supply, _volt) \ 77#define AXP20X_DESC_FIXED(_id, _match, _supply, _volt) \
74 [AXP20X_##_id] = { \ 78 [AXP20X_##_id] = { \
75 .name = #_id, \ 79 .name = #_id, \
76 .supply_name = (_supply), \ 80 .supply_name = (_supply), \
81 .of_match = of_match_ptr(_match), \
82 .regulators_node = of_match_ptr("regulators"), \
77 .type = REGULATOR_VOLTAGE, \ 83 .type = REGULATOR_VOLTAGE, \
78 .id = AXP20X_##_id, \ 84 .id = AXP20X_##_id, \
79 .n_voltages = 1, \ 85 .n_voltages = 1, \
@@ -82,10 +88,13 @@
82 .ops = &axp20x_ops_fixed \ 88 .ops = &axp20x_ops_fixed \
83 } 89 }
84 90
85#define AXP20X_DESC_TABLE(_id, _supply, _table, _vreg, _vmask, _ereg, _emask) \ 91#define AXP20X_DESC_TABLE(_id, _match, _supply, _table, _vreg, _vmask, _ereg, \
92 _emask) \
86 [AXP20X_##_id] = { \ 93 [AXP20X_##_id] = { \
87 .name = #_id, \ 94 .name = #_id, \
88 .supply_name = (_supply), \ 95 .supply_name = (_supply), \
96 .of_match = of_match_ptr(_match), \
97 .regulators_node = of_match_ptr("regulators"), \
89 .type = REGULATOR_VOLTAGE, \ 98 .type = REGULATOR_VOLTAGE, \
90 .id = AXP20X_##_id, \ 99 .id = AXP20X_##_id, \
91 .n_voltages = ARRAY_SIZE(_table), \ 100 .n_voltages = ARRAY_SIZE(_table), \
@@ -127,36 +136,20 @@ static struct regulator_ops axp20x_ops = {
127}; 136};
128 137
129static const struct regulator_desc axp20x_regulators[] = { 138static const struct regulator_desc axp20x_regulators[] = {
130 AXP20X_DESC(DCDC2, "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, 0x3f, 139 AXP20X_DESC(DCDC2, "dcdc2", "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT,
131 AXP20X_PWR_OUT_CTRL, 0x10), 140 0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
132 AXP20X_DESC(DCDC3, "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, 0x7f, 141 AXP20X_DESC(DCDC3, "dcdc3", "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT,
133 AXP20X_PWR_OUT_CTRL, 0x02), 142 0x7f, AXP20X_PWR_OUT_CTRL, 0x02),
134 AXP20X_DESC_FIXED(LDO1, "acin", 1300), 143 AXP20X_DESC_FIXED(LDO1, "ldo1", "acin", 1300),
135 AXP20X_DESC(LDO2, "ldo24in", 1800, 3300, 100, AXP20X_LDO24_V_OUT, 0xf0, 144 AXP20X_DESC(LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
136 AXP20X_PWR_OUT_CTRL, 0x04), 145 AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
137 AXP20X_DESC(LDO3, "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, 0x7f, 146 AXP20X_DESC(LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT,
138 AXP20X_PWR_OUT_CTRL, 0x40), 147 0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
139 AXP20X_DESC_TABLE(LDO4, "ldo24in", axp20x_ldo4_data, AXP20X_LDO24_V_OUT, 0x0f, 148 AXP20X_DESC_TABLE(LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
140 AXP20X_PWR_OUT_CTRL, 0x08), 149 AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
141 AXP20X_DESC_IO(LDO5, "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, 0xf0, 150 AXP20X_DESC_IO(LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
142 AXP20X_GPIO0_CTRL, 0x07, AXP20X_IO_ENABLED, 151 AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
143 AXP20X_IO_DISABLED), 152 AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
144};
145
146#define AXP_MATCH(_name, _id) \
147 [AXP20X_##_id] = { \
148 .name = #_name, \
149 .driver_data = (void *) &axp20x_regulators[AXP20X_##_id], \
150 }
151
152static struct of_regulator_match axp20x_matches[] = {
153 AXP_MATCH(dcdc2, DCDC2),
154 AXP_MATCH(dcdc3, DCDC3),
155 AXP_MATCH(ldo1, LDO1),
156 AXP_MATCH(ldo2, LDO2),
157 AXP_MATCH(ldo3, LDO3),
158 AXP_MATCH(ldo4, LDO4),
159 AXP_MATCH(ldo5, LDO5),
160}; 153};
161 154
162static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) 155static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
@@ -193,13 +186,6 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
193 if (!regulators) { 186 if (!regulators) {
194 dev_warn(&pdev->dev, "regulators node not found\n"); 187 dev_warn(&pdev->dev, "regulators node not found\n");
195 } else { 188 } else {
196 ret = of_regulator_match(&pdev->dev, regulators, axp20x_matches,
197 ARRAY_SIZE(axp20x_matches));
198 if (ret < 0) {
199 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
200 return ret;
201 }
202
203 dcdcfreq = 1500; 189 dcdcfreq = 1500;
204 of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq); 190 of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq);
205 ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); 191 ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
@@ -233,23 +219,17 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
233{ 219{
234 struct regulator_dev *rdev; 220 struct regulator_dev *rdev;
235 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); 221 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
236 struct regulator_config config = { }; 222 struct regulator_config config = {
237 struct regulator_init_data *init_data; 223 .dev = pdev->dev.parent,
224 .regmap = axp20x->regmap,
225 };
238 int ret, i; 226 int ret, i;
239 u32 workmode; 227 u32 workmode;
240 228
241 ret = axp20x_regulator_parse_dt(pdev); 229 /* This only sets the dcdc freq. Ignore any errors */
242 if (ret) 230 axp20x_regulator_parse_dt(pdev);
243 return ret;
244 231
245 for (i = 0; i < AXP20X_REG_ID_MAX; i++) { 232 for (i = 0; i < AXP20X_REG_ID_MAX; i++) {
246 init_data = axp20x_matches[i].init_data;
247
248 config.dev = pdev->dev.parent;
249 config.init_data = init_data;
250 config.regmap = axp20x->regmap;
251 config.of_node = axp20x_matches[i].of_node;
252
253 rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i], 233 rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i],
254 &config); 234 &config);
255 if (IS_ERR(rdev)) { 235 if (IS_ERR(rdev)) {
@@ -259,7 +239,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
259 return PTR_ERR(rdev); 239 return PTR_ERR(rdev);
260 } 240 }
261 241
262 ret = of_property_read_u32(axp20x_matches[i].of_node, "x-powers,dcdc-workmode", 242 ret = of_property_read_u32(rdev->dev.of_node,
243 "x-powers,dcdc-workmode",
263 &workmode); 244 &workmode);
264 if (!ret) { 245 if (!ret) {
265 if (axp20x_set_dcdc_workmode(rdev, i, workmode)) 246 if (axp20x_set_dcdc_workmode(rdev, i, workmode))
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 9c48fb32f660..b899947d839d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -632,49 +632,34 @@ static ssize_t regulator_bypass_show(struct device *dev,
632static DEVICE_ATTR(bypass, 0444, 632static DEVICE_ATTR(bypass, 0444,
633 regulator_bypass_show, NULL); 633 regulator_bypass_show, NULL);
634 634
635/*
636 * These are the only attributes are present for all regulators.
637 * Other attributes are a function of regulator functionality.
638 */
639static struct attribute *regulator_dev_attrs[] = {
640 &dev_attr_name.attr,
641 &dev_attr_num_users.attr,
642 &dev_attr_type.attr,
643 NULL,
644};
645ATTRIBUTE_GROUPS(regulator_dev);
646
647static void regulator_dev_release(struct device *dev)
648{
649 struct regulator_dev *rdev = dev_get_drvdata(dev);
650 kfree(rdev);
651}
652
653static struct class regulator_class = {
654 .name = "regulator",
655 .dev_release = regulator_dev_release,
656 .dev_groups = regulator_dev_groups,
657};
658
659/* Calculate the new optimum regulator operating mode based on the new total 635/* Calculate the new optimum regulator operating mode based on the new total
660 * consumer load. All locks held by caller */ 636 * consumer load. All locks held by caller */
661static void drms_uA_update(struct regulator_dev *rdev) 637static int drms_uA_update(struct regulator_dev *rdev)
662{ 638{
663 struct regulator *sibling; 639 struct regulator *sibling;
664 int current_uA = 0, output_uV, input_uV, err; 640 int current_uA = 0, output_uV, input_uV, err;
665 unsigned int mode; 641 unsigned int mode;
666 642
643 /*
644 * first check to see if we can set modes at all, otherwise just
645 * tell the consumer everything is OK.
646 */
667 err = regulator_check_drms(rdev); 647 err = regulator_check_drms(rdev);
668 if (err < 0 || !rdev->desc->ops->get_optimum_mode || 648 if (err < 0)
669 (!rdev->desc->ops->get_voltage && 649 return 0;
670 !rdev->desc->ops->get_voltage_sel) || 650
671 !rdev->desc->ops->set_mode) 651 if (!rdev->desc->ops->get_optimum_mode)
672 return; 652 return 0;
653
654 if (!rdev->desc->ops->set_mode)
655 return -EINVAL;
673 656
674 /* get output voltage */ 657 /* get output voltage */
675 output_uV = _regulator_get_voltage(rdev); 658 output_uV = _regulator_get_voltage(rdev);
676 if (output_uV <= 0) 659 if (output_uV <= 0) {
677 return; 660 rdev_err(rdev, "invalid output voltage found\n");
661 return -EINVAL;
662 }
678 663
679 /* get input voltage */ 664 /* get input voltage */
680 input_uV = 0; 665 input_uV = 0;
@@ -682,8 +667,10 @@ static void drms_uA_update(struct regulator_dev *rdev)
682 input_uV = regulator_get_voltage(rdev->supply); 667 input_uV = regulator_get_voltage(rdev->supply);
683 if (input_uV <= 0) 668 if (input_uV <= 0)
684 input_uV = rdev->constraints->input_uV; 669 input_uV = rdev->constraints->input_uV;
685 if (input_uV <= 0) 670 if (input_uV <= 0) {
686 return; 671 rdev_err(rdev, "invalid input voltage found\n");
672 return -EINVAL;
673 }
687 674
688 /* calc total requested load */ 675 /* calc total requested load */
689 list_for_each_entry(sibling, &rdev->consumer_list, list) 676 list_for_each_entry(sibling, &rdev->consumer_list, list)
@@ -695,8 +682,17 @@ static void drms_uA_update(struct regulator_dev *rdev)
695 682
696 /* check the new mode is allowed */ 683 /* check the new mode is allowed */
697 err = regulator_mode_constrain(rdev, &mode); 684 err = regulator_mode_constrain(rdev, &mode);
698 if (err == 0) 685 if (err < 0) {
699 rdev->desc->ops->set_mode(rdev, mode); 686 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
687 current_uA, input_uV, output_uV);
688 return err;
689 }
690
691 err = rdev->desc->ops->set_mode(rdev, mode);
692 if (err < 0)
693 rdev_err(rdev, "failed to set optimum mode %x\n", mode);
694
695 return err;
700} 696}
701 697
702static int suspend_set_state(struct regulator_dev *rdev, 698static int suspend_set_state(struct regulator_dev *rdev,
@@ -3026,75 +3022,13 @@ EXPORT_SYMBOL_GPL(regulator_get_mode);
3026int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) 3022int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
3027{ 3023{
3028 struct regulator_dev *rdev = regulator->rdev; 3024 struct regulator_dev *rdev = regulator->rdev;
3029 struct regulator *consumer; 3025 int ret;
3030 int ret, output_uV, input_uV = 0, total_uA_load = 0;
3031 unsigned int mode;
3032
3033 if (rdev->supply)
3034 input_uV = regulator_get_voltage(rdev->supply);
3035 3026
3036 mutex_lock(&rdev->mutex); 3027 mutex_lock(&rdev->mutex);
3037
3038 /*
3039 * first check to see if we can set modes at all, otherwise just
3040 * tell the consumer everything is OK.
3041 */
3042 regulator->uA_load = uA_load; 3028 regulator->uA_load = uA_load;
3043 ret = regulator_check_drms(rdev); 3029 ret = drms_uA_update(rdev);
3044 if (ret < 0) {
3045 ret = 0;
3046 goto out;
3047 }
3048
3049 if (!rdev->desc->ops->get_optimum_mode)
3050 goto out;
3051
3052 /*
3053 * we can actually do this so any errors are indicators of
3054 * potential real failure.
3055 */
3056 ret = -EINVAL;
3057
3058 if (!rdev->desc->ops->set_mode)
3059 goto out;
3060
3061 /* get output voltage */
3062 output_uV = _regulator_get_voltage(rdev);
3063 if (output_uV <= 0) {
3064 rdev_err(rdev, "invalid output voltage found\n");
3065 goto out;
3066 }
3067
3068 /* No supply? Use constraint voltage */
3069 if (input_uV <= 0)
3070 input_uV = rdev->constraints->input_uV;
3071 if (input_uV <= 0) {
3072 rdev_err(rdev, "invalid input voltage found\n");
3073 goto out;
3074 }
3075
3076 /* calc total requested load for this regulator */
3077 list_for_each_entry(consumer, &rdev->consumer_list, list)
3078 total_uA_load += consumer->uA_load;
3079
3080 mode = rdev->desc->ops->get_optimum_mode(rdev,
3081 input_uV, output_uV,
3082 total_uA_load);
3083 ret = regulator_mode_constrain(rdev, &mode);
3084 if (ret < 0) {
3085 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
3086 total_uA_load, input_uV, output_uV);
3087 goto out;
3088 }
3089
3090 ret = rdev->desc->ops->set_mode(rdev, mode);
3091 if (ret < 0) {
3092 rdev_err(rdev, "failed to set optimum mode %x\n", mode);
3093 goto out;
3094 }
3095 ret = mode;
3096out:
3097 mutex_unlock(&rdev->mutex); 3030 mutex_unlock(&rdev->mutex);
3031
3098 return ret; 3032 return ret;
3099} 3033}
3100EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); 3034EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
@@ -3436,126 +3370,136 @@ int regulator_mode_to_status(unsigned int mode)
3436} 3370}
3437EXPORT_SYMBOL_GPL(regulator_mode_to_status); 3371EXPORT_SYMBOL_GPL(regulator_mode_to_status);
3438 3372
3373static struct attribute *regulator_dev_attrs[] = {
3374 &dev_attr_name.attr,
3375 &dev_attr_num_users.attr,
3376 &dev_attr_type.attr,
3377 &dev_attr_microvolts.attr,
3378 &dev_attr_microamps.attr,
3379 &dev_attr_opmode.attr,
3380 &dev_attr_state.attr,
3381 &dev_attr_status.attr,
3382 &dev_attr_bypass.attr,
3383 &dev_attr_requested_microamps.attr,
3384 &dev_attr_min_microvolts.attr,
3385 &dev_attr_max_microvolts.attr,
3386 &dev_attr_min_microamps.attr,
3387 &dev_attr_max_microamps.attr,
3388 &dev_attr_suspend_standby_state.attr,
3389 &dev_attr_suspend_mem_state.attr,
3390 &dev_attr_suspend_disk_state.attr,
3391 &dev_attr_suspend_standby_microvolts.attr,
3392 &dev_attr_suspend_mem_microvolts.attr,
3393 &dev_attr_suspend_disk_microvolts.attr,
3394 &dev_attr_suspend_standby_mode.attr,
3395 &dev_attr_suspend_mem_mode.attr,
3396 &dev_attr_suspend_disk_mode.attr,
3397 NULL
3398};
3399
3439/* 3400/*
3440 * To avoid cluttering sysfs (and memory) with useless state, only 3401 * To avoid cluttering sysfs (and memory) with useless state, only
3441 * create attributes that can be meaningfully displayed. 3402 * create attributes that can be meaningfully displayed.
3442 */ 3403 */
3443static int add_regulator_attributes(struct regulator_dev *rdev) 3404static umode_t regulator_attr_is_visible(struct kobject *kobj,
3405 struct attribute *attr, int idx)
3444{ 3406{
3445 struct device *dev = &rdev->dev; 3407 struct device *dev = kobj_to_dev(kobj);
3408 struct regulator_dev *rdev = container_of(dev, struct regulator_dev, dev);
3446 const struct regulator_ops *ops = rdev->desc->ops; 3409 const struct regulator_ops *ops = rdev->desc->ops;
3447 int status = 0; 3410 umode_t mode = attr->mode;
3411
3412 /* these three are always present */
3413 if (attr == &dev_attr_name.attr ||
3414 attr == &dev_attr_num_users.attr ||
3415 attr == &dev_attr_type.attr)
3416 return mode;
3448 3417
3449 /* some attributes need specific methods to be displayed */ 3418 /* some attributes need specific methods to be displayed */
3450 if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || 3419 if (attr == &dev_attr_microvolts.attr) {
3451 (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || 3420 if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
3452 (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || 3421 (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
3453 (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) { 3422 (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
3454 status = device_create_file(dev, &dev_attr_microvolts); 3423 (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1))
3455 if (status < 0) 3424 return mode;
3456 return status; 3425 return 0;
3457 }
3458 if (ops->get_current_limit) {
3459 status = device_create_file(dev, &dev_attr_microamps);
3460 if (status < 0)
3461 return status;
3462 }
3463 if (ops->get_mode) {
3464 status = device_create_file(dev, &dev_attr_opmode);
3465 if (status < 0)
3466 return status;
3467 }
3468 if (rdev->ena_pin || ops->is_enabled) {
3469 status = device_create_file(dev, &dev_attr_state);
3470 if (status < 0)
3471 return status;
3472 }
3473 if (ops->get_status) {
3474 status = device_create_file(dev, &dev_attr_status);
3475 if (status < 0)
3476 return status;
3477 }
3478 if (ops->get_bypass) {
3479 status = device_create_file(dev, &dev_attr_bypass);
3480 if (status < 0)
3481 return status;
3482 } 3426 }
3483 3427
3428 if (attr == &dev_attr_microamps.attr)
3429 return ops->get_current_limit ? mode : 0;
3430
3431 if (attr == &dev_attr_opmode.attr)
3432 return ops->get_mode ? mode : 0;
3433
3434 if (attr == &dev_attr_state.attr)
3435 return (rdev->ena_pin || ops->is_enabled) ? mode : 0;
3436
3437 if (attr == &dev_attr_status.attr)
3438 return ops->get_status ? mode : 0;
3439
3440 if (attr == &dev_attr_bypass.attr)
3441 return ops->get_bypass ? mode : 0;
3442
3484 /* some attributes are type-specific */ 3443 /* some attributes are type-specific */
3485 if (rdev->desc->type == REGULATOR_CURRENT) { 3444 if (attr == &dev_attr_requested_microamps.attr)
3486 status = device_create_file(dev, &dev_attr_requested_microamps); 3445 return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
3487 if (status < 0)
3488 return status;
3489 }
3490 3446
3491 /* all the other attributes exist to support constraints; 3447 /* all the other attributes exist to support constraints;
3492 * don't show them if there are no constraints, or if the 3448 * don't show them if there are no constraints, or if the
3493 * relevant supporting methods are missing. 3449 * relevant supporting methods are missing.
3494 */ 3450 */
3495 if (!rdev->constraints) 3451 if (!rdev->constraints)
3496 return status; 3452 return 0;
3497 3453
3498 /* constraints need specific supporting methods */ 3454 /* constraints need specific supporting methods */
3499 if (ops->set_voltage || ops->set_voltage_sel) { 3455 if (attr == &dev_attr_min_microvolts.attr ||
3500 status = device_create_file(dev, &dev_attr_min_microvolts); 3456 attr == &dev_attr_max_microvolts.attr)
3501 if (status < 0) 3457 return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0;
3502 return status; 3458
3503 status = device_create_file(dev, &dev_attr_max_microvolts); 3459 if (attr == &dev_attr_min_microamps.attr ||
3504 if (status < 0) 3460 attr == &dev_attr_max_microamps.attr)
3505 return status; 3461 return ops->set_current_limit ? mode : 0;
3506 }
3507 if (ops->set_current_limit) {
3508 status = device_create_file(dev, &dev_attr_min_microamps);
3509 if (status < 0)
3510 return status;
3511 status = device_create_file(dev, &dev_attr_max_microamps);
3512 if (status < 0)
3513 return status;
3514 }
3515
3516 status = device_create_file(dev, &dev_attr_suspend_standby_state);
3517 if (status < 0)
3518 return status;
3519 status = device_create_file(dev, &dev_attr_suspend_mem_state);
3520 if (status < 0)
3521 return status;
3522 status = device_create_file(dev, &dev_attr_suspend_disk_state);
3523 if (status < 0)
3524 return status;
3525 3462
3526 if (ops->set_suspend_voltage) { 3463 if (attr == &dev_attr_suspend_standby_state.attr ||
3527 status = device_create_file(dev, 3464 attr == &dev_attr_suspend_mem_state.attr ||
3528 &dev_attr_suspend_standby_microvolts); 3465 attr == &dev_attr_suspend_disk_state.attr)
3529 if (status < 0) 3466 return mode;
3530 return status; 3467
3531 status = device_create_file(dev, 3468 if (attr == &dev_attr_suspend_standby_microvolts.attr ||
3532 &dev_attr_suspend_mem_microvolts); 3469 attr == &dev_attr_suspend_mem_microvolts.attr ||
3533 if (status < 0) 3470 attr == &dev_attr_suspend_disk_microvolts.attr)
3534 return status; 3471 return ops->set_suspend_voltage ? mode : 0;
3535 status = device_create_file(dev, 3472
3536 &dev_attr_suspend_disk_microvolts); 3473 if (attr == &dev_attr_suspend_standby_mode.attr ||
3537 if (status < 0) 3474 attr == &dev_attr_suspend_mem_mode.attr ||
3538 return status; 3475 attr == &dev_attr_suspend_disk_mode.attr)
3539 } 3476 return ops->set_suspend_mode ? mode : 0;
3540 3477
3541 if (ops->set_suspend_mode) { 3478 return mode;
3542 status = device_create_file(dev,
3543 &dev_attr_suspend_standby_mode);
3544 if (status < 0)
3545 return status;
3546 status = device_create_file(dev,
3547 &dev_attr_suspend_mem_mode);
3548 if (status < 0)
3549 return status;
3550 status = device_create_file(dev,
3551 &dev_attr_suspend_disk_mode);
3552 if (status < 0)
3553 return status;
3554 }
3555
3556 return status;
3557} 3479}
3558 3480
3481static const struct attribute_group regulator_dev_group = {
3482 .attrs = regulator_dev_attrs,
3483 .is_visible = regulator_attr_is_visible,
3484};
3485
3486static const struct attribute_group *regulator_dev_groups[] = {
3487 &regulator_dev_group,
3488 NULL
3489};
3490
3491static void regulator_dev_release(struct device *dev)
3492{
3493 struct regulator_dev *rdev = dev_get_drvdata(dev);
3494 kfree(rdev);
3495}
3496
3497static struct class regulator_class = {
3498 .name = "regulator",
3499 .dev_release = regulator_dev_release,
3500 .dev_groups = regulator_dev_groups,
3501};
3502
3559static void rdev_init_debugfs(struct regulator_dev *rdev) 3503static void rdev_init_debugfs(struct regulator_dev *rdev)
3560{ 3504{
3561 rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); 3505 rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
@@ -3575,7 +3519,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
3575/** 3519/**
3576 * regulator_register - register regulator 3520 * regulator_register - register regulator
3577 * @regulator_desc: regulator to register 3521 * @regulator_desc: regulator to register
3578 * @config: runtime configuration for regulator 3522 * @cfg: runtime configuration for regulator
3579 * 3523 *
3580 * Called by regulator drivers to register a regulator. 3524 * Called by regulator drivers to register a regulator.
3581 * Returns a valid pointer to struct regulator_dev on success 3525 * Returns a valid pointer to struct regulator_dev on success
@@ -3583,20 +3527,21 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
3583 */ 3527 */
3584struct regulator_dev * 3528struct regulator_dev *
3585regulator_register(const struct regulator_desc *regulator_desc, 3529regulator_register(const struct regulator_desc *regulator_desc,
3586 const struct regulator_config *config) 3530 const struct regulator_config *cfg)
3587{ 3531{
3588 const struct regulation_constraints *constraints = NULL; 3532 const struct regulation_constraints *constraints = NULL;
3589 const struct regulator_init_data *init_data; 3533 const struct regulator_init_data *init_data;
3590 static atomic_t regulator_no = ATOMIC_INIT(0); 3534 struct regulator_config *config = NULL;
3535 static atomic_t regulator_no = ATOMIC_INIT(-1);
3591 struct regulator_dev *rdev; 3536 struct regulator_dev *rdev;
3592 struct device *dev; 3537 struct device *dev;
3593 int ret, i; 3538 int ret, i;
3594 const char *supply = NULL; 3539 const char *supply = NULL;
3595 3540
3596 if (regulator_desc == NULL || config == NULL) 3541 if (regulator_desc == NULL || cfg == NULL)
3597 return ERR_PTR(-EINVAL); 3542 return ERR_PTR(-EINVAL);
3598 3543
3599 dev = config->dev; 3544 dev = cfg->dev;
3600 WARN_ON(!dev); 3545 WARN_ON(!dev);
3601 3546
3602 if (regulator_desc->name == NULL || regulator_desc->ops == NULL) 3547 if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
@@ -3626,7 +3571,17 @@ regulator_register(const struct regulator_desc *regulator_desc,
3626 if (rdev == NULL) 3571 if (rdev == NULL)
3627 return ERR_PTR(-ENOMEM); 3572 return ERR_PTR(-ENOMEM);
3628 3573
3629 init_data = regulator_of_get_init_data(dev, regulator_desc, 3574 /*
3575 * Duplicate the config so the driver could override it after
3576 * parsing init data.
3577 */
3578 config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
3579 if (config == NULL) {
3580 kfree(rdev);
3581 return ERR_PTR(-ENOMEM);
3582 }
3583
3584 init_data = regulator_of_get_init_data(dev, regulator_desc, config,
3630 &rdev->dev.of_node); 3585 &rdev->dev.of_node);
3631 if (!init_data) { 3586 if (!init_data) {
3632 init_data = config->init_data; 3587 init_data = config->init_data;
@@ -3660,8 +3615,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
3660 /* register with sysfs */ 3615 /* register with sysfs */
3661 rdev->dev.class = &regulator_class; 3616 rdev->dev.class = &regulator_class;
3662 rdev->dev.parent = dev; 3617 rdev->dev.parent = dev;
3663 dev_set_name(&rdev->dev, "regulator.%d", 3618 dev_set_name(&rdev->dev, "regulator.%lu",
3664 atomic_inc_return(&regulator_no) - 1); 3619 (unsigned long) atomic_inc_return(&regulator_no));
3665 ret = device_register(&rdev->dev); 3620 ret = device_register(&rdev->dev);
3666 if (ret != 0) { 3621 if (ret != 0) {
3667 put_device(&rdev->dev); 3622 put_device(&rdev->dev);
@@ -3694,11 +3649,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
3694 if (ret < 0) 3649 if (ret < 0)
3695 goto scrub; 3650 goto scrub;
3696 3651
3697 /* add attributes supported by this regulator */
3698 ret = add_regulator_attributes(rdev);
3699 if (ret < 0)
3700 goto scrub;
3701
3702 if (init_data && init_data->supply_regulator) 3652 if (init_data && init_data->supply_regulator)
3703 supply = init_data->supply_regulator; 3653 supply = init_data->supply_regulator;
3704 else if (regulator_desc->supply_name) 3654 else if (regulator_desc->supply_name)
@@ -3754,6 +3704,7 @@ add_dev:
3754 rdev_init_debugfs(rdev); 3704 rdev_init_debugfs(rdev);
3755out: 3705out:
3756 mutex_unlock(&regulator_list_mutex); 3706 mutex_unlock(&regulator_list_mutex);
3707 kfree(config);
3757 return rdev; 3708 return rdev;
3758 3709
3759unset_supplies: 3710unset_supplies:
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index c78d2106d6cb..01343419555e 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -24,6 +24,7 @@
24#include <linux/regmap.h> 24#include <linux/regmap.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/of_gpio.h>
27#include <linux/regulator/of_regulator.h> 28#include <linux/regulator/of_regulator.h>
28#include <linux/regulator/da9211.h> 29#include <linux/regulator/da9211.h>
29#include "da9211-regulator.h" 30#include "da9211-regulator.h"
@@ -276,7 +277,10 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
276 continue; 277 continue;
277 278
278 pdata->init_data[n] = da9211_matches[i].init_data; 279 pdata->init_data[n] = da9211_matches[i].init_data;
279 280 pdata->reg_node[n] = da9211_matches[i].of_node;
281 pdata->gpio_ren[n] =
282 of_get_named_gpio(da9211_matches[i].of_node,
283 "enable-gpios", 0);
280 n++; 284 n++;
281 } 285 }
282 286
@@ -364,7 +368,15 @@ static int da9211_regulator_init(struct da9211 *chip)
364 config.dev = chip->dev; 368 config.dev = chip->dev;
365 config.driver_data = chip; 369 config.driver_data = chip;
366 config.regmap = chip->regmap; 370 config.regmap = chip->regmap;
367 config.of_node = chip->dev->of_node; 371 config.of_node = chip->pdata->reg_node[i];
372
373 if (gpio_is_valid(chip->pdata->gpio_ren[i])) {
374 config.ena_gpio = chip->pdata->gpio_ren[i];
375 config.ena_gpio_initialized = true;
376 } else {
377 config.ena_gpio = -EINVAL;
378 config.ena_gpio_initialized = false;
379 }
368 380
369 chip->rdev[i] = devm_regulator_register(chip->dev, 381 chip->rdev[i] = devm_regulator_register(chip->dev,
370 &da9211_regulators[i], &config); 382 &da9211_regulators[i], &config);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 6c43ab2d5121..3c25db89a021 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -147,7 +147,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
147 return REGULATOR_MODE_NORMAL; 147 return REGULATOR_MODE_NORMAL;
148} 148}
149 149
150static int slew_rates[] = { 150static const int slew_rates[] = {
151 64000, 151 64000,
152 32000, 152 32000,
153 16000, 153 16000,
@@ -296,7 +296,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
296 return PTR_ERR_OR_ZERO(di->rdev); 296 return PTR_ERR_OR_ZERO(di->rdev);
297} 297}
298 298
299static struct regmap_config fan53555_regmap_config = { 299static const struct regmap_config fan53555_regmap_config = {
300 .reg_bits = 8, 300 .reg_bits = 8,
301 .val_bits = 8, 301 .val_bits = 8,
302}; 302};
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 80ba2a35a04b..c74ac8734023 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -38,11 +38,13 @@ struct regulator {
38#ifdef CONFIG_OF 38#ifdef CONFIG_OF
39struct regulator_init_data *regulator_of_get_init_data(struct device *dev, 39struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
40 const struct regulator_desc *desc, 40 const struct regulator_desc *desc,
41 struct regulator_config *config,
41 struct device_node **node); 42 struct device_node **node);
42#else 43#else
43static inline struct regulator_init_data * 44static inline struct regulator_init_data *
44regulator_of_get_init_data(struct device *dev, 45regulator_of_get_init_data(struct device *dev,
45 const struct regulator_desc *desc, 46 const struct regulator_desc *desc,
47 struct regulator_config *config,
46 struct device_node **node) 48 struct device_node **node)
47{ 49{
48 return NULL; 50 return NULL;
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 92fefd98da58..6e3a15fe00f1 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -177,8 +177,10 @@ static int isl9305_i2c_probe(struct i2c_client *i2c,
177 177
178#ifdef CONFIG_OF 178#ifdef CONFIG_OF
179static const struct of_device_id isl9305_dt_ids[] = { 179static const struct of_device_id isl9305_dt_ids[] = {
180 { .compatible = "isl,isl9305" }, 180 { .compatible = "isl,isl9305" }, /* for backward compat., don't use */
181 { .compatible = "isl,isl9305h" }, 181 { .compatible = "isil,isl9305" },
182 { .compatible = "isl,isl9305h" }, /* for backward compat., don't use */
183 { .compatible = "isil,isl9305h" },
182 {}, 184 {},
183}; 185};
184#endif 186#endif
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 021d64d856bb..3de328ab41f3 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -106,7 +106,6 @@ struct lp872x {
106 struct device *dev; 106 struct device *dev;
107 enum lp872x_id chipid; 107 enum lp872x_id chipid;
108 struct lp872x_platform_data *pdata; 108 struct lp872x_platform_data *pdata;
109 struct regulator_dev **regulators;
110 int num_regulators; 109 int num_regulators;
111 enum lp872x_dvs_state dvs_pin; 110 enum lp872x_dvs_state dvs_pin;
112 int dvs_gpio; 111 int dvs_gpio;
@@ -801,8 +800,6 @@ static int lp872x_regulator_register(struct lp872x *lp)
801 dev_err(lp->dev, "regulator register err"); 800 dev_err(lp->dev, "regulator register err");
802 return PTR_ERR(rdev); 801 return PTR_ERR(rdev);
803 } 802 }
804
805 *(lp->regulators + i) = rdev;
806 } 803 }
807 804
808 return 0; 805 return 0;
@@ -906,7 +903,7 @@ static struct lp872x_platform_data
906static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) 903static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
907{ 904{
908 struct lp872x *lp; 905 struct lp872x *lp;
909 int ret, size, num_regulators; 906 int ret;
910 const int lp872x_num_regulators[] = { 907 const int lp872x_num_regulators[] = {
911 [LP8720] = LP8720_NUM_REGULATORS, 908 [LP8720] = LP8720_NUM_REGULATORS,
912 [LP8725] = LP8725_NUM_REGULATORS, 909 [LP8725] = LP8725_NUM_REGULATORS,
@@ -918,38 +915,27 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
918 915
919 lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); 916 lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
920 if (!lp) 917 if (!lp)
921 goto err_mem; 918 return -ENOMEM;
922
923 num_regulators = lp872x_num_regulators[id->driver_data];
924 size = sizeof(struct regulator_dev *) * num_regulators;
925 919
926 lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL); 920 lp->num_regulators = lp872x_num_regulators[id->driver_data];
927 if (!lp->regulators)
928 goto err_mem;
929 921
930 lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config); 922 lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
931 if (IS_ERR(lp->regmap)) { 923 if (IS_ERR(lp->regmap)) {
932 ret = PTR_ERR(lp->regmap); 924 ret = PTR_ERR(lp->regmap);
933 dev_err(&cl->dev, "regmap init i2c err: %d\n", ret); 925 dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
934 goto err_dev; 926 return ret;
935 } 927 }
936 928
937 lp->dev = &cl->dev; 929 lp->dev = &cl->dev;
938 lp->pdata = dev_get_platdata(&cl->dev); 930 lp->pdata = dev_get_platdata(&cl->dev);
939 lp->chipid = id->driver_data; 931 lp->chipid = id->driver_data;
940 lp->num_regulators = num_regulators;
941 i2c_set_clientdata(cl, lp); 932 i2c_set_clientdata(cl, lp);
942 933
943 ret = lp872x_config(lp); 934 ret = lp872x_config(lp);
944 if (ret) 935 if (ret)
945 goto err_dev; 936 return ret;
946 937
947 return lp872x_regulator_register(lp); 938 return lp872x_regulator_register(lp);
948
949err_mem:
950 return -ENOMEM;
951err_dev:
952 return ret;
953} 939}
954 940
955static const struct of_device_id lp872x_dt_ids[] = { 941static const struct of_device_id lp872x_dt_ids[] = {
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index bf9a44c5fdd2..b3678d289619 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -103,6 +103,8 @@ static struct regulator_ops max14577_charger_ops = {
103static const struct regulator_desc max14577_supported_regulators[] = { 103static const struct regulator_desc max14577_supported_regulators[] = {
104 [MAX14577_SAFEOUT] = { 104 [MAX14577_SAFEOUT] = {
105 .name = "SAFEOUT", 105 .name = "SAFEOUT",
106 .of_match = of_match_ptr("SAFEOUT"),
107 .regulators_node = of_match_ptr("regulators"),
106 .id = MAX14577_SAFEOUT, 108 .id = MAX14577_SAFEOUT,
107 .ops = &max14577_safeout_ops, 109 .ops = &max14577_safeout_ops,
108 .type = REGULATOR_VOLTAGE, 110 .type = REGULATOR_VOLTAGE,
@@ -114,6 +116,8 @@ static const struct regulator_desc max14577_supported_regulators[] = {
114 }, 116 },
115 [MAX14577_CHARGER] = { 117 [MAX14577_CHARGER] = {
116 .name = "CHARGER", 118 .name = "CHARGER",
119 .of_match = of_match_ptr("CHARGER"),
120 .regulators_node = of_match_ptr("regulators"),
117 .id = MAX14577_CHARGER, 121 .id = MAX14577_CHARGER,
118 .ops = &max14577_charger_ops, 122 .ops = &max14577_charger_ops,
119 .type = REGULATOR_CURRENT, 123 .type = REGULATOR_CURRENT,
@@ -137,6 +141,8 @@ static struct regulator_ops max77836_ldo_ops = {
137static const struct regulator_desc max77836_supported_regulators[] = { 141static const struct regulator_desc max77836_supported_regulators[] = {
138 [MAX14577_SAFEOUT] = { 142 [MAX14577_SAFEOUT] = {
139 .name = "SAFEOUT", 143 .name = "SAFEOUT",
144 .of_match = of_match_ptr("SAFEOUT"),
145 .regulators_node = of_match_ptr("regulators"),
140 .id = MAX14577_SAFEOUT, 146 .id = MAX14577_SAFEOUT,
141 .ops = &max14577_safeout_ops, 147 .ops = &max14577_safeout_ops,
142 .type = REGULATOR_VOLTAGE, 148 .type = REGULATOR_VOLTAGE,
@@ -148,6 +154,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
148 }, 154 },
149 [MAX14577_CHARGER] = { 155 [MAX14577_CHARGER] = {
150 .name = "CHARGER", 156 .name = "CHARGER",
157 .of_match = of_match_ptr("CHARGER"),
158 .regulators_node = of_match_ptr("regulators"),
151 .id = MAX14577_CHARGER, 159 .id = MAX14577_CHARGER,
152 .ops = &max14577_charger_ops, 160 .ops = &max14577_charger_ops,
153 .type = REGULATOR_CURRENT, 161 .type = REGULATOR_CURRENT,
@@ -157,6 +165,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
157 }, 165 },
158 [MAX77836_LDO1] = { 166 [MAX77836_LDO1] = {
159 .name = "LDO1", 167 .name = "LDO1",
168 .of_match = of_match_ptr("LDO1"),
169 .regulators_node = of_match_ptr("regulators"),
160 .id = MAX77836_LDO1, 170 .id = MAX77836_LDO1,
161 .ops = &max77836_ldo_ops, 171 .ops = &max77836_ldo_ops,
162 .type = REGULATOR_VOLTAGE, 172 .type = REGULATOR_VOLTAGE,
@@ -171,6 +181,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
171 }, 181 },
172 [MAX77836_LDO2] = { 182 [MAX77836_LDO2] = {
173 .name = "LDO2", 183 .name = "LDO2",
184 .of_match = of_match_ptr("LDO2"),
185 .regulators_node = of_match_ptr("regulators"),
174 .id = MAX77836_LDO2, 186 .id = MAX77836_LDO2,
175 .ops = &max77836_ldo_ops, 187 .ops = &max77836_ldo_ops,
176 .type = REGULATOR_VOLTAGE, 188 .type = REGULATOR_VOLTAGE,
@@ -198,43 +210,6 @@ static struct of_regulator_match max77836_regulator_matches[] = {
198 { .name = "LDO2", }, 210 { .name = "LDO2", },
199}; 211};
200 212
201static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
202 enum maxim_device_type dev_type)
203{
204 int ret;
205 struct device_node *np;
206 struct of_regulator_match *regulator_matches;
207 unsigned int regulator_matches_size;
208
209 np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
210 if (!np) {
211 dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
212 return -EINVAL;
213 }
214
215 switch (dev_type) {
216 case MAXIM_DEVICE_TYPE_MAX77836:
217 regulator_matches = max77836_regulator_matches;
218 regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches);
219 break;
220 case MAXIM_DEVICE_TYPE_MAX14577:
221 default:
222 regulator_matches = max14577_regulator_matches;
223 regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches);
224 }
225
226 ret = of_regulator_match(&pdev->dev, np, regulator_matches,
227 regulator_matches_size);
228 if (ret < 0)
229 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
230 else
231 ret = 0;
232
233 of_node_put(np);
234
235 return ret;
236}
237
238static inline struct regulator_init_data *match_init_data(int index, 213static inline struct regulator_init_data *match_init_data(int index,
239 enum maxim_device_type dev_type) 214 enum maxim_device_type dev_type)
240{ 215{
@@ -261,11 +236,6 @@ static inline struct device_node *match_of_node(int index,
261 } 236 }
262} 237}
263#else /* CONFIG_OF */ 238#else /* CONFIG_OF */
264static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
265 enum maxim_device_type dev_type)
266{
267 return 0;
268}
269static inline struct regulator_init_data *match_init_data(int index, 239static inline struct regulator_init_data *match_init_data(int index,
270 enum maxim_device_type dev_type) 240 enum maxim_device_type dev_type)
271{ 241{
@@ -308,16 +278,12 @@ static int max14577_regulator_probe(struct platform_device *pdev)
308{ 278{
309 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); 279 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
310 struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); 280 struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
311 int i, ret; 281 int i, ret = 0;
312 struct regulator_config config = {}; 282 struct regulator_config config = {};
313 const struct regulator_desc *supported_regulators; 283 const struct regulator_desc *supported_regulators;
314 unsigned int supported_regulators_size; 284 unsigned int supported_regulators_size;
315 enum maxim_device_type dev_type = max14577->dev_type; 285 enum maxim_device_type dev_type = max14577->dev_type;
316 286
317 ret = max14577_regulator_dt_parse_pdata(pdev, dev_type);
318 if (ret)
319 return ret;
320
321 switch (dev_type) { 287 switch (dev_type) {
322 case MAXIM_DEVICE_TYPE_MAX77836: 288 case MAXIM_DEVICE_TYPE_MAX77836:
323 supported_regulators = max77836_supported_regulators; 289 supported_regulators = max77836_supported_regulators;
@@ -329,7 +295,7 @@ static int max14577_regulator_probe(struct platform_device *pdev)
329 supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators); 295 supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators);
330 } 296 }
331 297
332 config.dev = &pdev->dev; 298 config.dev = max14577->dev;
333 config.driver_data = max14577; 299 config.driver_data = max14577;
334 300
335 for (i = 0; i < supported_regulators_size; i++) { 301 for (i = 0; i < supported_regulators_size; i++) {
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index 10d206266ac2..15fb1416bfbd 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -26,6 +26,7 @@
26#include <linux/bug.h> 26#include <linux/bug.h>
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_gpio.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/regulator/driver.h> 32#include <linux/regulator/driver.h>
@@ -46,6 +47,11 @@
46#define MAX77686_DVS_UVSTEP 12500 47#define MAX77686_DVS_UVSTEP 12500
47 48
48/* 49/*
50 * Value for configuring buck[89] and LDO{20,21,22} as GPIO control.
51 * It is the same as 'off' for other regulators.
52 */
53#define MAX77686_GPIO_CONTROL 0x0
54/*
49 * Values used for configuring LDOs and bucks. 55 * Values used for configuring LDOs and bucks.
50 * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26 56 * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26
51 */ 57 */
@@ -82,6 +88,8 @@ enum max77686_ramp_rate {
82}; 88};
83 89
84struct max77686_data { 90struct max77686_data {
91 u64 gpio_enabled:MAX77686_REGULATORS;
92
85 /* Array indexed by regulator id */ 93 /* Array indexed by regulator id */
86 unsigned int opmode[MAX77686_REGULATORS]; 94 unsigned int opmode[MAX77686_REGULATORS];
87}; 95};
@@ -100,6 +108,26 @@ static unsigned int max77686_get_opmode_shift(int id)
100 } 108 }
101} 109}
102 110
111/*
112 * When regulator is configured for GPIO control then it
113 * replaces "normal" mode. Any change from low power mode to normal
114 * should actually change to GPIO control.
115 * Map normal mode to proper value for such regulators.
116 */
117static unsigned int max77686_map_normal_mode(struct max77686_data *max77686,
118 int id)
119{
120 switch (id) {
121 case MAX77686_BUCK8:
122 case MAX77686_BUCK9:
123 case MAX77686_LDO20 ... MAX77686_LDO22:
124 if (max77686->gpio_enabled & (1 << id))
125 return MAX77686_GPIO_CONTROL;
126 }
127
128 return MAX77686_NORMAL;
129}
130
103/* Some BUCKs and LDOs supports Normal[ON/OFF] mode during suspend */ 131/* Some BUCKs and LDOs supports Normal[ON/OFF] mode during suspend */
104static int max77686_set_suspend_disable(struct regulator_dev *rdev) 132static int max77686_set_suspend_disable(struct regulator_dev *rdev)
105{ 133{
@@ -136,7 +164,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
136 val = MAX77686_LDO_LOWPOWER_PWRREQ; 164 val = MAX77686_LDO_LOWPOWER_PWRREQ;
137 break; 165 break;
138 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 166 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
139 val = MAX77686_NORMAL; 167 val = max77686_map_normal_mode(max77686, id);
140 break; 168 break;
141 default: 169 default:
142 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", 170 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -160,7 +188,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
160{ 188{
161 unsigned int val; 189 unsigned int val;
162 struct max77686_data *max77686 = rdev_get_drvdata(rdev); 190 struct max77686_data *max77686 = rdev_get_drvdata(rdev);
163 int ret; 191 int ret, id = rdev_get_id(rdev);
164 192
165 switch (mode) { 193 switch (mode) {
166 case REGULATOR_MODE_STANDBY: /* switch off */ 194 case REGULATOR_MODE_STANDBY: /* switch off */
@@ -170,7 +198,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
170 val = MAX77686_LDO_LOWPOWER_PWRREQ; 198 val = MAX77686_LDO_LOWPOWER_PWRREQ;
171 break; 199 break;
172 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 200 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
173 val = MAX77686_NORMAL; 201 val = max77686_map_normal_mode(max77686, id);
174 break; 202 break;
175 default: 203 default:
176 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", 204 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -184,7 +212,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
184 if (ret) 212 if (ret)
185 return ret; 213 return ret;
186 214
187 max77686->opmode[rdev_get_id(rdev)] = val; 215 max77686->opmode[id] = val;
188 return 0; 216 return 0;
189} 217}
190 218
@@ -197,7 +225,7 @@ static int max77686_enable(struct regulator_dev *rdev)
197 shift = max77686_get_opmode_shift(id); 225 shift = max77686_get_opmode_shift(id);
198 226
199 if (max77686->opmode[id] == MAX77686_OFF_PWRREQ) 227 if (max77686->opmode[id] == MAX77686_OFF_PWRREQ)
200 max77686->opmode[id] = MAX77686_NORMAL; 228 max77686->opmode[id] = max77686_map_normal_mode(max77686, id);
201 229
202 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 230 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
203 rdev->desc->enable_mask, 231 rdev->desc->enable_mask,
@@ -229,6 +257,36 @@ static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
229 MAX77686_RAMP_RATE_MASK, ramp_value << 6); 257 MAX77686_RAMP_RATE_MASK, ramp_value << 6);
230} 258}
231 259
260static int max77686_of_parse_cb(struct device_node *np,
261 const struct regulator_desc *desc,
262 struct regulator_config *config)
263{
264 struct max77686_data *max77686 = config->driver_data;
265
266 switch (desc->id) {
267 case MAX77686_BUCK8:
268 case MAX77686_BUCK9:
269 case MAX77686_LDO20 ... MAX77686_LDO22:
270 config->ena_gpio = of_get_named_gpio(np,
271 "maxim,ena-gpios", 0);
272 config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
273 config->ena_gpio_initialized = true;
274 break;
275 default:
276 return 0;
277 }
278
279 if (gpio_is_valid(config->ena_gpio)) {
280 max77686->gpio_enabled |= (1 << desc->id);
281
282 return regmap_update_bits(config->regmap, desc->enable_reg,
283 desc->enable_mask,
284 MAX77686_GPIO_CONTROL);
285 }
286
287 return 0;
288}
289
232static struct regulator_ops max77686_ops = { 290static struct regulator_ops max77686_ops = {
233 .list_voltage = regulator_list_voltage_linear, 291 .list_voltage = regulator_list_voltage_linear,
234 .map_voltage = regulator_map_voltage_linear, 292 .map_voltage = regulator_map_voltage_linear,
@@ -283,6 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
283 .name = "LDO"#num, \ 341 .name = "LDO"#num, \
284 .of_match = of_match_ptr("LDO"#num), \ 342 .of_match = of_match_ptr("LDO"#num), \
285 .regulators_node = of_match_ptr("voltage-regulators"), \ 343 .regulators_node = of_match_ptr("voltage-regulators"), \
344 .of_parse_cb = max77686_of_parse_cb, \
286 .id = MAX77686_LDO##num, \ 345 .id = MAX77686_LDO##num, \
287 .ops = &max77686_ops, \ 346 .ops = &max77686_ops, \
288 .type = REGULATOR_VOLTAGE, \ 347 .type = REGULATOR_VOLTAGE, \
@@ -355,6 +414,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
355 .name = "BUCK"#num, \ 414 .name = "BUCK"#num, \
356 .of_match = of_match_ptr("BUCK"#num), \ 415 .of_match = of_match_ptr("BUCK"#num), \
357 .regulators_node = of_match_ptr("voltage-regulators"), \ 416 .regulators_node = of_match_ptr("voltage-regulators"), \
417 .of_parse_cb = max77686_of_parse_cb, \
358 .id = MAX77686_BUCK##num, \ 418 .id = MAX77686_BUCK##num, \
359 .ops = &max77686_ops, \ 419 .ops = &max77686_ops, \
360 .type = REGULATOR_VOLTAGE, \ 420 .type = REGULATOR_VOLTAGE, \
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
new file mode 100644
index 000000000000..c132ef527cdd
--- /dev/null
+++ b/drivers/regulator/max77843.c
@@ -0,0 +1,227 @@
1/*
2 * max77843.c - Regulator driver for the Maxim MAX77843
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
6 * Author: Beomho Seo <beomho.seo@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/regulator/driver.h>
17#include <linux/regulator/machine.h>
18#include <linux/mfd/max77843-private.h>
19#include <linux/regulator/of_regulator.h>
20
21enum max77843_regulator_type {
22 MAX77843_SAFEOUT1 = 0,
23 MAX77843_SAFEOUT2,
24 MAX77843_CHARGER,
25
26 MAX77843_NUM,
27};
28
29static const unsigned int max77843_safeout_voltage_table[] = {
30 4850000,
31 4900000,
32 4950000,
33 3300000,
34};
35
36static int max77843_reg_is_enabled(struct regulator_dev *rdev)
37{
38 struct regmap *regmap = rdev->regmap;
39 int ret;
40 unsigned int reg;
41
42 ret = regmap_read(regmap, rdev->desc->enable_reg, &reg);
43 if (ret) {
 44 dev_err(&rdev->dev, "Failed to read charger register\n");
45 return ret;
46 }
47
48 return (reg & rdev->desc->enable_mask) == rdev->desc->enable_mask;
49}
50
51static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
52{
53 struct regmap *regmap = rdev->regmap;
54 unsigned int chg_min_uA = rdev->constraints->min_uA;
55 unsigned int chg_max_uA = rdev->constraints->max_uA;
56 unsigned int val;
57 int ret;
58 unsigned int reg, sel;
59
60 ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
61 if (ret) {
62 dev_err(&rdev->dev, "Failed to read charger register\n");
63 return ret;
64 }
65
66 sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
67
68 if (sel < 0x03)
69 sel = 0;
70 else
71 sel -= 2;
72
73 val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
74 if (val > chg_max_uA)
75 return -EINVAL;
76
77 return val;
78}
79
80static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
81 int min_uA, int max_uA)
82{
83 struct regmap *regmap = rdev->regmap;
84 unsigned int chg_min_uA = rdev->constraints->min_uA;
85 int sel = 0;
86
87 while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
88 sel++;
89
90 if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
91 return -EINVAL;
92
93 sel += 2;
94
95 return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
96}
97
98static struct regulator_ops max77843_charger_ops = {
99 .is_enabled = max77843_reg_is_enabled,
100 .enable = regulator_enable_regmap,
101 .disable = regulator_disable_regmap,
102 .get_current_limit = max77843_reg_get_current_limit,
103 .set_current_limit = max77843_reg_set_current_limit,
104};
105
106static struct regulator_ops max77843_regulator_ops = {
107 .is_enabled = regulator_is_enabled_regmap,
108 .enable = regulator_enable_regmap,
109 .disable = regulator_disable_regmap,
110 .list_voltage = regulator_list_voltage_table,
111 .get_voltage_sel = regulator_get_voltage_sel_regmap,
112 .set_voltage_sel = regulator_set_voltage_sel_regmap,
113};
114
115static const struct regulator_desc max77843_supported_regulators[] = {
116 [MAX77843_SAFEOUT1] = {
117 .name = "SAFEOUT1",
118 .id = MAX77843_SAFEOUT1,
119 .ops = &max77843_regulator_ops,
120 .of_match = of_match_ptr("SAFEOUT1"),
121 .regulators_node = of_match_ptr("regulators"),
122 .type = REGULATOR_VOLTAGE,
123 .owner = THIS_MODULE,
124 .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table),
125 .volt_table = max77843_safeout_voltage_table,
126 .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
127 .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1,
128 .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
129 .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK,
130 },
131 [MAX77843_SAFEOUT2] = {
132 .name = "SAFEOUT2",
133 .id = MAX77843_SAFEOUT2,
134 .ops = &max77843_regulator_ops,
135 .of_match = of_match_ptr("SAFEOUT2"),
136 .regulators_node = of_match_ptr("regulators"),
137 .type = REGULATOR_VOLTAGE,
138 .owner = THIS_MODULE,
139 .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table),
140 .volt_table = max77843_safeout_voltage_table,
141 .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
142 .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2,
143 .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
144 .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK,
145 },
146 [MAX77843_CHARGER] = {
147 .name = "CHARGER",
148 .id = MAX77843_CHARGER,
149 .ops = &max77843_charger_ops,
150 .of_match = of_match_ptr("CHARGER"),
151 .regulators_node = of_match_ptr("regulators"),
152 .type = REGULATOR_CURRENT,
153 .owner = THIS_MODULE,
154 .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00,
155 .enable_mask = MAX77843_CHG_MASK,
156 },
157};
158
159static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
160{
161 switch (reg_id) {
162 case MAX77843_SAFEOUT1:
163 case MAX77843_SAFEOUT2:
164 return max77843->regmap;
165 case MAX77843_CHARGER:
166 return max77843->regmap_chg;
167 default:
168 return max77843->regmap;
169 }
170}
171
172static int max77843_regulator_probe(struct platform_device *pdev)
173{
174 struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
175 struct regulator_config config = {};
176 int i;
177
178 config.dev = max77843->dev;
179 config.driver_data = max77843;
180
181 for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
182 struct regulator_dev *regulator;
183
184 config.regmap = max77843_get_regmap(max77843,
185 max77843_supported_regulators[i].id);
186
187 regulator = devm_regulator_register(&pdev->dev,
188 &max77843_supported_regulators[i], &config);
189 if (IS_ERR(regulator)) {
190 dev_err(&pdev->dev,
 191 "Failed to register regulator-%d\n", i);
192 return PTR_ERR(regulator);
193 }
194 }
195
196 return 0;
197}
198
199static const struct platform_device_id max77843_regulator_id[] = {
200 { "max77843-regulator", },
201 { /* sentinel */ },
202};
203
204static struct platform_driver max77843_regulator_driver = {
205 .driver = {
206 .name = "max77843-regulator",
207 },
208 .probe = max77843_regulator_probe,
209 .id_table = max77843_regulator_id,
210};
211
212static int __init max77843_regulator_init(void)
213{
214 return platform_driver_register(&max77843_regulator_driver);
215}
216subsys_initcall(max77843_regulator_init);
217
218static void __exit max77843_regulator_exit(void)
219{
220 platform_driver_unregister(&max77843_regulator_driver);
221}
222module_exit(max77843_regulator_exit);
223
224MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
225MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
226MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
227MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index c8bddcc8f911..81229579ece9 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -115,7 +115,7 @@ static unsigned int max8649_get_mode(struct regulator_dev *rdev)
115 return REGULATOR_MODE_NORMAL; 115 return REGULATOR_MODE_NORMAL;
116} 116}
117 117
118static struct regulator_ops max8649_dcdc_ops = { 118static const struct regulator_ops max8649_dcdc_ops = {
119 .set_voltage_sel = regulator_set_voltage_sel_regmap, 119 .set_voltage_sel = regulator_set_voltage_sel_regmap,
120 .get_voltage_sel = regulator_get_voltage_sel_regmap, 120 .get_voltage_sel = regulator_get_voltage_sel_regmap,
121 .list_voltage = regulator_list_voltage_linear, 121 .list_voltage = regulator_list_voltage_linear,
@@ -143,7 +143,7 @@ static struct regulator_desc dcdc_desc = {
143 .enable_is_inverted = true, 143 .enable_is_inverted = true,
144}; 144};
145 145
146static struct regmap_config max8649_regmap_config = { 146static const struct regmap_config max8649_regmap_config = {
147 .reg_bits = 8, 147 .reg_bits = 8,
148 .val_bits = 8, 148 .val_bits = 8,
149}; 149};
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
new file mode 100644
index 000000000000..a5b2f4762677
--- /dev/null
+++ b/drivers/regulator/mt6397-regulator.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu <flora.fu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/platform_device.h>
18#include <linux/regmap.h>
19#include <linux/mfd/mt6397/core.h>
20#include <linux/mfd/mt6397/registers.h>
21#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h>
23#include <linux/regulator/mt6397-regulator.h>
24#include <linux/regulator/of_regulator.h>
25
26/*
27 * MT6397 regulators' information
28 *
29 * @desc: standard fields of regulator description.
30 * @qi: Mask for query enable signal status of regulators
31 * @vselon_reg: Register sections for hardware control mode of bucks
32 * @vselctrl_reg: Register for controlling the buck control mode.
33 * @vselctrl_mask: Mask for query buck's voltage control mode.
34 */
35struct mt6397_regulator_info {
36 struct regulator_desc desc;
37 u32 qi;
38 u32 vselon_reg;
39 u32 vselctrl_reg;
40 u32 vselctrl_mask;
41};
42
43#define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
44 vosel, vosel_mask, voselon, vosel_ctrl) \
45[MT6397_ID_##vreg] = { \
46 .desc = { \
47 .name = #vreg, \
48 .of_match = of_match_ptr(match), \
49 .ops = &mt6397_volt_range_ops, \
50 .type = REGULATOR_VOLTAGE, \
51 .id = MT6397_ID_##vreg, \
52 .owner = THIS_MODULE, \
53 .n_voltages = (max - min)/step + 1, \
54 .linear_ranges = volt_ranges, \
55 .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
56 .vsel_reg = vosel, \
57 .vsel_mask = vosel_mask, \
58 .enable_reg = enreg, \
59 .enable_mask = BIT(0), \
60 }, \
61 .qi = BIT(13), \
62 .vselon_reg = voselon, \
63 .vselctrl_reg = vosel_ctrl, \
64 .vselctrl_mask = BIT(1), \
65}
66
67#define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
68 vosel_mask) \
69[MT6397_ID_##vreg] = { \
70 .desc = { \
71 .name = #vreg, \
72 .of_match = of_match_ptr(match), \
73 .ops = &mt6397_volt_table_ops, \
74 .type = REGULATOR_VOLTAGE, \
75 .id = MT6397_ID_##vreg, \
76 .owner = THIS_MODULE, \
77 .n_voltages = ARRAY_SIZE(ldo_volt_table), \
78 .volt_table = ldo_volt_table, \
79 .vsel_reg = vosel, \
80 .vsel_mask = vosel_mask, \
81 .enable_reg = enreg, \
82 .enable_mask = BIT(enbit), \
83 }, \
84 .qi = BIT(15), \
85}
86
87#define MT6397_REG_FIXED(match, vreg, enreg, enbit, volt) \
88[MT6397_ID_##vreg] = { \
89 .desc = { \
90 .name = #vreg, \
91 .of_match = of_match_ptr(match), \
92 .ops = &mt6397_volt_fixed_ops, \
93 .type = REGULATOR_VOLTAGE, \
94 .id = MT6397_ID_##vreg, \
95 .owner = THIS_MODULE, \
96 .n_voltages = 1, \
97 .enable_reg = enreg, \
98 .enable_mask = BIT(enbit), \
99 .min_uV = volt, \
100 }, \
101 .qi = BIT(15), \
102}
103
104static const struct regulator_linear_range buck_volt_range1[] = {
105 REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
106};
107
108static const struct regulator_linear_range buck_volt_range2[] = {
109 REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250),
110};
111
112static const struct regulator_linear_range buck_volt_range3[] = {
113 REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
114};
115
116static const u32 ldo_volt_table1[] = {
117 1500000, 1800000, 2500000, 2800000,
118};
119
120static const u32 ldo_volt_table2[] = {
121 1800000, 3300000,
122};
123
124static const u32 ldo_volt_table3[] = {
125 3000000, 3300000,
126};
127
128static const u32 ldo_volt_table4[] = {
129 1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
130};
131
132static const u32 ldo_volt_table5[] = {
133 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
134};
135
136static const u32 ldo_volt_table5_v2[] = {
137 1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
138};
139
140static const u32 ldo_volt_table6[] = {
141 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
142};
143
144static const u32 ldo_volt_table7[] = {
145 1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
146};
147
148static int mt6397_get_status(struct regulator_dev *rdev)
149{
150 int ret;
151 u32 regval;
152 struct mt6397_regulator_info *info = rdev_get_drvdata(rdev);
153
154 ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
155 if (ret != 0) {
156 dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
157 return ret;
158 }
159
160 return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
161}
162
163static struct regulator_ops mt6397_volt_range_ops = {
164 .list_voltage = regulator_list_voltage_linear_range,
165 .map_voltage = regulator_map_voltage_linear_range,
166 .set_voltage_sel = regulator_set_voltage_sel_regmap,
167 .get_voltage_sel = regulator_get_voltage_sel_regmap,
168 .set_voltage_time_sel = regulator_set_voltage_time_sel,
169 .enable = regulator_enable_regmap,
170 .disable = regulator_disable_regmap,
171 .is_enabled = regulator_is_enabled_regmap,
172 .get_status = mt6397_get_status,
173};
174
175static struct regulator_ops mt6397_volt_table_ops = {
176 .list_voltage = regulator_list_voltage_table,
177 .map_voltage = regulator_map_voltage_iterate,
178 .set_voltage_sel = regulator_set_voltage_sel_regmap,
179 .get_voltage_sel = regulator_get_voltage_sel_regmap,
180 .set_voltage_time_sel = regulator_set_voltage_time_sel,
181 .enable = regulator_enable_regmap,
182 .disable = regulator_disable_regmap,
183 .is_enabled = regulator_is_enabled_regmap,
184 .get_status = mt6397_get_status,
185};
186
187static struct regulator_ops mt6397_volt_fixed_ops = {
188 .list_voltage = regulator_list_voltage_linear,
189 .enable = regulator_enable_regmap,
190 .disable = regulator_disable_regmap,
191 .is_enabled = regulator_is_enabled_regmap,
192 .get_status = mt6397_get_status,
193};
194
195/* The array is indexed by id(MT6397_ID_XXX) */
196static struct mt6397_regulator_info mt6397_regulators[] = {
197 MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250,
198 buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f,
199 MT6397_VCA15_CON10, MT6397_VCA15_CON5),
200 MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250,
201 buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f,
202 MT6397_VPCA7_CON10, MT6397_VPCA7_CON5),
203 MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250,
204 buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9,
205 0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5),
206 MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250,
207 buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9,
208 0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5),
209 MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250,
210 buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f,
211 MT6397_VCORE_CON10, MT6397_VCORE_CON5),
212 MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1,
213 MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f,
214 MT6397_VGPU_CON10, MT6397_VGPU_CON5),
215 MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2,
216 MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f,
217 MT6397_VDRM_CON10, MT6397_VDRM_CON5),
218 MT6397_BUCK("buck_vio18", VIO18, 1500000, 2120000, 20000,
219 buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f,
220 MT6397_VIO18_CON10, MT6397_VIO18_CON5),
221 MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000),
222 MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000),
223 MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1,
224 MT6397_ANALDO_CON2, 15, MT6397_ANALDO_CON6, 0xC0),
225 MT6397_REG_FIXED("ldo_vio28", VIO28, MT6397_DIGLDO_CON0, 14, 2800000),
226 MT6397_REG_FIXED("ldo_vusb", VUSB, MT6397_DIGLDO_CON1, 14, 3300000),
227 MT6397_LDO("ldo_vmc", VMC, ldo_volt_table2,
228 MT6397_DIGLDO_CON2, 12, MT6397_DIGLDO_CON29, 0x10),
229 MT6397_LDO("ldo_vmch", VMCH, ldo_volt_table3,
230 MT6397_DIGLDO_CON3, 14, MT6397_DIGLDO_CON17, 0x80),
231 MT6397_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table3,
232 MT6397_DIGLDO_CON4, 14, MT6397_DIGLDO_CON18, 0x10),
233 MT6397_LDO("ldo_vgp1", VGP1, ldo_volt_table4,
234 MT6397_DIGLDO_CON5, 15, MT6397_DIGLDO_CON19, 0xE0),
235 MT6397_LDO("ldo_vgp2", VGP2, ldo_volt_table5,
236 MT6397_DIGLDO_CON6, 15, MT6397_DIGLDO_CON20, 0xE0),
237 MT6397_LDO("ldo_vgp3", VGP3, ldo_volt_table5,
238 MT6397_DIGLDO_CON7, 15, MT6397_DIGLDO_CON21, 0xE0),
239 MT6397_LDO("ldo_vgp4", VGP4, ldo_volt_table5,
240 MT6397_DIGLDO_CON8, 15, MT6397_DIGLDO_CON22, 0xE0),
241 MT6397_LDO("ldo_vgp5", VGP5, ldo_volt_table6,
242 MT6397_DIGLDO_CON9, 15, MT6397_DIGLDO_CON23, 0xE0),
243 MT6397_LDO("ldo_vgp6", VGP6, ldo_volt_table5,
244 MT6397_DIGLDO_CON10, 15, MT6397_DIGLDO_CON33, 0xE0),
245 MT6397_LDO("ldo_vibr", VIBR, ldo_volt_table7,
246 MT6397_DIGLDO_CON24, 15, MT6397_DIGLDO_CON25, 0xE00),
247};
248
249static int mt6397_set_buck_vosel_reg(struct platform_device *pdev)
250{
251 struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
252 int i;
253 u32 regval;
254
255 for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
256 if (mt6397_regulators[i].vselctrl_reg) {
257 if (regmap_read(mt6397->regmap,
258 mt6397_regulators[i].vselctrl_reg,
259 &regval) < 0) {
260 dev_err(&pdev->dev,
261 "Failed to read buck ctrl\n");
262 return -EIO;
263 }
264
265 if (regval & mt6397_regulators[i].vselctrl_mask) {
266 mt6397_regulators[i].desc.vsel_reg =
267 mt6397_regulators[i].vselon_reg;
268 }
269 }
270 }
271
272 return 0;
273}
274
275static int mt6397_regulator_probe(struct platform_device *pdev)
276{
277 struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
278 struct regulator_config config = {};
279 struct regulator_dev *rdev;
280 int i;
281 u32 reg_value, version;
282
283 /* Query buck controller to select activated voltage register part */
284 if (mt6397_set_buck_vosel_reg(pdev))
285 return -EIO;
286
287 /* Read PMIC chip revision to update constraints and voltage table */
288 if (regmap_read(mt6397->regmap, MT6397_CID, &reg_value) < 0) {
289 dev_err(&pdev->dev, "Failed to read Chip ID\n");
290 return -EIO;
291 }
292 dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
293
294 version = (reg_value & 0xFF);
295 switch (version) {
296 case MT6397_REGULATOR_ID91:
297 mt6397_regulators[MT6397_ID_VGP2].desc.volt_table =
298 ldo_volt_table5_v2;
299 break;
300 default:
301 break;
302 }
303
304 for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
305 config.dev = &pdev->dev;
306 config.driver_data = &mt6397_regulators[i];
307 config.regmap = mt6397->regmap;
308 rdev = devm_regulator_register(&pdev->dev,
309 &mt6397_regulators[i].desc, &config);
310 if (IS_ERR(rdev)) {
311 dev_err(&pdev->dev, "failed to register %s\n",
312 mt6397_regulators[i].desc.name);
313 return PTR_ERR(rdev);
314 }
315 }
316
317 return 0;
318}
319
320static struct platform_driver mt6397_regulator_driver = {
321 .driver = {
322 .name = "mt6397-regulator",
323 },
324 .probe = mt6397_regulator_probe,
325};
326
327module_platform_driver(mt6397_regulator_driver);
328
329MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
330MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
331MODULE_LICENSE("GPL");
332MODULE_ALIAS("platform:mt6397-regulator");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 91eaaf010524..24e812c48d93 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -270,6 +270,7 @@ EXPORT_SYMBOL_GPL(of_regulator_match);
270 270
271struct regulator_init_data *regulator_of_get_init_data(struct device *dev, 271struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
272 const struct regulator_desc *desc, 272 const struct regulator_desc *desc,
273 struct regulator_config *config,
273 struct device_node **node) 274 struct device_node **node)
274{ 275{
275 struct device_node *search, *child; 276 struct device_node *search, *child;
@@ -307,6 +308,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
307 break; 308 break;
308 } 309 }
309 310
311 if (desc->of_parse_cb) {
312 if (desc->of_parse_cb(child, desc, config)) {
313 dev_err(dev,
314 "driver callback failed to parse DT for regulator %s\n",
315 child->name);
316 init_data = NULL;
317 break;
318 }
319 }
320
310 of_node_get(child); 321 of_node_get(child);
311 *node = child; 322 *node = child;
312 break; 323 break;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index c879dff597ee..8cc8d1877c44 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -56,7 +56,7 @@
56#define PFUZE100_VGEN5VOL 0x70 56#define PFUZE100_VGEN5VOL 0x70
57#define PFUZE100_VGEN6VOL 0x71 57#define PFUZE100_VGEN6VOL 0x71
58 58
59enum chips { PFUZE100, PFUZE200 }; 59enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 };
60 60
61struct pfuze_regulator { 61struct pfuze_regulator {
62 struct regulator_desc desc; 62 struct regulator_desc desc;
@@ -80,9 +80,18 @@ static const int pfuze100_vsnvs[] = {
80 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000, 80 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
81}; 81};
82 82
83static const int pfuze3000_sw2lo[] = {
84 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000,
85};
86
87static const int pfuze3000_sw2hi[] = {
88 2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000,
89};
90
83static const struct i2c_device_id pfuze_device_id[] = { 91static const struct i2c_device_id pfuze_device_id[] = {
84 {.name = "pfuze100", .driver_data = PFUZE100}, 92 {.name = "pfuze100", .driver_data = PFUZE100},
85 {.name = "pfuze200", .driver_data = PFUZE200}, 93 {.name = "pfuze200", .driver_data = PFUZE200},
94 {.name = "pfuze3000", .driver_data = PFUZE3000},
86 { } 95 { }
87}; 96};
88MODULE_DEVICE_TABLE(i2c, pfuze_device_id); 97MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
@@ -90,6 +99,7 @@ MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
90static const struct of_device_id pfuze_dt_ids[] = { 99static const struct of_device_id pfuze_dt_ids[] = {
91 { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100}, 100 { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
92 { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200}, 101 { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
102 { .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
93 { } 103 { }
94}; 104};
95MODULE_DEVICE_TABLE(of, pfuze_dt_ids); 105MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
@@ -219,6 +229,60 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
219 .stby_mask = 0x20, \ 229 .stby_mask = 0x20, \
220 } 230 }
221 231
232#define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step) { \
233 .desc = { \
234 .name = #_name, \
235 .n_voltages = ((max) - (min)) / (step) + 1, \
236 .ops = &pfuze100_ldo_regulator_ops, \
237 .type = REGULATOR_VOLTAGE, \
238 .id = _chip ## _ ## _name, \
239 .owner = THIS_MODULE, \
240 .min_uV = (min), \
241 .uV_step = (step), \
242 .vsel_reg = (base), \
243 .vsel_mask = 0x3, \
244 .enable_reg = (base), \
245 .enable_mask = 0x10, \
246 }, \
247 .stby_reg = (base), \
248 .stby_mask = 0x20, \
249}
250
251
252#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
253 .desc = { \
254 .name = #_name,\
255 .n_voltages = ((max) - (min)) / (step) + 1, \
256 .ops = &pfuze100_sw_regulator_ops, \
257 .type = REGULATOR_VOLTAGE, \
258 .id = _chip ## _ ## _name, \
259 .owner = THIS_MODULE, \
260 .min_uV = (min), \
261 .uV_step = (step), \
262 .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
263 .vsel_mask = 0x7, \
264 }, \
265 .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
266 .stby_mask = 0x7, \
267}
268
269#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
270 .desc = { \
271 .name = #_name,\
272 .n_voltages = ((max) - (min)) / (step) + 1, \
273 .ops = &pfuze100_sw_regulator_ops, \
274 .type = REGULATOR_VOLTAGE, \
275 .id = _chip ## _ ## _name, \
276 .owner = THIS_MODULE, \
277 .min_uV = (min), \
278 .uV_step = (step), \
279 .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
280 .vsel_mask = 0xf, \
281 }, \
282 .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
283 .stby_mask = 0xf, \
284}
285
222/* PFUZE100 */ 286/* PFUZE100 */
223static struct pfuze_regulator pfuze100_regulators[] = { 287static struct pfuze_regulator pfuze100_regulators[] = {
224 PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), 288 PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
@@ -254,6 +318,22 @@ static struct pfuze_regulator pfuze200_regulators[] = {
254 PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), 318 PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
255}; 319};
256 320
321static struct pfuze_regulator pfuze3000_regulators[] = {
322 PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000),
323 PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
324 PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
325 PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
326 PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
327 PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
328 PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000),
329 PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
330 PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
331 PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
332 PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
333 PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
334 PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
335};
336
257static struct pfuze_regulator *pfuze_regulators; 337static struct pfuze_regulator *pfuze_regulators;
258 338
259#ifdef CONFIG_OF 339#ifdef CONFIG_OF
@@ -294,6 +374,24 @@ static struct of_regulator_match pfuze200_matches[] = {
294 { .name = "vgen6", }, 374 { .name = "vgen6", },
295}; 375};
296 376
377/* PFUZE3000 */
378static struct of_regulator_match pfuze3000_matches[] = {
379
380 { .name = "sw1a", },
381 { .name = "sw1b", },
382 { .name = "sw2", },
383 { .name = "sw3", },
384 { .name = "swbst", },
385 { .name = "vsnvs", },
386 { .name = "vrefddr", },
387 { .name = "vldo1", },
388 { .name = "vldo2", },
389 { .name = "vccsd", },
390 { .name = "v33", },
391 { .name = "vldo3", },
392 { .name = "vldo4", },
393};
394
297static struct of_regulator_match *pfuze_matches; 395static struct of_regulator_match *pfuze_matches;
298 396
299static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) 397static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -313,6 +411,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
313 } 411 }
314 412
315 switch (chip->chip_id) { 413 switch (chip->chip_id) {
414 case PFUZE3000:
415 pfuze_matches = pfuze3000_matches;
416 ret = of_regulator_match(dev, parent, pfuze3000_matches,
417 ARRAY_SIZE(pfuze3000_matches));
418 break;
316 case PFUZE200: 419 case PFUZE200:
317 pfuze_matches = pfuze200_matches; 420 pfuze_matches = pfuze200_matches;
318 ret = of_regulator_match(dev, parent, pfuze200_matches, 421 ret = of_regulator_match(dev, parent, pfuze200_matches,
@@ -378,7 +481,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
378 * as ID=8 in PFUZE100 481 * as ID=8 in PFUZE100
379 */ 482 */
380 dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); 483 dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
381 } else if ((value & 0x0f) != pfuze_chip->chip_id) { 484 } else if ((value & 0x0f) != pfuze_chip->chip_id &&
485 (value & 0xf0) >> 4 != pfuze_chip->chip_id) {
382 /* device id NOT match with your setting */ 486 /* device id NOT match with your setting */
383 dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); 487 dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
384 return -ENODEV; 488 return -ENODEV;
@@ -417,7 +521,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
417 int i, ret; 521 int i, ret;
418 const struct of_device_id *match; 522 const struct of_device_id *match;
419 u32 regulator_num; 523 u32 regulator_num;
420 u32 sw_check_start, sw_check_end; 524 u32 sw_check_start, sw_check_end, sw_hi = 0x40;
421 525
422 pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip), 526 pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
423 GFP_KERNEL); 527 GFP_KERNEL);
@@ -458,13 +562,19 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
458 562
459 /* use the right regulators after identify the right device */ 563 /* use the right regulators after identify the right device */
460 switch (pfuze_chip->chip_id) { 564 switch (pfuze_chip->chip_id) {
565 case PFUZE3000:
566 pfuze_regulators = pfuze3000_regulators;
567 regulator_num = ARRAY_SIZE(pfuze3000_regulators);
568 sw_check_start = PFUZE3000_SW2;
569 sw_check_end = PFUZE3000_SW2;
570 sw_hi = 1 << 3;
571 break;
461 case PFUZE200: 572 case PFUZE200:
462 pfuze_regulators = pfuze200_regulators; 573 pfuze_regulators = pfuze200_regulators;
463 regulator_num = ARRAY_SIZE(pfuze200_regulators); 574 regulator_num = ARRAY_SIZE(pfuze200_regulators);
464 sw_check_start = PFUZE200_SW2; 575 sw_check_start = PFUZE200_SW2;
465 sw_check_end = PFUZE200_SW3B; 576 sw_check_end = PFUZE200_SW3B;
466 break; 577 break;
467
468 case PFUZE100: 578 case PFUZE100:
469 default: 579 default:
470 pfuze_regulators = pfuze100_regulators; 580 pfuze_regulators = pfuze100_regulators;
@@ -474,7 +584,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
474 break; 584 break;
475 } 585 }
476 dev_info(&client->dev, "pfuze%s found.\n", 586 dev_info(&client->dev, "pfuze%s found.\n",
477 (pfuze_chip->chip_id == PFUZE100) ? "100" : "200"); 587 (pfuze_chip->chip_id == PFUZE100) ? "100" :
588 ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
478 589
479 memcpy(pfuze_chip->regulator_descs, pfuze_regulators, 590 memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
480 sizeof(pfuze_chip->regulator_descs)); 591 sizeof(pfuze_chip->regulator_descs));
@@ -498,10 +609,15 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
498 /* SW2~SW4 high bit check and modify the voltage value table */ 609 /* SW2~SW4 high bit check and modify the voltage value table */
499 if (i >= sw_check_start && i <= sw_check_end) { 610 if (i >= sw_check_start && i <= sw_check_end) {
500 regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); 611 regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
501 if (val & 0x40) { 612 if (val & sw_hi) {
502 desc->min_uV = 800000; 613 if (pfuze_chip->chip_id == PFUZE3000) {
503 desc->uV_step = 50000; 614 desc->volt_table = pfuze3000_sw2hi;
504 desc->n_voltages = 51; 615 desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
616 } else {
617 desc->min_uV = 800000;
618 desc->uV_step = 50000;
619 desc->n_voltages = 51;
620 }
505 } 621 }
506 } 622 }
507 623
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 8364ff331a81..e8647f7cf25e 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -227,9 +227,11 @@ static int rpm_reg_set_mV_sel(struct regulator_dev *rdev,
227 return uV; 227 return uV;
228 228
229 mutex_lock(&vreg->lock); 229 mutex_lock(&vreg->lock);
230 vreg->uV = uV;
231 if (vreg->is_enabled) 230 if (vreg->is_enabled)
232 ret = rpm_reg_write(vreg, req, vreg->uV / 1000); 231 ret = rpm_reg_write(vreg, req, uV / 1000);
232
233 if (!ret)
234 vreg->uV = uV;
233 mutex_unlock(&vreg->lock); 235 mutex_unlock(&vreg->lock);
234 236
235 return ret; 237 return ret;
@@ -252,9 +254,11 @@ static int rpm_reg_set_uV_sel(struct regulator_dev *rdev,
252 return uV; 254 return uV;
253 255
254 mutex_lock(&vreg->lock); 256 mutex_lock(&vreg->lock);
255 vreg->uV = uV;
256 if (vreg->is_enabled) 257 if (vreg->is_enabled)
257 ret = rpm_reg_write(vreg, req, vreg->uV); 258 ret = rpm_reg_write(vreg, req, uV);
259
260 if (!ret)
261 vreg->uV = uV;
258 mutex_unlock(&vreg->lock); 262 mutex_unlock(&vreg->lock);
259 263
260 return ret; 264 return ret;
@@ -674,6 +678,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
674 vreg->desc.owner = THIS_MODULE; 678 vreg->desc.owner = THIS_MODULE;
675 vreg->desc.type = REGULATOR_VOLTAGE; 679 vreg->desc.type = REGULATOR_VOLTAGE;
676 vreg->desc.name = pdev->dev.of_node->name; 680 vreg->desc.name = pdev->dev.of_node->name;
681 vreg->desc.supply_name = "vin";
677 682
678 vreg->rpm = dev_get_drvdata(pdev->dev.parent); 683 vreg->rpm = dev_get_drvdata(pdev->dev.parent);
679 if (!vreg->rpm) { 684 if (!vreg->rpm) {
@@ -768,7 +773,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
768 break; 773 break;
769 } 774 }
770 775
771 if (force_mode < 0) { 776 if (force_mode == -1) {
772 dev_err(&pdev->dev, "invalid force mode\n"); 777 dev_err(&pdev->dev, "invalid force mode\n");
773 return -EINVAL; 778 return -EINVAL;
774 } 779 }
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index c94a3e0f3b91..1f93b752a81c 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -97,7 +97,7 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
97 RK808_RAMP_RATE_MASK, ramp_value); 97 RK808_RAMP_RATE_MASK, ramp_value);
98} 98}
99 99
100int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) 100static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
101{ 101{
102 unsigned int reg; 102 unsigned int reg;
103 int sel = regulator_map_voltage_linear_range(rdev, uv, uv); 103 int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
@@ -112,7 +112,7 @@ int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
112 sel); 112 sel);
113} 113}
114 114
115int rk808_set_suspend_enable(struct regulator_dev *rdev) 115static int rk808_set_suspend_enable(struct regulator_dev *rdev)
116{ 116{
117 unsigned int reg; 117 unsigned int reg;
118 118
@@ -123,7 +123,7 @@ int rk808_set_suspend_enable(struct regulator_dev *rdev)
123 0); 123 0);
124} 124}
125 125
126int rk808_set_suspend_disable(struct regulator_dev *rdev) 126static int rk808_set_suspend_disable(struct regulator_dev *rdev)
127{ 127{
128 unsigned int reg; 128 unsigned int reg;
129 129
diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c
index 870cc49438db..96d2c18e051a 100644
--- a/drivers/regulator/rt5033-regulator.c
+++ b/drivers/regulator/rt5033-regulator.c
@@ -36,6 +36,8 @@ static struct regulator_ops rt5033_buck_ops = {
36static const struct regulator_desc rt5033_supported_regulators[] = { 36static const struct regulator_desc rt5033_supported_regulators[] = {
37 [RT5033_BUCK] = { 37 [RT5033_BUCK] = {
38 .name = "BUCK", 38 .name = "BUCK",
39 .of_match = of_match_ptr("BUCK"),
40 .regulators_node = of_match_ptr("regulators"),
39 .id = RT5033_BUCK, 41 .id = RT5033_BUCK,
40 .ops = &rt5033_buck_ops, 42 .ops = &rt5033_buck_ops,
41 .type = REGULATOR_VOLTAGE, 43 .type = REGULATOR_VOLTAGE,
@@ -50,6 +52,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
50 }, 52 },
51 [RT5033_LDO] = { 53 [RT5033_LDO] = {
52 .name = "LDO", 54 .name = "LDO",
55 .of_match = of_match_ptr("LDO"),
56 .regulators_node = of_match_ptr("regulators"),
53 .id = RT5033_LDO, 57 .id = RT5033_LDO,
54 .ops = &rt5033_buck_ops, 58 .ops = &rt5033_buck_ops,
55 .type = REGULATOR_VOLTAGE, 59 .type = REGULATOR_VOLTAGE,
@@ -64,6 +68,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
64 }, 68 },
65 [RT5033_SAFE_LDO] = { 69 [RT5033_SAFE_LDO] = {
66 .name = "SAFE_LDO", 70 .name = "SAFE_LDO",
71 .of_match = of_match_ptr("SAFE_LDO"),
72 .regulators_node = of_match_ptr("regulators"),
67 .id = RT5033_SAFE_LDO, 73 .id = RT5033_SAFE_LDO,
68 .ops = &rt5033_safe_ldo_ops, 74 .ops = &rt5033_safe_ldo_ops,
69 .type = REGULATOR_VOLTAGE, 75 .type = REGULATOR_VOLTAGE,
@@ -81,7 +87,7 @@ static int rt5033_regulator_probe(struct platform_device *pdev)
81 int ret, i; 87 int ret, i;
82 struct regulator_config config = {}; 88 struct regulator_config config = {};
83 89
84 config.dev = &pdev->dev; 90 config.dev = rt5033->dev;
85 config.driver_data = rt5033; 91 config.driver_data = rt5033;
86 92
87 for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) { 93 for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) {
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 7380af8bd50d..b941e564b3f3 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -173,7 +173,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
173} 173}
174 174
175/* Operations permitted on VDCDCx */ 175/* Operations permitted on VDCDCx */
176static struct regulator_ops tps65023_dcdc_ops = { 176static const struct regulator_ops tps65023_dcdc_ops = {
177 .is_enabled = regulator_is_enabled_regmap, 177 .is_enabled = regulator_is_enabled_regmap,
178 .enable = regulator_enable_regmap, 178 .enable = regulator_enable_regmap,
179 .disable = regulator_disable_regmap, 179 .disable = regulator_disable_regmap,
@@ -184,7 +184,7 @@ static struct regulator_ops tps65023_dcdc_ops = {
184}; 184};
185 185
186/* Operations permitted on LDOx */ 186/* Operations permitted on LDOx */
187static struct regulator_ops tps65023_ldo_ops = { 187static const struct regulator_ops tps65023_ldo_ops = {
188 .is_enabled = regulator_is_enabled_regmap, 188 .is_enabled = regulator_is_enabled_regmap,
189 .enable = regulator_enable_regmap, 189 .enable = regulator_enable_regmap,
190 .disable = regulator_disable_regmap, 190 .disable = regulator_disable_regmap,
@@ -194,7 +194,7 @@ static struct regulator_ops tps65023_ldo_ops = {
194 .map_voltage = regulator_map_voltage_ascend, 194 .map_voltage = regulator_map_voltage_ascend,
195}; 195};
196 196
197static struct regmap_config tps65023_regmap_config = { 197static const struct regmap_config tps65023_regmap_config = {
198 .reg_bits = 8, 198 .reg_bits = 8,
199 .val_bits = 8, 199 .val_bits = 8,
200}; 200};
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 1dba62c5cf6a..1efebc9eedfb 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref)
136 struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; 136 struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
137 struct scsi_device *sdev = scsi_dh_data->sdev; 137 struct scsi_device *sdev = scsi_dh_data->sdev;
138 138
139 scsi_dh->detach(sdev);
140
139 spin_lock_irq(sdev->request_queue->queue_lock); 141 spin_lock_irq(sdev->request_queue->queue_lock);
140 sdev->scsi_dh_data = NULL; 142 sdev->scsi_dh_data = NULL;
141 spin_unlock_irq(sdev->request_queue->queue_lock); 143 spin_unlock_irq(sdev->request_queue->queue_lock);
142 144
143 scsi_dh->detach(sdev);
144 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); 145 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
145 module_put(scsi_dh->module); 146 module_put(scsi_dh->module);
146} 147}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 399516925d80..05ea0d49a3a3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
2800 */ 2800 */
2801 sd_set_flush_flag(sdkp); 2801 sd_set_flush_flag(sdkp);
2802 2802
2803 max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), 2803 max_xfer = sdkp->max_xfer_blocks;
2804 sdkp->max_xfer_blocks);
2805 max_xfer <<= ilog2(sdp->sector_size) - 9; 2804 max_xfer <<= ilog2(sdp->sector_size) - 9;
2805
2806 max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
2807 max_xfer);
2806 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); 2808 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
2807 set_capacity(disk, sdkp->capacity); 2809 set_capacity(disk, sdkp->capacity);
2808 sd_config_write_same(sdkp); 2810 sd_config_write_same(sdkp);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 99829985c1a1..95ccedabba4f 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -185,6 +185,16 @@ config SPI_DAVINCI
185 help 185 help
186 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. 186 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
187 187
188config SPI_DLN2
189 tristate "Diolan DLN-2 USB SPI adapter"
190 depends on MFD_DLN2
191 help
192 If you say yes to this option, support will be included for Diolan
193 DLN2, a USB to SPI interface.
194
195 This driver can also be built as a module. If so, the module
196 will be called spi-dln2.
197
188config SPI_EFM32 198config SPI_EFM32
189 tristate "EFM32 SPI controller" 199 tristate "EFM32 SPI controller"
190 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) 200 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
@@ -279,7 +289,7 @@ config SPI_FSL_CPM
279 depends on FSL_SOC 289 depends on FSL_SOC
280 290
281config SPI_FSL_SPI 291config SPI_FSL_SPI
282 bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller" 292 tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
283 depends on OF 293 depends on OF
284 select SPI_FSL_LIB 294 select SPI_FSL_LIB
285 select SPI_FSL_CPM if FSL_SOC 295 select SPI_FSL_CPM if FSL_SOC
@@ -292,7 +302,6 @@ config SPI_FSL_SPI
292 302
293config SPI_FSL_DSPI 303config SPI_FSL_DSPI
294 tristate "Freescale DSPI controller" 304 tristate "Freescale DSPI controller"
295 select SPI_BITBANG
296 select REGMAP_MMIO 305 select REGMAP_MMIO
297 depends on SOC_VF610 || COMPILE_TEST 306 depends on SOC_VF610 || COMPILE_TEST
298 help 307 help
@@ -300,7 +309,7 @@ config SPI_FSL_DSPI
300 mode. VF610 platform uses the controller. 309 mode. VF610 platform uses the controller.
301 310
302config SPI_FSL_ESPI 311config SPI_FSL_ESPI
303 bool "Freescale eSPI controller" 312 tristate "Freescale eSPI controller"
304 depends on FSL_SOC 313 depends on FSL_SOC
305 select SPI_FSL_LIB 314 select SPI_FSL_LIB
306 help 315 help
@@ -460,7 +469,6 @@ config SPI_S3C24XX_FIQ
460config SPI_S3C64XX 469config SPI_S3C64XX
461 tristate "Samsung S3C64XX series type SPI" 470 tristate "Samsung S3C64XX series type SPI"
462 depends on (PLAT_SAMSUNG || ARCH_EXYNOS) 471 depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
463 select S3C64XX_PL080 if ARCH_S3C64XX
464 help 472 help
465 SPI driver for Samsung S3C64XX and newer SoCs. 473 SPI driver for Samsung S3C64XX and newer SoCs.
466 474
@@ -503,6 +511,13 @@ config SPI_SIRF
503 help 511 help
504 SPI driver for CSR SiRFprimaII SoCs 512 SPI driver for CSR SiRFprimaII SoCs
505 513
514config SPI_ST_SSC4
515 tristate "STMicroelectronics SPI SSC-based driver"
516 depends on ARCH_STI
517 help
518 STMicroelectronics SoCs support for SPI. If you say yes to
519 this option, support will be included for the SSC driven SPI.
520
506config SPI_SUN4I 521config SPI_SUN4I
507 tristate "Allwinner A10 SoCs SPI controller" 522 tristate "Allwinner A10 SoCs SPI controller"
508 depends on ARCH_SUNXI || COMPILE_TEST 523 depends on ARCH_SUNXI || COMPILE_TEST
@@ -595,7 +610,6 @@ config SPI_XTENSA_XTFPGA
595 16 bit words in SPI mode 0, automatically asserting CS on transfer 610 16 bit words in SPI mode 0, automatically asserting CS on transfer
596 start and deasserting on end. 611 start and deasserting on end.
597 612
598
599config SPI_NUC900 613config SPI_NUC900
600 tristate "Nuvoton NUC900 series SPI" 614 tristate "Nuvoton NUC900 series SPI"
601 depends on ARCH_W90X900 615 depends on ARCH_W90X900
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6b9d2ac629cc..d8cbf654976b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
27obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o 27obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
28obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o 28obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
29obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o 29obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
30obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
30obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o 31obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
31obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o 32obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
32obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o 33obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
@@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
76obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o 77obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
77obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o 78obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
78obj-$(CONFIG_SPI_SIRF) += spi-sirf.o 79obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
80obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
79obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o 81obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
80obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o 82obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
81obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o 83obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 23d8f5f56579..9af7841f2e8c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1046,6 +1046,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
1046 struct atmel_spi_device *asd; 1046 struct atmel_spi_device *asd;
1047 int timeout; 1047 int timeout;
1048 int ret; 1048 int ret;
1049 unsigned long dma_timeout;
1049 1050
1050 as = spi_master_get_devdata(master); 1051 as = spi_master_get_devdata(master);
1051 1052
@@ -1103,15 +1104,12 @@ static int atmel_spi_one_transfer(struct spi_master *master,
1103 1104
1104 /* interrupts are disabled, so free the lock for schedule */ 1105 /* interrupts are disabled, so free the lock for schedule */
1105 atmel_spi_unlock(as); 1106 atmel_spi_unlock(as);
1106 ret = wait_for_completion_timeout(&as->xfer_completion, 1107 dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
1107 SPI_DMA_TIMEOUT); 1108 SPI_DMA_TIMEOUT);
1108 atmel_spi_lock(as); 1109 atmel_spi_lock(as);
1109 if (WARN_ON(ret == 0)) { 1110 if (WARN_ON(dma_timeout == 0)) {
1110 dev_err(&spi->dev, 1111 dev_err(&spi->dev, "spi transfer timeout\n");
1111 "spi trasfer timeout, err %d\n", ret);
1112 as->done_status = -EIO; 1112 as->done_status = -EIO;
1113 } else {
1114 ret = 0;
1115 } 1113 }
1116 1114
1117 if (as->done_status) 1115 if (as->done_status)
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 326f47973684..f45e085c01a6 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -15,10 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 18 */
23 19
24#include <linux/init.h> 20#include <linux/init.h>
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 98aab457b24d..419a782ab6d5 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -17,10 +17,6 @@
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */ 20 */
25 21
26#include <linux/clk.h> 22#include <linux/clk.h>
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index c20530982e26..e73e2b052c9c 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
20 */ 16 */
21 17
22#include <linux/kernel.h> 18#include <linux/kernel.h>
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index dc7d2c2d643e..5ef6638d5e8a 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index ee4f91ccd8fd..9a95862986c8 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20#include <linux/kernel.h> 16#include <linux/kernel.h>
21#include <linux/init.h> 17#include <linux/init.h>
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 41b5dc4445f6..688956ff5095 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -12,11 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
19 *
20*/ 15*/
21 16
22#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index b3707badb1e5..5e991065f5b0 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 14 */
19 15
20#include <linux/interrupt.h> 16#include <linux/interrupt.h>
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
new file mode 100644
index 000000000000..3b7d91d94fea
--- /dev/null
+++ b/drivers/spi/spi-dln2.c
@@ -0,0 +1,881 @@
1/*
2 * Driver for the Diolan DLN-2 USB-SPI adapter
3 *
4 * Copyright (c) 2014 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/mfd/dln2.h>
15#include <linux/spi/spi.h>
16#include <linux/pm_runtime.h>
17#include <asm/unaligned.h>
18
19#define DLN2_SPI_MODULE_ID 0x02
20#define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
21
22/* SPI commands */
23#define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00)
24#define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11)
25#define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12)
26#define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13)
27#define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14)
28#define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15)
29#define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16)
30#define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17)
31#define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18)
32#define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19)
33#define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A)
34#define DLN2_SPI_READ DLN2_SPI_CMD(0x1B)
35#define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C)
36#define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20)
37#define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21)
38#define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22)
39#define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23)
40#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24)
41#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25)
42#define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26)
43#define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27)
44#define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28)
45#define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B)
46#define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C)
47#define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D)
48#define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E)
49#define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F)
50#define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30)
51#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31)
52#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32)
53#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33)
54#define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34)
55#define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35)
56#define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36)
57#define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37)
58#define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38)
59#define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39)
60#define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A)
61#define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40)
62#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41)
63#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42)
64#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43)
65#define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44)
66#define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45)
67#define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46)
68#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47)
69#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48)
70#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49)
71#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A)
72#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B)
73#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C)
74
75#define DLN2_SPI_MAX_XFER_SIZE 256
76#define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16)
77#define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0)
78#define DLN2_TRANSFERS_WAIT_COMPLETE 1
79#define DLN2_TRANSFERS_CANCEL 0
80#define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000
81
82struct dln2_spi {
83 struct platform_device *pdev;
84 struct spi_master *master;
85 u8 port;
86
87 /*
88 * This buffer will be used mainly for read/write operations. Since
89 * they're quite large, we cannot use the stack. Protection is not
90 * needed because all SPI communication is serialized by the SPI core.
91 */
92 void *buf;
93
94 u8 bpw;
95 u32 speed;
96 u16 mode;
97 u8 cs;
98};
99
100/*
101 * Enable/Disable SPI module. The disable command will wait for transfers to
102 * complete first.
103 */
104static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
105{
106 u16 cmd;
107 struct {
108 u8 port;
109 u8 wait_for_completion;
110 } tx;
111 unsigned len = sizeof(tx);
112
113 tx.port = dln2->port;
114
115 if (enable) {
116 cmd = DLN2_SPI_ENABLE;
117 len -= sizeof(tx.wait_for_completion);
118 } else {
119 tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
120 cmd = DLN2_SPI_DISABLE;
121 }
122
123 return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
124}
125
126/*
127 * Select/unselect multiple CS lines. The selected lines will be automatically
128 * toggled LOW/HIGH by the board firmware during transfers, provided they're
129 * enabled first.
130 *
131 * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
132 * will toggle the lines LOW/HIGH automatically.
133 */
134static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
135{
136 struct {
137 u8 port;
138 u8 cs;
139 } tx;
140
141 tx.port = dln2->port;
142
143 /*
144 * According to Diolan docs, "a slave device can be selected by changing
145 * the corresponding bit value to 0". The rest must be set to 1. Hence
146 * the bitwise NOT in front.
147 */
148 tx.cs = ~cs_mask;
149
150 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
151}
152
153/*
154 * Select one CS line. The other lines will be un-selected.
155 */
156static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
157{
158 return dln2_spi_cs_set(dln2, BIT(cs));
159}
160
161/*
162 * Enable/disable CS lines for usage. The module has to be disabled first.
163 */
164static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
165{
166 struct {
167 u8 port;
168 u8 cs;
169 } tx;
170 u16 cmd;
171
172 tx.port = dln2->port;
173 tx.cs = cs_mask;
174 cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
175
176 return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
177}
178
179static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
180{
181 u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
182
183 return dln2_spi_cs_enable(dln2, cs_mask, enable);
184}
185
186static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
187{
188 int ret;
189 struct {
190 u8 port;
191 } tx;
192 struct {
193 __le16 cs_count;
194 } rx;
195 unsigned rx_len = sizeof(rx);
196
197 tx.port = dln2->port;
198 ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
199 &rx, &rx_len);
200 if (ret < 0)
201 return ret;
202 if (rx_len < sizeof(rx))
203 return -EPROTO;
204
205 *cs_num = le16_to_cpu(rx.cs_count);
206
207 dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
208
209 return 0;
210}
211
212static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
213{
214 int ret;
215 struct {
216 u8 port;
217 } tx;
218 struct {
219 __le32 speed;
220 } rx;
221 unsigned rx_len = sizeof(rx);
222
223 tx.port = dln2->port;
224
225 ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
226 if (ret < 0)
227 return ret;
228 if (rx_len < sizeof(rx))
229 return -EPROTO;
230
231 *freq = le32_to_cpu(rx.speed);
232
233 return 0;
234}
235
236/*
237 * Get bus min/max frequencies.
238 */
239static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
240{
241 int ret;
242
243 ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
244 if (ret < 0)
245 return ret;
246
247 ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
248 if (ret < 0)
249 return ret;
250
251 dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
252 *fmin, *fmax);
253
254 return 0;
255}
256
257/*
258 * Set the bus speed. The module will automatically round down to the closest
259 * available frequency and returns it. The module has to be disabled first.
260 */
261static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
262{
263 int ret;
264 struct {
265 u8 port;
266 __le32 speed;
267 } __packed tx;
268 struct {
269 __le32 speed;
270 } rx;
271 int rx_len = sizeof(rx);
272
273 tx.port = dln2->port;
274 tx.speed = cpu_to_le32(speed);
275
276 ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
277 &rx, &rx_len);
278 if (ret < 0)
279 return ret;
280 if (rx_len < sizeof(rx))
281 return -EPROTO;
282
283 return 0;
284}
285
286/*
287 * Change CPOL & CPHA. The module has to be disabled first.
288 */
289static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
290{
291 struct {
292 u8 port;
293 u8 mode;
294 } tx;
295
296 tx.port = dln2->port;
297 tx.mode = mode;
298
299 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
300}
301
302/*
303 * Change frame size. The module has to be disabled first.
304 */
305static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
306{
307 struct {
308 u8 port;
309 u8 bpw;
310 } tx;
311
312 tx.port = dln2->port;
313 tx.bpw = bpw;
314
315 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
316 &tx, sizeof(tx));
317}
318
319static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
320 u32 *bpw_mask)
321{
322 int ret;
323 struct {
324 u8 port;
325 } tx;
326 struct {
327 u8 count;
328 u8 frame_sizes[36];
329 } *rx = dln2->buf;
330 unsigned rx_len = sizeof(*rx);
331 int i;
332
333 tx.port = dln2->port;
334
335 ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
336 &tx, sizeof(tx), rx, &rx_len);
337 if (ret < 0)
338 return ret;
339 if (rx_len < sizeof(*rx))
340 return -EPROTO;
341 if (rx->count > ARRAY_SIZE(rx->frame_sizes))
342 return -EPROTO;
343
344 *bpw_mask = 0;
345 for (i = 0; i < rx->count; i++)
346 *bpw_mask |= BIT(rx->frame_sizes[i] - 1);
347
348 dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
349
350 return 0;
351}
352
353/*
354 * Copy the data to DLN2 buffer and change the byte order to LE, requested by
355 * DLN2 module. SPI core makes sure that the data length is a multiple of word
356 * size.
357 */
358static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
359{
360#ifdef __LITTLE_ENDIAN
361 memcpy(dln2_buf, src, len);
362#else
363 if (bpw <= 8) {
364 memcpy(dln2_buf, src, len);
365 } else if (bpw <= 16) {
366 __le16 *d = (__le16 *)dln2_buf;
367 u16 *s = (u16 *)src;
368
369 len = len / 2;
370 while (len--)
371 *d++ = cpu_to_le16p(s++);
372 } else {
373 __le32 *d = (__le32 *)dln2_buf;
374 u32 *s = (u32 *)src;
375
376 len = len / 4;
377 while (len--)
378 *d++ = cpu_to_le32p(s++);
379 }
380#endif
381
382 return 0;
383}
384
385/*
386 * Copy the data from DLN2 buffer and convert to CPU byte order since the DLN2
387 * buffer is LE ordered. SPI core makes sure that the data length is a multiple
388 * of word size. The RX dln2_buf is 2 byte aligned so, for BE, we have to make
389 * sure we avoid unaligned accesses for 32 bit case.
390 */
391static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
392{
393#ifdef __LITTLE_ENDIAN
394 memcpy(dest, dln2_buf, len);
395#else
396 if (bpw <= 8) {
397 memcpy(dest, dln2_buf, len);
398 } else if (bpw <= 16) {
399 u16 *d = (u16 *)dest;
400 __le16 *s = (__le16 *)dln2_buf;
401
402 len = len / 2;
403 while (len--)
404 *d++ = le16_to_cpup(s++);
405 } else {
406 u32 *d = (u32 *)dest;
407 __le32 *s = (__le32 *)dln2_buf;
408
409 len = len / 4;
410 while (len--)
411 *d++ = get_unaligned_le32(s++);
412 }
413#endif
414
415 return 0;
416}
417
418/*
419 * Perform one write operation.
420 */
421static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
422 u16 data_len, u8 attr)
423{
424 struct {
425 u8 port;
426 __le16 size;
427 u8 attr;
428 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
429 } __packed *tx = dln2->buf;
430 unsigned tx_len;
431
432 BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
433
434 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
435 return -EINVAL;
436
437 tx->port = dln2->port;
438 tx->size = cpu_to_le16(data_len);
439 tx->attr = attr;
440
441 dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
442
443 tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
444 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
445}
446
447/*
448 * Perform one read operation.
449 */
450static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
451 u16 data_len, u8 attr)
452{
453 int ret;
454 struct {
455 u8 port;
456 __le16 size;
457 u8 attr;
458 } __packed tx;
459 struct {
460 __le16 size;
461 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
462 } __packed *rx = dln2->buf;
463 unsigned rx_len = sizeof(*rx);
464
465 BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
466
467 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
468 return -EINVAL;
469
470 tx.port = dln2->port;
471 tx.size = cpu_to_le16(data_len);
472 tx.attr = attr;
473
474 ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
475 rx, &rx_len);
476 if (ret < 0)
477 return ret;
478 if (rx_len < sizeof(rx->size) + data_len)
479 return -EPROTO;
480 if (le16_to_cpu(rx->size) != data_len)
481 return -EPROTO;
482
483 dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
484
485 return 0;
486}
487
488/*
489 * Perform one write & read operation.
490 */
491static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
492 u8 *rx_data, u16 data_len, u8 attr)
493{
494 int ret;
495 struct {
496 u8 port;
497 __le16 size;
498 u8 attr;
499 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
500 } __packed *tx;
501 struct {
502 __le16 size;
503 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
504 } __packed *rx;
505 unsigned tx_len, rx_len;
506
507 BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
508 sizeof(*rx) > DLN2_SPI_BUF_SIZE);
509
510 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
511 return -EINVAL;
512
513 /*
514 * Since this is a pseudo full-duplex communication, we're perfectly
515 * safe to use the same buffer for both tx and rx. When DLN2 sends the
516 * response back, with the rx data, we don't need the tx buffer anymore.
517 */
518 tx = dln2->buf;
519 rx = dln2->buf;
520
521 tx->port = dln2->port;
522 tx->size = cpu_to_le16(data_len);
523 tx->attr = attr;
524
525 dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
526
527 tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
528 rx_len = sizeof(*rx);
529
530 ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
531 rx, &rx_len);
532 if (ret < 0)
533 return ret;
534 if (rx_len < sizeof(rx->size) + data_len)
535 return -EPROTO;
536 if (le16_to_cpu(rx->size) != data_len)
537 return -EPROTO;
538
539 dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
540
541 return 0;
542}
543
544/*
545 * Read/Write wrapper. It will automatically split an operation into multiple
546 * single ones due to device buffer constraints.
547 */
548static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
549 u8 *rx_data, u16 data_len, u8 attr) {
550 int ret;
551 u16 len;
552 u8 temp_attr;
553 u16 remaining = data_len;
554 u16 offset;
555
556 do {
557 if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
558 len = DLN2_SPI_MAX_XFER_SIZE;
559 temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
560 } else {
561 len = remaining;
562 temp_attr = attr;
563 }
564
565 offset = data_len - remaining;
566
567 if (tx_data && rx_data) {
568 ret = dln2_spi_read_write_one(dln2,
569 tx_data + offset,
570 rx_data + offset,
571 len, temp_attr);
572 } else if (tx_data) {
573 ret = dln2_spi_write_one(dln2,
574 tx_data + offset,
575 len, temp_attr);
576 } else if (rx_data) {
577 ret = dln2_spi_read_one(dln2,
578 rx_data + offset,
579 len, temp_attr);
580 } else {
581 return -EINVAL;
582 }
583
584 if (ret < 0)
585 return ret;
586
587 remaining -= len;
588 } while (remaining);
589
590 return 0;
591}
592
593static int dln2_spi_prepare_message(struct spi_master *master,
594 struct spi_message *message)
595{
596 int ret;
597 struct dln2_spi *dln2 = spi_master_get_devdata(master);
598 struct spi_device *spi = message->spi;
599
600 if (dln2->cs != spi->chip_select) {
601 ret = dln2_spi_cs_set_one(dln2, spi->chip_select);
602 if (ret < 0)
603 return ret;
604
605 dln2->cs = spi->chip_select;
606 }
607
608 return 0;
609}
610
611static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
612 u8 bpw, u8 mode)
613{
614 int ret;
615 bool bus_setup_change;
616
617 bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
618 dln2->bpw != bpw;
619
620 if (!bus_setup_change)
621 return 0;
622
623 ret = dln2_spi_enable(dln2, false);
624 if (ret < 0)
625 return ret;
626
627 if (dln2->speed != speed) {
628 ret = dln2_spi_set_speed(dln2, speed);
629 if (ret < 0)
630 return ret;
631
632 dln2->speed = speed;
633 }
634
635 if (dln2->mode != mode) {
636 ret = dln2_spi_set_mode(dln2, mode & 0x3);
637 if (ret < 0)
638 return ret;
639
640 dln2->mode = mode;
641 }
642
643 if (dln2->bpw != bpw) {
644 ret = dln2_spi_set_bpw(dln2, bpw);
645 if (ret < 0)
646 return ret;
647
648 dln2->bpw = bpw;
649 }
650
651 return dln2_spi_enable(dln2, true);
652}
653
654static int dln2_spi_transfer_one(struct spi_master *master,
655 struct spi_device *spi,
656 struct spi_transfer *xfer)
657{
658 struct dln2_spi *dln2 = spi_master_get_devdata(master);
659 int status;
660 u8 attr = 0;
661
662 status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
663 xfer->bits_per_word,
664 spi->mode);
665 if (status < 0) {
666 dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
667 return status;
668 }
669
670 if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
671 attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
672
673 status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
674 xfer->len, attr);
675 if (status < 0)
676 dev_err(&dln2->pdev->dev, "write/read failed!\n");
677
678 return status;
679}
680
681static int dln2_spi_probe(struct platform_device *pdev)
682{
683 struct spi_master *master;
684 struct dln2_spi *dln2;
685 struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
686 int ret;
687
688 master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
689 if (!master)
690 return -ENOMEM;
691
692 platform_set_drvdata(pdev, master);
693
694 dln2 = spi_master_get_devdata(master);
695
696 dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
697 if (!dln2->buf) {
698 ret = -ENOMEM;
699 goto exit_free_master;
700 }
701
702 dln2->master = master;
703 dln2->pdev = pdev;
704 dln2->port = pdata->port;
705 /* cs/mode can never be 0xff, so the first transfer will set them */
706 dln2->cs = 0xff;
707 dln2->mode = 0xff;
708
709 /* disable SPI module before continuing with the setup */
710 ret = dln2_spi_enable(dln2, false);
711 if (ret < 0) {
712 dev_err(&pdev->dev, "Failed to disable SPI module\n");
713 goto exit_free_master;
714 }
715
716 ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
717 if (ret < 0) {
718 dev_err(&pdev->dev, "Failed to get number of CS pins\n");
719 goto exit_free_master;
720 }
721
722 ret = dln2_spi_get_speed_range(dln2,
723 &master->min_speed_hz,
724 &master->max_speed_hz);
725 if (ret < 0) {
726 dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
727 goto exit_free_master;
728 }
729
730 ret = dln2_spi_get_supported_frame_sizes(dln2,
731 &master->bits_per_word_mask);
732 if (ret < 0) {
733 dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
734 goto exit_free_master;
735 }
736
737 ret = dln2_spi_cs_enable_all(dln2, true);
738 if (ret < 0) {
739 dev_err(&pdev->dev, "Failed to enable CS pins\n");
740 goto exit_free_master;
741 }
742
743 master->bus_num = -1;
744 master->mode_bits = SPI_CPOL | SPI_CPHA;
745 master->prepare_message = dln2_spi_prepare_message;
746 master->transfer_one = dln2_spi_transfer_one;
747 master->auto_runtime_pm = true;
748
749 /* enable SPI module, we're good to go */
750 ret = dln2_spi_enable(dln2, true);
751 if (ret < 0) {
752 dev_err(&pdev->dev, "Failed to enable SPI module\n");
753 goto exit_free_master;
754 }
755
756 pm_runtime_set_autosuspend_delay(&pdev->dev,
757 DLN2_RPM_AUTOSUSPEND_TIMEOUT);
758 pm_runtime_use_autosuspend(&pdev->dev);
759 pm_runtime_set_active(&pdev->dev);
760 pm_runtime_enable(&pdev->dev);
761
762 ret = devm_spi_register_master(&pdev->dev, master);
763 if (ret < 0) {
764 dev_err(&pdev->dev, "Failed to register master\n");
765 goto exit_register;
766 }
767
768 return ret;
769
770exit_register:
771 pm_runtime_disable(&pdev->dev);
772 pm_runtime_set_suspended(&pdev->dev);
773
774 if (dln2_spi_enable(dln2, false) < 0)
775 dev_err(&pdev->dev, "Failed to disable SPI module\n");
776exit_free_master:
777 spi_master_put(master);
778
779 return ret;
780}
781
782static int dln2_spi_remove(struct platform_device *pdev)
783{
784 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
785 struct dln2_spi *dln2 = spi_master_get_devdata(master);
786
787 pm_runtime_disable(&pdev->dev);
788
789 if (dln2_spi_enable(dln2, false) < 0)
790 dev_err(&pdev->dev, "Failed to disable SPI module\n");
791
792 return 0;
793}
794
795#ifdef CONFIG_PM_SLEEP
796static int dln2_spi_suspend(struct device *dev)
797{
798 int ret;
799 struct spi_master *master = dev_get_drvdata(dev);
800 struct dln2_spi *dln2 = spi_master_get_devdata(master);
801
802 ret = spi_master_suspend(master);
803 if (ret < 0)
804 return ret;
805
806 if (!pm_runtime_suspended(dev)) {
807 ret = dln2_spi_enable(dln2, false);
808 if (ret < 0)
809 return ret;
810 }
811
812 /*
813 * USB power may be cut off during sleep. Resetting the following
814 * parameters will force the board to be set up before first transfer.
815 */
816 dln2->cs = 0xff;
817 dln2->speed = 0;
818 dln2->bpw = 0;
819 dln2->mode = 0xff;
820
821 return 0;
822}
823
824static int dln2_spi_resume(struct device *dev)
825{
826 int ret;
827 struct spi_master *master = dev_get_drvdata(dev);
828 struct dln2_spi *dln2 = spi_master_get_devdata(master);
829
830 if (!pm_runtime_suspended(dev)) {
831 ret = dln2_spi_cs_enable_all(dln2, true);
832 if (ret < 0)
833 return ret;
834
835 ret = dln2_spi_enable(dln2, true);
836 if (ret < 0)
837 return ret;
838 }
839
840 return spi_master_resume(master);
841}
842#endif /* CONFIG_PM_SLEEP */
843
844#ifdef CONFIG_PM
845static int dln2_spi_runtime_suspend(struct device *dev)
846{
847 struct spi_master *master = dev_get_drvdata(dev);
848 struct dln2_spi *dln2 = spi_master_get_devdata(master);
849
850 return dln2_spi_enable(dln2, false);
851}
852
853static int dln2_spi_runtime_resume(struct device *dev)
854{
855 struct spi_master *master = dev_get_drvdata(dev);
856 struct dln2_spi *dln2 = spi_master_get_devdata(master);
857
858 return dln2_spi_enable(dln2, true);
859}
860#endif /* CONFIG_PM */
861
862static const struct dev_pm_ops dln2_spi_pm = {
863 SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
864 SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
865 dln2_spi_runtime_resume, NULL)
866};
867
868static struct platform_driver spi_dln2_driver = {
869 .driver = {
870 .name = "dln2-spi",
871 .pm = &dln2_spi_pm,
872 },
873 .probe = dln2_spi_probe,
874 .remove = dln2_spi_remove,
875};
876module_platform_driver(spi_dln2_driver);
877
878MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
879MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
880MODULE_LICENSE("GPL v2");
881MODULE_ALIAS("platform:dln2-spi");
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index a67d37c7e3c0..a0197fd4e95c 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -247,9 +247,9 @@ static struct dw_spi_dma_ops mid_dma_ops = {
247 247
248/* Some specific info for SPI0 controller on Intel MID */ 248/* Some specific info for SPI0 controller on Intel MID */
249 249
250/* HW info for MRST CLk Control Unit, one 32b reg */ 250/* HW info for MRST Clk Control Unit, 32b reg per controller */
251#define MRST_SPI_CLK_BASE 100000000 /* 100m */ 251#define MRST_SPI_CLK_BASE 100000000 /* 100m */
252#define MRST_CLK_SPI0_REG 0xff11d86c 252#define MRST_CLK_SPI_REG 0xff11d86c
253#define CLK_SPI_BDIV_OFFSET 0 253#define CLK_SPI_BDIV_OFFSET 0
254#define CLK_SPI_BDIV_MASK 0x00000007 254#define CLK_SPI_BDIV_MASK 0x00000007
255#define CLK_SPI_CDIV_OFFSET 9 255#define CLK_SPI_CDIV_OFFSET 9
@@ -261,16 +261,17 @@ int dw_spi_mid_init(struct dw_spi *dws)
261 void __iomem *clk_reg; 261 void __iomem *clk_reg;
262 u32 clk_cdiv; 262 u32 clk_cdiv;
263 263
264 clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); 264 clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
265 if (!clk_reg) 265 if (!clk_reg)
266 return -ENOMEM; 266 return -ENOMEM;
267 267
268 /* get SPI controller operating freq info */ 268 /* Get SPI controller operating freq info */
269 clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; 269 clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
270 clk_cdiv &= CLK_SPI_CDIV_MASK;
271 clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
270 dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); 272 dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
271 iounmap(clk_reg);
272 273
273 dws->num_cs = 16; 274 iounmap(clk_reg);
274 275
275#ifdef CONFIG_SPI_DW_MID_DMA 276#ifdef CONFIG_SPI_DW_MID_DMA
276 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); 277 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index ba68da12cdf0..5ba331047cbe 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -30,10 +30,20 @@ struct dw_spi_pci {
30 30
31struct spi_pci_desc { 31struct spi_pci_desc {
32 int (*setup)(struct dw_spi *); 32 int (*setup)(struct dw_spi *);
33 u16 num_cs;
34 u16 bus_num;
33}; 35};
34 36
35static struct spi_pci_desc spi_pci_mid_desc = { 37static struct spi_pci_desc spi_pci_mid_desc_1 = {
36 .setup = dw_spi_mid_init, 38 .setup = dw_spi_mid_init,
39 .num_cs = 32,
40 .bus_num = 0,
41};
42
43static struct spi_pci_desc spi_pci_mid_desc_2 = {
44 .setup = dw_spi_mid_init,
45 .num_cs = 4,
46 .bus_num = 1,
37}; 47};
38 48
39static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 49static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -65,18 +75,23 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
65 75
66 dws->regs = pcim_iomap_table(pdev)[pci_bar]; 76 dws->regs = pcim_iomap_table(pdev)[pci_bar];
67 77
68 dws->bus_num = 0;
69 dws->num_cs = 4;
70 dws->irq = pdev->irq; 78 dws->irq = pdev->irq;
71 79
72 /* 80 /*
73 * Specific handling for paltforms, like dma setup, 81 * Specific handling for paltforms, like dma setup,
74 * clock rate, FIFO depth. 82 * clock rate, FIFO depth.
75 */ 83 */
76 if (desc && desc->setup) { 84 if (desc) {
77 ret = desc->setup(dws); 85 dws->num_cs = desc->num_cs;
78 if (ret) 86 dws->bus_num = desc->bus_num;
79 return ret; 87
88 if (desc->setup) {
89 ret = desc->setup(dws);
90 if (ret)
91 return ret;
92 }
93 } else {
94 return -ENODEV;
80 } 95 }
81 96
82 ret = dw_spi_add_host(&pdev->dev, dws); 97 ret = dw_spi_add_host(&pdev->dev, dws);
@@ -121,7 +136,14 @@ static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
121 136
122static const struct pci_device_id pci_ids[] = { 137static const struct pci_device_id pci_ids[] = {
123 /* Intel MID platform SPI controller 0 */ 138 /* Intel MID platform SPI controller 0 */
124 { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc}, 139 /*
140 * The access to the device 8086:0801 is disabled by HW, since it's
141 * exclusively used by SCU to communicate with MSIC.
142 */
143 /* Intel MID platform SPI controller 1 */
144 { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
145 /* Intel MID platform SPI controller 2 */
146 { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
125 {}, 147 {},
126}; 148};
127 149
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 8edcd1b84562..5a97a62b298a 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -608,7 +608,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
608} 608}
609 609
610/* Restart the controller, disable all interrupts, clean rx fifo */ 610/* Restart the controller, disable all interrupts, clean rx fifo */
611static void spi_hw_init(struct dw_spi *dws) 611static void spi_hw_init(struct device *dev, struct dw_spi *dws)
612{ 612{
613 spi_enable_chip(dws, 0); 613 spi_enable_chip(dws, 0);
614 spi_mask_intr(dws, 0xff); 614 spi_mask_intr(dws, 0xff);
@@ -626,9 +626,10 @@ static void spi_hw_init(struct dw_spi *dws)
626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) 626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
627 break; 627 break;
628 } 628 }
629 dw_writew(dws, DW_SPI_TXFLTR, 0);
629 630
630 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; 631 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
631 dw_writew(dws, DW_SPI_TXFLTR, 0); 632 dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
632 } 633 }
633} 634}
634 635
@@ -668,7 +669,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
668 master->dev.of_node = dev->of_node; 669 master->dev.of_node = dev->of_node;
669 670
670 /* Basic HW init */ 671 /* Basic HW init */
671 spi_hw_init(dws); 672 spi_hw_init(dev, dws);
672 673
673 if (dws->dma_ops && dws->dma_ops->dma_init) { 674 if (dws->dma_ops && dws->dma_ops->dma_init) {
674 ret = dws->dma_ops->dma_init(dws); 675 ret = dws->dma_ops->dma_init(dws);
@@ -731,7 +732,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
731{ 732{
732 int ret; 733 int ret;
733 734
734 spi_hw_init(dws); 735 spi_hw_init(&dws->master->dev, dws);
735 ret = spi_master_resume(dws->master); 736 ret = spi_master_resume(dws->master);
736 if (ret) 737 if (ret)
737 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); 738 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 912b9037e9cf..286b2c81fc6b 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -353,16 +353,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
353 return 0; 353 return 0;
354} 354}
355 355
356static int falcon_sflash_prepare_xfer(struct spi_master *master)
357{
358 return 0;
359}
360
361static int falcon_sflash_unprepare_xfer(struct spi_master *master)
362{
363 return 0;
364}
365
366static int falcon_sflash_xfer_one(struct spi_master *master, 356static int falcon_sflash_xfer_one(struct spi_master *master,
367 struct spi_message *m) 357 struct spi_message *m)
368{ 358{
@@ -420,9 +410,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
420 master->mode_bits = SPI_MODE_3; 410 master->mode_bits = SPI_MODE_3;
421 master->flags = SPI_MASTER_HALF_DUPLEX; 411 master->flags = SPI_MASTER_HALF_DUPLEX;
422 master->setup = falcon_sflash_setup; 412 master->setup = falcon_sflash_setup;
423 master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
424 master->transfer_one_message = falcon_sflash_xfer_one; 413 master->transfer_one_message = falcon_sflash_xfer_one;
425 master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer;
426 master->dev.of_node = pdev->dev.of_node; 414 master->dev.of_node = pdev->dev.of_node;
427 415
428 ret = devm_spi_register_master(&pdev->dev, master); 416 ret = devm_spi_register_master(&pdev->dev, master);
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index e85ab1cb17a2..9c46a3058743 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -20,6 +20,7 @@
20#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
21#include <linux/fsl_devices.h> 21#include <linux/fsl_devices.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h>
23#include <linux/of_address.h> 24#include <linux/of_address.h>
24#include <linux/spi/spi.h> 25#include <linux/spi/spi.h>
25#include <linux/types.h> 26#include <linux/types.h>
@@ -68,6 +69,7 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
68 } 69 }
69 } 70 }
70} 71}
72EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
71 73
72static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) 74static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
73{ 75{
@@ -162,6 +164,7 @@ err_rx_dma:
162 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); 164 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
163 return -ENOMEM; 165 return -ENOMEM;
164} 166}
167EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
165 168
166void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) 169void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
167{ 170{
@@ -174,6 +177,7 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
174 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); 177 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
175 mspi->xfer_in_progress = NULL; 178 mspi->xfer_in_progress = NULL;
176} 179}
180EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
177 181
178void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) 182void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
179{ 183{
@@ -198,6 +202,7 @@ void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
198 else 202 else
199 complete(&mspi->done); 203 complete(&mspi->done);
200} 204}
205EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
201 206
202static void *fsl_spi_alloc_dummy_rx(void) 207static void *fsl_spi_alloc_dummy_rx(void)
203{ 208{
@@ -375,6 +380,7 @@ err_pram:
375 fsl_spi_free_dummy_rx(); 380 fsl_spi_free_dummy_rx();
376 return -ENOMEM; 381 return -ENOMEM;
377} 382}
383EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
378 384
379void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) 385void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
380{ 386{
@@ -389,3 +395,6 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
389 cpm_muram_free(cpm_muram_offset(mspi->pram)); 395 cpm_muram_free(cpm_muram_offset(mspi->pram));
390 fsl_spi_free_dummy_rx(); 396 fsl_spi_free_dummy_rx();
391} 397}
398EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
399
400MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4cda994d3f40..d1a39249704a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -106,7 +106,7 @@ struct chip_data {
106}; 106};
107 107
108struct fsl_dspi { 108struct fsl_dspi {
109 struct spi_bitbang bitbang; 109 struct spi_master *master;
110 struct platform_device *pdev; 110 struct platform_device *pdev;
111 111
112 struct regmap *regmap; 112 struct regmap *regmap;
@@ -114,6 +114,7 @@ struct fsl_dspi {
114 struct clk *clk; 114 struct clk *clk;
115 115
116 struct spi_transfer *cur_transfer; 116 struct spi_transfer *cur_transfer;
117 struct spi_message *cur_msg;
117 struct chip_data *cur_chip; 118 struct chip_data *cur_chip;
118 size_t len; 119 size_t len;
119 void *tx; 120 void *tx;
@@ -123,6 +124,7 @@ struct fsl_dspi {
123 char dataflags; 124 char dataflags;
124 u8 cs; 125 u8 cs;
125 u16 void_write_data; 126 u16 void_write_data;
127 u32 cs_change;
126 128
127 wait_queue_head_t waitq; 129 wait_queue_head_t waitq;
128 u32 waitflags; 130 u32 waitflags;
@@ -225,6 +227,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
225 if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) { 227 if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
226 /* last transfer in the transfer */ 228 /* last transfer in the transfer */
227 dspi_pushr |= SPI_PUSHR_EOQ; 229 dspi_pushr |= SPI_PUSHR_EOQ;
230 if ((dspi->cs_change) && (!dspi->len))
231 dspi_pushr &= ~SPI_PUSHR_CONT;
228 } else if (tx_word && (dspi->len == 1)) 232 } else if (tx_word && (dspi->len == 1))
229 dspi_pushr |= SPI_PUSHR_EOQ; 233 dspi_pushr |= SPI_PUSHR_EOQ;
230 234
@@ -246,6 +250,7 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
246 int rx_count = 0; 250 int rx_count = 0;
247 int rx_word = is_double_byte_mode(dspi); 251 int rx_word = is_double_byte_mode(dspi);
248 u16 d; 252 u16 d;
253
249 while ((dspi->rx < dspi->rx_end) 254 while ((dspi->rx < dspi->rx_end)
250 && (rx_count < DSPI_FIFO_SIZE)) { 255 && (rx_count < DSPI_FIFO_SIZE)) {
251 if (rx_word) { 256 if (rx_word) {
@@ -276,86 +281,89 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
276 return rx_count; 281 return rx_count;
277} 282}
278 283
279static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t) 284static int dspi_transfer_one_message(struct spi_master *master,
285 struct spi_message *message)
280{ 286{
281 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); 287 struct fsl_dspi *dspi = spi_master_get_devdata(master);
282 dspi->cur_transfer = t; 288 struct spi_device *spi = message->spi;
283 dspi->cur_chip = spi_get_ctldata(spi); 289 struct spi_transfer *transfer;
284 dspi->cs = spi->chip_select; 290 int status = 0;
285 dspi->void_write_data = dspi->cur_chip->void_write_data; 291 message->actual_length = 0;
286 292
287 dspi->dataflags = 0; 293 list_for_each_entry(transfer, &message->transfers, transfer_list) {
288 dspi->tx = (void *)t->tx_buf; 294 dspi->cur_transfer = transfer;
289 dspi->tx_end = dspi->tx + t->len; 295 dspi->cur_msg = message;
290 dspi->rx = t->rx_buf; 296 dspi->cur_chip = spi_get_ctldata(spi);
291 dspi->rx_end = dspi->rx + t->len; 297 dspi->cs = spi->chip_select;
292 dspi->len = t->len; 298 if (dspi->cur_transfer->transfer_list.next
293 299 == &dspi->cur_msg->transfers)
294 if (!dspi->rx) 300 transfer->cs_change = 1;
295 dspi->dataflags |= TRAN_STATE_RX_VOID; 301 dspi->cs_change = transfer->cs_change;
296 302 dspi->void_write_data = dspi->cur_chip->void_write_data;
297 if (!dspi->tx) 303
298 dspi->dataflags |= TRAN_STATE_TX_VOID; 304 dspi->dataflags = 0;
299 305 dspi->tx = (void *)transfer->tx_buf;
300 regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val); 306 dspi->tx_end = dspi->tx + transfer->len;
301 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val); 307 dspi->rx = transfer->rx_buf;
302 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); 308 dspi->rx_end = dspi->rx + transfer->len;
303 309 dspi->len = transfer->len;
304 if (t->speed_hz) 310
311 if (!dspi->rx)
312 dspi->dataflags |= TRAN_STATE_RX_VOID;
313
314 if (!dspi->tx)
315 dspi->dataflags |= TRAN_STATE_TX_VOID;
316
317 regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
318 regmap_update_bits(dspi->regmap, SPI_MCR,
319 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
320 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
305 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), 321 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
306 dspi->cur_chip->ctar_val); 322 dspi->cur_chip->ctar_val);
323 if (transfer->speed_hz)
324 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
325 dspi->cur_chip->ctar_val);
307 326
308 dspi_transfer_write(dspi); 327 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
328 message->actual_length += dspi_transfer_write(dspi);
309 329
310 if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) 330 if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
311 dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); 331 dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
312 dspi->waitflags = 0; 332 dspi->waitflags = 0;
313
314 return t->len - dspi->len;
315}
316 333
317static void dspi_chipselect(struct spi_device *spi, int value) 334 if (transfer->delay_usecs)
318{ 335 udelay(transfer->delay_usecs);
319 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
320 unsigned int pushr;
321
322 regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
323
324 switch (value) {
325 case BITBANG_CS_ACTIVE:
326 pushr |= SPI_PUSHR_CONT;
327 break;
328 case BITBANG_CS_INACTIVE:
329 pushr &= ~SPI_PUSHR_CONT;
330 break;
331 } 336 }
332 337
333 regmap_write(dspi->regmap, SPI_PUSHR, pushr); 338 message->status = status;
339 spi_finalize_current_message(master);
340
341 return status;
334} 342}
335 343
336static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 344static int dspi_setup(struct spi_device *spi)
337{ 345{
338 struct chip_data *chip; 346 struct chip_data *chip;
339 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); 347 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
340 unsigned char br = 0, pbr = 0, fmsz = 0; 348 unsigned char br = 0, pbr = 0, fmsz = 0;
341 349
350 if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
351 fmsz = spi->bits_per_word - 1;
352 } else {
353 pr_err("Invalid wordsize\n");
354 return -ENODEV;
355 }
356
342 /* Only alloc on first setup */ 357 /* Only alloc on first setup */
343 chip = spi_get_ctldata(spi); 358 chip = spi_get_ctldata(spi);
344 if (chip == NULL) { 359 if (chip == NULL) {
345 chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data), 360 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
346 GFP_KERNEL);
347 if (!chip) 361 if (!chip)
348 return -ENOMEM; 362 return -ENOMEM;
349 } 363 }
350 364
351 chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | 365 chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
352 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; 366 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
353 if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
354 fmsz = spi->bits_per_word - 1;
355 } else {
356 pr_err("Invalid wordsize\n");
357 return -ENODEV;
358 }
359 367
360 chip->void_write_data = 0; 368 chip->void_write_data = 0;
361 369
@@ -374,34 +382,34 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
374 return 0; 382 return 0;
375} 383}
376 384
377static int dspi_setup(struct spi_device *spi) 385static void dspi_cleanup(struct spi_device *spi)
378{ 386{
379 if (!spi->max_speed_hz) 387 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
380 return -EINVAL; 388
389 dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
390 spi->master->bus_num, spi->chip_select);
381 391
382 return dspi_setup_transfer(spi, NULL); 392 kfree(chip);
383} 393}
384 394
385static irqreturn_t dspi_interrupt(int irq, void *dev_id) 395static irqreturn_t dspi_interrupt(int irq, void *dev_id)
386{ 396{
387 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; 397 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
388 398
389 regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF); 399 struct spi_message *msg = dspi->cur_msg;
390 400
401 regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
391 dspi_transfer_read(dspi); 402 dspi_transfer_read(dspi);
392 403
393 if (!dspi->len) { 404 if (!dspi->len) {
394 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) 405 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
395 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 406 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
396 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); 407 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
397 408
398 dspi->waitflags = 1; 409 dspi->waitflags = 1;
399 wake_up_interruptible(&dspi->waitq); 410 wake_up_interruptible(&dspi->waitq);
400 } else { 411 } else
401 dspi_transfer_write(dspi); 412 msg->actual_length += dspi_transfer_write(dspi);
402
403 return IRQ_HANDLED;
404 }
405 413
406 return IRQ_HANDLED; 414 return IRQ_HANDLED;
407} 415}
@@ -460,13 +468,14 @@ static int dspi_probe(struct platform_device *pdev)
460 468
461 dspi = spi_master_get_devdata(master); 469 dspi = spi_master_get_devdata(master);
462 dspi->pdev = pdev; 470 dspi->pdev = pdev;
463 dspi->bitbang.master = master; 471 dspi->master = master;
464 dspi->bitbang.chipselect = dspi_chipselect; 472
465 dspi->bitbang.setup_transfer = dspi_setup_transfer; 473 master->transfer = NULL;
466 dspi->bitbang.txrx_bufs = dspi_txrx_transfer; 474 master->setup = dspi_setup;
467 dspi->bitbang.master->setup = dspi_setup; 475 master->transfer_one_message = dspi_transfer_one_message;
468 dspi->bitbang.master->dev.of_node = pdev->dev.of_node; 476 master->dev.of_node = pdev->dev.of_node;
469 477
478 master->cleanup = dspi_cleanup;
470 master->mode_bits = SPI_CPOL | SPI_CPHA; 479 master->mode_bits = SPI_CPOL | SPI_CPHA;
471 master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | 480 master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
472 SPI_BPW_MASK(16); 481 SPI_BPW_MASK(16);
@@ -525,7 +534,7 @@ static int dspi_probe(struct platform_device *pdev)
525 init_waitqueue_head(&dspi->waitq); 534 init_waitqueue_head(&dspi->waitq);
526 platform_set_drvdata(pdev, master); 535 platform_set_drvdata(pdev, master);
527 536
528 ret = spi_bitbang_start(&dspi->bitbang); 537 ret = spi_register_master(master);
529 if (ret != 0) { 538 if (ret != 0) {
530 dev_err(&pdev->dev, "Problem registering DSPI master\n"); 539 dev_err(&pdev->dev, "Problem registering DSPI master\n");
531 goto out_clk_put; 540 goto out_clk_put;
@@ -547,9 +556,9 @@ static int dspi_remove(struct platform_device *pdev)
547 struct fsl_dspi *dspi = spi_master_get_devdata(master); 556 struct fsl_dspi *dspi = spi_master_get_devdata(master);
548 557
549 /* Disconnect from the SPI framework */ 558 /* Disconnect from the SPI framework */
550 spi_bitbang_stop(&dspi->bitbang);
551 clk_disable_unprepare(dspi->clk); 559 clk_disable_unprepare(dspi->clk);
552 spi_master_put(dspi->bitbang.master); 560 spi_unregister_master(dspi->master);
561 spi_master_put(dspi->master);
553 562
554 return 0; 563 return 0;
555} 564}
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 446b737e1532..cb35d2f0d0e6 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/module.h>
24#include <linux/of_platform.h> 25#include <linux/of_platform.h>
25#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
26#ifdef CONFIG_FSL_SOC 27#ifdef CONFIG_FSL_SOC
@@ -35,7 +36,8 @@ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
35 type *rx = mpc8xxx_spi->rx; \ 36 type *rx = mpc8xxx_spi->rx; \
36 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ 37 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
37 mpc8xxx_spi->rx = rx; \ 38 mpc8xxx_spi->rx = rx; \
38} 39} \
40EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
39 41
40#define MPC8XXX_SPI_TX_BUF(type) \ 42#define MPC8XXX_SPI_TX_BUF(type) \
41u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ 43u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
@@ -47,7 +49,8 @@ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
47 data = *tx++ << mpc8xxx_spi->tx_shift; \ 49 data = *tx++ << mpc8xxx_spi->tx_shift; \
48 mpc8xxx_spi->tx = tx; \ 50 mpc8xxx_spi->tx = tx; \
49 return data; \ 51 return data; \
50} 52} \
53EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
51 54
52MPC8XXX_SPI_RX_BUF(u8) 55MPC8XXX_SPI_RX_BUF(u8)
53MPC8XXX_SPI_RX_BUF(u16) 56MPC8XXX_SPI_RX_BUF(u16)
@@ -60,6 +63,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
60{ 63{
61 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); 64 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
62} 65}
66EXPORT_SYMBOL_GPL(to_of_pinfo);
63 67
64const char *mpc8xxx_spi_strmode(unsigned int flags) 68const char *mpc8xxx_spi_strmode(unsigned int flags)
65{ 69{
@@ -75,6 +79,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
75 } 79 }
76 return "CPU"; 80 return "CPU";
77} 81}
82EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
78 83
79void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 84void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
80 unsigned int irq) 85 unsigned int irq)
@@ -102,13 +107,12 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
102 mpc8xxx_spi->rx_shift = 0; 107 mpc8xxx_spi->rx_shift = 0;
103 mpc8xxx_spi->tx_shift = 0; 108 mpc8xxx_spi->tx_shift = 0;
104 109
105 init_completion(&mpc8xxx_spi->done);
106
107 master->bus_num = pdata->bus_num; 110 master->bus_num = pdata->bus_num;
108 master->num_chipselect = pdata->max_chipselect; 111 master->num_chipselect = pdata->max_chipselect;
109 112
110 init_completion(&mpc8xxx_spi->done); 113 init_completion(&mpc8xxx_spi->done);
111} 114}
115EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
112 116
113int mpc8xxx_spi_remove(struct device *dev) 117int mpc8xxx_spi_remove(struct device *dev)
114{ 118{
@@ -127,6 +131,7 @@ int mpc8xxx_spi_remove(struct device *dev)
127 131
128 return 0; 132 return 0;
129} 133}
134EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
130 135
131int of_mpc8xxx_spi_probe(struct platform_device *ofdev) 136int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
132{ 137{
@@ -173,3 +178,6 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
173 178
174 return 0; 179 return 0;
175} 180}
181EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
182
183MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index b4ed04e8862f..1326a392adca 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -28,7 +28,7 @@ struct mpc8xxx_spi {
28 /* rx & tx bufs from the spi_transfer */ 28 /* rx & tx bufs from the spi_transfer */
29 const void *tx; 29 const void *tx;
30 void *rx; 30 void *rx;
31#ifdef CONFIG_SPI_FSL_ESPI 31#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
32 int len; 32 int len;
33#endif 33#endif
34 34
@@ -68,7 +68,7 @@ struct mpc8xxx_spi {
68 68
69 unsigned int flags; 69 unsigned int flags;
70 70
71#ifdef CONFIG_SPI_FSL_SPI 71#if IS_ENABLED(CONFIG_SPI_FSL_SPI)
72 int type; 72 int type;
73 int native_chipselects; 73 int native_chipselects;
74 u8 max_bits_per_word; 74 u8 max_bits_per_word;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index aee4e7589568..1c34c9314c8a 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20#include <linux/kernel.h> 16#include <linux/kernel.h>
21#include <linux/module.h> 17#include <linux/module.h>
@@ -92,7 +88,7 @@ struct spi_gpio {
92 88
93/*----------------------------------------------------------------------*/ 89/*----------------------------------------------------------------------*/
94 90
95static inline struct spi_gpio * __pure 91static inline struct spi_gpio *__pure
96spi_to_spi_gpio(const struct spi_device *spi) 92spi_to_spi_gpio(const struct spi_device *spi)
97{ 93{
98 const struct spi_bitbang *bang; 94 const struct spi_bitbang *bang;
@@ -103,7 +99,7 @@ spi_to_spi_gpio(const struct spi_device *spi)
103 return spi_gpio; 99 return spi_gpio;
104} 100}
105 101
106static inline struct spi_gpio_platform_data * __pure 102static inline struct spi_gpio_platform_data *__pure
107spi_to_pdata(const struct spi_device *spi) 103spi_to_pdata(const struct spi_device *spi)
108{ 104{
109 return &spi_to_spi_gpio(spi)->pdata; 105 return &spi_to_spi_gpio(spi)->pdata;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index aad6683db81b..c01567d53581 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
160 unsigned int count = 0; 160 unsigned int count = 0;
161 u32 status; 161 u32 status;
162 162
163 while (count < max) { 163 while (count < max / 4) {
164 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); 164 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
165 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); 165 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
166 if (status & SPFI_INTERRUPT_SDFUL) 166 if (status & SPFI_INTERRUPT_SDFUL)
167 break; 167 break;
168 spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA); 168 spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
169 count += 4; 169 count++;
170 } 170 }
171 171
172 return count; 172 return count * 4;
173} 173}
174 174
175static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf, 175static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
@@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
196 unsigned int count = 0; 196 unsigned int count = 0;
197 u32 status; 197 u32 status;
198 198
199 while (count < max) { 199 while (count < max / 4) {
200 spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT, 200 spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
201 SPFI_INTERRUPT_CLEAR); 201 SPFI_INTERRUPT_CLEAR);
202 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); 202 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
203 if (!(status & SPFI_INTERRUPT_GDEX32BIT)) 203 if (!(status & SPFI_INTERRUPT_GDEX32BIT))
204 break; 204 break;
205 buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA); 205 buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
206 count += 4; 206 count++;
207 } 207 }
208 208
209 return count; 209 return count * 4;
210} 210}
211 211
212static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf, 212static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
@@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master,
251 time_before(jiffies, timeout)) { 251 time_before(jiffies, timeout)) {
252 unsigned int tx_count, rx_count; 252 unsigned int tx_count, rx_count;
253 253
254 switch (xfer->bits_per_word) { 254 if (tx_bytes >= 4)
255 case 32:
256 tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes); 255 tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
257 rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes); 256 else
258 break;
259 case 8:
260 default:
261 tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes); 257 tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
258
259 if (rx_bytes >= 4)
260 rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
261 else
262 rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes); 262 rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
263 break;
264 }
265 263
266 tx_buf += tx_count; 264 tx_buf += tx_count;
267 rx_buf += rx_count; 265 rx_buf += rx_count;
@@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master,
331 329
332 if (xfer->rx_buf) { 330 if (xfer->rx_buf) {
333 rxconf.direction = DMA_DEV_TO_MEM; 331 rxconf.direction = DMA_DEV_TO_MEM;
334 switch (xfer->bits_per_word) { 332 if (xfer->len % 4 == 0) {
335 case 32:
336 rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA; 333 rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
337 rxconf.src_addr_width = 4; 334 rxconf.src_addr_width = 4;
338 rxconf.src_maxburst = 4; 335 rxconf.src_maxburst = 4;
339 break; 336 } else {
340 case 8:
341 default:
342 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA; 337 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
343 rxconf.src_addr_width = 1; 338 rxconf.src_addr_width = 1;
344 rxconf.src_maxburst = 4; 339 rxconf.src_maxburst = 4;
@@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master,
358 353
359 if (xfer->tx_buf) { 354 if (xfer->tx_buf) {
360 txconf.direction = DMA_MEM_TO_DEV; 355 txconf.direction = DMA_MEM_TO_DEV;
361 switch (xfer->bits_per_word) { 356 if (xfer->len % 4 == 0) {
362 case 32:
363 txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA; 357 txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
364 txconf.dst_addr_width = 4; 358 txconf.dst_addr_width = 4;
365 txconf.dst_maxburst = 4; 359 txconf.dst_maxburst = 4;
366 break; 360 } else {
367 case 8:
368 default:
369 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA; 361 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
370 txconf.dst_addr_width = 1; 362 txconf.dst_addr_width = 1;
371 txconf.dst_maxburst = 4; 363 txconf.dst_maxburst = 4;
372 break;
373 } 364 }
374 dmaengine_slave_config(spfi->tx_ch, &txconf); 365 dmaengine_slave_config(spfi->tx_ch, &txconf);
375 366
@@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable)
508static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, 499static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
509 struct spi_transfer *xfer) 500 struct spi_transfer *xfer)
510{ 501{
511 if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE) 502 if (xfer->len > SPFI_32BIT_FIFO_SIZE)
512 return true;
513 if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
514 return true; 503 return true;
515 return false; 504 return false;
516} 505}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 961b97d43b43..6fea4af51c41 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -89,7 +89,6 @@ struct spi_imx_data {
89 89
90 struct completion xfer_done; 90 struct completion xfer_done;
91 void __iomem *base; 91 void __iomem *base;
92 int irq;
93 struct clk *clk_per; 92 struct clk *clk_per;
94 struct clk *clk_ipg; 93 struct clk *clk_ipg;
95 unsigned long spi_clk; 94 unsigned long spi_clk;
@@ -823,6 +822,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
823 struct dma_slave_config slave_config = {}; 822 struct dma_slave_config slave_config = {};
824 int ret; 823 int ret;
825 824
825 /* use pio mode for i.mx6dl chip TKT238285 */
826 if (of_machine_is_compatible("fsl,imx6dl"))
827 return 0;
828
826 /* Prepare for TX DMA: */ 829 /* Prepare for TX DMA: */
827 master->dma_tx = dma_request_slave_channel(dev, "tx"); 830 master->dma_tx = dma_request_slave_channel(dev, "tx");
828 if (!master->dma_tx) { 831 if (!master->dma_tx) {
@@ -892,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
892{ 895{
893 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; 896 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
894 int ret; 897 int ret;
898 unsigned long timeout;
895 u32 dma; 899 u32 dma;
896 int left; 900 int left;
897 struct spi_master *master = spi_imx->bitbang.master; 901 struct spi_master *master = spi_imx->bitbang.master;
@@ -939,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
939 dma_async_issue_pending(master->dma_tx); 943 dma_async_issue_pending(master->dma_tx);
940 dma_async_issue_pending(master->dma_rx); 944 dma_async_issue_pending(master->dma_rx);
941 /* Wait SDMA to finish the data transfer.*/ 945 /* Wait SDMA to finish the data transfer.*/
942 ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion, 946 timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
943 IMX_DMA_TIMEOUT); 947 IMX_DMA_TIMEOUT);
944 if (!ret) { 948 if (!timeout) {
945 pr_warn("%s %s: I/O Error in DMA TX\n", 949 pr_warn("%s %s: I/O Error in DMA TX\n",
946 dev_driver_string(&master->dev), 950 dev_driver_string(&master->dev),
947 dev_name(&master->dev)); 951 dev_name(&master->dev));
948 dmaengine_terminate_all(master->dma_tx); 952 dmaengine_terminate_all(master->dma_tx);
949 } else { 953 } else {
950 ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion, 954 timeout = wait_for_completion_timeout(
951 IMX_DMA_TIMEOUT); 955 &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
952 if (!ret) { 956 if (!timeout) {
953 pr_warn("%s %s: I/O Error in DMA RX\n", 957 pr_warn("%s %s: I/O Error in DMA RX\n",
954 dev_driver_string(&master->dev), 958 dev_driver_string(&master->dev),
955 dev_name(&master->dev)); 959 dev_name(&master->dev));
@@ -964,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
964 spi_imx->dma_finished = 1; 968 spi_imx->dma_finished = 1;
965 spi_imx->devtype_data->trigger(spi_imx); 969 spi_imx->devtype_data->trigger(spi_imx);
966 970
967 if (!ret) 971 if (!timeout)
968 ret = -ETIMEDOUT; 972 ret = -ETIMEDOUT;
969 else if (ret > 0) 973 else
970 ret = transfer->len; 974 ret = transfer->len;
971 975
972 return ret; 976 return ret;
@@ -1076,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev)
1076 struct spi_master *master; 1080 struct spi_master *master;
1077 struct spi_imx_data *spi_imx; 1081 struct spi_imx_data *spi_imx;
1078 struct resource *res; 1082 struct resource *res;
1079 int i, ret, num_cs; 1083 int i, ret, num_cs, irq;
1080 1084
1081 if (!np && !mxc_platform_info) { 1085 if (!np && !mxc_platform_info) {
1082 dev_err(&pdev->dev, "can't get the platform data\n"); 1086 dev_err(&pdev->dev, "can't get the platform data\n");
@@ -1143,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev)
1143 goto out_master_put; 1147 goto out_master_put;
1144 } 1148 }
1145 1149
1146 spi_imx->irq = platform_get_irq(pdev, 0); 1150 irq = platform_get_irq(pdev, 0);
1147 if (spi_imx->irq < 0) { 1151 if (irq < 0) {
1148 ret = spi_imx->irq; 1152 ret = irq;
1149 goto out_master_put; 1153 goto out_master_put;
1150 } 1154 }
1151 1155
1152 ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0, 1156 ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
1153 dev_name(&pdev->dev), spi_imx); 1157 dev_name(&pdev->dev), spi_imx);
1154 if (ret) { 1158 if (ret) {
1155 dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); 1159 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
1156 goto out_master_put; 1160 goto out_master_put;
1157 } 1161 }
1158 1162
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 41c5765be746..ba72347cb99d 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21#include <linux/init.h> 17#include <linux/init.h>
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 1bbac0378bf7..5468fc70dbf8 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -85,7 +85,7 @@ struct meson_spifc {
85 struct device *dev; 85 struct device *dev;
86}; 86};
87 87
88static struct regmap_config spifc_regmap_config = { 88static const struct regmap_config spifc_regmap_config = {
89 .reg_bits = 32, 89 .reg_bits = 32,
90 .val_bits = 32, 90 .val_bits = 32,
91 .reg_stride = 4, 91 .reg_stride = 4,
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 4045a1e580e1..5b0e9a3e83f6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
282 dmaengine_submit(desc); 282 dmaengine_submit(desc);
283 dma_async_issue_pending(ssp->dmach); 283 dma_async_issue_pending(ssp->dmach);
284 284
285 ret = wait_for_completion_timeout(&spi->c, 285 if (!wait_for_completion_timeout(&spi->c,
286 msecs_to_jiffies(SSP_TIMEOUT)); 286 msecs_to_jiffies(SSP_TIMEOUT))) {
287 if (!ret) {
288 dev_err(ssp->dev, "DMA transfer timeout\n"); 287 dev_err(ssp->dev, "DMA transfer timeout\n");
289 ret = -ETIMEDOUT; 288 ret = -ETIMEDOUT;
290 dmaengine_terminate_all(ssp->dmach); 289 dmaengine_terminate_all(ssp->dmach);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 79399ae9c84c..d890d309dff9 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -16,11 +16,6 @@
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */ 19 */
25#include <linux/kernel.h> 20#include <linux/kernel.h>
26#include <linux/init.h> 21#include <linux/init.h>
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index daf1ada5cd11..3c0844457c07 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -28,10 +28,6 @@
28 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * You should have received a copy of the GNU General Public License along
33 * with this program; if not, write to the Free Software Foundation, Inc.,
34 * 675 Mass Ave, Cambridge, MA 02139, USA.
35 */ 31 */
36#include <linux/kernel.h> 32#include <linux/kernel.h>
37#include <linux/init.h> 33#include <linux/init.h>
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3bc3cbabbbc0..4df8942058de 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -14,11 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */ 17 */
23 18
24#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 3dec9e0b99b8..861664776672 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -28,7 +28,12 @@
28/* Runtime PM autosuspend timeout: PM is fairly light on this driver */ 28/* Runtime PM autosuspend timeout: PM is fairly light on this driver */
29#define SPI_AUTOSUSPEND_TIMEOUT 200 29#define SPI_AUTOSUSPEND_TIMEOUT 200
30 30
31#define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/ 31/* Some SoCs using this driver support up to 8 chip selects.
32 * It is up to the implementer to only use the chip selects
33 * that are available.
34 */
35#define ORION_NUM_CHIPSELECTS 8
36
32#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */ 37#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
33 38
34#define ORION_SPI_IF_CTRL_REG 0x00 39#define ORION_SPI_IF_CTRL_REG 0x00
@@ -44,6 +49,10 @@
44#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF 49#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF
45#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \ 50#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
46 ORION_SPI_MODE_CPHA) 51 ORION_SPI_MODE_CPHA)
52#define ORION_SPI_CS_MASK 0x1C
53#define ORION_SPI_CS_SHIFT 2
54#define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \
55 ORION_SPI_CS_MASK)
47 56
48enum orion_spi_type { 57enum orion_spi_type {
49 ORION_SPI, 58 ORION_SPI,
@@ -215,9 +224,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
215 return 0; 224 return 0;
216} 225}
217 226
218static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable) 227static void orion_spi_set_cs(struct spi_device *spi, bool enable)
219{ 228{
220 if (enable) 229 struct orion_spi *orion_spi;
230
231 orion_spi = spi_master_get_devdata(spi->master);
232
233 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
234 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
235 ORION_SPI_CS(spi->chip_select));
236
237 /* Chip select logic is inverted from spi_set_cs */
238 if (!enable)
221 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); 239 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
222 else 240 else
223 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); 241 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
@@ -332,64 +350,31 @@ out:
332 return xfer->len - count; 350 return xfer->len - count;
333} 351}
334 352
335static int orion_spi_transfer_one_message(struct spi_master *master, 353static int orion_spi_transfer_one(struct spi_master *master,
336 struct spi_message *m) 354 struct spi_device *spi,
355 struct spi_transfer *t)
337{ 356{
338 struct orion_spi *orion_spi = spi_master_get_devdata(master);
339 struct spi_device *spi = m->spi;
340 struct spi_transfer *t = NULL;
341 int par_override = 0;
342 int status = 0; 357 int status = 0;
343 int cs_active = 0;
344
345 /* Load defaults */
346 status = orion_spi_setup_transfer(spi, NULL);
347 358
359 status = orion_spi_setup_transfer(spi, t);
348 if (status < 0) 360 if (status < 0)
349 goto msg_done; 361 return status;
350
351 list_for_each_entry(t, &m->transfers, transfer_list) {
352 if (par_override || t->speed_hz || t->bits_per_word) {
353 par_override = 1;
354 status = orion_spi_setup_transfer(spi, t);
355 if (status < 0)
356 break;
357 if (!t->speed_hz && !t->bits_per_word)
358 par_override = 0;
359 }
360
361 if (!cs_active) {
362 orion_spi_set_cs(orion_spi, 1);
363 cs_active = 1;
364 }
365 362
366 if (t->len) 363 if (t->len)
367 m->actual_length += orion_spi_write_read(spi, t); 364 orion_spi_write_read(spi, t);
368 365
369 if (t->delay_usecs) 366 return status;
370 udelay(t->delay_usecs); 367}
371
372 if (t->cs_change) {
373 orion_spi_set_cs(orion_spi, 0);
374 cs_active = 0;
375 }
376 }
377
378msg_done:
379 if (cs_active)
380 orion_spi_set_cs(orion_spi, 0);
381
382 m->status = status;
383 spi_finalize_current_message(master);
384 368
385 return 0; 369static int orion_spi_setup(struct spi_device *spi)
370{
371 return orion_spi_setup_transfer(spi, NULL);
386} 372}
387 373
388static int orion_spi_reset(struct orion_spi *orion_spi) 374static int orion_spi_reset(struct orion_spi *orion_spi)
389{ 375{
390 /* Verify that the CS is deasserted */ 376 /* Verify that the CS is deasserted */
391 orion_spi_set_cs(orion_spi, 0); 377 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
392
393 return 0; 378 return 0;
394} 379}
395 380
@@ -442,9 +427,10 @@ static int orion_spi_probe(struct platform_device *pdev)
442 427
443 /* we support only mode 0, and no options */ 428 /* we support only mode 0, and no options */
444 master->mode_bits = SPI_CPHA | SPI_CPOL; 429 master->mode_bits = SPI_CPHA | SPI_CPOL;
445 430 master->set_cs = orion_spi_set_cs;
446 master->transfer_one_message = orion_spi_transfer_one_message; 431 master->transfer_one = orion_spi_transfer_one;
447 master->num_chipselect = ORION_NUM_CHIPSELECTS; 432 master->num_chipselect = ORION_NUM_CHIPSELECTS;
433 master->setup = orion_spi_setup;
448 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); 434 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
449 master->auto_runtime_pm = true; 435 master->auto_runtime_pm = true;
450 436
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 62a9297e96ac..66a173939be8 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -111,23 +111,24 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
111 * by using ->dma_running. 111 * by using ->dma_running.
112 */ 112 */
113 if (atomic_dec_and_test(&drv_data->dma_running)) { 113 if (atomic_dec_and_test(&drv_data->dma_running)) {
114 void __iomem *reg = drv_data->ioaddr;
115
116 /* 114 /*
117 * If the other CPU is still handling the ROR interrupt we 115 * If the other CPU is still handling the ROR interrupt we
118 * might not know about the error yet. So we re-check the 116 * might not know about the error yet. So we re-check the
119 * ROR bit here before we clear the status register. 117 * ROR bit here before we clear the status register.
120 */ 118 */
121 if (!error) { 119 if (!error) {
122 u32 status = read_SSSR(reg) & drv_data->mask_sr; 120 u32 status = pxa2xx_spi_read(drv_data, SSSR)
121 & drv_data->mask_sr;
123 error = status & SSSR_ROR; 122 error = status & SSSR_ROR;
124 } 123 }
125 124
126 /* Clear status & disable interrupts */ 125 /* Clear status & disable interrupts */
127 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 126 pxa2xx_spi_write(drv_data, SSCR1,
127 pxa2xx_spi_read(drv_data, SSCR1)
128 & ~drv_data->dma_cr1);
128 write_SSSR_CS(drv_data, drv_data->clear_sr); 129 write_SSSR_CS(drv_data, drv_data->clear_sr);
129 if (!pxa25x_ssp_comp(drv_data)) 130 if (!pxa25x_ssp_comp(drv_data))
130 write_SSTO(0, reg); 131 pxa2xx_spi_write(drv_data, SSTO, 0);
131 132
132 if (!error) { 133 if (!error) {
133 pxa2xx_spi_unmap_dma_buffers(drv_data); 134 pxa2xx_spi_unmap_dma_buffers(drv_data);
@@ -139,7 +140,9 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
139 msg->state = pxa2xx_spi_next_transfer(drv_data); 140 msg->state = pxa2xx_spi_next_transfer(drv_data);
140 } else { 141 } else {
141 /* In case we got an error we disable the SSP now */ 142 /* In case we got an error we disable the SSP now */
142 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 143 pxa2xx_spi_write(drv_data, SSCR0,
144 pxa2xx_spi_read(drv_data, SSCR0)
145 & ~SSCR0_SSE);
143 146
144 msg->state = ERROR_STATE; 147 msg->state = ERROR_STATE;
145 } 148 }
@@ -247,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
247{ 250{
248 u32 status; 251 u32 status;
249 252
250 status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr; 253 status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
251 if (status & SSSR_ROR) { 254 if (status & SSSR_ROR) {
252 dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); 255 dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
253 256
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
index e8a26f25d5c0..2e0796a0003f 100644
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21#include <linux/delay.h> 17#include <linux/delay.h>
@@ -25,6 +21,7 @@
25#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
26#include <linux/spi/pxa2xx_spi.h> 22#include <linux/spi/pxa2xx_spi.h>
27 23
24#include <mach/dma.h>
28#include "spi-pxa2xx.h" 25#include "spi-pxa2xx.h"
29 26
30#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) 27#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
@@ -118,11 +115,11 @@ static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
118 drv_data->dma_mapped = 0; 115 drv_data->dma_mapped = 0;
119} 116}
120 117
121static int wait_ssp_rx_stall(void const __iomem *ioaddr) 118static int wait_ssp_rx_stall(struct driver_data *drv_data)
122{ 119{
123 unsigned long limit = loops_per_jiffy << 1; 120 unsigned long limit = loops_per_jiffy << 1;
124 121
125 while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) 122 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
126 cpu_relax(); 123 cpu_relax();
127 124
128 return limit; 125 return limit;
@@ -141,17 +138,18 @@ static int wait_dma_channel_stop(int channel)
141static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, 138static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
142 const char *msg) 139 const char *msg)
143{ 140{
144 void __iomem *reg = drv_data->ioaddr;
145
146 /* Stop and reset */ 141 /* Stop and reset */
147 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 142 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
148 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 143 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
149 write_SSSR_CS(drv_data, drv_data->clear_sr); 144 write_SSSR_CS(drv_data, drv_data->clear_sr);
150 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 145 pxa2xx_spi_write(drv_data, SSCR1,
146 pxa2xx_spi_read(drv_data, SSCR1)
147 & ~drv_data->dma_cr1);
151 if (!pxa25x_ssp_comp(drv_data)) 148 if (!pxa25x_ssp_comp(drv_data))
152 write_SSTO(0, reg); 149 pxa2xx_spi_write(drv_data, SSTO, 0);
153 pxa2xx_spi_flush(drv_data); 150 pxa2xx_spi_flush(drv_data);
154 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 151 pxa2xx_spi_write(drv_data, SSCR0,
152 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
155 153
156 pxa2xx_spi_unmap_dma_buffers(drv_data); 154 pxa2xx_spi_unmap_dma_buffers(drv_data);
157 155
@@ -163,11 +161,12 @@ static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
163 161
164static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) 162static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
165{ 163{
166 void __iomem *reg = drv_data->ioaddr;
167 struct spi_message *msg = drv_data->cur_msg; 164 struct spi_message *msg = drv_data->cur_msg;
168 165
169 /* Clear and disable interrupts on SSP and DMA channels*/ 166 /* Clear and disable interrupts on SSP and DMA channels*/
170 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 167 pxa2xx_spi_write(drv_data, SSCR1,
168 pxa2xx_spi_read(drv_data, SSCR1)
169 & ~drv_data->dma_cr1);
171 write_SSSR_CS(drv_data, drv_data->clear_sr); 170 write_SSSR_CS(drv_data, drv_data->clear_sr);
172 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 171 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
173 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 172 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
@@ -228,7 +227,7 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
228 && (drv_data->ssp_type == PXA25x_SSP)) { 227 && (drv_data->ssp_type == PXA25x_SSP)) {
229 228
230 /* Wait for rx to stall */ 229 /* Wait for rx to stall */
231 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 230 if (wait_ssp_rx_stall(drv_data) == 0)
232 dev_err(&drv_data->pdev->dev, 231 dev_err(&drv_data->pdev->dev,
233 "dma_handler: ssp rx stall failed\n"); 232 "dma_handler: ssp rx stall failed\n");
234 233
@@ -240,9 +239,8 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
240irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) 239irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
241{ 240{
242 u32 irq_status; 241 u32 irq_status;
243 void __iomem *reg = drv_data->ioaddr;
244 242
245 irq_status = read_SSSR(reg) & drv_data->mask_sr; 243 irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
246 if (irq_status & SSSR_ROR) { 244 if (irq_status & SSSR_ROR) {
247 pxa2xx_spi_dma_error_stop(drv_data, 245 pxa2xx_spi_dma_error_stop(drv_data,
248 "dma_transfer: fifo overrun"); 246 "dma_transfer: fifo overrun");
@@ -252,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
252 /* Check for false positive timeout */ 250 /* Check for false positive timeout */
253 if ((irq_status & SSSR_TINT) 251 if ((irq_status & SSSR_TINT)
254 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { 252 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
255 write_SSSR(SSSR_TINT, reg); 253 pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
256 return IRQ_HANDLED; 254 return IRQ_HANDLED;
257 } 255 }
258 256
@@ -261,7 +259,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
261 /* Clear and disable timeout interrupt, do the rest in 259 /* Clear and disable timeout interrupt, do the rest in
262 * dma_transfer_complete */ 260 * dma_transfer_complete */
263 if (!pxa25x_ssp_comp(drv_data)) 261 if (!pxa25x_ssp_comp(drv_data))
264 write_SSTO(0, reg); 262 pxa2xx_spi_write(drv_data, SSTO, 0);
265 263
266 /* finish this transfer, start the next */ 264 /* finish this transfer, start the next */
267 pxa2xx_spi_dma_transfer_complete(drv_data); 265 pxa2xx_spi_dma_transfer_complete(drv_data);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 23822e7df6c1..6f72ad01e041 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 14 */
19 15
20#include <linux/init.h> 16#include <linux/init.h>
@@ -45,8 +41,6 @@ MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
45MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
46MODULE_ALIAS("platform:pxa2xx-spi"); 42MODULE_ALIAS("platform:pxa2xx-spi");
47 43
48#define MAX_BUSES 3
49
50#define TIMOUT_DFLT 1000 44#define TIMOUT_DFLT 1000
51 45
52/* 46/*
@@ -162,7 +156,6 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
162 156
163static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data) 157static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
164{ 158{
165 void __iomem *reg = drv_data->ioaddr;
166 u32 mask; 159 u32 mask;
167 160
168 switch (drv_data->ssp_type) { 161 switch (drv_data->ssp_type) {
@@ -174,7 +167,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
174 break; 167 break;
175 } 168 }
176 169
177 return (read_SSSR(reg) & mask) == mask; 170 return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
178} 171}
179 172
180static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data, 173static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@@ -253,9 +246,6 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
253 unsigned offset = 0x400; 246 unsigned offset = 0x400;
254 u32 value, orig; 247 u32 value, orig;
255 248
256 if (!is_lpss_ssp(drv_data))
257 return;
258
259 /* 249 /*
260 * Perform auto-detection of the LPSS SSP private registers. They 250 * Perform auto-detection of the LPSS SSP private registers. They
261 * can be either at 1k or 2k offset from the base address. 251 * can be either at 1k or 2k offset from the base address.
@@ -304,9 +294,6 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
304{ 294{
305 u32 value; 295 u32 value;
306 296
307 if (!is_lpss_ssp(drv_data))
308 return;
309
310 value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); 297 value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
311 if (enable) 298 if (enable)
312 value &= ~SPI_CS_CONTROL_CS_HIGH; 299 value &= ~SPI_CS_CONTROL_CS_HIGH;
@@ -320,7 +307,7 @@ static void cs_assert(struct driver_data *drv_data)
320 struct chip_data *chip = drv_data->cur_chip; 307 struct chip_data *chip = drv_data->cur_chip;
321 308
322 if (drv_data->ssp_type == CE4100_SSP) { 309 if (drv_data->ssp_type == CE4100_SSP) {
323 write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); 310 pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
324 return; 311 return;
325 } 312 }
326 313
@@ -334,7 +321,8 @@ static void cs_assert(struct driver_data *drv_data)
334 return; 321 return;
335 } 322 }
336 323
337 lpss_ssp_cs_control(drv_data, true); 324 if (is_lpss_ssp(drv_data))
325 lpss_ssp_cs_control(drv_data, true);
338} 326}
339 327
340static void cs_deassert(struct driver_data *drv_data) 328static void cs_deassert(struct driver_data *drv_data)
@@ -354,20 +342,18 @@ static void cs_deassert(struct driver_data *drv_data)
354 return; 342 return;
355 } 343 }
356 344
357 lpss_ssp_cs_control(drv_data, false); 345 if (is_lpss_ssp(drv_data))
346 lpss_ssp_cs_control(drv_data, false);
358} 347}
359 348
360int pxa2xx_spi_flush(struct driver_data *drv_data) 349int pxa2xx_spi_flush(struct driver_data *drv_data)
361{ 350{
362 unsigned long limit = loops_per_jiffy << 1; 351 unsigned long limit = loops_per_jiffy << 1;
363 352
364 void __iomem *reg = drv_data->ioaddr;
365
366 do { 353 do {
367 while (read_SSSR(reg) & SSSR_RNE) { 354 while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
368 read_SSDR(reg); 355 pxa2xx_spi_read(drv_data, SSDR);
369 } 356 } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
370 } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
371 write_SSSR_CS(drv_data, SSSR_ROR); 357 write_SSSR_CS(drv_data, SSSR_ROR);
372 358
373 return limit; 359 return limit;
@@ -375,14 +361,13 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
375 361
376static int null_writer(struct driver_data *drv_data) 362static int null_writer(struct driver_data *drv_data)
377{ 363{
378 void __iomem *reg = drv_data->ioaddr;
379 u8 n_bytes = drv_data->n_bytes; 364 u8 n_bytes = drv_data->n_bytes;
380 365
381 if (pxa2xx_spi_txfifo_full(drv_data) 366 if (pxa2xx_spi_txfifo_full(drv_data)
382 || (drv_data->tx == drv_data->tx_end)) 367 || (drv_data->tx == drv_data->tx_end))
383 return 0; 368 return 0;
384 369
385 write_SSDR(0, reg); 370 pxa2xx_spi_write(drv_data, SSDR, 0);
386 drv_data->tx += n_bytes; 371 drv_data->tx += n_bytes;
387 372
388 return 1; 373 return 1;
@@ -390,12 +375,11 @@ static int null_writer(struct driver_data *drv_data)
390 375
391static int null_reader(struct driver_data *drv_data) 376static int null_reader(struct driver_data *drv_data)
392{ 377{
393 void __iomem *reg = drv_data->ioaddr;
394 u8 n_bytes = drv_data->n_bytes; 378 u8 n_bytes = drv_data->n_bytes;
395 379
396 while ((read_SSSR(reg) & SSSR_RNE) 380 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
397 && (drv_data->rx < drv_data->rx_end)) { 381 && (drv_data->rx < drv_data->rx_end)) {
398 read_SSDR(reg); 382 pxa2xx_spi_read(drv_data, SSDR);
399 drv_data->rx += n_bytes; 383 drv_data->rx += n_bytes;
400 } 384 }
401 385
@@ -404,13 +388,11 @@ static int null_reader(struct driver_data *drv_data)
404 388
405static int u8_writer(struct driver_data *drv_data) 389static int u8_writer(struct driver_data *drv_data)
406{ 390{
407 void __iomem *reg = drv_data->ioaddr;
408
409 if (pxa2xx_spi_txfifo_full(drv_data) 391 if (pxa2xx_spi_txfifo_full(drv_data)
410 || (drv_data->tx == drv_data->tx_end)) 392 || (drv_data->tx == drv_data->tx_end))
411 return 0; 393 return 0;
412 394
413 write_SSDR(*(u8 *)(drv_data->tx), reg); 395 pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
414 ++drv_data->tx; 396 ++drv_data->tx;
415 397
416 return 1; 398 return 1;
@@ -418,11 +400,9 @@ static int u8_writer(struct driver_data *drv_data)
418 400
419static int u8_reader(struct driver_data *drv_data) 401static int u8_reader(struct driver_data *drv_data)
420{ 402{
421 void __iomem *reg = drv_data->ioaddr; 403 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
422 404 && (drv_data->rx < drv_data->rx_end)) {
423 while ((read_SSSR(reg) & SSSR_RNE) 405 *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
424 && (drv_data->rx < drv_data->rx_end)) {
425 *(u8 *)(drv_data->rx) = read_SSDR(reg);
426 ++drv_data->rx; 406 ++drv_data->rx;
427 } 407 }
428 408
@@ -431,13 +411,11 @@ static int u8_reader(struct driver_data *drv_data)
431 411
432static int u16_writer(struct driver_data *drv_data) 412static int u16_writer(struct driver_data *drv_data)
433{ 413{
434 void __iomem *reg = drv_data->ioaddr;
435
436 if (pxa2xx_spi_txfifo_full(drv_data) 414 if (pxa2xx_spi_txfifo_full(drv_data)
437 || (drv_data->tx == drv_data->tx_end)) 415 || (drv_data->tx == drv_data->tx_end))
438 return 0; 416 return 0;
439 417
440 write_SSDR(*(u16 *)(drv_data->tx), reg); 418 pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
441 drv_data->tx += 2; 419 drv_data->tx += 2;
442 420
443 return 1; 421 return 1;
@@ -445,11 +423,9 @@ static int u16_writer(struct driver_data *drv_data)
445 423
446static int u16_reader(struct driver_data *drv_data) 424static int u16_reader(struct driver_data *drv_data)
447{ 425{
448 void __iomem *reg = drv_data->ioaddr; 426 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
449 427 && (drv_data->rx < drv_data->rx_end)) {
450 while ((read_SSSR(reg) & SSSR_RNE) 428 *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
451 && (drv_data->rx < drv_data->rx_end)) {
452 *(u16 *)(drv_data->rx) = read_SSDR(reg);
453 drv_data->rx += 2; 429 drv_data->rx += 2;
454 } 430 }
455 431
@@ -458,13 +434,11 @@ static int u16_reader(struct driver_data *drv_data)
458 434
459static int u32_writer(struct driver_data *drv_data) 435static int u32_writer(struct driver_data *drv_data)
460{ 436{
461 void __iomem *reg = drv_data->ioaddr;
462
463 if (pxa2xx_spi_txfifo_full(drv_data) 437 if (pxa2xx_spi_txfifo_full(drv_data)
464 || (drv_data->tx == drv_data->tx_end)) 438 || (drv_data->tx == drv_data->tx_end))
465 return 0; 439 return 0;
466 440
467 write_SSDR(*(u32 *)(drv_data->tx), reg); 441 pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
468 drv_data->tx += 4; 442 drv_data->tx += 4;
469 443
470 return 1; 444 return 1;
@@ -472,11 +446,9 @@ static int u32_writer(struct driver_data *drv_data)
472 446
473static int u32_reader(struct driver_data *drv_data) 447static int u32_reader(struct driver_data *drv_data)
474{ 448{
475 void __iomem *reg = drv_data->ioaddr; 449 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
476 450 && (drv_data->rx < drv_data->rx_end)) {
477 while ((read_SSSR(reg) & SSSR_RNE) 451 *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
478 && (drv_data->rx < drv_data->rx_end)) {
479 *(u32 *)(drv_data->rx) = read_SSDR(reg);
480 drv_data->rx += 4; 452 drv_data->rx += 4;
481 } 453 }
482 454
@@ -552,27 +524,25 @@ static void giveback(struct driver_data *drv_data)
552 524
553static void reset_sccr1(struct driver_data *drv_data) 525static void reset_sccr1(struct driver_data *drv_data)
554{ 526{
555 void __iomem *reg = drv_data->ioaddr;
556 struct chip_data *chip = drv_data->cur_chip; 527 struct chip_data *chip = drv_data->cur_chip;
557 u32 sccr1_reg; 528 u32 sccr1_reg;
558 529
559 sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; 530 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
560 sccr1_reg &= ~SSCR1_RFT; 531 sccr1_reg &= ~SSCR1_RFT;
561 sccr1_reg |= chip->threshold; 532 sccr1_reg |= chip->threshold;
562 write_SSCR1(sccr1_reg, reg); 533 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
563} 534}
564 535
565static void int_error_stop(struct driver_data *drv_data, const char* msg) 536static void int_error_stop(struct driver_data *drv_data, const char* msg)
566{ 537{
567 void __iomem *reg = drv_data->ioaddr;
568
569 /* Stop and reset SSP */ 538 /* Stop and reset SSP */
570 write_SSSR_CS(drv_data, drv_data->clear_sr); 539 write_SSSR_CS(drv_data, drv_data->clear_sr);
571 reset_sccr1(drv_data); 540 reset_sccr1(drv_data);
572 if (!pxa25x_ssp_comp(drv_data)) 541 if (!pxa25x_ssp_comp(drv_data))
573 write_SSTO(0, reg); 542 pxa2xx_spi_write(drv_data, SSTO, 0);
574 pxa2xx_spi_flush(drv_data); 543 pxa2xx_spi_flush(drv_data);
575 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 544 pxa2xx_spi_write(drv_data, SSCR0,
545 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
576 546
577 dev_err(&drv_data->pdev->dev, "%s\n", msg); 547 dev_err(&drv_data->pdev->dev, "%s\n", msg);
578 548
@@ -582,13 +552,11 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
582 552
583static void int_transfer_complete(struct driver_data *drv_data) 553static void int_transfer_complete(struct driver_data *drv_data)
584{ 554{
585 void __iomem *reg = drv_data->ioaddr;
586
587 /* Stop SSP */ 555 /* Stop SSP */
588 write_SSSR_CS(drv_data, drv_data->clear_sr); 556 write_SSSR_CS(drv_data, drv_data->clear_sr);
589 reset_sccr1(drv_data); 557 reset_sccr1(drv_data);
590 if (!pxa25x_ssp_comp(drv_data)) 558 if (!pxa25x_ssp_comp(drv_data))
591 write_SSTO(0, reg); 559 pxa2xx_spi_write(drv_data, SSTO, 0);
592 560
593 /* Update total byte transferred return count actual bytes read */ 561 /* Update total byte transferred return count actual bytes read */
594 drv_data->cur_msg->actual_length += drv_data->len - 562 drv_data->cur_msg->actual_length += drv_data->len -
@@ -607,12 +575,10 @@ static void int_transfer_complete(struct driver_data *drv_data)
607 575
608static irqreturn_t interrupt_transfer(struct driver_data *drv_data) 576static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
609{ 577{
610 void __iomem *reg = drv_data->ioaddr; 578 u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
579 drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
611 580
612 u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? 581 u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
613 drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
614
615 u32 irq_status = read_SSSR(reg) & irq_mask;
616 582
617 if (irq_status & SSSR_ROR) { 583 if (irq_status & SSSR_ROR) {
618 int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); 584 int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
@@ -620,7 +586,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
620 } 586 }
621 587
622 if (irq_status & SSSR_TINT) { 588 if (irq_status & SSSR_TINT) {
623 write_SSSR(SSSR_TINT, reg); 589 pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
624 if (drv_data->read(drv_data)) { 590 if (drv_data->read(drv_data)) {
625 int_transfer_complete(drv_data); 591 int_transfer_complete(drv_data);
626 return IRQ_HANDLED; 592 return IRQ_HANDLED;
@@ -644,7 +610,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
644 u32 bytes_left; 610 u32 bytes_left;
645 u32 sccr1_reg; 611 u32 sccr1_reg;
646 612
647 sccr1_reg = read_SSCR1(reg); 613 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
648 sccr1_reg &= ~SSCR1_TIE; 614 sccr1_reg &= ~SSCR1_TIE;
649 615
650 /* 616 /*
@@ -670,7 +636,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
670 636
671 pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre); 637 pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
672 } 638 }
673 write_SSCR1(sccr1_reg, reg); 639 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
674 } 640 }
675 641
676 /* We did something */ 642 /* We did something */
@@ -680,7 +646,6 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
680static irqreturn_t ssp_int(int irq, void *dev_id) 646static irqreturn_t ssp_int(int irq, void *dev_id)
681{ 647{
682 struct driver_data *drv_data = dev_id; 648 struct driver_data *drv_data = dev_id;
683 void __iomem *reg = drv_data->ioaddr;
684 u32 sccr1_reg; 649 u32 sccr1_reg;
685 u32 mask = drv_data->mask_sr; 650 u32 mask = drv_data->mask_sr;
686 u32 status; 651 u32 status;
@@ -700,11 +665,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
700 * are all set to one. That means that the device is already 665 * are all set to one. That means that the device is already
701 * powered off. 666 * powered off.
702 */ 667 */
703 status = read_SSSR(reg); 668 status = pxa2xx_spi_read(drv_data, SSSR);
704 if (status == ~0) 669 if (status == ~0)
705 return IRQ_NONE; 670 return IRQ_NONE;
706 671
707 sccr1_reg = read_SSCR1(reg); 672 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
708 673
709 /* Ignore possible writes if we don't need to write */ 674 /* Ignore possible writes if we don't need to write */
710 if (!(sccr1_reg & SSCR1_TIE)) 675 if (!(sccr1_reg & SSCR1_TIE))
@@ -715,10 +680,14 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
715 680
716 if (!drv_data->cur_msg) { 681 if (!drv_data->cur_msg) {
717 682
718 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 683 pxa2xx_spi_write(drv_data, SSCR0,
719 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 684 pxa2xx_spi_read(drv_data, SSCR0)
685 & ~SSCR0_SSE);
686 pxa2xx_spi_write(drv_data, SSCR1,
687 pxa2xx_spi_read(drv_data, SSCR1)
688 & ~drv_data->int_cr1);
720 if (!pxa25x_ssp_comp(drv_data)) 689 if (!pxa25x_ssp_comp(drv_data))
721 write_SSTO(0, reg); 690 pxa2xx_spi_write(drv_data, SSTO, 0);
722 write_SSSR_CS(drv_data, drv_data->clear_sr); 691 write_SSSR_CS(drv_data, drv_data->clear_sr);
723 692
724 dev_err(&drv_data->pdev->dev, 693 dev_err(&drv_data->pdev->dev,
@@ -787,7 +756,6 @@ static void pump_transfers(unsigned long data)
787 struct spi_transfer *transfer = NULL; 756 struct spi_transfer *transfer = NULL;
788 struct spi_transfer *previous = NULL; 757 struct spi_transfer *previous = NULL;
789 struct chip_data *chip = NULL; 758 struct chip_data *chip = NULL;
790 void __iomem *reg = drv_data->ioaddr;
791 u32 clk_div = 0; 759 u32 clk_div = 0;
792 u8 bits = 0; 760 u8 bits = 0;
793 u32 speed = 0; 761 u32 speed = 0;
@@ -931,7 +899,7 @@ static void pump_transfers(unsigned long data)
931 899
932 /* Clear status and start DMA engine */ 900 /* Clear status and start DMA engine */
933 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; 901 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
934 write_SSSR(drv_data->clear_sr, reg); 902 pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
935 903
936 pxa2xx_spi_dma_start(drv_data); 904 pxa2xx_spi_dma_start(drv_data);
937 } else { 905 } else {
@@ -944,39 +912,43 @@ static void pump_transfers(unsigned long data)
944 } 912 }
945 913
946 if (is_lpss_ssp(drv_data)) { 914 if (is_lpss_ssp(drv_data)) {
947 if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold) 915 if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
948 write_SSIRF(chip->lpss_rx_threshold, reg); 916 != chip->lpss_rx_threshold)
949 if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold) 917 pxa2xx_spi_write(drv_data, SSIRF,
950 write_SSITF(chip->lpss_tx_threshold, reg); 918 chip->lpss_rx_threshold);
919 if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
920 != chip->lpss_tx_threshold)
921 pxa2xx_spi_write(drv_data, SSITF,
922 chip->lpss_tx_threshold);
951 } 923 }
952 924
953 if (is_quark_x1000_ssp(drv_data) && 925 if (is_quark_x1000_ssp(drv_data) &&
954 (read_DDS_RATE(reg) != chip->dds_rate)) 926 (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
955 write_DDS_RATE(chip->dds_rate, reg); 927 pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
956 928
957 /* see if we need to reload the config registers */ 929 /* see if we need to reload the config registers */
958 if ((read_SSCR0(reg) != cr0) || 930 if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
959 (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) { 931 || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
960 932 != (cr1 & change_mask)) {
961 /* stop the SSP, and update the other bits */ 933 /* stop the SSP, and update the other bits */
962 write_SSCR0(cr0 & ~SSCR0_SSE, reg); 934 pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
963 if (!pxa25x_ssp_comp(drv_data)) 935 if (!pxa25x_ssp_comp(drv_data))
964 write_SSTO(chip->timeout, reg); 936 pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
965 /* first set CR1 without interrupt and service enables */ 937 /* first set CR1 without interrupt and service enables */
966 write_SSCR1(cr1 & change_mask, reg); 938 pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
967 /* restart the SSP */ 939 /* restart the SSP */
968 write_SSCR0(cr0, reg); 940 pxa2xx_spi_write(drv_data, SSCR0, cr0);
969 941
970 } else { 942 } else {
971 if (!pxa25x_ssp_comp(drv_data)) 943 if (!pxa25x_ssp_comp(drv_data))
972 write_SSTO(chip->timeout, reg); 944 pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
973 } 945 }
974 946
975 cs_assert(drv_data); 947 cs_assert(drv_data);
976 948
977 /* after chip select, release the data by enabling service 949 /* after chip select, release the data by enabling service
978 * requests and interrupts, without changing any mode bits */ 950 * requests and interrupts, without changing any mode bits */
979 write_SSCR1(cr1, reg); 951 pxa2xx_spi_write(drv_data, SSCR1, cr1);
980} 952}
981 953
982static int pxa2xx_spi_transfer_one_message(struct spi_master *master, 954static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
@@ -1005,8 +977,8 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
1005 struct driver_data *drv_data = spi_master_get_devdata(master); 977 struct driver_data *drv_data = spi_master_get_devdata(master);
1006 978
1007 /* Disable the SSP now */ 979 /* Disable the SSP now */
1008 write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, 980 pxa2xx_spi_write(drv_data, SSCR0,
1009 drv_data->ioaddr); 981 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
1010 982
1011 return 0; 983 return 0;
1012} 984}
@@ -1289,6 +1261,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1289 struct driver_data *drv_data; 1261 struct driver_data *drv_data;
1290 struct ssp_device *ssp; 1262 struct ssp_device *ssp;
1291 int status; 1263 int status;
1264 u32 tmp;
1292 1265
1293 platform_info = dev_get_platdata(dev); 1266 platform_info = dev_get_platdata(dev);
1294 if (!platform_info) { 1267 if (!platform_info) {
@@ -1386,38 +1359,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1386 drv_data->max_clk_rate = clk_get_rate(ssp->clk); 1359 drv_data->max_clk_rate = clk_get_rate(ssp->clk);
1387 1360
1388 /* Load default SSP configuration */ 1361 /* Load default SSP configuration */
1389 write_SSCR0(0, drv_data->ioaddr); 1362 pxa2xx_spi_write(drv_data, SSCR0, 0);
1390 switch (drv_data->ssp_type) { 1363 switch (drv_data->ssp_type) {
1391 case QUARK_X1000_SSP: 1364 case QUARK_X1000_SSP:
1392 write_SSCR1(QUARK_X1000_SSCR1_RxTresh( 1365 tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
1393 RX_THRESH_QUARK_X1000_DFLT) | 1366 | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
1394 QUARK_X1000_SSCR1_TxTresh( 1367 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1395 TX_THRESH_QUARK_X1000_DFLT),
1396 drv_data->ioaddr);
1397 1368
1398 /* using the Motorola SPI protocol and use 8 bit frame */ 1369 /* using the Motorola SPI protocol and use 8 bit frame */
1399 write_SSCR0(QUARK_X1000_SSCR0_Motorola 1370 pxa2xx_spi_write(drv_data, SSCR0,
1400 | QUARK_X1000_SSCR0_DataSize(8), 1371 QUARK_X1000_SSCR0_Motorola
1401 drv_data->ioaddr); 1372 | QUARK_X1000_SSCR0_DataSize(8));
1402 break; 1373 break;
1403 default: 1374 default:
1404 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | 1375 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
1405 SSCR1_TxTresh(TX_THRESH_DFLT), 1376 SSCR1_TxTresh(TX_THRESH_DFLT);
1406 drv_data->ioaddr); 1377 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1407 write_SSCR0(SSCR0_SCR(2) 1378 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
1408 | SSCR0_Motorola 1379 pxa2xx_spi_write(drv_data, SSCR0, tmp);
1409 | SSCR0_DataSize(8),
1410 drv_data->ioaddr);
1411 break; 1380 break;
1412 } 1381 }
1413 1382
1414 if (!pxa25x_ssp_comp(drv_data)) 1383 if (!pxa25x_ssp_comp(drv_data))
1415 write_SSTO(0, drv_data->ioaddr); 1384 pxa2xx_spi_write(drv_data, SSTO, 0);
1416 1385
1417 if (!is_quark_x1000_ssp(drv_data)) 1386 if (!is_quark_x1000_ssp(drv_data))
1418 write_SSPSP(0, drv_data->ioaddr); 1387 pxa2xx_spi_write(drv_data, SSPSP, 0);
1419 1388
1420 lpss_ssp_setup(drv_data); 1389 if (is_lpss_ssp(drv_data))
1390 lpss_ssp_setup(drv_data);
1421 1391
1422 tasklet_init(&drv_data->pump_transfers, pump_transfers, 1392 tasklet_init(&drv_data->pump_transfers, pump_transfers,
1423 (unsigned long)drv_data); 1393 (unsigned long)drv_data);
@@ -1460,7 +1430,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1460 pm_runtime_get_sync(&pdev->dev); 1430 pm_runtime_get_sync(&pdev->dev);
1461 1431
1462 /* Disable the SSP at the peripheral and SOC level */ 1432 /* Disable the SSP at the peripheral and SOC level */
1463 write_SSCR0(0, drv_data->ioaddr); 1433 pxa2xx_spi_write(drv_data, SSCR0, 0);
1464 clk_disable_unprepare(ssp->clk); 1434 clk_disable_unprepare(ssp->clk);
1465 1435
1466 /* Release DMA */ 1436 /* Release DMA */
@@ -1497,7 +1467,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
1497 status = spi_master_suspend(drv_data->master); 1467 status = spi_master_suspend(drv_data->master);
1498 if (status != 0) 1468 if (status != 0)
1499 return status; 1469 return status;
1500 write_SSCR0(0, drv_data->ioaddr); 1470 pxa2xx_spi_write(drv_data, SSCR0, 0);
1501 1471
1502 if (!pm_runtime_suspended(dev)) 1472 if (!pm_runtime_suspended(dev))
1503 clk_disable_unprepare(ssp->clk); 1473 clk_disable_unprepare(ssp->clk);
@@ -1518,7 +1488,8 @@ static int pxa2xx_spi_resume(struct device *dev)
1518 clk_prepare_enable(ssp->clk); 1488 clk_prepare_enable(ssp->clk);
1519 1489
1520 /* Restore LPSS private register bits */ 1490 /* Restore LPSS private register bits */
1521 lpss_ssp_setup(drv_data); 1491 if (is_lpss_ssp(drv_data))
1492 lpss_ssp_setup(drv_data);
1522 1493
1523 /* Start the queue running */ 1494 /* Start the queue running */
1524 status = spi_master_resume(drv_data->master); 1495 status = spi_master_resume(drv_data->master);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 6bec59c90cd4..85a58c906869 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -115,23 +115,17 @@ struct chip_data {
115 void (*cs_control)(u32 command); 115 void (*cs_control)(u32 command);
116}; 116};
117 117
118#define DEFINE_SSP_REG(reg, off) \ 118static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
119static inline u32 read_##reg(void const __iomem *p) \ 119 unsigned reg)
120{ return __raw_readl(p + (off)); } \ 120{
121\ 121 return __raw_readl(drv_data->ioaddr + reg);
122static inline void write_##reg(u32 v, void __iomem *p) \ 122}
123{ __raw_writel(v, p + (off)); } 123
124 124static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
125DEFINE_SSP_REG(SSCR0, 0x00) 125 unsigned reg, u32 val)
126DEFINE_SSP_REG(SSCR1, 0x04) 126{
127DEFINE_SSP_REG(SSSR, 0x08) 127 __raw_writel(val, drv_data->ioaddr + reg);
128DEFINE_SSP_REG(SSITR, 0x0c) 128}
129DEFINE_SSP_REG(SSDR, 0x10)
130DEFINE_SSP_REG(DDS_RATE, 0x28) /* DDS Clock Rate */
131DEFINE_SSP_REG(SSTO, 0x28)
132DEFINE_SSP_REG(SSPSP, 0x2c)
133DEFINE_SSP_REG(SSITF, SSITF)
134DEFINE_SSP_REG(SSIRF, SSIRF)
135 129
136#define START_STATE ((void *)0) 130#define START_STATE ((void *)0)
137#define RUNNING_STATE ((void *)1) 131#define RUNNING_STATE ((void *)1)
@@ -155,13 +149,11 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
155 149
156static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) 150static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
157{ 151{
158 void __iomem *reg = drv_data->ioaddr;
159
160 if (drv_data->ssp_type == CE4100_SSP || 152 if (drv_data->ssp_type == CE4100_SSP ||
161 drv_data->ssp_type == QUARK_X1000_SSP) 153 drv_data->ssp_type == QUARK_X1000_SSP)
162 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; 154 val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
163 155
164 write_SSSR(val, reg); 156 pxa2xx_spi_write(drv_data, SSSR, val);
165} 157}
166 158
167extern int pxa2xx_spi_flush(struct driver_data *drv_data); 159extern int pxa2xx_spi_flush(struct driver_data *drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index e7fb5a0d2e8d..ff9cdbdb6672 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -337,7 +337,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
337static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) 337static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
338{ 338{
339 struct spi_qup *controller = spi_master_get_devdata(spi->master); 339 struct spi_qup *controller = spi_master_get_devdata(spi->master);
340 u32 config, iomode, mode; 340 u32 config, iomode, mode, control;
341 int ret, n_words, w_size; 341 int ret, n_words, w_size;
342 342
343 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { 343 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
@@ -392,6 +392,15 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
392 392
393 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); 393 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
394 394
395 control = readl_relaxed(controller->base + SPI_IO_CONTROL);
396
397 if (spi->mode & SPI_CPOL)
398 control |= SPI_IO_C_CLK_IDLE_HIGH;
399 else
400 control &= ~SPI_IO_C_CLK_IDLE_HIGH;
401
402 writel_relaxed(control, controller->base + SPI_IO_CONTROL);
403
395 config = readl_relaxed(controller->base + SPI_CONFIG); 404 config = readl_relaxed(controller->base + SPI_CONFIG);
396 405
397 if (spi->mode & SPI_LOOP) 406 if (spi->mode & SPI_LOOP)
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index daabbabd26b0..1a777dc261d6 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -437,6 +437,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
437 rs->state &= ~TXBUSY; 437 rs->state &= ~TXBUSY;
438 spin_unlock_irqrestore(&rs->lock, flags); 438 spin_unlock_irqrestore(&rs->lock, flags);
439 439
440 rxdesc = NULL;
440 if (rs->rx) { 441 if (rs->rx) {
441 rxconf.direction = rs->dma_rx.direction; 442 rxconf.direction = rs->dma_rx.direction;
442 rxconf.src_addr = rs->dma_rx.addr; 443 rxconf.src_addr = rs->dma_rx.addr;
@@ -453,6 +454,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
453 rxdesc->callback_param = rs; 454 rxdesc->callback_param = rs;
454 } 455 }
455 456
457 txdesc = NULL;
456 if (rs->tx) { 458 if (rs->tx) {
457 txconf.direction = rs->dma_tx.direction; 459 txconf.direction = rs->dma_tx.direction;
458 txconf.dst_addr = rs->dma_tx.addr; 460 txconf.dst_addr = rs->dma_tx.addr;
@@ -470,7 +472,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
470 } 472 }
471 473
472 /* rx must be started before tx due to spi instinct */ 474 /* rx must be started before tx due to spi instinct */
473 if (rs->rx) { 475 if (rxdesc) {
474 spin_lock_irqsave(&rs->lock, flags); 476 spin_lock_irqsave(&rs->lock, flags);
475 rs->state |= RXBUSY; 477 rs->state |= RXBUSY;
476 spin_unlock_irqrestore(&rs->lock, flags); 478 spin_unlock_irqrestore(&rs->lock, flags);
@@ -478,7 +480,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
478 dma_async_issue_pending(rs->dma_rx.ch); 480 dma_async_issue_pending(rs->dma_rx.ch);
479 } 481 }
480 482
481 if (rs->tx) { 483 if (txdesc) {
482 spin_lock_irqsave(&rs->lock, flags); 484 spin_lock_irqsave(&rs->lock, flags);
483 rs->state |= TXBUSY; 485 rs->state |= TXBUSY;
484 spin_unlock_irqrestore(&rs->lock, flags); 486 spin_unlock_irqrestore(&rs->lock, flags);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2071f788c6fb..46ce47076e63 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -15,11 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */ 18 */
24 19
25#include <linux/module.h> 20#include <linux/module.h>
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 37b19836f5cb..9231c34b5a5c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 14 */
19 15
20#include <linux/init.h> 16#include <linux/init.h>
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 237f2e7a7179..5a56acf8a43e 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index fc29233d0650..20e800e70442 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -16,11 +16,6 @@
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 *
24 */ 19 */
25 20
26#include <linux/clk.h> 21#include <linux/clk.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 3ab7a21445fc..e57eec0b2f46 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -82,6 +82,8 @@ struct sh_msiof_spi_priv {
82#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ 82#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
83#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ 83#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
84#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ 84#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
85#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
86#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
85#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */ 87#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
86#define MDR1_FLD_SHIFT 2 88#define MDR1_FLD_SHIFT 2
87#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ 89#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
@@ -241,42 +243,80 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
241 243
242static struct { 244static struct {
243 unsigned short div; 245 unsigned short div;
244 unsigned short scr; 246 unsigned short brdv;
245} const sh_msiof_spi_clk_table[] = { 247} const sh_msiof_spi_div_table[] = {
246 { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 }, 248 { 1, SCR_BRDV_DIV_1 },
247 { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 }, 249 { 2, SCR_BRDV_DIV_2 },
248 { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 }, 250 { 4, SCR_BRDV_DIV_4 },
249 { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 }, 251 { 8, SCR_BRDV_DIV_8 },
250 { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 }, 252 { 16, SCR_BRDV_DIV_16 },
251 { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 }, 253 { 32, SCR_BRDV_DIV_32 },
252 { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 },
253 { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 },
254 { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 },
255 { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 },
256 { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
257}; 254};
258 255
259static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, 256static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
260 unsigned long parent_rate, u32 spi_hz) 257 unsigned long parent_rate, u32 spi_hz)
261{ 258{
262 unsigned long div = 1024; 259 unsigned long div = 1024;
260 u32 brps, scr;
263 size_t k; 261 size_t k;
264 262
265 if (!WARN_ON(!spi_hz || !parent_rate)) 263 if (!WARN_ON(!spi_hz || !parent_rate))
266 div = DIV_ROUND_UP(parent_rate, spi_hz); 264 div = DIV_ROUND_UP(parent_rate, spi_hz);
267 265
268 /* TODO: make more fine grained */ 266 for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
269 267 brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
270 for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { 268 if (brps <= 32) /* max of brdv is 32 */
271 if (sh_msiof_spi_clk_table[k].div >= div)
272 break; 269 break;
273 } 270 }
274 271
275 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); 272 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
276 273
277 sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); 274 scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
275 sh_msiof_write(p, TSCR, scr);
278 if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX)) 276 if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
279 sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); 277 sh_msiof_write(p, RSCR, scr);
278}
279
280static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
281{
282 /*
283 * DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl
284 * b'000 : 0
285 * b'001 : 100
286 * b'010 : 200
287 * b'011 (SYNCDL only) : 300
288 * b'101 : 50
289 * b'110 : 150
290 */
291 if (dtdl_or_syncdl % 100)
292 return dtdl_or_syncdl / 100 + 5;
293 else
294 return dtdl_or_syncdl / 100;
295}
296
297static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
298{
299 u32 val;
300
301 if (!p->info)
302 return 0;
303
304 /* check if DTDL and SYNCDL is allowed value */
305 if (p->info->dtdl > 200 || p->info->syncdl > 300) {
306 dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
307 return 0;
308 }
309
310 /* check if the sum of DTDL and SYNCDL becomes an integer value */
311 if ((p->info->dtdl + p->info->syncdl) % 100) {
312 dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n");
313 return 0;
314 }
315
316 val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
317 val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
318
319 return val;
280} 320}
281 321
282static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, 322static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
@@ -296,6 +336,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
296 tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; 336 tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
297 tmp |= !cs_high << MDR1_SYNCAC_SHIFT; 337 tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
298 tmp |= lsb_first << MDR1_BITLSB_SHIFT; 338 tmp |= lsb_first << MDR1_BITLSB_SHIFT;
339 tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
299 sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON); 340 sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
300 if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) { 341 if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
301 /* These bits are reserved if RX needs TX */ 342 /* These bits are reserved if RX needs TX */
@@ -501,7 +542,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
501 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 542 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
502 543
503 544
504 pm_runtime_put_sync(&p->pdev->dev); 545 pm_runtime_put(&p->pdev->dev);
505 546
506 return 0; 547 return 0;
507} 548}
@@ -595,8 +636,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
595 } 636 }
596 637
597 /* wait for tx fifo to be emptied / rx fifo to be filled */ 638 /* wait for tx fifo to be emptied / rx fifo to be filled */
598 ret = wait_for_completion_timeout(&p->done, HZ); 639 if (!wait_for_completion_timeout(&p->done, HZ)) {
599 if (!ret) {
600 dev_err(&p->pdev->dev, "PIO timeout\n"); 640 dev_err(&p->pdev->dev, "PIO timeout\n");
601 ret = -ETIMEDOUT; 641 ret = -ETIMEDOUT;
602 goto stop_reset; 642 goto stop_reset;
@@ -706,8 +746,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
706 } 746 }
707 747
708 /* wait for tx fifo to be emptied / rx fifo to be filled */ 748 /* wait for tx fifo to be emptied / rx fifo to be filled */
709 ret = wait_for_completion_timeout(&p->done, HZ); 749 if (!wait_for_completion_timeout(&p->done, HZ)) {
710 if (!ret) {
711 dev_err(&p->pdev->dev, "DMA timeout\n"); 750 dev_err(&p->pdev->dev, "DMA timeout\n");
712 ret = -ETIMEDOUT; 751 ret = -ETIMEDOUT;
713 goto stop_reset; 752 goto stop_reset;
@@ -957,6 +996,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
957 &info->tx_fifo_override); 996 &info->tx_fifo_override);
958 of_property_read_u32(np, "renesas,rx-fifo-size", 997 of_property_read_u32(np, "renesas,rx-fifo-size",
959 &info->rx_fifo_override); 998 &info->rx_fifo_override);
999 of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
1000 of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
960 1001
961 info->num_chipselect = num_cs; 1002 info->num_chipselect = num_cs;
962 1003
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 1cfc906dd174..502501187c9e 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -14,11 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 *
22 */ 17 */
23 18
24#include <linux/module.h> 19#include <linux/module.h>
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index d075191476f0..f5715c9f68b0 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -818,7 +818,6 @@ static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
818 818
819static const struct of_device_id spi_sirfsoc_of_match[] = { 819static const struct of_device_id spi_sirfsoc_of_match[] = {
820 { .compatible = "sirf,prima2-spi", }, 820 { .compatible = "sirf,prima2-spi", },
821 { .compatible = "sirf,marco-spi", },
822 {} 821 {}
823}; 822};
824MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); 823MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
new file mode 100644
index 000000000000..2faeaa7b57a8
--- /dev/null
+++ b/drivers/spi/spi-st-ssc4.c
@@ -0,0 +1,504 @@
1/*
2 * Copyright (c) 2008-2014 STMicroelectronics Limited
3 *
4 * Author: Angus Clark <Angus.Clark@st.com>
5 * Patrice Chotard <patrice.chotard@st.com>
6 * Lee Jones <lee.jones@linaro.org>
7 *
8 * SPI master mode controller driver, used in STMicroelectronics devices.
9 *
10 * May be copied or modified under the terms of the GNU General Public
11 * License Version 2.0 only. See linux/COPYING for more information.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/pinctrl/consumer.h>
20#include <linux/platform_device.h>
21#include <linux/of.h>
22#include <linux/of_gpio.h>
23#include <linux/of_irq.h>
24#include <linux/pm_runtime.h>
25#include <linux/spi/spi.h>
26#include <linux/spi/spi_bitbang.h>
27
28/* SSC registers */
29#define SSC_BRG 0x000
30#define SSC_TBUF 0x004
31#define SSC_RBUF 0x008
32#define SSC_CTL 0x00C
33#define SSC_IEN 0x010
34#define SSC_I2C 0x018
35
36/* SSC Control */
37#define SSC_CTL_DATA_WIDTH_9 0x8
38#define SSC_CTL_DATA_WIDTH_MSK 0xf
39#define SSC_CTL_BM 0xf
40#define SSC_CTL_HB BIT(4)
41#define SSC_CTL_PH BIT(5)
42#define SSC_CTL_PO BIT(6)
43#define SSC_CTL_SR BIT(7)
44#define SSC_CTL_MS BIT(8)
45#define SSC_CTL_EN BIT(9)
46#define SSC_CTL_LPB BIT(10)
47#define SSC_CTL_EN_TX_FIFO BIT(11)
48#define SSC_CTL_EN_RX_FIFO BIT(12)
49#define SSC_CTL_EN_CLST_RX BIT(13)
50
51/* SSC Interrupt Enable */
52#define SSC_IEN_TEEN BIT(2)
53
54#define FIFO_SIZE 8
55
56struct spi_st {
57 /* SSC SPI Controller */
58 void __iomem *base;
59 struct clk *clk;
60 struct device *dev;
61
62 /* SSC SPI current transaction */
63 const u8 *tx_ptr;
64 u8 *rx_ptr;
65 u16 bytes_per_word;
66 unsigned int words_remaining;
67 unsigned int baud;
68 struct completion done;
69};
70
71static int spi_st_clk_enable(struct spi_st *spi_st)
72{
73 /*
74 * Current platforms use one of the core clocks for SPI and I2C.
75 * If we attempt to disable the clock, the system will hang.
76 *
77 * TODO: Remove this when platform supports power domains.
78 */
79 return 0;
80
81 return clk_prepare_enable(spi_st->clk);
82}
83
84static void spi_st_clk_disable(struct spi_st *spi_st)
85{
86 /*
87 * Current platforms use one of the core clocks for SPI and I2C.
88 * If we attempt to disable the clock, the system will hang.
89 *
90 * TODO: Remove this when platform supports power domains.
91 */
92 return;
93
94 clk_disable_unprepare(spi_st->clk);
95}
96
97/* Load the TX FIFO */
98static void ssc_write_tx_fifo(struct spi_st *spi_st)
99{
100 unsigned int count, i;
101 uint32_t word = 0;
102
103 if (spi_st->words_remaining > FIFO_SIZE)
104 count = FIFO_SIZE;
105 else
106 count = spi_st->words_remaining;
107
108 for (i = 0; i < count; i++) {
109 if (spi_st->tx_ptr) {
110 if (spi_st->bytes_per_word == 1) {
111 word = *spi_st->tx_ptr++;
112 } else {
113 word = *spi_st->tx_ptr++;
114 word = *spi_st->tx_ptr++ | (word << 8);
115 }
116 }
117 writel_relaxed(word, spi_st->base + SSC_TBUF);
118 }
119}
120
121/* Read the RX FIFO */
122static void ssc_read_rx_fifo(struct spi_st *spi_st)
123{
124 unsigned int count, i;
125 uint32_t word = 0;
126
127 if (spi_st->words_remaining > FIFO_SIZE)
128 count = FIFO_SIZE;
129 else
130 count = spi_st->words_remaining;
131
132 for (i = 0; i < count; i++) {
133 word = readl_relaxed(spi_st->base + SSC_RBUF);
134
135 if (spi_st->rx_ptr) {
136 if (spi_st->bytes_per_word == 1) {
137 *spi_st->rx_ptr++ = (uint8_t)word;
138 } else {
139 *spi_st->rx_ptr++ = (word >> 8);
140 *spi_st->rx_ptr++ = word & 0xff;
141 }
142 }
143 }
144 spi_st->words_remaining -= count;
145}
146
147static int spi_st_transfer_one(struct spi_master *master,
148 struct spi_device *spi, struct spi_transfer *t)
149{
150 struct spi_st *spi_st = spi_master_get_devdata(master);
151 uint32_t ctl = 0;
152
153 /* Setup transfer */
154 spi_st->tx_ptr = t->tx_buf;
155 spi_st->rx_ptr = t->rx_buf;
156
157 if (spi->bits_per_word > 8) {
158 /*
159 * Anything greater than 8 bits-per-word requires 2
160 * bytes-per-word in the RX/TX buffers
161 */
162 spi_st->bytes_per_word = 2;
163 spi_st->words_remaining = t->len / 2;
164
165 } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
166 /*
167 * If transfer is even-length, and 8 bits-per-word, then
168 * implement as half-length 16 bits-per-word transfer
169 */
170 spi_st->bytes_per_word = 2;
171 spi_st->words_remaining = t->len / 2;
172
173 /* Set SSC_CTL to 16 bits-per-word */
174 ctl = readl_relaxed(spi_st->base + SSC_CTL);
175 writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
176
177 readl_relaxed(spi_st->base + SSC_RBUF);
178
179 } else {
180 spi_st->bytes_per_word = 1;
181 spi_st->words_remaining = t->len;
182 }
183
184 reinit_completion(&spi_st->done);
185
186 /* Start transfer by writing to the TX FIFO */
187 ssc_write_tx_fifo(spi_st);
188 writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
189
190 /* Wait for transfer to complete */
191 wait_for_completion(&spi_st->done);
192
193 /* Restore SSC_CTL if necessary */
194 if (ctl)
195 writel_relaxed(ctl, spi_st->base + SSC_CTL);
196
197 spi_finalize_current_transfer(spi->master);
198
199 return t->len;
200}
201
202static void spi_st_cleanup(struct spi_device *spi)
203{
204 int cs = spi->cs_gpio;
205
206 if (gpio_is_valid(cs))
207 devm_gpio_free(&spi->dev, cs);
208}
209
210/* the spi->mode bits understood by this driver: */
211#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
212static int spi_st_setup(struct spi_device *spi)
213{
214 struct spi_st *spi_st = spi_master_get_devdata(spi->master);
215 u32 spi_st_clk, sscbrg, var;
216 u32 hz = spi->max_speed_hz;
217 int cs = spi->cs_gpio;
218 int ret;
219
220 if (!hz) {
221 dev_err(&spi->dev, "max_speed_hz unspecified\n");
222 return -EINVAL;
223 }
224
225 if (!gpio_is_valid(cs)) {
226 dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
227 return -EINVAL;
228 }
229
230 if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) {
231 dev_err(&spi->dev, "could not request gpio:%d\n", cs);
232 return -EINVAL;
233 }
234
235 ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
236 if (ret)
237 return ret;
238
239 spi_st_clk = clk_get_rate(spi_st->clk);
240
241 /* Set SSC_BRF */
242 sscbrg = spi_st_clk / (2 * hz);
243 if (sscbrg < 0x07 || sscbrg > BIT(16)) {
244 dev_err(&spi->dev,
245 "baudrate %d outside valid range %d\n", sscbrg, hz);
246 return -EINVAL;
247 }
248
249 spi_st->baud = spi_st_clk / (2 * sscbrg);
250 if (sscbrg == BIT(16)) /* 16-bit counter wraps */
251 sscbrg = 0x0;
252
253 writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
254
255 dev_dbg(&spi->dev,
256 "setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n",
257 hz, spi_st->baud, sscbrg);
258
259 /* Set SSC_CTL and enable SSC */
260 var = readl_relaxed(spi_st->base + SSC_CTL);
261 var |= SSC_CTL_MS;
262
263 if (spi->mode & SPI_CPOL)
264 var |= SSC_CTL_PO;
265 else
266 var &= ~SSC_CTL_PO;
267
268 if (spi->mode & SPI_CPHA)
269 var |= SSC_CTL_PH;
270 else
271 var &= ~SSC_CTL_PH;
272
273 if ((spi->mode & SPI_LSB_FIRST) == 0)
274 var |= SSC_CTL_HB;
275 else
276 var &= ~SSC_CTL_HB;
277
278 if (spi->mode & SPI_LOOP)
279 var |= SSC_CTL_LPB;
280 else
281 var &= ~SSC_CTL_LPB;
282
283 var &= ~SSC_CTL_DATA_WIDTH_MSK;
284 var |= (spi->bits_per_word - 1);
285
286 var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
287 var |= SSC_CTL_EN;
288
289 writel_relaxed(var, spi_st->base + SSC_CTL);
290
291 /* Clear the status register */
292 readl_relaxed(spi_st->base + SSC_RBUF);
293
294 return 0;
295}
296
/*
 * Interrupt fired when TX shift register becomes empty.
 *
 * Drains the RX FIFO (which also decrements words_remaining), then
 * either refills the TX FIFO to keep the transfer going or, once all
 * words are exchanged, masks the interrupt and completes 'done' to wake
 * spi_st_transfer_one().
 */
static irqreturn_t spi_st_irq(int irq, void *dev_id)
{
	struct spi_st *spi_st = (struct spi_st *)dev_id;

	/* Read RX FIFO */
	ssc_read_rx_fifo(spi_st);

	/* Fill TX FIFO */
	if (spi_st->words_remaining) {
		ssc_write_tx_fifo(spi_st);
	} else {
		/* TX/RX complete */
		writel_relaxed(0x0, spi_st->base + SSC_IEN);
		/*
		 * read SSC_IEN to ensure that this bit is set
		 * before re-enabling interrupt
		 */
		readl(spi_st->base + SSC_IEN);
		complete(&spi_st->done);
	}

	return IRQ_HANDLED;
}
321
322static int spi_st_probe(struct platform_device *pdev)
323{
324 struct device_node *np = pdev->dev.of_node;
325 struct spi_master *master;
326 struct resource *res;
327 struct spi_st *spi_st;
328 int irq, ret = 0;
329 u32 var;
330
331 master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
332 if (!master)
333 return -ENOMEM;
334
335 master->dev.of_node = np;
336 master->mode_bits = MODEBITS;
337 master->setup = spi_st_setup;
338 master->cleanup = spi_st_cleanup;
339 master->transfer_one = spi_st_transfer_one;
340 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
341 master->auto_runtime_pm = true;
342 master->bus_num = pdev->id;
343 spi_st = spi_master_get_devdata(master);
344
345 spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
346 if (IS_ERR(spi_st->clk)) {
347 dev_err(&pdev->dev, "Unable to request clock\n");
348 return PTR_ERR(spi_st->clk);
349 }
350
351 ret = spi_st_clk_enable(spi_st);
352 if (ret)
353 return ret;
354
355 init_completion(&spi_st->done);
356
357 /* Get resources */
358 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
359 spi_st->base = devm_ioremap_resource(&pdev->dev, res);
360 if (IS_ERR(spi_st->base)) {
361 ret = PTR_ERR(spi_st->base);
362 goto clk_disable;
363 }
364
365 /* Disable I2C and Reset SSC */
366 writel_relaxed(0x0, spi_st->base + SSC_I2C);
367 var = readw_relaxed(spi_st->base + SSC_CTL);
368 var |= SSC_CTL_SR;
369 writel_relaxed(var, spi_st->base + SSC_CTL);
370
371 udelay(1);
372 var = readl_relaxed(spi_st->base + SSC_CTL);
373 var &= ~SSC_CTL_SR;
374 writel_relaxed(var, spi_st->base + SSC_CTL);
375
376 /* Set SSC into slave mode before reconfiguring PIO pins */
377 var = readl_relaxed(spi_st->base + SSC_CTL);
378 var &= ~SSC_CTL_MS;
379 writel_relaxed(var, spi_st->base + SSC_CTL);
380
381 irq = irq_of_parse_and_map(np, 0);
382 if (!irq) {
383 dev_err(&pdev->dev, "IRQ missing or invalid\n");
384 ret = -EINVAL;
385 goto clk_disable;
386 }
387
388 ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
389 pdev->name, spi_st);
390 if (ret) {
391 dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
392 goto clk_disable;
393 }
394
395 /* by default the device is on */
396 pm_runtime_set_active(&pdev->dev);
397 pm_runtime_enable(&pdev->dev);
398
399 platform_set_drvdata(pdev, master);
400
401 ret = devm_spi_register_master(&pdev->dev, master);
402 if (ret) {
403 dev_err(&pdev->dev, "Failed to register master\n");
404 goto clk_disable;
405 }
406
407 return 0;
408
409clk_disable:
410 spi_st_clk_disable(spi_st);
411
412 return ret;
413}
414
/*
 * Remove: the master itself is unregistered by devm; here we only stop
 * the clock (a no-op on current platforms, see spi_st_clk_disable()) and
 * park the pins in their sleep state.
 */
static int spi_st_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_st *spi_st = spi_master_get_devdata(master);

	spi_st_clk_disable(spi_st);

	pinctrl_pm_select_sleep_state(&pdev->dev);

	return 0;
}
426
427#ifdef CONFIG_PM
/*
 * Runtime suspend: mask all SSC interrupts, park the pins in their sleep
 * state, then stop the clock (currently a no-op, see
 * spi_st_clk_disable()).
 */
static int spi_st_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct spi_st *spi_st = spi_master_get_devdata(master);

	/* Mask every SSC interrupt source before powering down */
	writel_relaxed(0, spi_st->base + SSC_IEN);
	pinctrl_pm_select_sleep_state(dev);

	spi_st_clk_disable(spi_st);

	return 0;
}
440
/*
 * Runtime resume: restart the clock (currently a no-op, see
 * spi_st_clk_enable()) and restore the default pin state.
 *
 * NOTE(review): the pins are switched to their default state even when
 * clock enabling fails; harmless today since spi_st_clk_enable() always
 * returns 0, but worth revisiting once real clock gating is added.
 */
static int spi_st_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct spi_st *spi_st = spi_master_get_devdata(master);
	int ret;

	ret = spi_st_clk_enable(spi_st);
	pinctrl_pm_select_default_state(dev);

	return ret;
}
452#endif
453
454#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: quiesce the SPI queue first, then force the device
 * into runtime suspend.  Any failure is propagated to the PM core.
 */
static int spi_st_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret = spi_master_suspend(master);

	if (!ret)
		ret = pm_runtime_force_suspend(dev);

	return ret;
}
466
/*
 * System resume: restart the SPI queue, then force a runtime resume so
 * the hardware is powered back up.  Any failure is propagated.
 */
static int spi_st_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret = spi_master_resume(master);

	if (!ret)
		ret = pm_runtime_force_resume(dev);

	return ret;
}
478#endif
479
/* Power-management callbacks: system sleep plus runtime PM. */
static const struct dev_pm_ops spi_st_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
	SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
};
484
485static struct of_device_id stm_spi_match[] = {
486 { .compatible = "st,comms-ssc4-spi", },
487 {},
488};
489MODULE_DEVICE_TABLE(of, stm_spi_match);
490
/* Platform driver glue: binds via the OF table above. */
static struct platform_driver spi_st_driver = {
	.driver = {
		.name = "spi-st",
		.pm = &spi_st_pm,
		.of_match_table = of_match_ptr(stm_spi_match),
	},
	.probe = spi_st_probe,
	.remove = spi_st_remove,
};
module_platform_driver(spi_st_driver);

MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
MODULE_DESCRIPTION("STM SSC SPI driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 6146c4cd6583..884a716e50cb 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -201,7 +201,7 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
201 201
202static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) 202static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
203{ 203{
204 int wlen, count, ret; 204 int wlen, count;
205 unsigned int cmd; 205 unsigned int cmd;
206 const u8 *txbuf; 206 const u8 *txbuf;
207 207
@@ -230,9 +230,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
230 } 230 }
231 231
232 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 232 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
233 ret = wait_for_completion_timeout(&qspi->transfer_complete, 233 if (!wait_for_completion_timeout(&qspi->transfer_complete,
234 QSPI_COMPLETION_TIMEOUT); 234 QSPI_COMPLETION_TIMEOUT)) {
235 if (ret == 0) {
236 dev_err(qspi->dev, "write timed out\n"); 235 dev_err(qspi->dev, "write timed out\n");
237 return -ETIMEDOUT; 236 return -ETIMEDOUT;
238 } 237 }
@@ -245,7 +244,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
245 244
246static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) 245static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
247{ 246{
248 int wlen, count, ret; 247 int wlen, count;
249 unsigned int cmd; 248 unsigned int cmd;
250 u8 *rxbuf; 249 u8 *rxbuf;
251 250
@@ -268,9 +267,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
268 while (count) { 267 while (count) {
269 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); 268 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
270 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 269 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
271 ret = wait_for_completion_timeout(&qspi->transfer_complete, 270 if (!wait_for_completion_timeout(&qspi->transfer_complete,
272 QSPI_COMPLETION_TIMEOUT); 271 QSPI_COMPLETION_TIMEOUT)) {
273 if (ret == 0) {
274 dev_err(qspi->dev, "read timed out\n"); 272 dev_err(qspi->dev, "read timed out\n");
275 return -ETIMEDOUT; 273 return -ETIMEDOUT;
276 } 274 }
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index be692ad50442..93dfcee0f987 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
18 */ 14 */
19 15
20#include <linux/delay.h> 16#include <linux/delay.h>
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 79bd84f43430..133f53a9c1d4 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -22,6 +22,8 @@
22#include <linux/spi/xilinx_spi.h> 22#include <linux/spi/xilinx_spi.h>
23#include <linux/io.h> 23#include <linux/io.h>
24 24
25#define XILINX_SPI_MAX_CS 32
26
25#define XILINX_SPI_NAME "xilinx_spi" 27#define XILINX_SPI_NAME "xilinx_spi"
26 28
27/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) 29/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
@@ -34,7 +36,8 @@
34#define XSPI_CR_MASTER_MODE 0x04 36#define XSPI_CR_MASTER_MODE 0x04
35#define XSPI_CR_CPOL 0x08 37#define XSPI_CR_CPOL 0x08
36#define XSPI_CR_CPHA 0x10 38#define XSPI_CR_CPHA 0x10
37#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL) 39#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \
40 XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
38#define XSPI_CR_TXFIFO_RESET 0x20 41#define XSPI_CR_TXFIFO_RESET 0x20
39#define XSPI_CR_RXFIFO_RESET 0x40 42#define XSPI_CR_RXFIFO_RESET 0x40
40#define XSPI_CR_MANUAL_SSELECT 0x80 43#define XSPI_CR_MANUAL_SSELECT 0x80
@@ -85,12 +88,11 @@ struct xilinx_spi {
85 88
86 u8 *rx_ptr; /* pointer in the Tx buffer */ 89 u8 *rx_ptr; /* pointer in the Tx buffer */
87 const u8 *tx_ptr; /* pointer in the Rx buffer */ 90 const u8 *tx_ptr; /* pointer in the Rx buffer */
88 int remaining_bytes; /* the number of bytes left to transfer */ 91 u8 bytes_per_word;
89 u8 bits_per_word; 92 int buffer_size; /* buffer size in words */
93 u32 cs_inactive; /* Level of the CS pins when inactive*/
90 unsigned int (*read_fn)(void __iomem *); 94 unsigned int (*read_fn)(void __iomem *);
91 void (*write_fn)(u32, void __iomem *); 95 void (*write_fn)(u32, void __iomem *);
92 void (*tx_fn)(struct xilinx_spi *);
93 void (*rx_fn)(struct xilinx_spi *);
94}; 96};
95 97
96static void xspi_write32(u32 val, void __iomem *addr) 98static void xspi_write32(u32 val, void __iomem *addr)
@@ -113,49 +115,51 @@ static unsigned int xspi_read32_be(void __iomem *addr)
113 return ioread32be(addr); 115 return ioread32be(addr);
114} 116}
115 117
116static void xspi_tx8(struct xilinx_spi *xspi) 118static void xilinx_spi_tx(struct xilinx_spi *xspi)
117{ 119{
118 xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); 120 u32 data = 0;
119 xspi->tx_ptr++;
120}
121
122static void xspi_tx16(struct xilinx_spi *xspi)
123{
124 xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
125 xspi->tx_ptr += 2;
126}
127 121
128static void xspi_tx32(struct xilinx_spi *xspi) 122 if (!xspi->tx_ptr) {
129{ 123 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
130 xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); 124 return;
131 xspi->tx_ptr += 4;
132}
133
134static void xspi_rx8(struct xilinx_spi *xspi)
135{
136 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
137 if (xspi->rx_ptr) {
138 *xspi->rx_ptr = data & 0xff;
139 xspi->rx_ptr++;
140 } 125 }
141}
142 126
143static void xspi_rx16(struct xilinx_spi *xspi) 127 switch (xspi->bytes_per_word) {
144{ 128 case 1:
145 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); 129 data = *(u8 *)(xspi->tx_ptr);
146 if (xspi->rx_ptr) { 130 break;
147 *(u16 *)(xspi->rx_ptr) = data & 0xffff; 131 case 2:
148 xspi->rx_ptr += 2; 132 data = *(u16 *)(xspi->tx_ptr);
133 break;
134 case 4:
135 data = *(u32 *)(xspi->tx_ptr);
136 break;
149 } 137 }
138
139 xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
140 xspi->tx_ptr += xspi->bytes_per_word;
150} 141}
151 142
152static void xspi_rx32(struct xilinx_spi *xspi) 143static void xilinx_spi_rx(struct xilinx_spi *xspi)
153{ 144{
154 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); 145 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
155 if (xspi->rx_ptr) { 146
147 if (!xspi->rx_ptr)
148 return;
149
150 switch (xspi->bytes_per_word) {
151 case 1:
152 *(u8 *)(xspi->rx_ptr) = data;
153 break;
154 case 2:
155 *(u16 *)(xspi->rx_ptr) = data;
156 break;
157 case 4:
156 *(u32 *)(xspi->rx_ptr) = data; 158 *(u32 *)(xspi->rx_ptr) = data;
157 xspi->rx_ptr += 4; 159 break;
158 } 160 }
161
162 xspi->rx_ptr += xspi->bytes_per_word;
159} 163}
160 164
161static void xspi_init_hw(struct xilinx_spi *xspi) 165static void xspi_init_hw(struct xilinx_spi *xspi)
@@ -165,46 +169,56 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
165 /* Reset the SPI device */ 169 /* Reset the SPI device */
166 xspi->write_fn(XIPIF_V123B_RESET_MASK, 170 xspi->write_fn(XIPIF_V123B_RESET_MASK,
167 regs_base + XIPIF_V123B_RESETR_OFFSET); 171 regs_base + XIPIF_V123B_RESETR_OFFSET);
168 /* Disable all the interrupts just in case */ 172 /* Enable the transmit empty interrupt, which we use to determine
169 xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); 173 * progress on the transmission.
170 /* Enable the global IPIF interrupt */ 174 */
171 xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, 175 xspi->write_fn(XSPI_INTR_TX_EMPTY,
172 regs_base + XIPIF_V123B_DGIER_OFFSET); 176 regs_base + XIPIF_V123B_IIER_OFFSET);
177 /* Disable the global IPIF interrupt */
178 xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
173 /* Deselect the slave on the SPI bus */ 179 /* Deselect the slave on the SPI bus */
174 xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); 180 xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
175 /* Disable the transmitter, enable Manual Slave Select Assertion, 181 /* Disable the transmitter, enable Manual Slave Select Assertion,
176 * put SPI controller into master mode, and enable it */ 182 * put SPI controller into master mode, and enable it */
177 xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | 183 xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
178 XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | 184 XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
179 XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); 185 regs_base + XSPI_CR_OFFSET);
180} 186}
181 187
182static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) 188static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
183{ 189{
184 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 190 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
191 u16 cr;
192 u32 cs;
185 193
186 if (is_on == BITBANG_CS_INACTIVE) { 194 if (is_on == BITBANG_CS_INACTIVE) {
187 /* Deselect the slave on the SPI bus */ 195 /* Deselect the slave on the SPI bus */
188 xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); 196 xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
189 } else if (is_on == BITBANG_CS_ACTIVE) { 197 return;
190 /* Set the SPI clock phase and polarity */
191 u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
192 & ~XSPI_CR_MODE_MASK;
193 if (spi->mode & SPI_CPHA)
194 cr |= XSPI_CR_CPHA;
195 if (spi->mode & SPI_CPOL)
196 cr |= XSPI_CR_CPOL;
197 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
198
199 /* We do not check spi->max_speed_hz here as the SPI clock
200 * frequency is not software programmable (the IP block design
201 * parameter)
202 */
203
204 /* Activate the chip select */
205 xspi->write_fn(~(0x0001 << spi->chip_select),
206 xspi->regs + XSPI_SSR_OFFSET);
207 } 198 }
199
200 /* Set the SPI clock phase and polarity */
201 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
202 if (spi->mode & SPI_CPHA)
203 cr |= XSPI_CR_CPHA;
204 if (spi->mode & SPI_CPOL)
205 cr |= XSPI_CR_CPOL;
206 if (spi->mode & SPI_LSB_FIRST)
207 cr |= XSPI_CR_LSB_FIRST;
208 if (spi->mode & SPI_LOOP)
209 cr |= XSPI_CR_LOOP;
210 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
211
212 /* We do not check spi->max_speed_hz here as the SPI clock
213 * frequency is not software programmable (the IP block design
214 * parameter)
215 */
216
217 cs = xspi->cs_inactive;
218 cs ^= BIT(spi->chip_select);
219
220 /* Activate the chip select */
221 xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
208} 222}
209 223
210/* spi_bitbang requires custom setup_transfer() to be defined if there is a 224/* spi_bitbang requires custom setup_transfer() to be defined if there is a
@@ -213,85 +227,85 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
213static int xilinx_spi_setup_transfer(struct spi_device *spi, 227static int xilinx_spi_setup_transfer(struct spi_device *spi,
214 struct spi_transfer *t) 228 struct spi_transfer *t)
215{ 229{
216 return 0; 230 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
217}
218 231
219static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) 232 if (spi->mode & SPI_CS_HIGH)
220{ 233 xspi->cs_inactive &= ~BIT(spi->chip_select);
221 u8 sr; 234 else
235 xspi->cs_inactive |= BIT(spi->chip_select);
222 236
223 /* Fill the Tx FIFO with as many bytes as possible */ 237 return 0;
224 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
225 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
226 if (xspi->tx_ptr)
227 xspi->tx_fn(xspi);
228 else
229 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
230 xspi->remaining_bytes -= xspi->bits_per_word / 8;
231 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
232 }
233} 238}
234 239
235static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) 240static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
236{ 241{
237 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 242 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
238 u32 ipif_ier; 243 int remaining_words; /* the number of words left to transfer */
244 bool use_irq = false;
245 u16 cr = 0;
239 246
240 /* We get here with transmitter inhibited */ 247 /* We get here with transmitter inhibited */
241 248
242 xspi->tx_ptr = t->tx_buf; 249 xspi->tx_ptr = t->tx_buf;
243 xspi->rx_ptr = t->rx_buf; 250 xspi->rx_ptr = t->rx_buf;
244 xspi->remaining_bytes = t->len; 251 remaining_words = t->len / xspi->bytes_per_word;
245 reinit_completion(&xspi->done); 252 reinit_completion(&xspi->done);
246 253
254 if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
255 use_irq = true;
256 xspi->write_fn(XSPI_INTR_TX_EMPTY,
257 xspi->regs + XIPIF_V123B_IISR_OFFSET);
258 /* Enable the global IPIF interrupt */
259 xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
260 xspi->regs + XIPIF_V123B_DGIER_OFFSET);
261 /* Inhibit irq to avoid spurious irqs on tx_empty*/
262 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
263 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
264 xspi->regs + XSPI_CR_OFFSET);
265 }
247 266
248 /* Enable the transmit empty interrupt, which we use to determine 267 while (remaining_words) {
249 * progress on the transmission. 268 int n_words, tx_words, rx_words;
250 */
251 ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
252 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
253 xspi->regs + XIPIF_V123B_IIER_OFFSET);
254 269
255 for (;;) { 270 n_words = min(remaining_words, xspi->buffer_size);
256 u16 cr;
257 u8 sr;
258 271
259 xilinx_spi_fill_tx_fifo(xspi); 272 tx_words = n_words;
273 while (tx_words--)
274 xilinx_spi_tx(xspi);
260 275
261 /* Start the transfer by not inhibiting the transmitter any 276 /* Start the transfer by not inhibiting the transmitter any
262 * longer 277 * longer
263 */ 278 */
264 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
265 ~XSPI_CR_TRANS_INHIBIT;
266 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
267 279
268 wait_for_completion(&xspi->done); 280 if (use_irq) {
281 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
282 wait_for_completion(&xspi->done);
283 } else
284 while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
285 XSPI_SR_TX_EMPTY_MASK))
286 ;
269 287
270 /* A transmit has just completed. Process received data and 288 /* A transmit has just completed. Process received data and
271 * check for more data to transmit. Always inhibit the 289 * check for more data to transmit. Always inhibit the
272 * transmitter while the Isr refills the transmit register/FIFO, 290 * transmitter while the Isr refills the transmit register/FIFO,
273 * or make sure it is stopped if we're done. 291 * or make sure it is stopped if we're done.
274 */ 292 */
275 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); 293 if (use_irq)
276 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, 294 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
277 xspi->regs + XSPI_CR_OFFSET); 295 xspi->regs + XSPI_CR_OFFSET);
278 296
279 /* Read out all the data from the Rx FIFO */ 297 /* Read out all the data from the Rx FIFO */
280 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); 298 rx_words = n_words;
281 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { 299 while (rx_words--)
282 xspi->rx_fn(xspi); 300 xilinx_spi_rx(xspi);
283 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); 301
284 } 302 remaining_words -= n_words;
285
286 /* See if there is more data to send */
287 if (xspi->remaining_bytes <= 0)
288 break;
289 } 303 }
290 304
291 /* Disable the transmit empty interrupt */ 305 if (use_irq)
292 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); 306 xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
293 307
294 return t->len - xspi->remaining_bytes; 308 return t->len;
295} 309}
296 310
297 311
@@ -316,6 +330,28 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
316 return IRQ_HANDLED; 330 return IRQ_HANDLED;
317} 331}
318 332
333static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
334{
335 u8 sr;
336 int n_words = 0;
337
338 /*
339 * Before the buffer_size detection we reset the core
340 * to make sure we start with a clean state.
341 */
342 xspi->write_fn(XIPIF_V123B_RESET_MASK,
343 xspi->regs + XIPIF_V123B_RESETR_OFFSET);
344
345 /* Fill the Tx FIFO with as many words as possible */
346 do {
347 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
348 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
349 n_words++;
350 } while (!(sr & XSPI_SR_TX_FULL_MASK));
351
352 return n_words;
353}
354
319static const struct of_device_id xilinx_spi_of_match[] = { 355static const struct of_device_id xilinx_spi_of_match[] = {
320 { .compatible = "xlnx,xps-spi-2.00.a", }, 356 { .compatible = "xlnx,xps-spi-2.00.a", },
321 { .compatible = "xlnx,xps-spi-2.00.b", }, 357 { .compatible = "xlnx,xps-spi-2.00.b", },
@@ -348,14 +384,21 @@ static int xilinx_spi_probe(struct platform_device *pdev)
348 return -EINVAL; 384 return -EINVAL;
349 } 385 }
350 386
387 if (num_cs > XILINX_SPI_MAX_CS) {
388 dev_err(&pdev->dev, "Invalid number of spi slaves\n");
389 return -EINVAL;
390 }
391
351 master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi)); 392 master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
352 if (!master) 393 if (!master)
353 return -ENODEV; 394 return -ENODEV;
354 395
355 /* the spi->mode bits understood by this driver: */ 396 /* the spi->mode bits understood by this driver: */
356 master->mode_bits = SPI_CPOL | SPI_CPHA; 397 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
398 SPI_CS_HIGH;
357 399
358 xspi = spi_master_get_devdata(master); 400 xspi = spi_master_get_devdata(master);
401 xspi->cs_inactive = 0xffffffff;
359 xspi->bitbang.master = master; 402 xspi->bitbang.master = master;
360 xspi->bitbang.chipselect = xilinx_spi_chipselect; 403 xspi->bitbang.chipselect = xilinx_spi_chipselect;
361 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; 404 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
@@ -392,35 +435,20 @@ static int xilinx_spi_probe(struct platform_device *pdev)
392 } 435 }
393 436
394 master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word); 437 master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
395 xspi->bits_per_word = bits_per_word; 438 xspi->bytes_per_word = bits_per_word / 8;
396 if (xspi->bits_per_word == 8) { 439 xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
397 xspi->tx_fn = xspi_tx8;
398 xspi->rx_fn = xspi_rx8;
399 } else if (xspi->bits_per_word == 16) {
400 xspi->tx_fn = xspi_tx16;
401 xspi->rx_fn = xspi_rx16;
402 } else if (xspi->bits_per_word == 32) {
403 xspi->tx_fn = xspi_tx32;
404 xspi->rx_fn = xspi_rx32;
405 } else {
406 ret = -EINVAL;
407 goto put_master;
408 }
409
410 /* SPI controller initializations */
411 xspi_init_hw(xspi);
412 440
413 xspi->irq = platform_get_irq(pdev, 0); 441 xspi->irq = platform_get_irq(pdev, 0);
414 if (xspi->irq < 0) { 442 if (xspi->irq >= 0) {
415 ret = xspi->irq; 443 /* Register for SPI Interrupt */
416 goto put_master; 444 ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
445 dev_name(&pdev->dev), xspi);
446 if (ret)
447 goto put_master;
417 } 448 }
418 449
419 /* Register for SPI Interrupt */ 450 /* SPI controller initializations */
420 ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0, 451 xspi_init_hw(xspi);
421 dev_name(&pdev->dev), xspi);
422 if (ret)
423 goto put_master;
424 452
425 ret = spi_bitbang_start(&xspi->bitbang); 453 ret = spi_bitbang_start(&xspi->bitbang);
426 if (ret) { 454 if (ret) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 66a70e9bc743..c64a3e59fce3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 16 */
21 17
22#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -788,7 +784,7 @@ static int spi_transfer_one_message(struct spi_master *master,
788 struct spi_transfer *xfer; 784 struct spi_transfer *xfer;
789 bool keep_cs = false; 785 bool keep_cs = false;
790 int ret = 0; 786 int ret = 0;
791 int ms = 1; 787 unsigned long ms = 1;
792 788
793 spi_set_cs(msg->spi, true); 789 spi_set_cs(msg->spi, true);
794 790
@@ -875,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master)
875EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 871EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
876 872
877/** 873/**
878 * spi_pump_messages - kthread work function which processes spi message queue 874 * __spi_pump_messages - function which processes spi message queue
879 * @work: pointer to kthread work struct contained in the master struct 875 * @master: master to process queue for
876 * @in_kthread: true if we are in the context of the message pump thread
880 * 877 *
881 * This function checks if there is any spi message in the queue that 878 * This function checks if there is any spi message in the queue that
882 * needs processing and if so call out to the driver to initialize hardware 879 * needs processing and if so call out to the driver to initialize hardware
883 * and transfer each message. 880 * and transfer each message.
884 * 881 *
882 * Note that it is called both from the kthread itself and also from
883 * inside spi_sync(); the queue extraction handling at the top of the
884 * function should deal with this safely.
885 */ 885 */
886static void spi_pump_messages(struct kthread_work *work) 886static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
887{ 887{
888 struct spi_master *master =
889 container_of(work, struct spi_master, pump_messages);
890 unsigned long flags; 888 unsigned long flags;
891 bool was_busy = false; 889 bool was_busy = false;
892 int ret; 890 int ret;
893 891
894 /* Lock queue and check for queue work */ 892 /* Lock queue */
895 spin_lock_irqsave(&master->queue_lock, flags); 893 spin_lock_irqsave(&master->queue_lock, flags);
894
895 /* Make sure we are not already running a message */
896 if (master->cur_msg) {
897 spin_unlock_irqrestore(&master->queue_lock, flags);
898 return;
899 }
900
901 /* If another context is idling the device then defer */
902 if (master->idling) {
903 queue_kthread_work(&master->kworker, &master->pump_messages);
904 spin_unlock_irqrestore(&master->queue_lock, flags);
905 return;
906 }
907
908 /* Check if the queue is idle */
896 if (list_empty(&master->queue) || !master->running) { 909 if (list_empty(&master->queue) || !master->running) {
897 if (!master->busy) { 910 if (!master->busy) {
898 spin_unlock_irqrestore(&master->queue_lock, flags); 911 spin_unlock_irqrestore(&master->queue_lock, flags);
899 return; 912 return;
900 } 913 }
914
915 /* Only do teardown in the thread */
916 if (!in_kthread) {
917 queue_kthread_work(&master->kworker,
918 &master->pump_messages);
919 spin_unlock_irqrestore(&master->queue_lock, flags);
920 return;
921 }
922
901 master->busy = false; 923 master->busy = false;
924 master->idling = true;
902 spin_unlock_irqrestore(&master->queue_lock, flags); 925 spin_unlock_irqrestore(&master->queue_lock, flags);
926
903 kfree(master->dummy_rx); 927 kfree(master->dummy_rx);
904 master->dummy_rx = NULL; 928 master->dummy_rx = NULL;
905 kfree(master->dummy_tx); 929 kfree(master->dummy_tx);
@@ -913,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work)
913 pm_runtime_put_autosuspend(master->dev.parent); 937 pm_runtime_put_autosuspend(master->dev.parent);
914 } 938 }
915 trace_spi_master_idle(master); 939 trace_spi_master_idle(master);
916 return;
917 }
918 940
919 /* Make sure we are not already running a message */ 941 spin_lock_irqsave(&master->queue_lock, flags);
920 if (master->cur_msg) { 942 master->idling = false;
921 spin_unlock_irqrestore(&master->queue_lock, flags); 943 spin_unlock_irqrestore(&master->queue_lock, flags);
922 return; 944 return;
923 } 945 }
946
924 /* Extract head of queue */ 947 /* Extract head of queue */
925 master->cur_msg = 948 master->cur_msg =
926 list_first_entry(&master->queue, struct spi_message, queue); 949 list_first_entry(&master->queue, struct spi_message, queue);
@@ -985,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work)
985 } 1008 }
986} 1009}
987 1010
1011/**
1012 * spi_pump_messages - kthread work function which processes spi message queue
1013 * @work: pointer to kthread work struct contained in the master struct
1014 */
1015static void spi_pump_messages(struct kthread_work *work)
1016{
1017 struct spi_master *master =
1018 container_of(work, struct spi_master, pump_messages);
1019
1020 __spi_pump_messages(master, true);
1021}
1022
988static int spi_init_queue(struct spi_master *master) 1023static int spi_init_queue(struct spi_master *master)
989{ 1024{
990 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1025 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
991 1026
992 INIT_LIST_HEAD(&master->queue);
993 spin_lock_init(&master->queue_lock);
994
995 master->running = false; 1027 master->running = false;
996 master->busy = false; 1028 master->busy = false;
997 1029
@@ -1161,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master)
1161 return 0; 1193 return 0;
1162} 1194}
1163 1195
1164/** 1196static int __spi_queued_transfer(struct spi_device *spi,
1165 * spi_queued_transfer - transfer function for queued transfers 1197 struct spi_message *msg,
1166 * @spi: spi device which is requesting transfer 1198 bool need_pump)
1167 * @msg: spi message which is to handled is queued to driver queue
1168 */
1169static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1170{ 1199{
1171 struct spi_master *master = spi->master; 1200 struct spi_master *master = spi->master;
1172 unsigned long flags; 1201 unsigned long flags;
@@ -1181,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1181 msg->status = -EINPROGRESS; 1210 msg->status = -EINPROGRESS;
1182 1211
1183 list_add_tail(&msg->queue, &master->queue); 1212 list_add_tail(&msg->queue, &master->queue);
1184 if (!master->busy) 1213 if (!master->busy && need_pump)
1185 queue_kthread_work(&master->kworker, &master->pump_messages); 1214 queue_kthread_work(&master->kworker, &master->pump_messages);
1186 1215
1187 spin_unlock_irqrestore(&master->queue_lock, flags); 1216 spin_unlock_irqrestore(&master->queue_lock, flags);
1188 return 0; 1217 return 0;
1189} 1218}
1190 1219
1220/**
1221 * spi_queued_transfer - transfer function for queued transfers
1222 * @spi: spi device which is requesting transfer
1223 * @msg: spi message which is to handled is queued to driver queue
1224 */
1225static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1226{
1227 return __spi_queued_transfer(spi, msg, true);
1228}
1229
1191static int spi_master_initialize_queue(struct spi_master *master) 1230static int spi_master_initialize_queue(struct spi_master *master)
1192{ 1231{
1193 int ret; 1232 int ret;
@@ -1609,6 +1648,8 @@ int spi_register_master(struct spi_master *master)
1609 dynamic = 1; 1648 dynamic = 1;
1610 } 1649 }
1611 1650
1651 INIT_LIST_HEAD(&master->queue);
1652 spin_lock_init(&master->queue_lock);
1612 spin_lock_init(&master->bus_lock_spinlock); 1653 spin_lock_init(&master->bus_lock_spinlock);
1613 mutex_init(&master->bus_lock_mutex); 1654 mutex_init(&master->bus_lock_mutex);
1614 master->bus_lock_flag = 0; 1655 master->bus_lock_flag = 0;
@@ -2114,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2114 DECLARE_COMPLETION_ONSTACK(done); 2155 DECLARE_COMPLETION_ONSTACK(done);
2115 int status; 2156 int status;
2116 struct spi_master *master = spi->master; 2157 struct spi_master *master = spi->master;
2158 unsigned long flags;
2159
2160 status = __spi_validate(spi, message);
2161 if (status != 0)
2162 return status;
2117 2163
2118 message->complete = spi_complete; 2164 message->complete = spi_complete;
2119 message->context = &done; 2165 message->context = &done;
2166 message->spi = spi;
2120 2167
2121 if (!bus_locked) 2168 if (!bus_locked)
2122 mutex_lock(&master->bus_lock_mutex); 2169 mutex_lock(&master->bus_lock_mutex);
2123 2170
2124 status = spi_async_locked(spi, message); 2171 /* If we're not using the legacy transfer method then we will
2172 * try to transfer in the calling context so special case.
2173 * This code would be less tricky if we could remove the
2174 * support for driver implemented message queues.
2175 */
2176 if (master->transfer == spi_queued_transfer) {
2177 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2178
2179 trace_spi_message_submit(message);
2180
2181 status = __spi_queued_transfer(spi, message, false);
2182
2183 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2184 } else {
2185 status = spi_async_locked(spi, message);
2186 }
2125 2187
2126 if (!bus_locked) 2188 if (!bus_locked)
2127 mutex_unlock(&master->bus_lock_mutex); 2189 mutex_unlock(&master->bus_lock_mutex);
2128 2190
2129 if (status == 0) { 2191 if (status == 0) {
2192 /* Push out the messages in the calling context if we
2193 * can.
2194 */
2195 if (master->transfer == spi_queued_transfer)
2196 __spi_pump_messages(master, false);
2197
2130 wait_for_completion(&done); 2198 wait_for_completion(&done);
2131 status = message->status; 2199 status = message->status;
2132 } 2200 }
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 6941e04afb8c..4eb7a980e670 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 17 */
22 18
23#include <linux/init.h> 19#include <linux/init.h>
@@ -317,6 +313,37 @@ done:
317 return status; 313 return status;
318} 314}
319 315
316static struct spi_ioc_transfer *
317spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
318 unsigned *n_ioc)
319{
320 struct spi_ioc_transfer *ioc;
321 u32 tmp;
322
323 /* Check type, command number and direction */
324 if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
325 || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
326 || _IOC_DIR(cmd) != _IOC_WRITE)
327 return ERR_PTR(-ENOTTY);
328
329 tmp = _IOC_SIZE(cmd);
330 if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
331 return ERR_PTR(-EINVAL);
332 *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
333 if (*n_ioc == 0)
334 return NULL;
335
336 /* copy into scratch area */
337 ioc = kmalloc(tmp, GFP_KERNEL);
338 if (!ioc)
339 return ERR_PTR(-ENOMEM);
340 if (__copy_from_user(ioc, u_ioc, tmp)) {
341 kfree(ioc);
342 return ERR_PTR(-EFAULT);
343 }
344 return ioc;
345}
346
320static long 347static long
321spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 348spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
322{ 349{
@@ -456,32 +483,15 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
456 483
457 default: 484 default:
458 /* segmented and/or full-duplex I/O request */ 485 /* segmented and/or full-duplex I/O request */
459 if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) 486 /* Check message and copy into scratch area */
460 || _IOC_DIR(cmd) != _IOC_WRITE) { 487 ioc = spidev_get_ioc_message(cmd,
461 retval = -ENOTTY; 488 (struct spi_ioc_transfer __user *)arg, &n_ioc);
462 break; 489 if (IS_ERR(ioc)) {
463 } 490 retval = PTR_ERR(ioc);
464
465 tmp = _IOC_SIZE(cmd);
466 if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
467 retval = -EINVAL;
468 break;
469 }
470 n_ioc = tmp / sizeof(struct spi_ioc_transfer);
471 if (n_ioc == 0)
472 break;
473
474 /* copy into scratch area */
475 ioc = kmalloc(tmp, GFP_KERNEL);
476 if (!ioc) {
477 retval = -ENOMEM;
478 break;
479 }
480 if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
481 kfree(ioc);
482 retval = -EFAULT;
483 break; 491 break;
484 } 492 }
493 if (!ioc)
494 break; /* n_ioc is also 0 */
485 495
486 /* translate to spi_message, execute */ 496 /* translate to spi_message, execute */
487 retval = spidev_message(spidev, ioc, n_ioc); 497 retval = spidev_message(spidev, ioc, n_ioc);
@@ -496,8 +506,67 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
496 506
497#ifdef CONFIG_COMPAT 507#ifdef CONFIG_COMPAT
498static long 508static long
509spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
510 unsigned long arg)
511{
512 struct spi_ioc_transfer __user *u_ioc;
513 int retval = 0;
514 struct spidev_data *spidev;
515 struct spi_device *spi;
516 unsigned n_ioc, n;
517 struct spi_ioc_transfer *ioc;
518
519 u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
520 if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
521 return -EFAULT;
522
523 /* guard against device removal before, or while,
524 * we issue this ioctl.
525 */
526 spidev = filp->private_data;
527 spin_lock_irq(&spidev->spi_lock);
528 spi = spi_dev_get(spidev->spi);
529 spin_unlock_irq(&spidev->spi_lock);
530
531 if (spi == NULL)
532 return -ESHUTDOWN;
533
534 /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
535 mutex_lock(&spidev->buf_lock);
536
537 /* Check message and copy into scratch area */
538 ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
539 if (IS_ERR(ioc)) {
540 retval = PTR_ERR(ioc);
541 goto done;
542 }
543 if (!ioc)
544 goto done; /* n_ioc is also 0 */
545
546 /* Convert buffer pointers */
547 for (n = 0; n < n_ioc; n++) {
548 ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
549 ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
550 }
551
552 /* translate to spi_message, execute */
553 retval = spidev_message(spidev, ioc, n_ioc);
554 kfree(ioc);
555
556done:
557 mutex_unlock(&spidev->buf_lock);
558 spi_dev_put(spi);
559 return retval;
560}
561
562static long
499spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 563spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
500{ 564{
565 if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
566 && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
567 && _IOC_DIR(cmd) == _IOC_WRITE)
568 return spidev_compat_ioc_message(filp, cmd, arg);
569
501 return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); 570 return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
502} 571}
503#else 572#else
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d415d69dc237..9484d5652ca5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -650,8 +650,10 @@ static void handle_rx(struct vhost_net *net)
650 break; 650 break;
651 } 651 }
652 /* TODO: Should check and handle checksum. */ 652 /* TODO: Should check and handle checksum. */
653
654 hdr.num_buffers = cpu_to_vhost16(vq, headcount);
653 if (likely(mergeable) && 655 if (likely(mergeable) &&
654 memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, 656 memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
655 offsetof(typeof(hdr), num_buffers), 657 offsetof(typeof(hdr), num_buffers),
656 sizeof hdr.num_buffers)) { 658 sizeof hdr.num_buffers)) {
657 vq_err(vq, "Failed num_buffers write"); 659 vq_err(vq, "Failed num_buffers write");
diff --git a/fs/aio.c b/fs/aio.c
index 1b7893ecc296..c428871f1093 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1140,6 +1140,13 @@ static long aio_read_events_ring(struct kioctx *ctx,
1140 long ret = 0; 1140 long ret = 0;
1141 int copy_ret; 1141 int copy_ret;
1142 1142
1143 /*
1144 * The mutex can block and wake us up and that will cause
1145 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1146 * and repeat. This should be rare enough that it doesn't cause
1147 * peformance issues. See the comment in read_events() for more detail.
1148 */
1149 sched_annotate_sleep();
1143 mutex_lock(&ctx->ring_lock); 1150 mutex_lock(&ctx->ring_lock);
1144 1151
1145 /* Access to ->ring_pages here is protected by ctx->ring_lock. */ 1152 /* Access to ->ring_pages here is protected by ctx->ring_lock. */
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index a66768ebc8d1..80e9c18ea64f 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -8,6 +8,7 @@ config BTRFS_FS
8 select LZO_DECOMPRESS 8 select LZO_DECOMPRESS
9 select RAID6_PQ 9 select RAID6_PQ
10 select XOR_BLOCKS 10 select XOR_BLOCKS
11 select SRCU
11 12
12 help 13 help
13 Btrfs is a general purpose copy-on-write filesystem with extents, 14 Btrfs is a general purpose copy-on-write filesystem with extents,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9a02da16f2be..1a9585d4380a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2591 } 2591 }
2592 2592
2593 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 2593 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2594 blk_finish_plug(&plug);
2594 mutex_unlock(&log_root_tree->log_mutex); 2595 mutex_unlock(&log_root_tree->log_mutex);
2595 ret = root_log_ctx.log_ret; 2596 ret = root_log_ctx.log_ret;
2596 goto out; 2597 goto out;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 9c56ef776407..7febcf2475c5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
606 *flags = CIFSSEC_MUST_NTLMV2; 606 *flags = CIFSSEC_MUST_NTLMV2;
607 else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM) 607 else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
608 *flags = CIFSSEC_MUST_NTLM; 608 *flags = CIFSSEC_MUST_NTLM;
609 else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) 609 else if (CIFSSEC_MUST_LANMAN &&
610 (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
610 *flags = CIFSSEC_MUST_LANMAN; 611 *flags = CIFSSEC_MUST_LANMAN;
611 else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) 612 else if (CIFSSEC_MUST_PLNTXT &&
613 (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
612 *flags = CIFSSEC_MUST_PLNTXT; 614 *flags = CIFSSEC_MUST_PLNTXT;
613 615
614 *flags |= signflags; 616 *flags |= signflags;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 96b7e9b7706d..74f12877493a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
366 struct cifsLockInfo *li, *tmp; 366 struct cifsLockInfo *li, *tmp;
367 struct cifs_fid fid; 367 struct cifs_fid fid;
368 struct cifs_pending_open open; 368 struct cifs_pending_open open;
369 bool oplock_break_cancelled;
369 370
370 spin_lock(&cifs_file_list_lock); 371 spin_lock(&cifs_file_list_lock);
371 if (--cifs_file->count > 0) { 372 if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
397 } 398 }
398 spin_unlock(&cifs_file_list_lock); 399 spin_unlock(&cifs_file_list_lock);
399 400
400 cancel_work_sync(&cifs_file->oplock_break); 401 oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
401 402
402 if (!tcon->need_reconnect && !cifs_file->invalidHandle) { 403 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
403 struct TCP_Server_Info *server = tcon->ses->server; 404 struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
409 _free_xid(xid); 410 _free_xid(xid);
410 } 411 }
411 412
413 if (oplock_break_cancelled)
414 cifs_done_oplock_break(cifsi);
415
412 cifs_del_pending_open(&open); 416 cifs_del_pending_open(&open);
413 417
414 /* 418 /*
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6c1566366a66..a4232ec4f2ba 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
221 } 221 }
222 222
223 rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); 223 rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
224 memset(wpwd, 0, 129 * sizeof(__le16)); 224 memzero_explicit(wpwd, sizeof(wpwd));
225 225
226 return rc; 226 return rc;
227} 227}
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 91093cd74f0d..385704027575 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -141,7 +141,6 @@ enum {
141 * @ti_save: Backup of journal_info field of task_struct 141 * @ti_save: Backup of journal_info field of task_struct
142 * @ti_flags: Flags 142 * @ti_flags: Flags
143 * @ti_count: Nest level 143 * @ti_count: Nest level
144 * @ti_garbage: List of inode to be put when releasing semaphore
145 */ 144 */
146struct nilfs_transaction_info { 145struct nilfs_transaction_info {
147 u32 ti_magic; 146 u32 ti_magic;
@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
150 one of other filesystems has a bug. */ 149 one of other filesystems has a bug. */
151 unsigned short ti_flags; 150 unsigned short ti_flags;
152 unsigned short ti_count; 151 unsigned short ti_count;
153 struct list_head ti_garbage;
154}; 152};
155 153
156/* ti_magic */ 154/* ti_magic */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 7ef18fc656c2..469086b9f99b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
305 ti->ti_count = 0; 305 ti->ti_count = 0;
306 ti->ti_save = cur_ti; 306 ti->ti_save = cur_ti;
307 ti->ti_magic = NILFS_TI_MAGIC; 307 ti->ti_magic = NILFS_TI_MAGIC;
308 INIT_LIST_HEAD(&ti->ti_garbage);
309 current->journal_info = ti; 308 current->journal_info = ti;
310 309
311 for (;;) { 310 for (;;) {
@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)
332 331
333 up_write(&nilfs->ns_segctor_sem); 332 up_write(&nilfs->ns_segctor_sem);
334 current->journal_info = ti->ti_save; 333 current->journal_info = ti->ti_save;
335 if (!list_empty(&ti->ti_garbage))
336 nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
337} 334}
338 335
339static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, 336static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
746 } 743 }
747} 744}
748 745
746static void nilfs_iput_work_func(struct work_struct *work)
747{
748 struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
749 sc_iput_work);
750 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
751
752 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
753}
754
749static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, 755static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
750 struct nilfs_root *root) 756 struct nilfs_root *root)
751{ 757{
@@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1900static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, 1906static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1901 struct the_nilfs *nilfs) 1907 struct the_nilfs *nilfs)
1902{ 1908{
1903 struct nilfs_transaction_info *ti = current->journal_info;
1904 struct nilfs_inode_info *ii, *n; 1909 struct nilfs_inode_info *ii, *n;
1910 int defer_iput = false;
1905 1911
1906 spin_lock(&nilfs->ns_inode_lock); 1912 spin_lock(&nilfs->ns_inode_lock);
1907 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { 1913 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
@@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1912 clear_bit(NILFS_I_BUSY, &ii->i_state); 1918 clear_bit(NILFS_I_BUSY, &ii->i_state);
1913 brelse(ii->i_bh); 1919 brelse(ii->i_bh);
1914 ii->i_bh = NULL; 1920 ii->i_bh = NULL;
1915 list_move_tail(&ii->i_dirty, &ti->ti_garbage); 1921 list_del_init(&ii->i_dirty);
1922 if (!ii->vfs_inode.i_nlink) {
1923 /*
1924 * Defer calling iput() to avoid a deadlock
1925 * over I_SYNC flag for inodes with i_nlink == 0
1926 */
1927 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
1928 defer_iput = true;
1929 } else {
1930 spin_unlock(&nilfs->ns_inode_lock);
1931 iput(&ii->vfs_inode);
1932 spin_lock(&nilfs->ns_inode_lock);
1933 }
1916 } 1934 }
1917 spin_unlock(&nilfs->ns_inode_lock); 1935 spin_unlock(&nilfs->ns_inode_lock);
1936
1937 if (defer_iput)
1938 schedule_work(&sci->sc_iput_work);
1918} 1939}
1919 1940
1920/* 1941/*
@@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2583 INIT_LIST_HEAD(&sci->sc_segbufs); 2604 INIT_LIST_HEAD(&sci->sc_segbufs);
2584 INIT_LIST_HEAD(&sci->sc_write_logs); 2605 INIT_LIST_HEAD(&sci->sc_write_logs);
2585 INIT_LIST_HEAD(&sci->sc_gc_inodes); 2606 INIT_LIST_HEAD(&sci->sc_gc_inodes);
2607 INIT_LIST_HEAD(&sci->sc_iput_queue);
2608 INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2586 init_timer(&sci->sc_timer); 2609 init_timer(&sci->sc_timer);
2587 2610
2588 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; 2611 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2609 ret = nilfs_segctor_construct(sci, SC_LSEG_SR); 2632 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2610 nilfs_transaction_unlock(sci->sc_super); 2633 nilfs_transaction_unlock(sci->sc_super);
2611 2634
2635 flush_work(&sci->sc_iput_work);
2636
2612 } while (ret && retrycount-- > 0); 2637 } while (ret && retrycount-- > 0);
2613} 2638}
2614 2639
@@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2633 || sci->sc_seq_request != sci->sc_seq_done); 2658 || sci->sc_seq_request != sci->sc_seq_done);
2634 spin_unlock(&sci->sc_state_lock); 2659 spin_unlock(&sci->sc_state_lock);
2635 2660
2661 if (flush_work(&sci->sc_iput_work))
2662 flag = true;
2663
2636 if (flag || !nilfs_segctor_confirm(sci)) 2664 if (flag || !nilfs_segctor_confirm(sci))
2637 nilfs_segctor_write_out(sci); 2665 nilfs_segctor_write_out(sci);
2638 2666
@@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2642 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); 2670 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2643 } 2671 }
2644 2672
2673 if (!list_empty(&sci->sc_iput_queue)) {
2674 nilfs_warning(sci->sc_super, __func__,
2675 "iput queue is not empty\n");
2676 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2677 }
2678
2645 WARN_ON(!list_empty(&sci->sc_segbufs)); 2679 WARN_ON(!list_empty(&sci->sc_segbufs));
2646 WARN_ON(!list_empty(&sci->sc_write_logs)); 2680 WARN_ON(!list_empty(&sci->sc_write_logs));
2647 2681
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 38a1d0013314..a48d6de1e02c 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -26,6 +26,7 @@
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <linux/buffer_head.h> 28#include <linux/buffer_head.h>
29#include <linux/workqueue.h>
29#include <linux/nilfs2_fs.h> 30#include <linux/nilfs2_fs.h>
30#include "nilfs.h" 31#include "nilfs.h"
31 32
@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
92 * @sc_nblk_inc: Block count of current generation 93 * @sc_nblk_inc: Block count of current generation
93 * @sc_dirty_files: List of files to be written 94 * @sc_dirty_files: List of files to be written
94 * @sc_gc_inodes: List of GC inodes having blocks to be written 95 * @sc_gc_inodes: List of GC inodes having blocks to be written
96 * @sc_iput_queue: list of inodes for which iput should be done
97 * @sc_iput_work: work struct to defer iput call
95 * @sc_freesegs: array of segment numbers to be freed 98 * @sc_freesegs: array of segment numbers to be freed
96 * @sc_nfreesegs: number of segments on @sc_freesegs 99 * @sc_nfreesegs: number of segments on @sc_freesegs
97 * @sc_dsync_inode: inode whose data pages are written for a sync operation 100 * @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -135,6 +138,8 @@ struct nilfs_sc_info {
135 138
136 struct list_head sc_dirty_files; 139 struct list_head sc_dirty_files;
137 struct list_head sc_gc_inodes; 140 struct list_head sc_gc_inodes;
141 struct list_head sc_iput_queue;
142 struct work_struct sc_iput_work;
138 143
139 __u64 *sc_freesegs; 144 __u64 *sc_freesegs;
140 size_t sc_nfreesegs; 145 size_t sc_nfreesegs;
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 22c629eedd82..2a24249b30af 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,5 +1,6 @@
1config FSNOTIFY 1config FSNOTIFY
2 def_bool n 2 def_bool n
3 select SRCU
3 4
4source "fs/notify/dnotify/Kconfig" 5source "fs/notify/dnotify/Kconfig"
5source "fs/notify/inotify/Kconfig" 6source "fs/notify/inotify/Kconfig"
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index c51df1dd237e..4a09975aac90 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -5,6 +5,7 @@
5config QUOTA 5config QUOTA
6 bool "Quota support" 6 bool "Quota support"
7 select QUOTACTL 7 select QUOTACTL
8 select SRCU
8 help 9 help
9 If you say Y here, you will be able to set per user limits for disk 10 If you say Y here, you will be able to set per user limits for disk
10 usage (also called disk quotas). Currently, it works for the 11 usage (also called disk quotas). Currently, it works for the
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 33063f872ee3..176bf816875e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -385,7 +385,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
385 385
386/* Is this type a native word size -- useful for atomic operations */ 386/* Is this type a native word size -- useful for atomic operations */
387#ifndef __native_word 387#ifndef __native_word
388# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) 388# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
389#endif 389#endif
390 390
391/* Compile time object size, -1 for unknown */ 391/* Compile time object size, -1 for unknown */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 515a35e2a48a..960e666c51e4 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
472/** 472/**
473 * vlan_get_protocol - get protocol EtherType. 473 * vlan_get_protocol - get protocol EtherType.
474 * @skb: skbuff to query 474 * @skb: skbuff to query
475 * @type: first vlan protocol
476 * @depth: buffer to store length of eth and vlan tags in bytes
475 * 477 *
476 * Returns the EtherType of the packet, regardless of whether it is 478 * Returns the EtherType of the packet, regardless of whether it is
477 * vlan encapsulated (normal or hardware accelerated) or not. 479 * vlan encapsulated (normal or hardware accelerated) or not.
478 */ 480 */
479static inline __be16 vlan_get_protocol(const struct sk_buff *skb) 481static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
482 int *depth)
480{ 483{
481 __be16 protocol = 0; 484 unsigned int vlan_depth = skb->mac_len;
482 485
483 if (vlan_tx_tag_present(skb) || 486 /* if type is 802.1Q/AD then the header should already be
484 skb->protocol != cpu_to_be16(ETH_P_8021Q)) 487 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
485 protocol = skb->protocol; 488 * ETH_HLEN otherwise
486 else { 489 */
487 __be16 proto, *protop; 490 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
488 protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, 491 if (vlan_depth) {
489 h_vlan_encapsulated_proto), 492 if (WARN_ON(vlan_depth < VLAN_HLEN))
490 sizeof(proto), &proto); 493 return 0;
491 if (likely(protop)) 494 vlan_depth -= VLAN_HLEN;
492 protocol = *protop; 495 } else {
496 vlan_depth = ETH_HLEN;
497 }
498 do {
499 struct vlan_hdr *vh;
500
501 if (unlikely(!pskb_may_pull(skb,
502 vlan_depth + VLAN_HLEN)))
503 return 0;
504
505 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
506 type = vh->h_vlan_encapsulated_proto;
507 vlan_depth += VLAN_HLEN;
508 } while (type == htons(ETH_P_8021Q) ||
509 type == htons(ETH_P_8021AD));
493 } 510 }
494 511
495 return protocol; 512 if (depth)
513 *depth = vlan_depth;
514
515 return type;
516}
517
518/**
519 * vlan_get_protocol - get protocol EtherType.
520 * @skb: skbuff to query
521 *
522 * Returns the EtherType of the packet, regardless of whether it is
523 * vlan encapsulated (normal or hardware accelerated) or not.
524 */
525static inline __be16 vlan_get_protocol(struct sk_buff *skb)
526{
527 return __vlan_get_protocol(skb, skb->protocol, NULL);
496} 528}
497 529
498static inline void vlan_set_encap_proto(struct sk_buff *skb, 530static inline void vlan_set_encap_proto(struct sk_buff *skb,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 25c791e295fd..5f3a9aa7225d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -97,7 +97,7 @@ enum {
97 MLX4_MAX_NUM_PF = 16, 97 MLX4_MAX_NUM_PF = 16,
98 MLX4_MAX_NUM_VF = 126, 98 MLX4_MAX_NUM_VF = 126,
99 MLX4_MAX_NUM_VF_P_PORT = 64, 99 MLX4_MAX_NUM_VF_P_PORT = 64,
100 MLX4_MFUNC_MAX = 80, 100 MLX4_MFUNC_MAX = 128,
101 MLX4_MAX_EQ_NUM = 1024, 101 MLX4_MAX_EQ_NUM = 1024,
102 MLX4_MFUNC_EQ_NUM = 4, 102 MLX4_MFUNC_EQ_NUM = 4,
103 MLX4_MFUNC_MAX_EQES = 8, 103 MLX4_MFUNC_MAX_EQES = 8,
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 77aed9ea1d26..dab545bb66b3 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -37,6 +37,7 @@
37#define SSDR (0x10) /* SSP Data Write/Data Read Register */ 37#define SSDR (0x10) /* SSP Data Write/Data Read Register */
38 38
39#define SSTO (0x28) /* SSP Time Out Register */ 39#define SSTO (0x28) /* SSP Time Out Register */
40#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
40#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ 41#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
41#define SSTSA (0x30) /* SSP Tx Timeslot Active */ 42#define SSTSA (0x30) /* SSP Tx Timeslot Active */
42#define SSRSA (0x34) /* SSP Rx Timeslot Active */ 43#define SSRSA (0x34) /* SSP Rx Timeslot Active */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 529bc946f450..a18b16f1dc0e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
524 * @member: the name of the hlist_node within the struct. 524 * @member: the name of the hlist_node within the struct.
525 */ 525 */
526#define hlist_for_each_entry_continue_rcu(pos, member) \ 526#define hlist_for_each_entry_continue_rcu(pos, member) \
527 for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 527 for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
528 typeof(*(pos)), member); \ 528 &(pos)->member)), typeof(*(pos)), member); \
529 pos; \ 529 pos; \
530 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 530 pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
531 typeof(*(pos)), member)) 531 &(pos)->member)), typeof(*(pos)), member))
532 532
533/** 533/**
534 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point 534 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
536 * @member: the name of the hlist_node within the struct. 536 * @member: the name of the hlist_node within the struct.
537 */ 537 */
538#define hlist_for_each_entry_continue_rcu_bh(pos, member) \ 538#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
539 for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 539 for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
540 typeof(*(pos)), member); \ 540 &(pos)->member)), typeof(*(pos)), member); \
541 pos; \ 541 pos; \
542 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 542 pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
543 typeof(*(pos)), member)) 543 &(pos)->member)), typeof(*(pos)), member))
544 544
545/** 545/**
546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point 546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ed4f5939a452..78097491cd99 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
331extern struct srcu_struct tasks_rcu_exit_srcu; 331extern struct srcu_struct tasks_rcu_exit_srcu;
332#define rcu_note_voluntary_context_switch(t) \ 332#define rcu_note_voluntary_context_switch(t) \
333 do { \ 333 do { \
334 rcu_all_qs(); \
334 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ 335 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
335 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ 336 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
336 } while (0) 337 } while (0)
337#else /* #ifdef CONFIG_TASKS_RCU */ 338#else /* #ifdef CONFIG_TASKS_RCU */
338#define TASKS_RCU(x) do { } while (0) 339#define TASKS_RCU(x) do { } while (0)
339#define rcu_note_voluntary_context_switch(t) do { } while (0) 340#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
340#endif /* #else #ifdef CONFIG_TASKS_RCU */ 341#endif /* #else #ifdef CONFIG_TASKS_RCU */
341 342
342/** 343/**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
582}) 583})
583#define __rcu_dereference_check(p, c, space) \ 584#define __rcu_dereference_check(p, c, space) \
584({ \ 585({ \
585 typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ 586 /* Dependency order vs. p above. */ \
587 typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
586 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ 588 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
587 rcu_dereference_sparse(p, space); \ 589 rcu_dereference_sparse(p, space); \
588 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ 590 ((typeof(*p) __force __kernel *)(________p1)); \
589 ((typeof(*p) __force __kernel *)(_________p1)); \
590}) 591})
591#define __rcu_dereference_protected(p, c, space) \ 592#define __rcu_dereference_protected(p, c, space) \
592({ \ 593({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
603}) 604})
604#define __rcu_dereference_index_check(p, c) \ 605#define __rcu_dereference_index_check(p, c) \
605({ \ 606({ \
606 typeof(p) _________p1 = ACCESS_ONCE(p); \ 607 /* Dependency order vs. p above. */ \
608 typeof(p) _________p1 = lockless_dereference(p); \
607 rcu_lockdep_assert(c, \ 609 rcu_lockdep_assert(c, \
608 "suspicious rcu_dereference_index_check() usage"); \ 610 "suspicious rcu_dereference_index_check() usage"); \
609 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
610 (_________p1); \ 611 (_________p1); \
611}) 612})
612 613
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 0e5366200154..937edaeb150d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
92} 92}
93 93
94/* 94/*
95 * Return the number of grace periods. 95 * Return the number of grace periods started.
96 */ 96 */
97static inline long rcu_batches_completed(void) 97static inline unsigned long rcu_batches_started(void)
98{ 98{
99 return 0; 99 return 0;
100} 100}
101 101
102/* 102/*
103 * Return the number of bottom-half grace periods. 103 * Return the number of bottom-half grace periods started.
104 */ 104 */
105static inline long rcu_batches_completed_bh(void) 105static inline unsigned long rcu_batches_started_bh(void)
106{
107 return 0;
108}
109
110/*
111 * Return the number of sched grace periods started.
112 */
113static inline unsigned long rcu_batches_started_sched(void)
114{
115 return 0;
116}
117
118/*
119 * Return the number of grace periods completed.
120 */
121static inline unsigned long rcu_batches_completed(void)
122{
123 return 0;
124}
125
126/*
127 * Return the number of bottom-half grace periods completed.
128 */
129static inline unsigned long rcu_batches_completed_bh(void)
130{
131 return 0;
132}
133
134/*
135 * Return the number of sched grace periods completed.
136 */
137static inline unsigned long rcu_batches_completed_sched(void)
106{ 138{
107 return 0; 139 return 0;
108} 140}
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
154 return true; 186 return true;
155} 187}
156 188
157
158#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 189#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
159 190
191static inline void rcu_all_qs(void)
192{
193}
194
160#endif /* __LINUX_RCUTINY_H */ 195#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 52953790dcca..d2e583a6aaca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
81 81
82extern unsigned long rcutorture_testseq; 82extern unsigned long rcutorture_testseq;
83extern unsigned long rcutorture_vernum; 83extern unsigned long rcutorture_vernum;
84long rcu_batches_completed(void); 84unsigned long rcu_batches_started(void);
85long rcu_batches_completed_bh(void); 85unsigned long rcu_batches_started_bh(void);
86long rcu_batches_completed_sched(void); 86unsigned long rcu_batches_started_sched(void);
87unsigned long rcu_batches_completed(void);
88unsigned long rcu_batches_completed_bh(void);
89unsigned long rcu_batches_completed_sched(void);
87void show_rcu_gp_kthreads(void); 90void show_rcu_gp_kthreads(void);
88 91
89void rcu_force_quiescent_state(void); 92void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
97 100
98bool rcu_is_watching(void); 101bool rcu_is_watching(void);
99 102
103void rcu_all_qs(void);
104
100#endif /* __LINUX_RCUTREE_H */ 105#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4419b99d8d6e..116655d92269 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -468,7 +468,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
468 * 468 *
469 * @reg: Offset of the register within the regmap bank 469 * @reg: Offset of the register within the regmap bank
470 * @lsb: lsb of the register field. 470 * @lsb: lsb of the register field.
471 * @reg: msb of the register field. 471 * @msb: msb of the register field.
472 * @id_size: port size if it has some ports 472 * @id_size: port size if it has some ports
473 * @id_offset: address offset for each ports 473 * @id_offset: address offset for each ports
474 */ 474 */
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5479394fefce..5dd65acc2a69 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -32,6 +32,8 @@ struct da9211_pdata {
32 * 2 : 2 phase 2 buck 32 * 2 : 2 phase 2 buck
33 */ 33 */
34 int num_buck; 34 int num_buck;
35 int gpio_ren[DA9211_MAX_REGULATORS];
36 struct device_node *reg_node[DA9211_MAX_REGULATORS];
35 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; 37 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
36}; 38};
37#endif 39#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 5f1e9ca47417..d4ad5b5a02bb 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -21,6 +21,7 @@
21 21
22struct regmap; 22struct regmap;
23struct regulator_dev; 23struct regulator_dev;
24struct regulator_config;
24struct regulator_init_data; 25struct regulator_init_data;
25struct regulator_enable_gpio; 26struct regulator_enable_gpio;
26 27
@@ -205,6 +206,15 @@ enum regulator_type {
205 * @supply_name: Identifying the regulator supply 206 * @supply_name: Identifying the regulator supply
206 * @of_match: Name used to identify regulator in DT. 207 * @of_match: Name used to identify regulator in DT.
207 * @regulators_node: Name of node containing regulator definitions in DT. 208 * @regulators_node: Name of node containing regulator definitions in DT.
209 * @of_parse_cb: Optional callback called only if of_match is present.
210 * Will be called for each regulator parsed from DT, during
211 * init_data parsing.
212 * The regulator_config passed as argument to the callback will
213 * be a copy of config passed to regulator_register, valid only
214 * for this particular call. Callback may freely change the
215 * config but it cannot store it for later usage.
216 * Callback should return 0 on success or negative ERRNO
217 * indicating failure.
208 * @id: Numerical identifier for the regulator. 218 * @id: Numerical identifier for the regulator.
209 * @ops: Regulator operations table. 219 * @ops: Regulator operations table.
210 * @irq: Interrupt number for the regulator. 220 * @irq: Interrupt number for the regulator.
@@ -251,6 +261,9 @@ struct regulator_desc {
251 const char *supply_name; 261 const char *supply_name;
252 const char *of_match; 262 const char *of_match;
253 const char *regulators_node; 263 const char *regulators_node;
264 int (*of_parse_cb)(struct device_node *,
265 const struct regulator_desc *,
266 struct regulator_config *);
254 int id; 267 int id;
255 bool continuous_voltage_range; 268 bool continuous_voltage_range;
256 unsigned n_voltages; 269 unsigned n_voltages;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0b08d05d470b..b07562e082c4 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -191,15 +191,22 @@ struct regulator_init_data {
191 void *driver_data; /* core does not touch this */ 191 void *driver_data; /* core does not touch this */
192}; 192};
193 193
194int regulator_suspend_prepare(suspend_state_t state);
195int regulator_suspend_finish(void);
196
197#ifdef CONFIG_REGULATOR 194#ifdef CONFIG_REGULATOR
198void regulator_has_full_constraints(void); 195void regulator_has_full_constraints(void);
196int regulator_suspend_prepare(suspend_state_t state);
197int regulator_suspend_finish(void);
199#else 198#else
200static inline void regulator_has_full_constraints(void) 199static inline void regulator_has_full_constraints(void)
201{ 200{
202} 201}
202static inline int regulator_suspend_prepare(suspend_state_t state)
203{
204 return 0;
205}
206static inline int regulator_suspend_finish(void)
207{
208 return 0;
209}
203#endif 210#endif
204 211
205#endif 212#endif
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644
index 000000000000..30cc5963e265
--- /dev/null
+++ b/include/linux/regulator/mt6397-regulator.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu <flora.fu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __LINUX_REGULATOR_MT6397_H
16#define __LINUX_REGULATOR_MT6397_H
17
18enum {
19 MT6397_ID_VPCA15 = 0,
20 MT6397_ID_VPCA7,
21 MT6397_ID_VSRAMCA15,
22 MT6397_ID_VSRAMCA7,
23 MT6397_ID_VCORE,
24 MT6397_ID_VGPU,
25 MT6397_ID_VDRM,
26 MT6397_ID_VIO18 = 7,
27 MT6397_ID_VTCXO,
28 MT6397_ID_VA28,
29 MT6397_ID_VCAMA,
30 MT6397_ID_VIO28,
31 MT6397_ID_VUSB,
32 MT6397_ID_VMC,
33 MT6397_ID_VMCH,
34 MT6397_ID_VEMC3V3,
35 MT6397_ID_VGP1,
36 MT6397_ID_VGP2,
37 MT6397_ID_VGP3,
38 MT6397_ID_VGP4,
39 MT6397_ID_VGP5,
40 MT6397_ID_VGP6,
41 MT6397_ID_VIBR,
42 MT6397_ID_RG_MAX,
43};
44
45#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX
46#define MT6397_REGULATOR_ID97 0x97
47#define MT6397_REGULATOR_ID91 0x91
48
49#endif /* __LINUX_REGULATOR_MT6397_H */
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index 364f7a7c43db..70c6c66c5bcf 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -49,6 +49,20 @@
49#define PFUZE200_VGEN5 11 49#define PFUZE200_VGEN5 11
50#define PFUZE200_VGEN6 12 50#define PFUZE200_VGEN6 12
51 51
52#define PFUZE3000_SW1A 0
53#define PFUZE3000_SW1B 1
54#define PFUZE3000_SW2 2
55#define PFUZE3000_SW3 3
56#define PFUZE3000_SWBST 4
57#define PFUZE3000_VSNVS 5
58#define PFUZE3000_VREFDDR 6
59#define PFUZE3000_VLDO1 7
60#define PFUZE3000_VLDO2 8
61#define PFUZE3000_VCCSD 9
62#define PFUZE3000_V33 10
63#define PFUZE3000_VLDO3 11
64#define PFUZE3000_VLDO4 12
65
52struct regulator_init_data; 66struct regulator_init_data;
53 67
54struct pfuze_regulator_platform_data { 68struct pfuze_regulator_platform_data {
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index b2b1afbb3202..cd519a11c2c6 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by: 15 * Written by:
20 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com> 16 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
21 */ 17 */
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
index bc8677c8eba9..e69e9b51b21a 100644
--- a/include/linux/spi/l4f00242t03.h
+++ b/include/linux/spi/l4f00242t03.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ 17#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
index 555d254e6606..fdd1d1d51da5 100644
--- a/include/linux/spi/lms283gf05.h
+++ b/include/linux/spi/lms283gf05.h
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18*/ 14*/
19 15
20#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ 16#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h
index 4835486f58e5..381d368b91b4 100644
--- a/include/linux/spi/mxs-spi.h
+++ b/include/linux/spi/mxs-spi.h
@@ -15,10 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */ 18 */
23 19
24#ifndef __LINUX_SPI_MXS_SPI_H__ 20#ifndef __LINUX_SPI_MXS_SPI_H__
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index d5a316550177..6d36dacec4ba 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 13 */
18#ifndef __linux_pxa2xx_spi_h 14#ifndef __linux_pxa2xx_spi_h
19#define __linux_pxa2xx_spi_h 15#define __linux_pxa2xx_spi_h
@@ -57,7 +53,6 @@ struct pxa2xx_spi_chip {
57#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) 53#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
58 54
59#include <linux/clk.h> 55#include <linux/clk.h>
60#include <mach/dma.h>
61 56
62extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); 57extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
63 58
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index e546b2ceb623..a693188cc08b 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -11,11 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */ 14 */
20 15
21#ifndef __LINUX_SPI_RENESAS_SPI_H__ 16#ifndef __LINUX_SPI_RENESAS_SPI_H__
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
index a1121f872ac1..aa0d440ab4f0 100644
--- a/include/linux/spi/sh_hspi.h
+++ b/include/linux/spi/sh_hspi.h
@@ -9,10 +9,6 @@
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details. 11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */ 12 */
17#ifndef SH_HSPI_H 13#ifndef SH_HSPI_H
18#define SH_HSPI_H 14#define SH_HSPI_H
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index 88a14d81c49e..b087a85f5f72 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -7,6 +7,8 @@ struct sh_msiof_spi_info {
7 u16 num_chipselect; 7 u16 num_chipselect;
8 unsigned int dma_tx_id; 8 unsigned int dma_tx_id;
9 unsigned int dma_rx_id; 9 unsigned int dma_rx_id;
10 u32 dtdl;
11 u32 syncdl;
10}; 12};
11 13
12#endif /* __SPI_SH_MSIOF_H__ */ 14#endif /* __SPI_SH_MSIOF_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a6ef2a8e6de4..ed9489d893a4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 13 */
18 14
19#ifndef __LINUX_SPI_H 15#ifndef __LINUX_SPI_H
@@ -260,6 +256,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
260 * @pump_messages: work struct for scheduling work to the message pump 256 * @pump_messages: work struct for scheduling work to the message pump
261 * @queue_lock: spinlock to syncronise access to message queue 257 * @queue_lock: spinlock to syncronise access to message queue
262 * @queue: message queue 258 * @queue: message queue
259 * @idling: the device is entering idle state
263 * @cur_msg: the currently in-flight message 260 * @cur_msg: the currently in-flight message
264 * @cur_msg_prepared: spi_prepare_message was called for the currently 261 * @cur_msg_prepared: spi_prepare_message was called for the currently
265 * in-flight message 262 * in-flight message
@@ -425,6 +422,7 @@ struct spi_master {
425 spinlock_t queue_lock; 422 spinlock_t queue_lock;
426 struct list_head queue; 423 struct list_head queue;
427 struct spi_message *cur_msg; 424 struct spi_message *cur_msg;
425 bool idling;
428 bool busy; 426 bool busy;
429 bool running; 427 bool running;
430 bool rt; 428 bool rt;
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
index 60b59187e590..414c6fddfcf0 100644
--- a/include/linux/spi/tle62x0.h
+++ b/include/linux/spi/tle62x0.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21struct tle62x0_pdata { 17struct tle62x0_pdata {
diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
index 8f721e465e05..563b3b1799a8 100644
--- a/include/linux/spi/tsc2005.h
+++ b/include/linux/spi/tsc2005.h
@@ -12,11 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */ 15 */
21 16
22#ifndef _LINUX_SPI_TSC2005_H 17#ifndef _LINUX_SPI_TSC2005_H
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a2783cb5d275..9cfd9623fb03 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -45,7 +45,7 @@ struct rcu_batch {
45#define RCU_BATCH_INIT(name) { NULL, &(name.head) } 45#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
46 46
47struct srcu_struct { 47struct srcu_struct {
48 unsigned completed; 48 unsigned long completed;
49 struct srcu_struct_array __percpu *per_cpu_ref; 49 struct srcu_struct_array __percpu *per_cpu_ref;
50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */ 50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */
51 bool running; 51 bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
102 * define and init a srcu struct at build time. 102 * define and init a srcu struct at build time.
103 * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it. 103 * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
104 */ 104 */
105#define DEFINE_SRCU(name) \ 105#define __DEFINE_SRCU(name, is_static) \
106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ 106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
107 struct srcu_struct name = __SRCU_STRUCT_INIT(name); 107 is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
108 108#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
109#define DEFINE_STATIC_SRCU(name) \ 109#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
110 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
111 static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
112 110
113/** 111/**
114 * call_srcu() - Queue a callback for invocation after an SRCU grace period 112 * call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
135void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); 133void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
136void synchronize_srcu(struct srcu_struct *sp); 134void synchronize_srcu(struct srcu_struct *sp);
137void synchronize_srcu_expedited(struct srcu_struct *sp); 135void synchronize_srcu_expedited(struct srcu_struct *sp);
138long srcu_batches_completed(struct srcu_struct *sp); 136unsigned long srcu_batches_completed(struct srcu_struct *sp);
139void srcu_barrier(struct srcu_struct *sp); 137void srcu_barrier(struct srcu_struct *sp);
140 138
141#ifdef CONFIG_DEBUG_LOCK_ALLOC 139#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index e08e21e5f601..c72851328ca9 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -173,7 +173,7 @@ extern void syscall_unregfunc(void);
173 TP_PROTO(data_proto), \ 173 TP_PROTO(data_proto), \
174 TP_ARGS(data_args), \ 174 TP_ARGS(data_args), \
175 TP_CONDITION(cond),,); \ 175 TP_CONDITION(cond),,); \
176 if (IS_ENABLED(CONFIG_LOCKDEP)) { \ 176 if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
177 rcu_read_lock_sched_notrace(); \ 177 rcu_read_lock_sched_notrace(); \
178 rcu_dereference_sched(__tracepoint_##name.funcs);\ 178 rcu_dereference_sched(__tracepoint_##name.funcs);\
179 rcu_read_unlock_sched_notrace(); \ 179 rcu_read_unlock_sched_notrace(); \
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2232ed16635a..37423e0e1379 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -363,7 +363,6 @@ do { \
363 */ 363 */
364#define wait_event_cmd(wq, condition, cmd1, cmd2) \ 364#define wait_event_cmd(wq, condition, cmd1, cmd2) \
365do { \ 365do { \
366 might_sleep(); \
367 if (condition) \ 366 if (condition) \
368 break; \ 367 break; \
369 __wait_event_cmd(wq, condition, cmd1, cmd2); \ 368 __wait_event_cmd(wq, condition, cmd1, cmd2); \
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 7ee2df083542..dc8fd81412bf 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -22,9 +22,9 @@ struct flow_keys {
22 __be32 ports; 22 __be32 ports;
23 __be16 port16[2]; 23 __be16 port16[2];
24 }; 24 };
25 u16 thoff; 25 u16 thoff;
26 u16 n_proto; 26 __be16 n_proto;
27 u8 ip_proto; 27 u8 ip_proto;
28}; 28};
29 29
30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, 30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
diff --git a/include/net/ip.h b/include/net/ip.h
index f7cbd703d15d..09cf5aebb283 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -181,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
181 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; 181 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
182} 182}
183 183
184void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, 184void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
185 const struct ip_options *sopt, 185 const struct ip_options *sopt,
186 __be32 daddr, __be32 saddr, 186 __be32 daddr, __be32 saddr,
187 const struct ip_reply_arg *arg, 187 const struct ip_reply_arg *arg,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4292929392b0..6e416f6d3e3c 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); 671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
672} 672}
673 673
674u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
675 struct in6_addr *src);
676void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
674void ipv6_proxy_select_ident(struct sk_buff *skb); 677void ipv6_proxy_select_ident(struct sk_buff *skb);
675 678
676int ip6_dst_hoplimit(struct dst_entry *dst); 679int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
708 __be32 flowlabel, bool autolabel) 711 __be32 flowlabel, bool autolabel)
709{ 712{
710 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) { 713 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
711 __be32 hash; 714 u32 hash;
712 715
713 hash = skb_get_hash(skb); 716 hash = skb_get_hash(skb);
714 717
@@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
718 */ 721 */
719 hash ^= hash >> 12; 722 hash ^= hash >> 12;
720 723
721 flowlabel = hash & IPV6_FLOWLABEL_MASK; 724 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
722 } 725 }
723 726
724 return flowlabel; 727 return flowlabel;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3ae969e3acf0..9eaaa7884586 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -530,6 +530,8 @@ enum nft_chain_type {
530 530
531int nft_chain_validate_dependency(const struct nft_chain *chain, 531int nft_chain_validate_dependency(const struct nft_chain *chain,
532 enum nft_chain_type type); 532 enum nft_chain_type type);
533int nft_chain_validate_hooks(const struct nft_chain *chain,
534 unsigned int hook_flags);
533 535
534struct nft_stats { 536struct nft_stats {
535 u64 bytes; 537 u64 bytes;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 24945cefc4fd..0ffef1a38efc 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -52,6 +52,7 @@ struct netns_ipv4 {
52 struct inet_peer_base *peers; 52 struct inet_peer_base *peers;
53 struct tcpm_hash_bucket *tcp_metrics_hash; 53 struct tcpm_hash_bucket *tcp_metrics_hash;
54 unsigned int tcp_metrics_hash_log; 54 unsigned int tcp_metrics_hash_log;
55 struct sock * __percpu *tcp_sk;
55 struct netns_frags frags; 56 struct netns_frags frags;
56#ifdef CONFIG_NETFILTER 57#ifdef CONFIG_NETFILTER
57 struct xt_table *iptable_filter; 58 struct xt_table *iptable_filter;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3d282cbb66bf..c605d305c577 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -79,6 +79,9 @@ struct Qdisc {
79 struct netdev_queue *dev_queue; 79 struct netdev_queue *dev_queue;
80 80
81 struct gnet_stats_rate_est64 rate_est; 81 struct gnet_stats_rate_est64 rate_est;
82 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
83 struct gnet_stats_queue __percpu *cpu_qstats;
84
82 struct Qdisc *next_sched; 85 struct Qdisc *next_sched;
83 struct sk_buff *gso_skb; 86 struct sk_buff *gso_skb;
84 /* 87 /*
@@ -86,15 +89,9 @@ struct Qdisc {
86 */ 89 */
87 unsigned long state; 90 unsigned long state;
88 struct sk_buff_head q; 91 struct sk_buff_head q;
89 union { 92 struct gnet_stats_basic_packed bstats;
90 struct gnet_stats_basic_packed bstats;
91 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
92 } __packed;
93 unsigned int __state; 93 unsigned int __state;
94 union { 94 struct gnet_stats_queue qstats;
95 struct gnet_stats_queue qstats;
96 struct gnet_stats_queue __percpu *cpu_qstats;
97 } __packed;
98 struct rcu_head rcu_head; 95 struct rcu_head rcu_head;
99 int padded; 96 int padded;
100 atomic_t refcnt; 97 atomic_t refcnt;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f50f29faf76f..9d9111ef43ae 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -834,8 +834,8 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
834void tcp_get_allowed_congestion_control(char *buf, size_t len); 834void tcp_get_allowed_congestion_control(char *buf, size_t len);
835int tcp_set_allowed_congestion_control(char *allowed); 835int tcp_set_allowed_congestion_control(char *allowed);
836int tcp_set_congestion_control(struct sock *sk, const char *name); 836int tcp_set_congestion_control(struct sock *sk, const char *name);
837void tcp_slow_start(struct tcp_sock *tp, u32 acked); 837u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
838void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); 838void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
839 839
840u32 tcp_reno_ssthresh(struct sock *sk); 840u32 tcp_reno_ssthresh(struct sock *sk);
841void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 841void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0d74f1de99aa..65994a19e840 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1707,10 +1707,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
1707 1707
1708static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 1708static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1709{ 1709{
1710 size_t copy_sz; 1710 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1711
1712 copy_sz = min_t(size_t, len, udata->outlen);
1713 return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
1714} 1711}
1715 1712
1716/** 1713/**
diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
index 2609048c1d44..3a34f6edc2d1 100644
--- a/include/sound/ak4113.h
+++ b/include/sound/ak4113.h
@@ -286,7 +286,7 @@ struct ak4113 {
286 ak4113_write_t *write; 286 ak4113_write_t *write;
287 ak4113_read_t *read; 287 ak4113_read_t *read;
288 void *private_data; 288 void *private_data;
289 unsigned int init:1; 289 atomic_t wq_processing;
290 spinlock_t lock; 290 spinlock_t lock;
291 unsigned char regmap[AK4113_WRITABLE_REGS]; 291 unsigned char regmap[AK4113_WRITABLE_REGS];
292 struct snd_kcontrol *kctls[AK4113_CONTROLS]; 292 struct snd_kcontrol *kctls[AK4113_CONTROLS];
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 52f02a60dba7..069299a88915 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -168,7 +168,7 @@ struct ak4114 {
168 ak4114_write_t * write; 168 ak4114_write_t * write;
169 ak4114_read_t * read; 169 ak4114_read_t * read;
170 void * private_data; 170 void * private_data;
171 unsigned int init: 1; 171 atomic_t wq_processing;
172 spinlock_t lock; 172 spinlock_t lock;
173 unsigned char regmap[6]; 173 unsigned char regmap[6];
174 unsigned char txcsb[5]; 174 unsigned char txcsb[5];
diff --git a/include/sound/soc.h b/include/sound/soc.h
index b4fca9aed2a2..ac8b333acb4d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -498,6 +498,7 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
498 unsigned int mask, unsigned int value); 498 unsigned int mask, unsigned int value);
499 499
500#ifdef CONFIG_SND_SOC_AC97_BUS 500#ifdef CONFIG_SND_SOC_AC97_BUS
501struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
501struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec); 502struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
502void snd_soc_free_ac97_codec(struct snd_ac97 *ac97); 503void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
503 504
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 13391d288107..0e7635765153 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -13,11 +13,13 @@
13 { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \ 13 { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \
14 { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" } 14 { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" }
15 15
16TRACE_EVENT(tlb_flush, 16TRACE_EVENT_CONDITION(tlb_flush,
17 17
18 TP_PROTO(int reason, unsigned long pages), 18 TP_PROTO(int reason, unsigned long pages),
19 TP_ARGS(reason, pages), 19 TP_ARGS(reason, pages),
20 20
21 TP_CONDITION(cpu_online(smp_processor_id())),
22
21 TP_STRUCT__entry( 23 TP_STRUCT__entry(
22 __field( int, reason) 24 __field( int, reason)
23 __field(unsigned long, pages) 25 __field(unsigned long, pages)
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 4275b961bf60..867cc5084afb 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,7 +90,6 @@ enum {
90}; 90};
91 91
92enum { 92enum {
93 IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
94 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 93 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
95 IB_USER_VERBS_EX_CMD_DESTROY_FLOW, 94 IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
96}; 95};
@@ -202,32 +201,6 @@ struct ib_uverbs_query_device_resp {
202 __u8 reserved[4]; 201 __u8 reserved[4];
203}; 202};
204 203
205enum {
206 IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0,
207};
208
209struct ib_uverbs_ex_query_device {
210 __u32 comp_mask;
211 __u32 reserved;
212};
213
214struct ib_uverbs_odp_caps {
215 __u64 general_caps;
216 struct {
217 __u32 rc_odp_caps;
218 __u32 uc_odp_caps;
219 __u32 ud_odp_caps;
220 } per_transport_caps;
221 __u32 reserved;
222};
223
224struct ib_uverbs_ex_query_device_resp {
225 struct ib_uverbs_query_device_resp base;
226 __u32 comp_mask;
227 __u32 reserved;
228 struct ib_uverbs_odp_caps odp_caps;
229};
230
231struct ib_uverbs_query_port { 204struct ib_uverbs_query_port {
232 __u64 response; 205 __u64 response;
233 __u8 port_num; 206 __u8 port_num;
diff --git a/init/Kconfig b/init/Kconfig
index 9afb971497f4..1354ac09b516 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -470,7 +470,6 @@ choice
470config TREE_RCU 470config TREE_RCU
471 bool "Tree-based hierarchical RCU" 471 bool "Tree-based hierarchical RCU"
472 depends on !PREEMPT && SMP 472 depends on !PREEMPT && SMP
473 select IRQ_WORK
474 help 473 help
475 This option selects the RCU implementation that is 474 This option selects the RCU implementation that is
476 designed for very large SMP system with hundreds or 475 designed for very large SMP system with hundreds or
@@ -480,7 +479,6 @@ config TREE_RCU
480config PREEMPT_RCU 479config PREEMPT_RCU
481 bool "Preemptible tree-based hierarchical RCU" 480 bool "Preemptible tree-based hierarchical RCU"
482 depends on PREEMPT 481 depends on PREEMPT
483 select IRQ_WORK
484 help 482 help
485 This option selects the RCU implementation that is 483 This option selects the RCU implementation that is
486 designed for very large SMP systems with hundreds or 484 designed for very large SMP systems with hundreds or
@@ -501,9 +499,17 @@ config TINY_RCU
501 499
502endchoice 500endchoice
503 501
502config SRCU
503 bool
504 help
505 This option selects the sleepable version of RCU. This version
506 permits arbitrary sleeping or blocking within RCU read-side critical
507 sections.
508
504config TASKS_RCU 509config TASKS_RCU
505 bool "Task_based RCU implementation using voluntary context switch" 510 bool "Task_based RCU implementation using voluntary context switch"
506 default n 511 default n
512 select SRCU
507 help 513 help
508 This option enables a task-based RCU implementation that uses 514 This option enables a task-based RCU implementation that uses
509 only voluntary context switch (not preemption!), idle, and 515 only voluntary context switch (not preemption!), idle, and
@@ -668,9 +674,10 @@ config RCU_BOOST
668 674
669config RCU_KTHREAD_PRIO 675config RCU_KTHREAD_PRIO
670 int "Real-time priority to use for RCU worker threads" 676 int "Real-time priority to use for RCU worker threads"
671 range 1 99 677 range 1 99 if RCU_BOOST
672 depends on RCU_BOOST 678 range 0 99 if !RCU_BOOST
673 default 1 679 default 1 if RCU_BOOST
680 default 0 if !RCU_BOOST
674 help 681 help
675 This option specifies the SCHED_FIFO priority value that will be 682 This option specifies the SCHED_FIFO priority value that will be
676 assigned to the rcuc/n and rcub/n threads and is also the value 683 assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1595,6 +1602,7 @@ config PERF_EVENTS
1595 depends on HAVE_PERF_EVENTS 1602 depends on HAVE_PERF_EVENTS
1596 select ANON_INODES 1603 select ANON_INODES
1597 select IRQ_WORK 1604 select IRQ_WORK
1605 select SRCU
1598 help 1606 help
1599 Enable kernel support for various performance events provided 1607 Enable kernel support for various performance events provided
1600 by software and hardware. 1608 by software and hardware.
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5d220234b3ca..1972b161c61e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,22 +58,23 @@ static int cpu_hotplug_disabled;
58 58
59static struct { 59static struct {
60 struct task_struct *active_writer; 60 struct task_struct *active_writer;
61 struct mutex lock; /* Synchronizes accesses to refcount, */ 61 /* wait queue to wake up the active_writer */
62 wait_queue_head_t wq;
63 /* verifies that no writer will get active while readers are active */
64 struct mutex lock;
62 /* 65 /*
63 * Also blocks the new readers during 66 * Also blocks the new readers during
64 * an ongoing cpu hotplug operation. 67 * an ongoing cpu hotplug operation.
65 */ 68 */
66 int refcount; 69 atomic_t refcount;
67 /* And allows lockless put_online_cpus(). */
68 atomic_t puts_pending;
69 70
70#ifdef CONFIG_DEBUG_LOCK_ALLOC 71#ifdef CONFIG_DEBUG_LOCK_ALLOC
71 struct lockdep_map dep_map; 72 struct lockdep_map dep_map;
72#endif 73#endif
73} cpu_hotplug = { 74} cpu_hotplug = {
74 .active_writer = NULL, 75 .active_writer = NULL,
76 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
75 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), 77 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
76 .refcount = 0,
77#ifdef CONFIG_DEBUG_LOCK_ALLOC 78#ifdef CONFIG_DEBUG_LOCK_ALLOC
78 .dep_map = {.name = "cpu_hotplug.lock" }, 79 .dep_map = {.name = "cpu_hotplug.lock" },
79#endif 80#endif
@@ -86,15 +87,6 @@ static struct {
86#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) 87#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
87#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) 88#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
88 89
89static void apply_puts_pending(int max)
90{
91 int delta;
92
93 if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
94 delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
95 cpu_hotplug.refcount -= delta;
96 }
97}
98 90
99void get_online_cpus(void) 91void get_online_cpus(void)
100{ 92{
@@ -103,8 +95,7 @@ void get_online_cpus(void)
103 return; 95 return;
104 cpuhp_lock_acquire_read(); 96 cpuhp_lock_acquire_read();
105 mutex_lock(&cpu_hotplug.lock); 97 mutex_lock(&cpu_hotplug.lock);
106 apply_puts_pending(65536); 98 atomic_inc(&cpu_hotplug.refcount);
107 cpu_hotplug.refcount++;
108 mutex_unlock(&cpu_hotplug.lock); 99 mutex_unlock(&cpu_hotplug.lock);
109} 100}
110EXPORT_SYMBOL_GPL(get_online_cpus); 101EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -116,8 +107,7 @@ bool try_get_online_cpus(void)
116 if (!mutex_trylock(&cpu_hotplug.lock)) 107 if (!mutex_trylock(&cpu_hotplug.lock))
117 return false; 108 return false;
118 cpuhp_lock_acquire_tryread(); 109 cpuhp_lock_acquire_tryread();
119 apply_puts_pending(65536); 110 atomic_inc(&cpu_hotplug.refcount);
120 cpu_hotplug.refcount++;
121 mutex_unlock(&cpu_hotplug.lock); 111 mutex_unlock(&cpu_hotplug.lock);
122 return true; 112 return true;
123} 113}
@@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus);
125 115
126void put_online_cpus(void) 116void put_online_cpus(void)
127{ 117{
118 int refcount;
119
128 if (cpu_hotplug.active_writer == current) 120 if (cpu_hotplug.active_writer == current)
129 return; 121 return;
130 if (!mutex_trylock(&cpu_hotplug.lock)) {
131 atomic_inc(&cpu_hotplug.puts_pending);
132 cpuhp_lock_release();
133 return;
134 }
135 122
136 if (WARN_ON(!cpu_hotplug.refcount)) 123 refcount = atomic_dec_return(&cpu_hotplug.refcount);
137 cpu_hotplug.refcount++; /* try to fix things up */ 124 if (WARN_ON(refcount < 0)) /* try to fix things up */
125 atomic_inc(&cpu_hotplug.refcount);
126
127 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
128 wake_up(&cpu_hotplug.wq);
138 129
139 if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
140 wake_up_process(cpu_hotplug.active_writer);
141 mutex_unlock(&cpu_hotplug.lock);
142 cpuhp_lock_release(); 130 cpuhp_lock_release();
143 131
144} 132}
@@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
168 */ 156 */
169void cpu_hotplug_begin(void) 157void cpu_hotplug_begin(void)
170{ 158{
171 cpu_hotplug.active_writer = current; 159 DEFINE_WAIT(wait);
172 160
161 cpu_hotplug.active_writer = current;
173 cpuhp_lock_acquire(); 162 cpuhp_lock_acquire();
163
174 for (;;) { 164 for (;;) {
175 mutex_lock(&cpu_hotplug.lock); 165 mutex_lock(&cpu_hotplug.lock);
176 apply_puts_pending(1); 166 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
177 if (likely(!cpu_hotplug.refcount)) 167 if (likely(!atomic_read(&cpu_hotplug.refcount)))
178 break; 168 break;
179 __set_current_state(TASK_UNINTERRUPTIBLE);
180 mutex_unlock(&cpu_hotplug.lock); 169 mutex_unlock(&cpu_hotplug.lock);
181 schedule(); 170 schedule();
182 } 171 }
172 finish_wait(&cpu_hotplug.wq, &wait);
183} 173}
184 174
185void cpu_hotplug_done(void) 175void cpu_hotplug_done(void)
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4803da6eab62..ae9fc7cc360e 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
402} 402}
403EXPORT_SYMBOL_GPL(raw_notifier_call_chain); 403EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
404 404
405#ifdef CONFIG_SRCU
405/* 406/*
406 * SRCU notifier chain routines. Registration and unregistration 407 * SRCU notifier chain routines. Registration and unregistration
407 * use a mutex, and call_chain is synchronized by SRCU (no locks). 408 * use a mutex, and call_chain is synchronized by SRCU (no locks).
@@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh)
528} 529}
529EXPORT_SYMBOL_GPL(srcu_init_notifier_head); 530EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
530 531
532#endif /* CONFIG_SRCU */
533
531static ATOMIC_NOTIFIER_HEAD(die_chain); 534static ATOMIC_NOTIFIER_HEAD(die_chain);
532 535
533int notrace notify_die(enum die_val val, const char *str, 536int notrace notify_die(enum die_val val, const char *str,
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 48b28d387c7f..7e01f78f0417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -251,6 +251,7 @@ config APM_EMULATION
251 251
252config PM_OPP 252config PM_OPP
253 bool 253 bool
254 select SRCU
254 ---help--- 255 ---help---
255 SOCs have a standard set of tuples consisting of frequency and 256 SOCs have a standard set of tuples consisting of frequency and
256 voltage pairs that the device will support per voltage domain. This 257 voltage pairs that the device will support per voltage domain. This
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index e6fae503d1bc..50a808424b06 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,4 +1,5 @@
1obj-y += update.o srcu.o 1obj-y += update.o
2obj-$(CONFIG_SRCU) += srcu.o
2obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o 3obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
3obj-$(CONFIG_TREE_RCU) += tree.o 4obj-$(CONFIG_TREE_RCU) += tree.o
4obj-$(CONFIG_PREEMPT_RCU) += tree.o 5obj-$(CONFIG_PREEMPT_RCU) += tree.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 07bb02eda844..80adef7d4c3d 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void);
137 137
138void rcu_early_boot_tests(void); 138void rcu_early_boot_tests(void);
139 139
140/*
141 * This function really isn't for public consumption, but RCU is special in
142 * that context switches can allow the state machine to make progress.
143 */
144extern void resched_cpu(int cpu);
145
140#endif /* __LINUX_RCU_H */ 146#endif /* __LINUX_RCU_H */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 4d559baf06e0..30d42aa55d83 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -244,7 +244,8 @@ struct rcu_torture_ops {
244 int (*readlock)(void); 244 int (*readlock)(void);
245 void (*read_delay)(struct torture_random_state *rrsp); 245 void (*read_delay)(struct torture_random_state *rrsp);
246 void (*readunlock)(int idx); 246 void (*readunlock)(int idx);
247 int (*completed)(void); 247 unsigned long (*started)(void);
248 unsigned long (*completed)(void);
248 void (*deferred_free)(struct rcu_torture *p); 249 void (*deferred_free)(struct rcu_torture *p);
249 void (*sync)(void); 250 void (*sync)(void);
250 void (*exp_sync)(void); 251 void (*exp_sync)(void);
@@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU)
296 rcu_read_unlock(); 297 rcu_read_unlock();
297} 298}
298 299
299static int rcu_torture_completed(void)
300{
301 return rcu_batches_completed();
302}
303
304/* 300/*
305 * Update callback in the pipe. This should be invoked after a grace period. 301 * Update callback in the pipe. This should be invoked after a grace period.
306 */ 302 */
@@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p)
356 cur_ops->deferred_free(rp); 352 cur_ops->deferred_free(rp);
357} 353}
358 354
359static int rcu_no_completed(void) 355static unsigned long rcu_no_completed(void)
360{ 356{
361 return 0; 357 return 0;
362} 358}
@@ -377,7 +373,8 @@ static struct rcu_torture_ops rcu_ops = {
377 .readlock = rcu_torture_read_lock, 373 .readlock = rcu_torture_read_lock,
378 .read_delay = rcu_read_delay, 374 .read_delay = rcu_read_delay,
379 .readunlock = rcu_torture_read_unlock, 375 .readunlock = rcu_torture_read_unlock,
380 .completed = rcu_torture_completed, 376 .started = rcu_batches_started,
377 .completed = rcu_batches_completed,
381 .deferred_free = rcu_torture_deferred_free, 378 .deferred_free = rcu_torture_deferred_free,
382 .sync = synchronize_rcu, 379 .sync = synchronize_rcu,
383 .exp_sync = synchronize_rcu_expedited, 380 .exp_sync = synchronize_rcu_expedited,
@@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
407 rcu_read_unlock_bh(); 404 rcu_read_unlock_bh();
408} 405}
409 406
410static int rcu_bh_torture_completed(void)
411{
412 return rcu_batches_completed_bh();
413}
414
415static void rcu_bh_torture_deferred_free(struct rcu_torture *p) 407static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
416{ 408{
417 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); 409 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
@@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
423 .readlock = rcu_bh_torture_read_lock, 415 .readlock = rcu_bh_torture_read_lock,
424 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 416 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
425 .readunlock = rcu_bh_torture_read_unlock, 417 .readunlock = rcu_bh_torture_read_unlock,
426 .completed = rcu_bh_torture_completed, 418 .started = rcu_batches_started_bh,
419 .completed = rcu_batches_completed_bh,
427 .deferred_free = rcu_bh_torture_deferred_free, 420 .deferred_free = rcu_bh_torture_deferred_free,
428 .sync = synchronize_rcu_bh, 421 .sync = synchronize_rcu_bh,
429 .exp_sync = synchronize_rcu_bh_expedited, 422 .exp_sync = synchronize_rcu_bh_expedited,
@@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
466 .readlock = rcu_torture_read_lock, 459 .readlock = rcu_torture_read_lock,
467 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 460 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
468 .readunlock = rcu_torture_read_unlock, 461 .readunlock = rcu_torture_read_unlock,
462 .started = rcu_no_completed,
469 .completed = rcu_no_completed, 463 .completed = rcu_no_completed,
470 .deferred_free = rcu_busted_torture_deferred_free, 464 .deferred_free = rcu_busted_torture_deferred_free,
471 .sync = synchronize_rcu_busted, 465 .sync = synchronize_rcu_busted,
@@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
510 srcu_read_unlock(&srcu_ctl, idx); 504 srcu_read_unlock(&srcu_ctl, idx);
511} 505}
512 506
513static int srcu_torture_completed(void) 507static unsigned long srcu_torture_completed(void)
514{ 508{
515 return srcu_batches_completed(&srcu_ctl); 509 return srcu_batches_completed(&srcu_ctl);
516} 510}
@@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
564 .readlock = srcu_torture_read_lock, 558 .readlock = srcu_torture_read_lock,
565 .read_delay = srcu_read_delay, 559 .read_delay = srcu_read_delay,
566 .readunlock = srcu_torture_read_unlock, 560 .readunlock = srcu_torture_read_unlock,
561 .started = NULL,
567 .completed = srcu_torture_completed, 562 .completed = srcu_torture_completed,
568 .deferred_free = srcu_torture_deferred_free, 563 .deferred_free = srcu_torture_deferred_free,
569 .sync = srcu_torture_synchronize, 564 .sync = srcu_torture_synchronize,
@@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = {
600 .readlock = sched_torture_read_lock, 595 .readlock = sched_torture_read_lock,
601 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 596 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
602 .readunlock = sched_torture_read_unlock, 597 .readunlock = sched_torture_read_unlock,
603 .completed = rcu_no_completed, 598 .started = rcu_batches_started_sched,
599 .completed = rcu_batches_completed_sched,
604 .deferred_free = rcu_sched_torture_deferred_free, 600 .deferred_free = rcu_sched_torture_deferred_free,
605 .sync = synchronize_sched, 601 .sync = synchronize_sched,
606 .exp_sync = synchronize_sched_expedited, 602 .exp_sync = synchronize_sched_expedited,
@@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
638 .readlock = tasks_torture_read_lock, 634 .readlock = tasks_torture_read_lock,
639 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 635 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
640 .readunlock = tasks_torture_read_unlock, 636 .readunlock = tasks_torture_read_unlock,
637 .started = rcu_no_completed,
641 .completed = rcu_no_completed, 638 .completed = rcu_no_completed,
642 .deferred_free = rcu_tasks_torture_deferred_free, 639 .deferred_free = rcu_tasks_torture_deferred_free,
643 .sync = synchronize_rcu_tasks, 640 .sync = synchronize_rcu_tasks,
@@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void)
1015static void rcu_torture_timer(unsigned long unused) 1012static void rcu_torture_timer(unsigned long unused)
1016{ 1013{
1017 int idx; 1014 int idx;
1018 int completed; 1015 unsigned long started;
1019 int completed_end; 1016 unsigned long completed;
1020 static DEFINE_TORTURE_RANDOM(rand); 1017 static DEFINE_TORTURE_RANDOM(rand);
1021 static DEFINE_SPINLOCK(rand_lock); 1018 static DEFINE_SPINLOCK(rand_lock);
1022 struct rcu_torture *p; 1019 struct rcu_torture *p;
@@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
1024 unsigned long long ts; 1021 unsigned long long ts;
1025 1022
1026 idx = cur_ops->readlock(); 1023 idx = cur_ops->readlock();
1027 completed = cur_ops->completed(); 1024 if (cur_ops->started)
1025 started = cur_ops->started();
1026 else
1027 started = cur_ops->completed();
1028 ts = rcu_trace_clock_local(); 1028 ts = rcu_trace_clock_local();
1029 p = rcu_dereference_check(rcu_torture_current, 1029 p = rcu_dereference_check(rcu_torture_current,
1030 rcu_read_lock_bh_held() || 1030 rcu_read_lock_bh_held() ||
@@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
1047 /* Should not happen, but... */ 1047 /* Should not happen, but... */
1048 pipe_count = RCU_TORTURE_PIPE_LEN; 1048 pipe_count = RCU_TORTURE_PIPE_LEN;
1049 } 1049 }
1050 completed_end = cur_ops->completed(); 1050 completed = cur_ops->completed();
1051 if (pipe_count > 1) { 1051 if (pipe_count > 1) {
1052 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, 1052 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
1053 completed, completed_end); 1053 started, completed);
1054 rcutorture_trace_dump(); 1054 rcutorture_trace_dump();
1055 } 1055 }
1056 __this_cpu_inc(rcu_torture_count[pipe_count]); 1056 __this_cpu_inc(rcu_torture_count[pipe_count]);
1057 completed = completed_end - completed; 1057 completed = completed - started;
1058 if (cur_ops->started)
1059 completed++;
1058 if (completed > RCU_TORTURE_PIPE_LEN) { 1060 if (completed > RCU_TORTURE_PIPE_LEN) {
1059 /* Should not happen, but... */ 1061 /* Should not happen, but... */
1060 completed = RCU_TORTURE_PIPE_LEN; 1062 completed = RCU_TORTURE_PIPE_LEN;
@@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
1073static int 1075static int
1074rcu_torture_reader(void *arg) 1076rcu_torture_reader(void *arg)
1075{ 1077{
1076 int completed; 1078 unsigned long started;
1077 int completed_end; 1079 unsigned long completed;
1078 int idx; 1080 int idx;
1079 DEFINE_TORTURE_RANDOM(rand); 1081 DEFINE_TORTURE_RANDOM(rand);
1080 struct rcu_torture *p; 1082 struct rcu_torture *p;
@@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg)
1093 mod_timer(&t, jiffies + 1); 1095 mod_timer(&t, jiffies + 1);
1094 } 1096 }
1095 idx = cur_ops->readlock(); 1097 idx = cur_ops->readlock();
1096 completed = cur_ops->completed(); 1098 if (cur_ops->started)
1099 started = cur_ops->started();
1100 else
1101 started = cur_ops->completed();
1097 ts = rcu_trace_clock_local(); 1102 ts = rcu_trace_clock_local();
1098 p = rcu_dereference_check(rcu_torture_current, 1103 p = rcu_dereference_check(rcu_torture_current,
1099 rcu_read_lock_bh_held() || 1104 rcu_read_lock_bh_held() ||
@@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg)
1114 /* Should not happen, but... */ 1119 /* Should not happen, but... */
1115 pipe_count = RCU_TORTURE_PIPE_LEN; 1120 pipe_count = RCU_TORTURE_PIPE_LEN;
1116 } 1121 }
1117 completed_end = cur_ops->completed(); 1122 completed = cur_ops->completed();
1118 if (pipe_count > 1) { 1123 if (pipe_count > 1) {
1119 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1124 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1120 ts, completed, completed_end); 1125 ts, started, completed);
1121 rcutorture_trace_dump(); 1126 rcutorture_trace_dump();
1122 } 1127 }
1123 __this_cpu_inc(rcu_torture_count[pipe_count]); 1128 __this_cpu_inc(rcu_torture_count[pipe_count]);
1124 completed = completed_end - completed; 1129 completed = completed - started;
1130 if (cur_ops->started)
1131 completed++;
1125 if (completed > RCU_TORTURE_PIPE_LEN) { 1132 if (completed > RCU_TORTURE_PIPE_LEN) {
1126 /* Should not happen, but... */ 1133 /* Should not happen, but... */
1127 completed = RCU_TORTURE_PIPE_LEN; 1134 completed = RCU_TORTURE_PIPE_LEN;
@@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg)
1420 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 1427 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
1421 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 1428 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1422 n_rcu_torture_barrier_error++; 1429 n_rcu_torture_barrier_error++;
1430 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1431 atomic_read(&barrier_cbs_invoked),
1432 n_barrier_cbs);
1423 WARN_ON_ONCE(1); 1433 WARN_ON_ONCE(1);
1424 } 1434 }
1425 n_barrier_successes++; 1435 n_barrier_successes++;
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index e037f3eb2f7b..445bf8ffe3fb 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier);
546 * Report the number of batches, correlated with, but not necessarily 546 * Report the number of batches, correlated with, but not necessarily
547 * precisely the same as, the number of grace periods that have elapsed. 547 * precisely the same as, the number of grace periods that have elapsed.
548 */ 548 */
549long srcu_batches_completed(struct srcu_struct *sp) 549unsigned long srcu_batches_completed(struct srcu_struct *sp)
550{ 550{
551 return sp->completed; 551 return sp->completed;
552} 552}
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 0db5649f8817..cc9ceca7bde1 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
47 void (*func)(struct rcu_head *rcu), 47 void (*func)(struct rcu_head *rcu),
48 struct rcu_ctrlblk *rcp); 48 struct rcu_ctrlblk *rcp);
49 49
50static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
51
52#include "tiny_plugin.h" 50#include "tiny_plugin.h"
53 51
54/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
55static void rcu_idle_enter_common(long long newval)
56{
57 if (newval) {
58 RCU_TRACE(trace_rcu_dyntick(TPS("--="),
59 rcu_dynticks_nesting, newval));
60 rcu_dynticks_nesting = newval;
61 return;
62 }
63 RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
64 rcu_dynticks_nesting, newval));
65 if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
66 struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
67
68 RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
69 rcu_dynticks_nesting, newval));
70 ftrace_dump(DUMP_ALL);
71 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
72 current->pid, current->comm,
73 idle->pid, idle->comm); /* must be idle task! */
74 }
75 rcu_sched_qs(); /* implies rcu_bh_inc() */
76 barrier();
77 rcu_dynticks_nesting = newval;
78}
79
80/* 52/*
81 * Enter idle, which is an extended quiescent state if we have fully 53 * Enter idle, which is an extended quiescent state if we have fully
82 * entered that mode (i.e., if the new value of dynticks_nesting is zero). 54 * entered that mode.
83 */ 55 */
84void rcu_idle_enter(void) 56void rcu_idle_enter(void)
85{ 57{
86 unsigned long flags;
87 long long newval;
88
89 local_irq_save(flags);
90 WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
91 if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
92 DYNTICK_TASK_NEST_VALUE)
93 newval = 0;
94 else
95 newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
96 rcu_idle_enter_common(newval);
97 local_irq_restore(flags);
98} 58}
99EXPORT_SYMBOL_GPL(rcu_idle_enter); 59EXPORT_SYMBOL_GPL(rcu_idle_enter);
100 60
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
103 */ 63 */
104void rcu_irq_exit(void) 64void rcu_irq_exit(void)
105{ 65{
106 unsigned long flags;
107 long long newval;
108
109 local_irq_save(flags);
110 newval = rcu_dynticks_nesting - 1;
111 WARN_ON_ONCE(newval < 0);
112 rcu_idle_enter_common(newval);
113 local_irq_restore(flags);
114} 66}
115EXPORT_SYMBOL_GPL(rcu_irq_exit); 67EXPORT_SYMBOL_GPL(rcu_irq_exit);
116 68
117/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
118static void rcu_idle_exit_common(long long oldval)
119{
120 if (oldval) {
121 RCU_TRACE(trace_rcu_dyntick(TPS("++="),
122 oldval, rcu_dynticks_nesting));
123 return;
124 }
125 RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
126 if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
127 struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
128
129 RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
130 oldval, rcu_dynticks_nesting));
131 ftrace_dump(DUMP_ALL);
132 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
133 current->pid, current->comm,
134 idle->pid, idle->comm); /* must be idle task! */
135 }
136}
137
138/* 69/*
139 * Exit idle, so that we are no longer in an extended quiescent state. 70 * Exit idle, so that we are no longer in an extended quiescent state.
140 */ 71 */
141void rcu_idle_exit(void) 72void rcu_idle_exit(void)
142{ 73{
143 unsigned long flags;
144 long long oldval;
145
146 local_irq_save(flags);
147 oldval = rcu_dynticks_nesting;
148 WARN_ON_ONCE(rcu_dynticks_nesting < 0);
149 if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
150 rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
151 else
152 rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
153 rcu_idle_exit_common(oldval);
154 local_irq_restore(flags);
155} 74}
156EXPORT_SYMBOL_GPL(rcu_idle_exit); 75EXPORT_SYMBOL_GPL(rcu_idle_exit);
157 76
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
160 */ 79 */
161void rcu_irq_enter(void) 80void rcu_irq_enter(void)
162{ 81{
163 unsigned long flags;
164 long long oldval;
165
166 local_irq_save(flags);
167 oldval = rcu_dynticks_nesting;
168 rcu_dynticks_nesting++;
169 WARN_ON_ONCE(rcu_dynticks_nesting == 0);
170 rcu_idle_exit_common(oldval);
171 local_irq_restore(flags);
172} 82}
173EXPORT_SYMBOL_GPL(rcu_irq_enter); 83EXPORT_SYMBOL_GPL(rcu_irq_enter);
174 84
@@ -179,23 +89,13 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
179 */ 89 */
180bool notrace __rcu_is_watching(void) 90bool notrace __rcu_is_watching(void)
181{ 91{
182 return rcu_dynticks_nesting; 92 return true;
183} 93}
184EXPORT_SYMBOL(__rcu_is_watching); 94EXPORT_SYMBOL(__rcu_is_watching);
185 95
186#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 96#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
187 97
188/* 98/*
189 * Test whether the current CPU was interrupted from idle. Nested
190 * interrupts don't count, we must be running at the first interrupt
191 * level.
192 */
193static int rcu_is_cpu_rrupt_from_idle(void)
194{
195 return rcu_dynticks_nesting <= 1;
196}
197
198/*
199 * Helper function for rcu_sched_qs() and rcu_bh_qs(). 99 * Helper function for rcu_sched_qs() and rcu_bh_qs().
200 * Also irqs are disabled to avoid confusion due to interrupt handlers 100 * Also irqs are disabled to avoid confusion due to interrupt handlers
201 * invoking call_rcu(). 101 * invoking call_rcu().
@@ -250,7 +150,7 @@ void rcu_bh_qs(void)
250void rcu_check_callbacks(int user) 150void rcu_check_callbacks(int user)
251{ 151{
252 RCU_TRACE(check_cpu_stalls()); 152 RCU_TRACE(check_cpu_stalls());
253 if (user || rcu_is_cpu_rrupt_from_idle()) 153 if (user)
254 rcu_sched_qs(); 154 rcu_sched_qs();
255 else if (!in_softirq()) 155 else if (!in_softirq())
256 rcu_bh_qs(); 156 rcu_bh_qs();
@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
357 rcp->curtail = &head->next; 257 rcp->curtail = &head->next;
358 RCU_TRACE(rcp->qlen++); 258 RCU_TRACE(rcp->qlen++);
359 local_irq_restore(flags); 259 local_irq_restore(flags);
260
261 if (unlikely(is_idle_task(current))) {
262 /* force scheduling for rcu_sched_qs() */
263 resched_cpu(0);
264 }
360} 265}
361 266
362/* 267/*
@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
383void __init rcu_init(void) 288void __init rcu_init(void)
384{ 289{
385 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 290 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
291 RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
292 RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
386 293
387 rcu_early_boot_tests(); 294 rcu_early_boot_tests();
388} 295}
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 858c56569127..f94e209a10d6 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
145 rcp->ticks_this_gp++; 145 rcp->ticks_this_gp++;
146 j = jiffies; 146 j = jiffies;
147 js = ACCESS_ONCE(rcp->jiffies_stall); 147 js = ACCESS_ONCE(rcp->jiffies_stall);
148 if (*rcp->curtail && ULONG_CMP_GE(j, js)) { 148 if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
149 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n", 149 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
150 rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting, 150 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
151 jiffies - rcp->gp_start, rcp->qlen); 151 jiffies - rcp->gp_start, rcp->qlen);
152 dump_stack(); 152 dump_stack();
153 }
154 if (*rcp->curtail && ULONG_CMP_GE(j, js))
155 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + 153 ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
156 3 * rcu_jiffies_till_stall_check() + 3; 154 3 * rcu_jiffies_till_stall_check() + 3;
157 else if (ULONG_CMP_GE(j, js)) 155 } else if (ULONG_CMP_GE(j, js)) {
158 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check(); 156 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
157 }
159} 158}
160 159
161static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp) 160static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7680fc275036..48d640ca1a05 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
156static void invoke_rcu_core(void); 156static void invoke_rcu_core(void);
157static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); 157static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
158 158
159/* rcuc/rcub kthread realtime priority */
160static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
161module_param(kthread_prio, int, 0644);
162
159/* 163/*
160 * Track the rcutorture test sequence number and the update version 164 * Track the rcutorture test sequence number and the update version
161 * number within a given test. The rcutorture_testseq is incremented 165 * number within a given test. The rcutorture_testseq is incremented
@@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
215#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 219#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
216}; 220};
217 221
222DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
223EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
224
218/* 225/*
219 * Let the RCU core know that this CPU has gone through the scheduler, 226 * Let the RCU core know that this CPU has gone through the scheduler,
220 * which is a quiescent state. This is called when the need for a 227 * which is a quiescent state. This is called when the need for a
@@ -284,6 +291,22 @@ void rcu_note_context_switch(void)
284} 291}
285EXPORT_SYMBOL_GPL(rcu_note_context_switch); 292EXPORT_SYMBOL_GPL(rcu_note_context_switch);
286 293
294/*
295 * Register a quiesecent state for all RCU flavors. If there is an
296 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
297 * dyntick-idle quiescent state visible to other CPUs (but only for those
298 * RCU flavors in desparate need of a quiescent state, which will normally
299 * be none of them). Either way, do a lightweight quiescent state for
300 * all RCU flavors.
301 */
302void rcu_all_qs(void)
303{
304 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
305 rcu_momentary_dyntick_idle();
306 this_cpu_inc(rcu_qs_ctr);
307}
308EXPORT_SYMBOL_GPL(rcu_all_qs);
309
287static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ 310static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
288static long qhimark = 10000; /* If this many pending, ignore blimit. */ 311static long qhimark = 10000; /* If this many pending, ignore blimit. */
289static long qlowmark = 100; /* Once only this many pending, use blimit. */ 312static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp);
315static int rcu_pending(void); 338static int rcu_pending(void);
316 339
317/* 340/*
318 * Return the number of RCU-sched batches processed thus far for debug & stats. 341 * Return the number of RCU batches started thus far for debug & stats.
342 */
343unsigned long rcu_batches_started(void)
344{
345 return rcu_state_p->gpnum;
346}
347EXPORT_SYMBOL_GPL(rcu_batches_started);
348
349/*
350 * Return the number of RCU-sched batches started thus far for debug & stats.
351 */
352unsigned long rcu_batches_started_sched(void)
353{
354 return rcu_sched_state.gpnum;
355}
356EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
357
358/*
359 * Return the number of RCU BH batches started thus far for debug & stats.
319 */ 360 */
320long rcu_batches_completed_sched(void) 361unsigned long rcu_batches_started_bh(void)
362{
363 return rcu_bh_state.gpnum;
364}
365EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
366
367/*
368 * Return the number of RCU batches completed thus far for debug & stats.
369 */
370unsigned long rcu_batches_completed(void)
371{
372 return rcu_state_p->completed;
373}
374EXPORT_SYMBOL_GPL(rcu_batches_completed);
375
376/*
377 * Return the number of RCU-sched batches completed thus far for debug & stats.
378 */
379unsigned long rcu_batches_completed_sched(void)
321{ 380{
322 return rcu_sched_state.completed; 381 return rcu_sched_state.completed;
323} 382}
324EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); 383EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
325 384
326/* 385/*
327 * Return the number of RCU BH batches processed thus far for debug & stats. 386 * Return the number of RCU BH batches completed thus far for debug & stats.
328 */ 387 */
329long rcu_batches_completed_bh(void) 388unsigned long rcu_batches_completed_bh(void)
330{ 389{
331 return rcu_bh_state.completed; 390 return rcu_bh_state.completed;
332} 391}
@@ -759,39 +818,71 @@ void rcu_irq_enter(void)
759/** 818/**
760 * rcu_nmi_enter - inform RCU of entry to NMI context 819 * rcu_nmi_enter - inform RCU of entry to NMI context
761 * 820 *
762 * If the CPU was idle with dynamic ticks active, and there is no 821 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
763 * irq handler running, this updates rdtp->dynticks_nmi to let the 822 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
764 * RCU grace-period handling know that the CPU is active. 823 * that the CPU is active. This implementation permits nested NMIs, as
824 * long as the nesting level does not overflow an int. (You will probably
825 * run out of stack space first.)
765 */ 826 */
766void rcu_nmi_enter(void) 827void rcu_nmi_enter(void)
767{ 828{
768 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 829 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
830 int incby = 2;
769 831
770 if (rdtp->dynticks_nmi_nesting == 0 && 832 /* Complain about underflow. */
771 (atomic_read(&rdtp->dynticks) & 0x1)) 833 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
772 return; 834
773 rdtp->dynticks_nmi_nesting++; 835 /*
774 smp_mb__before_atomic(); /* Force delay from prior write. */ 836 * If idle from RCU viewpoint, atomically increment ->dynticks
775 atomic_inc(&rdtp->dynticks); 837 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
776 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ 838 * Otherwise, increment ->dynticks_nmi_nesting by two. This means
777 smp_mb__after_atomic(); /* See above. */ 839 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
778 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); 840 * to be in the outermost NMI handler that interrupted an RCU-idle
841 * period (observation due to Andy Lutomirski).
842 */
843 if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
844 smp_mb__before_atomic(); /* Force delay from prior write. */
845 atomic_inc(&rdtp->dynticks);
846 /* atomic_inc() before later RCU read-side crit sects */
847 smp_mb__after_atomic(); /* See above. */
848 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
849 incby = 1;
850 }
851 rdtp->dynticks_nmi_nesting += incby;
852 barrier();
779} 853}
780 854
781/** 855/**
782 * rcu_nmi_exit - inform RCU of exit from NMI context 856 * rcu_nmi_exit - inform RCU of exit from NMI context
783 * 857 *
784 * If the CPU was idle with dynamic ticks active, and there is no 858 * If we are returning from the outermost NMI handler that interrupted an
785 * irq handler running, this updates rdtp->dynticks_nmi to let the 859 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
786 * RCU grace-period handling know that the CPU is no longer active. 860 * to let the RCU grace-period handling know that the CPU is back to
861 * being RCU-idle.
787 */ 862 */
788void rcu_nmi_exit(void) 863void rcu_nmi_exit(void)
789{ 864{
790 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 865 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
791 866
792 if (rdtp->dynticks_nmi_nesting == 0 || 867 /*
793 --rdtp->dynticks_nmi_nesting != 0) 868 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
869 * (We are exiting an NMI handler, so RCU better be paying attention
870 * to us!)
871 */
872 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
873 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
874
875 /*
876 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
877 * leave it in non-RCU-idle state.
878 */
879 if (rdtp->dynticks_nmi_nesting != 1) {
880 rdtp->dynticks_nmi_nesting -= 2;
794 return; 881 return;
882 }
883
884 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
885 rdtp->dynticks_nmi_nesting = 0;
795 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ 886 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
796 smp_mb__before_atomic(); /* See above. */ 887 smp_mb__before_atomic(); /* See above. */
797 atomic_inc(&rdtp->dynticks); 888 atomic_inc(&rdtp->dynticks);
@@ -898,17 +989,14 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
898 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); 989 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
899 return 1; 990 return 1;
900 } else { 991 } else {
992 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
993 rdp->mynode->gpnum))
994 ACCESS_ONCE(rdp->gpwrap) = true;
901 return 0; 995 return 0;
902 } 996 }
903} 997}
904 998
905/* 999/*
906 * This function really isn't for public consumption, but RCU is special in
907 * that context switches can allow the state machine to make progress.
908 */
909extern void resched_cpu(int cpu);
910
911/*
912 * Return true if the specified CPU has passed through a quiescent 1000 * Return true if the specified CPU has passed through a quiescent
913 * state by virtue of being in or having passed through an dynticks 1001 * state by virtue of being in or having passed through an dynticks
914 * idle state since the last call to dyntick_save_progress_counter() 1002 * idle state since the last call to dyntick_save_progress_counter()
@@ -1011,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
1011 j1 = rcu_jiffies_till_stall_check(); 1099 j1 = rcu_jiffies_till_stall_check();
1012 ACCESS_ONCE(rsp->jiffies_stall) = j + j1; 1100 ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
1013 rsp->jiffies_resched = j + j1 / 2; 1101 rsp->jiffies_resched = j + j1 / 2;
1102 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
1103}
1104
1105/*
1106 * Complain about starvation of grace-period kthread.
1107 */
1108static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
1109{
1110 unsigned long gpa;
1111 unsigned long j;
1112
1113 j = jiffies;
1114 gpa = ACCESS_ONCE(rsp->gp_activity);
1115 if (j - gpa > 2 * HZ)
1116 pr_err("%s kthread starved for %ld jiffies!\n",
1117 rsp->name, j - gpa);
1014} 1118}
1015 1119
1016/* 1120/*
@@ -1033,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1033 } 1137 }
1034} 1138}
1035 1139
1036static void print_other_cpu_stall(struct rcu_state *rsp) 1140static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1037{ 1141{
1038 int cpu; 1142 int cpu;
1039 long delta; 1143 long delta;
1040 unsigned long flags; 1144 unsigned long flags;
1145 unsigned long gpa;
1146 unsigned long j;
1041 int ndetected = 0; 1147 int ndetected = 0;
1042 struct rcu_node *rnp = rcu_get_root(rsp); 1148 struct rcu_node *rnp = rcu_get_root(rsp);
1043 long totqlen = 0; 1149 long totqlen = 0;
@@ -1075,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
1075 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1181 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1076 } 1182 }
1077 1183
1078 /*
1079 * Now rat on any tasks that got kicked up to the root rcu_node
1080 * due to CPU offlining.
1081 */
1082 rnp = rcu_get_root(rsp);
1083 raw_spin_lock_irqsave(&rnp->lock, flags);
1084 ndetected += rcu_print_task_stall(rnp);
1085 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1086
1087 print_cpu_stall_info_end(); 1184 print_cpu_stall_info_end();
1088 for_each_possible_cpu(cpu) 1185 for_each_possible_cpu(cpu)
1089 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; 1186 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1090 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n", 1187 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1091 smp_processor_id(), (long)(jiffies - rsp->gp_start), 1188 smp_processor_id(), (long)(jiffies - rsp->gp_start),
1092 (long)rsp->gpnum, (long)rsp->completed, totqlen); 1189 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1093 if (ndetected == 0) 1190 if (ndetected) {
1094 pr_err("INFO: Stall ended before state dump start\n");
1095 else
1096 rcu_dump_cpu_stacks(rsp); 1191 rcu_dump_cpu_stacks(rsp);
1192 } else {
1193 if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
1194 ACCESS_ONCE(rsp->completed) == gpnum) {
1195 pr_err("INFO: Stall ended before state dump start\n");
1196 } else {
1197 j = jiffies;
1198 gpa = ACCESS_ONCE(rsp->gp_activity);
1199 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
1200 rsp->name, j - gpa, j, gpa,
1201 jiffies_till_next_fqs);
1202 /* In this case, the current CPU might be at fault. */
1203 sched_show_task(current);
1204 }
1205 }
1097 1206
1098 /* Complain about tasks blocking the grace period. */ 1207 /* Complain about tasks blocking the grace period. */
1099
1100 rcu_print_detail_task_stall(rsp); 1208 rcu_print_detail_task_stall(rsp);
1101 1209
1210 rcu_check_gp_kthread_starvation(rsp);
1211
1102 force_quiescent_state(rsp); /* Kick them all. */ 1212 force_quiescent_state(rsp); /* Kick them all. */
1103} 1213}
1104 1214
@@ -1123,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
1123 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n", 1233 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1124 jiffies - rsp->gp_start, 1234 jiffies - rsp->gp_start,
1125 (long)rsp->gpnum, (long)rsp->completed, totqlen); 1235 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1236
1237 rcu_check_gp_kthread_starvation(rsp);
1238
1126 rcu_dump_cpu_stacks(rsp); 1239 rcu_dump_cpu_stacks(rsp);
1127 1240
1128 raw_spin_lock_irqsave(&rnp->lock, flags); 1241 raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1193,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1193 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) { 1306 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1194 1307
1195 /* They had a few time units to dump stack, so complain. */ 1308 /* They had a few time units to dump stack, so complain. */
1196 print_other_cpu_stall(rsp); 1309 print_other_cpu_stall(rsp, gpnum);
1197 } 1310 }
1198} 1311}
1199 1312
@@ -1530,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1530 bool ret; 1643 bool ret;
1531 1644
1532 /* Handle the ends of any preceding grace periods first. */ 1645 /* Handle the ends of any preceding grace periods first. */
1533 if (rdp->completed == rnp->completed) { 1646 if (rdp->completed == rnp->completed &&
1647 !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
1534 1648
1535 /* No grace period end, so just accelerate recent callbacks. */ 1649 /* No grace period end, so just accelerate recent callbacks. */
1536 ret = rcu_accelerate_cbs(rsp, rnp, rdp); 1650 ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1545,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1545 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); 1659 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1546 } 1660 }
1547 1661
1548 if (rdp->gpnum != rnp->gpnum) { 1662 if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
1549 /* 1663 /*
1550 * If the current grace period is waiting for this CPU, 1664 * If the current grace period is waiting for this CPU,
1551 * set up to detect a quiescent state, otherwise don't 1665 * set up to detect a quiescent state, otherwise don't
@@ -1554,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1554 rdp->gpnum = rnp->gpnum; 1668 rdp->gpnum = rnp->gpnum;
1555 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); 1669 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1556 rdp->passed_quiesce = 0; 1670 rdp->passed_quiesce = 0;
1671 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
1557 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); 1672 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1558 zero_cpu_stall_ticks(rdp); 1673 zero_cpu_stall_ticks(rdp);
1674 ACCESS_ONCE(rdp->gpwrap) = false;
1559 } 1675 }
1560 return ret; 1676 return ret;
1561} 1677}
@@ -1569,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1569 local_irq_save(flags); 1685 local_irq_save(flags);
1570 rnp = rdp->mynode; 1686 rnp = rdp->mynode;
1571 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && 1687 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1572 rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */ 1688 rdp->completed == ACCESS_ONCE(rnp->completed) &&
1689 !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
1573 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ 1690 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1574 local_irq_restore(flags); 1691 local_irq_restore(flags);
1575 return; 1692 return;
@@ -1589,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
1589 struct rcu_data *rdp; 1706 struct rcu_data *rdp;
1590 struct rcu_node *rnp = rcu_get_root(rsp); 1707 struct rcu_node *rnp = rcu_get_root(rsp);
1591 1708
1709 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1592 rcu_bind_gp_kthread(); 1710 rcu_bind_gp_kthread();
1593 raw_spin_lock_irq(&rnp->lock); 1711 raw_spin_lock_irq(&rnp->lock);
1594 smp_mb__after_unlock_lock(); 1712 smp_mb__after_unlock_lock();
@@ -1649,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
1649 rnp->grphi, rnp->qsmask); 1767 rnp->grphi, rnp->qsmask);
1650 raw_spin_unlock_irq(&rnp->lock); 1768 raw_spin_unlock_irq(&rnp->lock);
1651 cond_resched_rcu_qs(); 1769 cond_resched_rcu_qs();
1770 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1652 } 1771 }
1653 1772
1654 mutex_unlock(&rsp->onoff_mutex); 1773 mutex_unlock(&rsp->onoff_mutex);
@@ -1665,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1665 unsigned long maxj; 1784 unsigned long maxj;
1666 struct rcu_node *rnp = rcu_get_root(rsp); 1785 struct rcu_node *rnp = rcu_get_root(rsp);
1667 1786
1787 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1668 rsp->n_force_qs++; 1788 rsp->n_force_qs++;
1669 if (fqs_state == RCU_SAVE_DYNTICK) { 1789 if (fqs_state == RCU_SAVE_DYNTICK) {
1670 /* Collect dyntick-idle snapshots. */ 1790 /* Collect dyntick-idle snapshots. */
@@ -1703,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
1703 struct rcu_data *rdp; 1823 struct rcu_data *rdp;
1704 struct rcu_node *rnp = rcu_get_root(rsp); 1824 struct rcu_node *rnp = rcu_get_root(rsp);
1705 1825
1826 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1706 raw_spin_lock_irq(&rnp->lock); 1827 raw_spin_lock_irq(&rnp->lock);
1707 smp_mb__after_unlock_lock(); 1828 smp_mb__after_unlock_lock();
1708 gp_duration = jiffies - rsp->gp_start; 1829 gp_duration = jiffies - rsp->gp_start;
@@ -1739,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
1739 nocb += rcu_future_gp_cleanup(rsp, rnp); 1860 nocb += rcu_future_gp_cleanup(rsp, rnp);
1740 raw_spin_unlock_irq(&rnp->lock); 1861 raw_spin_unlock_irq(&rnp->lock);
1741 cond_resched_rcu_qs(); 1862 cond_resched_rcu_qs();
1863 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1742 } 1864 }
1743 rnp = rcu_get_root(rsp); 1865 rnp = rcu_get_root(rsp);
1744 raw_spin_lock_irq(&rnp->lock); 1866 raw_spin_lock_irq(&rnp->lock);
@@ -1788,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
1788 if (rcu_gp_init(rsp)) 1910 if (rcu_gp_init(rsp))
1789 break; 1911 break;
1790 cond_resched_rcu_qs(); 1912 cond_resched_rcu_qs();
1913 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1791 WARN_ON(signal_pending(current)); 1914 WARN_ON(signal_pending(current));
1792 trace_rcu_grace_period(rsp->name, 1915 trace_rcu_grace_period(rsp->name,
1793 ACCESS_ONCE(rsp->gpnum), 1916 ACCESS_ONCE(rsp->gpnum),
@@ -1831,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
1831 ACCESS_ONCE(rsp->gpnum), 1954 ACCESS_ONCE(rsp->gpnum),
1832 TPS("fqsend")); 1955 TPS("fqsend"));
1833 cond_resched_rcu_qs(); 1956 cond_resched_rcu_qs();
1957 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1834 } else { 1958 } else {
1835 /* Deal with stray signal. */ 1959 /* Deal with stray signal. */
1836 cond_resched_rcu_qs(); 1960 cond_resched_rcu_qs();
1961 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1837 WARN_ON(signal_pending(current)); 1962 WARN_ON(signal_pending(current));
1838 trace_rcu_grace_period(rsp->name, 1963 trace_rcu_grace_period(rsp->name,
1839 ACCESS_ONCE(rsp->gpnum), 1964 ACCESS_ONCE(rsp->gpnum),
@@ -2010,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2010 rnp = rdp->mynode; 2135 rnp = rdp->mynode;
2011 raw_spin_lock_irqsave(&rnp->lock, flags); 2136 raw_spin_lock_irqsave(&rnp->lock, flags);
2012 smp_mb__after_unlock_lock(); 2137 smp_mb__after_unlock_lock();
2013 if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum || 2138 if ((rdp->passed_quiesce == 0 &&
2014 rnp->completed == rnp->gpnum) { 2139 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
2140 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
2141 rdp->gpwrap) {
2015 2142
2016 /* 2143 /*
2017 * The grace period in which this quiescent state was 2144 * The grace period in which this quiescent state was
@@ -2020,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2020 * within the current grace period. 2147 * within the current grace period.
2021 */ 2148 */
2022 rdp->passed_quiesce = 0; /* need qs for new gp. */ 2149 rdp->passed_quiesce = 0; /* need qs for new gp. */
2150 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
2023 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2151 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2024 return; 2152 return;
2025 } 2153 }
@@ -2064,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2064 * Was there a quiescent state since the beginning of the grace 2192 * Was there a quiescent state since the beginning of the grace
2065 * period? If no, then exit and wait for the next call. 2193 * period? If no, then exit and wait for the next call.
2066 */ 2194 */
2067 if (!rdp->passed_quiesce) 2195 if (!rdp->passed_quiesce &&
2196 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
2068 return; 2197 return;
2069 2198
2070 /* 2199 /*
@@ -2195,6 +2324,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2195} 2324}
2196 2325
2197/* 2326/*
2327 * All CPUs for the specified rcu_node structure have gone offline,
2328 * and all tasks that were preempted within an RCU read-side critical
2329 * section while running on one of those CPUs have since exited their RCU
2330 * read-side critical section. Some other CPU is reporting this fact with
2331 * the specified rcu_node structure's ->lock held and interrupts disabled.
2332 * This function therefore goes up the tree of rcu_node structures,
2333 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2334 * the leaf rcu_node structure's ->qsmaskinit field has already been
2335 * updated
2336 *
2337 * This function does check that the specified rcu_node structure has
2338 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2339 * prematurely. That said, invoking it after the fact will cost you
2340 * a needless lock acquisition. So once it has done its work, don't
2341 * invoke it again.
2342 */
2343static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2344{
2345 long mask;
2346 struct rcu_node *rnp = rnp_leaf;
2347
2348 if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2349 return;
2350 for (;;) {
2351 mask = rnp->grpmask;
2352 rnp = rnp->parent;
2353 if (!rnp)
2354 break;
2355 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
2356 smp_mb__after_unlock_lock(); /* GP memory ordering. */
2357 rnp->qsmaskinit &= ~mask;
2358 if (rnp->qsmaskinit) {
2359 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2360 return;
2361 }
2362 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2363 }
2364}
2365
2366/*
2198 * The CPU has been completely removed, and some other CPU is reporting 2367 * The CPU has been completely removed, and some other CPU is reporting
2199 * this fact from process context. Do the remainder of the cleanup, 2368 * this fact from process context. Do the remainder of the cleanup,
2200 * including orphaning the outgoing CPU's RCU callbacks, and also 2369 * including orphaning the outgoing CPU's RCU callbacks, and also
@@ -2204,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2204static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2373static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2205{ 2374{
2206 unsigned long flags; 2375 unsigned long flags;
2207 unsigned long mask;
2208 int need_report = 0;
2209 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 2376 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2210 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2377 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2211 2378
@@ -2219,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2219 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */ 2386 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2220 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); 2387 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2221 rcu_adopt_orphan_cbs(rsp, flags); 2388 rcu_adopt_orphan_cbs(rsp, flags);
2389 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
2222 2390
2223 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ 2391 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
2224 mask = rdp->grpmask; /* rnp->grplo is constant. */ 2392 raw_spin_lock_irqsave(&rnp->lock, flags);
2225 do { 2393 smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */
2226 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 2394 rnp->qsmaskinit &= ~rdp->grpmask;
2227 smp_mb__after_unlock_lock(); 2395 if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
2228 rnp->qsmaskinit &= ~mask; 2396 rcu_cleanup_dead_rnp(rnp);
2229 if (rnp->qsmaskinit != 0) { 2397 rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
2230 if (rnp != rdp->mynode)
2231 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2232 break;
2233 }
2234 if (rnp == rdp->mynode)
2235 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
2236 else
2237 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2238 mask = rnp->grpmask;
2239 rnp = rnp->parent;
2240 } while (rnp != NULL);
2241
2242 /*
2243 * We still hold the leaf rcu_node structure lock here, and
2244 * irqs are still disabled. The reason for this subterfuge is
2245 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
2246 * held leads to deadlock.
2247 */
2248 raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
2249 rnp = rdp->mynode;
2250 if (need_report & RCU_OFL_TASKS_NORM_GP)
2251 rcu_report_unblock_qs_rnp(rnp, flags);
2252 else
2253 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2254 if (need_report & RCU_OFL_TASKS_EXP_GP)
2255 rcu_report_exp_rnp(rsp, rnp, true);
2256 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, 2398 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2257 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", 2399 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2258 cpu, rdp->qlen, rdp->nxtlist); 2400 cpu, rdp->qlen, rdp->nxtlist);
@@ -2268,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2268{ 2410{
2269} 2411}
2270 2412
2413static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2414{
2415}
2416
2271static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2417static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2272{ 2418{
2273} 2419}
@@ -2464,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
2464 } 2610 }
2465 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2611 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2466 } 2612 }
2467 rnp = rcu_get_root(rsp);
2468 if (rnp->qsmask == 0) {
2469 raw_spin_lock_irqsave(&rnp->lock, flags);
2470 smp_mb__after_unlock_lock();
2471 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2472 }
2473} 2613}
2474 2614
2475/* 2615/*
@@ -2569,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
2569 * Schedule RCU callback invocation. If the specified type of RCU 2709 * Schedule RCU callback invocation. If the specified type of RCU
2570 * does not support RCU priority boosting, just do a direct call, 2710 * does not support RCU priority boosting, just do a direct call,
2571 * otherwise wake up the per-CPU kernel kthread. Note that because we 2711 * otherwise wake up the per-CPU kernel kthread. Note that because we
2572 * are running on the current CPU with interrupts disabled, the 2712 * are running on the current CPU with softirqs disabled, the
2573 * rcu_cpu_kthread_task cannot disappear out from under us. 2713 * rcu_cpu_kthread_task cannot disappear out from under us.
2574 */ 2714 */
2575static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) 2715static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -3109,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3109 3249
3110 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3250 /* Is the RCU core waiting for a quiescent state from this CPU? */
3111 if (rcu_scheduler_fully_active && 3251 if (rcu_scheduler_fully_active &&
3112 rdp->qs_pending && !rdp->passed_quiesce) { 3252 rdp->qs_pending && !rdp->passed_quiesce &&
3253 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
3113 rdp->n_rp_qs_pending++; 3254 rdp->n_rp_qs_pending++;
3114 } else if (rdp->qs_pending && rdp->passed_quiesce) { 3255 } else if (rdp->qs_pending &&
3256 (rdp->passed_quiesce ||
3257 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
3115 rdp->n_rp_report_qs++; 3258 rdp->n_rp_report_qs++;
3116 return 1; 3259 return 1;
3117 } 3260 }
@@ -3135,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3135 } 3278 }
3136 3279
3137 /* Has a new RCU grace period started? */ 3280 /* Has a new RCU grace period started? */
3138 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ 3281 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
3282 unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
3139 rdp->n_rp_gp_started++; 3283 rdp->n_rp_gp_started++;
3140 return 1; 3284 return 1;
3141 } 3285 }
@@ -3318,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
3318 } else { 3462 } else {
3319 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, 3463 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3320 rsp->n_barrier_done); 3464 rsp->n_barrier_done);
3465 smp_mb__before_atomic();
3321 atomic_inc(&rsp->barrier_cpu_count); 3466 atomic_inc(&rsp->barrier_cpu_count);
3322 __call_rcu(&rdp->barrier_head, 3467 __call_rcu(&rdp->barrier_head,
3323 rcu_barrier_callback, rsp, cpu, 0); 3468 rcu_barrier_callback, rsp, cpu, 0);
@@ -3385,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3385 /* Set up local state, ensuring consistent view of global state. */ 3530 /* Set up local state, ensuring consistent view of global state. */
3386 raw_spin_lock_irqsave(&rnp->lock, flags); 3531 raw_spin_lock_irqsave(&rnp->lock, flags);
3387 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); 3532 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3388 init_callback_list(rdp);
3389 rdp->qlen_lazy = 0;
3390 ACCESS_ONCE(rdp->qlen) = 0;
3391 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); 3533 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3392 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); 3534 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3393 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); 3535 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -3444,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3444 rdp->gpnum = rnp->completed; 3586 rdp->gpnum = rnp->completed;
3445 rdp->completed = rnp->completed; 3587 rdp->completed = rnp->completed;
3446 rdp->passed_quiesce = 0; 3588 rdp->passed_quiesce = 0;
3589 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
3447 rdp->qs_pending = 0; 3590 rdp->qs_pending = 0;
3448 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); 3591 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3449 } 3592 }
@@ -3535,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self,
3535static int __init rcu_spawn_gp_kthread(void) 3678static int __init rcu_spawn_gp_kthread(void)
3536{ 3679{
3537 unsigned long flags; 3680 unsigned long flags;
3681 int kthread_prio_in = kthread_prio;
3538 struct rcu_node *rnp; 3682 struct rcu_node *rnp;
3539 struct rcu_state *rsp; 3683 struct rcu_state *rsp;
3684 struct sched_param sp;
3540 struct task_struct *t; 3685 struct task_struct *t;
3541 3686
3687 /* Force priority into range. */
3688 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3689 kthread_prio = 1;
3690 else if (kthread_prio < 0)
3691 kthread_prio = 0;
3692 else if (kthread_prio > 99)
3693 kthread_prio = 99;
3694 if (kthread_prio != kthread_prio_in)
3695 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3696 kthread_prio, kthread_prio_in);
3697
3542 rcu_scheduler_fully_active = 1; 3698 rcu_scheduler_fully_active = 1;
3543 for_each_rcu_flavor(rsp) { 3699 for_each_rcu_flavor(rsp) {
3544 t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name); 3700 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3545 BUG_ON(IS_ERR(t)); 3701 BUG_ON(IS_ERR(t));
3546 rnp = rcu_get_root(rsp); 3702 rnp = rcu_get_root(rsp);
3547 raw_spin_lock_irqsave(&rnp->lock, flags); 3703 raw_spin_lock_irqsave(&rnp->lock, flags);
3548 rsp->gp_kthread = t; 3704 rsp->gp_kthread = t;
3705 if (kthread_prio) {
3706 sp.sched_priority = kthread_prio;
3707 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3708 }
3709 wake_up_process(t);
3549 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3710 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3550 } 3711 }
3551 rcu_spawn_nocb_kthreads(); 3712 rcu_spawn_nocb_kthreads();
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8e7b1843896e..119de399eb2f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -27,7 +27,6 @@
27#include <linux/threads.h> 27#include <linux/threads.h>
28#include <linux/cpumask.h> 28#include <linux/cpumask.h>
29#include <linux/seqlock.h> 29#include <linux/seqlock.h>
30#include <linux/irq_work.h>
31 30
32/* 31/*
33 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and 32 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -172,11 +171,6 @@ struct rcu_node {
172 /* queued on this rcu_node structure that */ 171 /* queued on this rcu_node structure that */
173 /* are blocking the current grace period, */ 172 /* are blocking the current grace period, */
174 /* there can be no such task. */ 173 /* there can be no such task. */
175 struct completion boost_completion;
176 /* Used to ensure that the rt_mutex used */
177 /* to carry out the boosting is fully */
178 /* released with no future boostee accesses */
179 /* before that rt_mutex is re-initialized. */
180 struct rt_mutex boost_mtx; 174 struct rt_mutex boost_mtx;
181 /* Used only for the priority-boosting */ 175 /* Used only for the priority-boosting */
182 /* side effect, not as a lock. */ 176 /* side effect, not as a lock. */
@@ -257,9 +251,12 @@ struct rcu_data {
257 /* in order to detect GP end. */ 251 /* in order to detect GP end. */
258 unsigned long gpnum; /* Highest gp number that this CPU */ 252 unsigned long gpnum; /* Highest gp number that this CPU */
259 /* is aware of having started. */ 253 /* is aware of having started. */
254 unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
255 /* for rcu_all_qs() invocations. */
260 bool passed_quiesce; /* User-mode/idle loop etc. */ 256 bool passed_quiesce; /* User-mode/idle loop etc. */
261 bool qs_pending; /* Core waits for quiesc state. */ 257 bool qs_pending; /* Core waits for quiesc state. */
262 bool beenonline; /* CPU online at least once. */ 258 bool beenonline; /* CPU online at least once. */
259 bool gpwrap; /* Possible gpnum/completed wrap. */
263 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ 260 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
264 unsigned long grpmask; /* Mask to apply to leaf qsmask. */ 261 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
265#ifdef CONFIG_RCU_CPU_STALL_INFO 262#ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -340,14 +337,10 @@ struct rcu_data {
340#ifdef CONFIG_RCU_NOCB_CPU 337#ifdef CONFIG_RCU_NOCB_CPU
341 struct rcu_head *nocb_head; /* CBs waiting for kthread. */ 338 struct rcu_head *nocb_head; /* CBs waiting for kthread. */
342 struct rcu_head **nocb_tail; 339 struct rcu_head **nocb_tail;
343 atomic_long_t nocb_q_count; /* # CBs waiting for kthread */ 340 atomic_long_t nocb_q_count; /* # CBs waiting for nocb */
344 atomic_long_t nocb_q_count_lazy; /* (approximate). */ 341 atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
345 struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ 342 struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
346 struct rcu_head **nocb_follower_tail; 343 struct rcu_head **nocb_follower_tail;
347 atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
348 atomic_long_t nocb_follower_count_lazy; /* (approximate). */
349 int nocb_p_count; /* # CBs being invoked by kthread */
350 int nocb_p_count_lazy; /* (approximate). */
351 wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ 344 wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
352 struct task_struct *nocb_kthread; 345 struct task_struct *nocb_kthread;
353 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ 346 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
@@ -356,8 +349,6 @@ struct rcu_data {
356 struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp; 349 struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
357 /* CBs waiting for GP. */ 350 /* CBs waiting for GP. */
358 struct rcu_head **nocb_gp_tail; 351 struct rcu_head **nocb_gp_tail;
359 long nocb_gp_count;
360 long nocb_gp_count_lazy;
361 bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */ 352 bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */
362 struct rcu_data *nocb_next_follower; 353 struct rcu_data *nocb_next_follower;
363 /* Next follower in wakeup chain. */ 354 /* Next follower in wakeup chain. */
@@ -488,10 +479,14 @@ struct rcu_state {
488 /* due to no GP active. */ 479 /* due to no GP active. */
489 unsigned long gp_start; /* Time at which GP started, */ 480 unsigned long gp_start; /* Time at which GP started, */
490 /* but in jiffies. */ 481 /* but in jiffies. */
482 unsigned long gp_activity; /* Time of last GP kthread */
483 /* activity in jiffies. */
491 unsigned long jiffies_stall; /* Time at which to check */ 484 unsigned long jiffies_stall; /* Time at which to check */
492 /* for CPU stalls. */ 485 /* for CPU stalls. */
493 unsigned long jiffies_resched; /* Time at which to resched */ 486 unsigned long jiffies_resched; /* Time at which to resched */
494 /* a reluctant CPU. */ 487 /* a reluctant CPU. */
488 unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
489 /* GP start. */
495 unsigned long gp_max; /* Maximum GP duration in */ 490 unsigned long gp_max; /* Maximum GP duration in */
496 /* jiffies. */ 491 /* jiffies. */
497 const char *name; /* Name of structure. */ 492 const char *name; /* Name of structure. */
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors;
514#define for_each_rcu_flavor(rsp) \ 509#define for_each_rcu_flavor(rsp) \
515 list_for_each_entry((rsp), &rcu_struct_flavors, flavors) 510 list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
516 511
517/* Return values for rcu_preempt_offline_tasks(). */
518
519#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
520 /* GP were moved to root. */
521#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
522 /* GP were moved to root. */
523
524/* 512/*
525 * RCU implementation internal declarations: 513 * RCU implementation internal declarations:
526 */ 514 */
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
546 534
547/* Forward declarations for rcutree_plugin.h */ 535/* Forward declarations for rcutree_plugin.h */
548static void rcu_bootup_announce(void); 536static void rcu_bootup_announce(void);
549long rcu_batches_completed(void);
550static void rcu_preempt_note_context_switch(void); 537static void rcu_preempt_note_context_switch(void);
551static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); 538static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
552#ifdef CONFIG_HOTPLUG_CPU 539#ifdef CONFIG_HOTPLUG_CPU
553static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, 540static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
554 unsigned long flags);
555#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 541#endif /* #ifdef CONFIG_HOTPLUG_CPU */
556static void rcu_print_detail_task_stall(struct rcu_state *rsp); 542static void rcu_print_detail_task_stall(struct rcu_state *rsp);
557static int rcu_print_task_stall(struct rcu_node *rnp); 543static int rcu_print_task_stall(struct rcu_node *rnp);
558static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 544static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
559#ifdef CONFIG_HOTPLUG_CPU
560static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
561 struct rcu_node *rnp,
562 struct rcu_data *rdp);
563#endif /* #ifdef CONFIG_HOTPLUG_CPU */
564static void rcu_preempt_check_callbacks(void); 545static void rcu_preempt_check_callbacks(void);
565void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 546void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
566#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
567static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
568 bool wake);
569#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
570static void __init __rcu_init_preempt(void); 547static void __init __rcu_init_preempt(void);
571static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 548static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
572static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); 549static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void);
622#endif /* #ifndef RCU_TREE_NONCORE */ 599#endif /* #ifndef RCU_TREE_NONCORE */
623 600
624#ifdef CONFIG_RCU_TRACE 601#ifdef CONFIG_RCU_TRACE
625#ifdef CONFIG_RCU_NOCB_CPU 602/* Read out queue lengths for tracing. */
626/* Sum up queue lengths for tracing. */
627static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll) 603static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
628{ 604{
629 *ql = atomic_long_read(&rdp->nocb_q_count) + 605#ifdef CONFIG_RCU_NOCB_CPU
630 rdp->nocb_p_count + 606 *ql = atomic_long_read(&rdp->nocb_q_count);
631 atomic_long_read(&rdp->nocb_follower_count) + 607 *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
632 rdp->nocb_p_count + rdp->nocb_gp_count;
633 *qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
634 rdp->nocb_p_count_lazy +
635 atomic_long_read(&rdp->nocb_follower_count_lazy) +
636 rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
637}
638#else /* #ifdef CONFIG_RCU_NOCB_CPU */ 608#else /* #ifdef CONFIG_RCU_NOCB_CPU */
639static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
640{
641 *ql = 0; 609 *ql = 0;
642 *qll = 0; 610 *qll = 0;
643}
644#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ 611#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
612}
645#endif /* #ifdef CONFIG_RCU_TRACE */ 613#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3ec85cb5d544..2e850a51bb8f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -34,10 +34,6 @@
34 34
35#include "../locking/rtmutex_common.h" 35#include "../locking/rtmutex_common.h"
36 36
37/* rcuc/rcub kthread realtime priority */
38static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
39module_param(kthread_prio, int, 0644);
40
41/* 37/*
42 * Control variables for per-CPU and per-rcu_node kthreads. These 38 * Control variables for per-CPU and per-rcu_node kthreads. These
43 * handle all flavors of RCU. 39 * handle all flavors of RCU.
@@ -103,6 +99,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
103static struct rcu_state *rcu_state_p = &rcu_preempt_state; 99static struct rcu_state *rcu_state_p = &rcu_preempt_state;
104 100
105static int rcu_preempted_readers_exp(struct rcu_node *rnp); 101static int rcu_preempted_readers_exp(struct rcu_node *rnp);
102static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
103 bool wake);
106 104
107/* 105/*
108 * Tell them what RCU they are running. 106 * Tell them what RCU they are running.
@@ -114,25 +112,6 @@ static void __init rcu_bootup_announce(void)
114} 112}
115 113
116/* 114/*
117 * Return the number of RCU-preempt batches processed thus far
118 * for debug and statistics.
119 */
120static long rcu_batches_completed_preempt(void)
121{
122 return rcu_preempt_state.completed;
123}
124EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
125
126/*
127 * Return the number of RCU batches processed thus far for debug & stats.
128 */
129long rcu_batches_completed(void)
130{
131 return rcu_batches_completed_preempt();
132}
133EXPORT_SYMBOL_GPL(rcu_batches_completed);
134
135/*
136 * Record a preemptible-RCU quiescent state for the specified CPU. Note 115 * Record a preemptible-RCU quiescent state for the specified CPU. Note
137 * that this just means that the task currently running on the CPU is 116 * that this just means that the task currently running on the CPU is
138 * not in a quiescent state. There might be any number of tasks blocked 117 * not in a quiescent state. There might be any number of tasks blocked
@@ -307,15 +286,25 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
307} 286}
308 287
309/* 288/*
289 * Return true if the specified rcu_node structure has tasks that were
290 * preempted within an RCU read-side critical section.
291 */
292static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
293{
294 return !list_empty(&rnp->blkd_tasks);
295}
296
297/*
310 * Handle special cases during rcu_read_unlock(), such as needing to 298 * Handle special cases during rcu_read_unlock(), such as needing to
311 * notify RCU core processing or task having blocked during the RCU 299 * notify RCU core processing or task having blocked during the RCU
312 * read-side critical section. 300 * read-side critical section.
313 */ 301 */
314void rcu_read_unlock_special(struct task_struct *t) 302void rcu_read_unlock_special(struct task_struct *t)
315{ 303{
316 int empty; 304 bool empty;
317 int empty_exp; 305 bool empty_exp;
318 int empty_exp_now; 306 bool empty_norm;
307 bool empty_exp_now;
319 unsigned long flags; 308 unsigned long flags;
320 struct list_head *np; 309 struct list_head *np;
321#ifdef CONFIG_RCU_BOOST 310#ifdef CONFIG_RCU_BOOST
@@ -367,7 +356,8 @@ void rcu_read_unlock_special(struct task_struct *t)
367 break; 356 break;
368 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 357 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
369 } 358 }
370 empty = !rcu_preempt_blocked_readers_cgp(rnp); 359 empty = !rcu_preempt_has_tasks(rnp);
360 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
371 empty_exp = !rcu_preempted_readers_exp(rnp); 361 empty_exp = !rcu_preempted_readers_exp(rnp);
372 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 362 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
373 np = rcu_next_node_entry(t, rnp); 363 np = rcu_next_node_entry(t, rnp);
@@ -387,13 +377,21 @@ void rcu_read_unlock_special(struct task_struct *t)
387#endif /* #ifdef CONFIG_RCU_BOOST */ 377#endif /* #ifdef CONFIG_RCU_BOOST */
388 378
389 /* 379 /*
380 * If this was the last task on the list, go see if we
381 * need to propagate ->qsmaskinit bit clearing up the
382 * rcu_node tree.
383 */
384 if (!empty && !rcu_preempt_has_tasks(rnp))
385 rcu_cleanup_dead_rnp(rnp);
386
387 /*
390 * If this was the last task on the current list, and if 388 * If this was the last task on the current list, and if
391 * we aren't waiting on any CPUs, report the quiescent state. 389 * we aren't waiting on any CPUs, report the quiescent state.
392 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, 390 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
393 * so we must take a snapshot of the expedited state. 391 * so we must take a snapshot of the expedited state.
394 */ 392 */
395 empty_exp_now = !rcu_preempted_readers_exp(rnp); 393 empty_exp_now = !rcu_preempted_readers_exp(rnp);
396 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { 394 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
397 trace_rcu_quiescent_state_report(TPS("preempt_rcu"), 395 trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
398 rnp->gpnum, 396 rnp->gpnum,
399 0, rnp->qsmask, 397 0, rnp->qsmask,
@@ -408,10 +406,8 @@ void rcu_read_unlock_special(struct task_struct *t)
408 406
409#ifdef CONFIG_RCU_BOOST 407#ifdef CONFIG_RCU_BOOST
410 /* Unboost if we were boosted. */ 408 /* Unboost if we were boosted. */
411 if (drop_boost_mutex) { 409 if (drop_boost_mutex)
412 rt_mutex_unlock(&rnp->boost_mtx); 410 rt_mutex_unlock(&rnp->boost_mtx);
413 complete(&rnp->boost_completion);
414 }
415#endif /* #ifdef CONFIG_RCU_BOOST */ 411#endif /* #ifdef CONFIG_RCU_BOOST */
416 412
417 /* 413 /*
@@ -519,99 +515,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
519static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 515static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
520{ 516{
521 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); 517 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
522 if (!list_empty(&rnp->blkd_tasks)) 518 if (rcu_preempt_has_tasks(rnp))
523 rnp->gp_tasks = rnp->blkd_tasks.next; 519 rnp->gp_tasks = rnp->blkd_tasks.next;
524 WARN_ON_ONCE(rnp->qsmask); 520 WARN_ON_ONCE(rnp->qsmask);
525} 521}
526 522
527#ifdef CONFIG_HOTPLUG_CPU 523#ifdef CONFIG_HOTPLUG_CPU
528 524
529/*
530 * Handle tasklist migration for case in which all CPUs covered by the
531 * specified rcu_node have gone offline. Move them up to the root
532 * rcu_node. The reason for not just moving them to the immediate
533 * parent is to remove the need for rcu_read_unlock_special() to
534 * make more than two attempts to acquire the target rcu_node's lock.
535 * Returns true if there were tasks blocking the current RCU grace
536 * period.
537 *
538 * Returns 1 if there was previously a task blocking the current grace
539 * period on the specified rcu_node structure.
540 *
541 * The caller must hold rnp->lock with irqs disabled.
542 */
543static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
544 struct rcu_node *rnp,
545 struct rcu_data *rdp)
546{
547 struct list_head *lp;
548 struct list_head *lp_root;
549 int retval = 0;
550 struct rcu_node *rnp_root = rcu_get_root(rsp);
551 struct task_struct *t;
552
553 if (rnp == rnp_root) {
554 WARN_ONCE(1, "Last CPU thought to be offlined?");
555 return 0; /* Shouldn't happen: at least one CPU online. */
556 }
557
558 /* If we are on an internal node, complain bitterly. */
559 WARN_ON_ONCE(rnp != rdp->mynode);
560
561 /*
562 * Move tasks up to root rcu_node. Don't try to get fancy for
563 * this corner-case operation -- just put this node's tasks
564 * at the head of the root node's list, and update the root node's
565 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
566 * if non-NULL. This might result in waiting for more tasks than
567 * absolutely necessary, but this is a good performance/complexity
568 * tradeoff.
569 */
570 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
571 retval |= RCU_OFL_TASKS_NORM_GP;
572 if (rcu_preempted_readers_exp(rnp))
573 retval |= RCU_OFL_TASKS_EXP_GP;
574 lp = &rnp->blkd_tasks;
575 lp_root = &rnp_root->blkd_tasks;
576 while (!list_empty(lp)) {
577 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
578 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
579 smp_mb__after_unlock_lock();
580 list_del(&t->rcu_node_entry);
581 t->rcu_blocked_node = rnp_root;
582 list_add(&t->rcu_node_entry, lp_root);
583 if (&t->rcu_node_entry == rnp->gp_tasks)
584 rnp_root->gp_tasks = rnp->gp_tasks;
585 if (&t->rcu_node_entry == rnp->exp_tasks)
586 rnp_root->exp_tasks = rnp->exp_tasks;
587#ifdef CONFIG_RCU_BOOST
588 if (&t->rcu_node_entry == rnp->boost_tasks)
589 rnp_root->boost_tasks = rnp->boost_tasks;
590#endif /* #ifdef CONFIG_RCU_BOOST */
591 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
592 }
593
594 rnp->gp_tasks = NULL;
595 rnp->exp_tasks = NULL;
596#ifdef CONFIG_RCU_BOOST
597 rnp->boost_tasks = NULL;
598 /*
599 * In case root is being boosted and leaf was not. Make sure
600 * that we boost the tasks blocking the current grace period
601 * in this case.
602 */
603 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
604 smp_mb__after_unlock_lock();
605 if (rnp_root->boost_tasks != NULL &&
606 rnp_root->boost_tasks != rnp_root->gp_tasks &&
607 rnp_root->boost_tasks != rnp_root->exp_tasks)
608 rnp_root->boost_tasks = rnp_root->gp_tasks;
609 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
610#endif /* #ifdef CONFIG_RCU_BOOST */
611
612 return retval;
613}
614
615#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 525#endif /* #ifdef CONFIG_HOTPLUG_CPU */
616 526
617/* 527/*
@@ -771,7 +681,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
771 681
772 raw_spin_lock_irqsave(&rnp->lock, flags); 682 raw_spin_lock_irqsave(&rnp->lock, flags);
773 smp_mb__after_unlock_lock(); 683 smp_mb__after_unlock_lock();
774 if (list_empty(&rnp->blkd_tasks)) { 684 if (!rcu_preempt_has_tasks(rnp)) {
775 raw_spin_unlock_irqrestore(&rnp->lock, flags); 685 raw_spin_unlock_irqrestore(&rnp->lock, flags);
776 } else { 686 } else {
777 rnp->exp_tasks = rnp->blkd_tasks.next; 687 rnp->exp_tasks = rnp->blkd_tasks.next;
@@ -933,15 +843,6 @@ static void __init rcu_bootup_announce(void)
933} 843}
934 844
935/* 845/*
936 * Return the number of RCU batches processed thus far for debug & stats.
937 */
938long rcu_batches_completed(void)
939{
940 return rcu_batches_completed_sched();
941}
942EXPORT_SYMBOL_GPL(rcu_batches_completed);
943
944/*
945 * Because preemptible RCU does not exist, we never have to check for 846 * Because preemptible RCU does not exist, we never have to check for
946 * CPUs being in quiescent states. 847 * CPUs being in quiescent states.
947 */ 848 */
@@ -960,11 +861,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
960 861
961#ifdef CONFIG_HOTPLUG_CPU 862#ifdef CONFIG_HOTPLUG_CPU
962 863
963/* Because preemptible RCU does not exist, no quieting of tasks. */ 864/*
964static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 865 * Because there is no preemptible RCU, there can be no readers blocked.
965 __releases(rnp->lock) 866 */
867static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
966{ 868{
967 raw_spin_unlock_irqrestore(&rnp->lock, flags); 869 return false;
968} 870}
969 871
970#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 872#endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -996,23 +898,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
996 WARN_ON_ONCE(rnp->qsmask); 898 WARN_ON_ONCE(rnp->qsmask);
997} 899}
998 900
999#ifdef CONFIG_HOTPLUG_CPU
1000
1001/*
1002 * Because preemptible RCU does not exist, it never needs to migrate
1003 * tasks that were blocked within RCU read-side critical sections, and
1004 * such non-existent tasks cannot possibly have been blocking the current
1005 * grace period.
1006 */
1007static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1008 struct rcu_node *rnp,
1009 struct rcu_data *rdp)
1010{
1011 return 0;
1012}
1013
1014#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1015
1016/* 901/*
1017 * Because preemptible RCU does not exist, it never has any callbacks 902 * Because preemptible RCU does not exist, it never has any callbacks
1018 * to check. 903 * to check.
@@ -1031,20 +916,6 @@ void synchronize_rcu_expedited(void)
1031} 916}
1032EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 917EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1033 918
1034#ifdef CONFIG_HOTPLUG_CPU
1035
1036/*
1037 * Because preemptible RCU does not exist, there is never any need to
1038 * report on tasks preempted in RCU read-side critical sections during
1039 * expedited RCU grace periods.
1040 */
1041static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1042 bool wake)
1043{
1044}
1045
1046#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1047
1048/* 919/*
1049 * Because preemptible RCU does not exist, rcu_barrier() is just 920 * Because preemptible RCU does not exist, rcu_barrier() is just
1050 * another name for rcu_barrier_sched(). 921 * another name for rcu_barrier_sched().
@@ -1080,7 +951,7 @@ void exit_rcu(void)
1080 951
1081static void rcu_initiate_boost_trace(struct rcu_node *rnp) 952static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1082{ 953{
1083 if (list_empty(&rnp->blkd_tasks)) 954 if (!rcu_preempt_has_tasks(rnp))
1084 rnp->n_balk_blkd_tasks++; 955 rnp->n_balk_blkd_tasks++;
1085 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) 956 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1086 rnp->n_balk_exp_gp_tasks++; 957 rnp->n_balk_exp_gp_tasks++;
@@ -1127,7 +998,8 @@ static int rcu_boost(struct rcu_node *rnp)
1127 struct task_struct *t; 998 struct task_struct *t;
1128 struct list_head *tb; 999 struct list_head *tb;
1129 1000
1130 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) 1001 if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
1002 ACCESS_ONCE(rnp->boost_tasks) == NULL)
1131 return 0; /* Nothing left to boost. */ 1003 return 0; /* Nothing left to boost. */
1132 1004
1133 raw_spin_lock_irqsave(&rnp->lock, flags); 1005 raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1175,15 +1047,11 @@ static int rcu_boost(struct rcu_node *rnp)
1175 */ 1047 */
1176 t = container_of(tb, struct task_struct, rcu_node_entry); 1048 t = container_of(tb, struct task_struct, rcu_node_entry);
1177 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); 1049 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1178 init_completion(&rnp->boost_completion);
1179 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1050 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1180 /* Lock only for side effect: boosts task t's priority. */ 1051 /* Lock only for side effect: boosts task t's priority. */
1181 rt_mutex_lock(&rnp->boost_mtx); 1052 rt_mutex_lock(&rnp->boost_mtx);
1182 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ 1053 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
1183 1054
1184 /* Wait for boostee to be done w/boost_mtx before reinitializing. */
1185 wait_for_completion(&rnp->boost_completion);
1186
1187 return ACCESS_ONCE(rnp->exp_tasks) != NULL || 1055 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1188 ACCESS_ONCE(rnp->boost_tasks) != NULL; 1056 ACCESS_ONCE(rnp->boost_tasks) != NULL;
1189} 1057}
@@ -1416,12 +1284,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1416 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) 1284 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1417 if ((mask & 0x1) && cpu != outgoingcpu) 1285 if ((mask & 0x1) && cpu != outgoingcpu)
1418 cpumask_set_cpu(cpu, cm); 1286 cpumask_set_cpu(cpu, cm);
1419 if (cpumask_weight(cm) == 0) { 1287 if (cpumask_weight(cm) == 0)
1420 cpumask_setall(cm); 1288 cpumask_setall(cm);
1421 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1422 cpumask_clear_cpu(cpu, cm);
1423 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1424 }
1425 set_cpus_allowed_ptr(t, cm); 1289 set_cpus_allowed_ptr(t, cm);
1426 free_cpumask_var(cm); 1290 free_cpumask_var(cm);
1427} 1291}
@@ -1446,12 +1310,8 @@ static void __init rcu_spawn_boost_kthreads(void)
1446 for_each_possible_cpu(cpu) 1310 for_each_possible_cpu(cpu)
1447 per_cpu(rcu_cpu_has_work, cpu) = 0; 1311 per_cpu(rcu_cpu_has_work, cpu) = 0;
1448 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); 1312 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1449 rnp = rcu_get_root(rcu_state_p); 1313 rcu_for_each_leaf_node(rcu_state_p, rnp)
1450 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); 1314 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1451 if (NUM_RCU_NODES > 1) {
1452 rcu_for_each_leaf_node(rcu_state_p, rnp)
1453 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1454 }
1455} 1315}
1456 1316
1457static void rcu_prepare_kthreads(int cpu) 1317static void rcu_prepare_kthreads(int cpu)
@@ -1605,7 +1465,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
1605 * completed since we last checked and there are 1465 * completed since we last checked and there are
1606 * callbacks not yet ready to invoke. 1466 * callbacks not yet ready to invoke.
1607 */ 1467 */
1608 if (rdp->completed != rnp->completed && 1468 if ((rdp->completed != rnp->completed ||
1469 unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
1609 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]) 1470 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1610 note_gp_changes(rsp, rdp); 1471 note_gp_changes(rsp, rdp);
1611 1472
@@ -1898,11 +1759,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1898 ticks_value = rsp->gpnum - rdp->gpnum; 1759 ticks_value = rsp->gpnum - rdp->gpnum;
1899 } 1760 }
1900 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1761 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1901 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", 1762 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
1902 cpu, ticks_value, ticks_title, 1763 cpu, ticks_value, ticks_title,
1903 atomic_read(&rdtp->dynticks) & 0xfff, 1764 atomic_read(&rdtp->dynticks) & 0xfff,
1904 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, 1765 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1905 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 1766 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1767 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
1906 fast_no_hz); 1768 fast_no_hz);
1907} 1769}
1908 1770
@@ -2056,9 +1918,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
2056static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) 1918static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2057{ 1919{
2058 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1920 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1921 unsigned long ret;
1922#ifdef CONFIG_PROVE_RCU
2059 struct rcu_head *rhp; 1923 struct rcu_head *rhp;
1924#endif /* #ifdef CONFIG_PROVE_RCU */
2060 1925
2061 /* No-CBs CPUs might have callbacks on any of three lists. */ 1926 /*
1927 * Check count of all no-CBs callbacks awaiting invocation.
1928 * There needs to be a barrier before this function is called,
1929 * but associated with a prior determination that no more
1930 * callbacks would be posted. In the worst case, the first
1931 * barrier in _rcu_barrier() suffices (but the caller cannot
1932 * necessarily rely on this, not a substitute for the caller
1933 * getting the concurrency design right!). There must also be
1934 * a barrier between the following load an posting of a callback
1935 * (if a callback is in fact needed). This is associated with an
1936 * atomic_inc() in the caller.
1937 */
1938 ret = atomic_long_read(&rdp->nocb_q_count);
1939
1940#ifdef CONFIG_PROVE_RCU
2062 rhp = ACCESS_ONCE(rdp->nocb_head); 1941 rhp = ACCESS_ONCE(rdp->nocb_head);
2063 if (!rhp) 1942 if (!rhp)
2064 rhp = ACCESS_ONCE(rdp->nocb_gp_head); 1943 rhp = ACCESS_ONCE(rdp->nocb_gp_head);
@@ -2072,8 +1951,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2072 cpu, rhp->func); 1951 cpu, rhp->func);
2073 WARN_ON_ONCE(1); 1952 WARN_ON_ONCE(1);
2074 } 1953 }
1954#endif /* #ifdef CONFIG_PROVE_RCU */
2075 1955
2076 return !!rhp; 1956 return !!ret;
2077} 1957}
2078 1958
2079/* 1959/*
@@ -2095,9 +1975,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2095 struct task_struct *t; 1975 struct task_struct *t;
2096 1976
2097 /* Enqueue the callback on the nocb list and update counts. */ 1977 /* Enqueue the callback on the nocb list and update counts. */
1978 atomic_long_add(rhcount, &rdp->nocb_q_count);
1979 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
2098 old_rhpp = xchg(&rdp->nocb_tail, rhtp); 1980 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2099 ACCESS_ONCE(*old_rhpp) = rhp; 1981 ACCESS_ONCE(*old_rhpp) = rhp;
2100 atomic_long_add(rhcount, &rdp->nocb_q_count);
2101 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); 1982 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2102 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */ 1983 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
2103 1984
@@ -2288,9 +2169,6 @@ wait_again:
2288 /* Move callbacks to wait-for-GP list, which is empty. */ 2169 /* Move callbacks to wait-for-GP list, which is empty. */
2289 ACCESS_ONCE(rdp->nocb_head) = NULL; 2170 ACCESS_ONCE(rdp->nocb_head) = NULL;
2290 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); 2171 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2291 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
2292 rdp->nocb_gp_count_lazy =
2293 atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2294 gotcbs = true; 2172 gotcbs = true;
2295 } 2173 }
2296 2174
@@ -2338,9 +2216,6 @@ wait_again:
2338 /* Append callbacks to follower's "done" list. */ 2216 /* Append callbacks to follower's "done" list. */
2339 tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail); 2217 tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
2340 *tail = rdp->nocb_gp_head; 2218 *tail = rdp->nocb_gp_head;
2341 atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
2342 atomic_long_add(rdp->nocb_gp_count_lazy,
2343 &rdp->nocb_follower_count_lazy);
2344 smp_mb__after_atomic(); /* Store *tail before wakeup. */ 2219 smp_mb__after_atomic(); /* Store *tail before wakeup. */
2345 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) { 2220 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2346 /* 2221 /*
@@ -2415,13 +2290,11 @@ static int rcu_nocb_kthread(void *arg)
2415 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); 2290 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
2416 ACCESS_ONCE(rdp->nocb_follower_head) = NULL; 2291 ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
2417 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); 2292 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
2418 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
2419 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
2420 rdp->nocb_p_count += c;
2421 rdp->nocb_p_count_lazy += cl;
2422 2293
2423 /* Each pass through the following loop invokes a callback. */ 2294 /* Each pass through the following loop invokes a callback. */
2424 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1); 2295 trace_rcu_batch_start(rdp->rsp->name,
2296 atomic_long_read(&rdp->nocb_q_count_lazy),
2297 atomic_long_read(&rdp->nocb_q_count), -1);
2425 c = cl = 0; 2298 c = cl = 0;
2426 while (list) { 2299 while (list) {
2427 next = list->next; 2300 next = list->next;
@@ -2443,9 +2316,9 @@ static int rcu_nocb_kthread(void *arg)
2443 list = next; 2316 list = next;
2444 } 2317 }
2445 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); 2318 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2446 ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c; 2319 smp_mb__before_atomic(); /* _add after CB invocation. */
2447 ACCESS_ONCE(rdp->nocb_p_count_lazy) = 2320 atomic_long_add(-c, &rdp->nocb_q_count);
2448 rdp->nocb_p_count_lazy - cl; 2321 atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
2449 rdp->n_nocbs_invoked += c; 2322 rdp->n_nocbs_invoked += c;
2450 } 2323 }
2451 return 0; 2324 return 0;
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 5cdc62e1beeb..fbb6240509ea 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -46,6 +46,8 @@
46#define RCU_TREE_NONCORE 46#define RCU_TREE_NONCORE
47#include "tree.h" 47#include "tree.h"
48 48
49DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
50
49static int r_open(struct inode *inode, struct file *file, 51static int r_open(struct inode *inode, struct file *file,
50 const struct seq_operations *op) 52 const struct seq_operations *op)
51{ 53{
@@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
115 117
116 if (!rdp->beenonline) 118 if (!rdp->beenonline)
117 return; 119 return;
118 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d", 120 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d",
119 rdp->cpu, 121 rdp->cpu,
120 cpu_is_offline(rdp->cpu) ? '!' : ' ', 122 cpu_is_offline(rdp->cpu) ? '!' : ' ',
121 ulong2long(rdp->completed), ulong2long(rdp->gpnum), 123 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
122 rdp->passed_quiesce, rdp->qs_pending); 124 rdp->passed_quiesce,
125 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
126 rdp->qs_pending);
123 seq_printf(m, " dt=%d/%llx/%d df=%lu", 127 seq_printf(m, " dt=%d/%llx/%d df=%lu",
124 atomic_read(&rdp->dynticks->dynticks), 128 atomic_read(&rdp->dynticks->dynticks),
125 rdp->dynticks->dynticks_nesting, 129 rdp->dynticks->dynticks_nesting,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e628cb11b560..5eab11d4b747 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1814,6 +1814,10 @@ void __dl_clear_params(struct task_struct *p)
1814 dl_se->dl_period = 0; 1814 dl_se->dl_period = 0;
1815 dl_se->flags = 0; 1815 dl_se->flags = 0;
1816 dl_se->dl_bw = 0; 1816 dl_se->dl_bw = 0;
1817
1818 dl_se->dl_throttled = 0;
1819 dl_se->dl_new = 1;
1820 dl_se->dl_yielded = 0;
1817} 1821}
1818 1822
1819/* 1823/*
@@ -1839,7 +1843,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
1839#endif 1843#endif
1840 1844
1841 RB_CLEAR_NODE(&p->dl.rb_node); 1845 RB_CLEAR_NODE(&p->dl.rb_node);
1842 hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1846 init_dl_task_timer(&p->dl);
1843 __dl_clear_params(p); 1847 __dl_clear_params(p);
1844 1848
1845 INIT_LIST_HEAD(&p->rt.run_list); 1849 INIT_LIST_HEAD(&p->rt.run_list);
@@ -2049,6 +2053,9 @@ static inline int dl_bw_cpus(int i)
2049 * allocated bandwidth to reflect the new situation. 2053 * allocated bandwidth to reflect the new situation.
2050 * 2054 *
2051 * This function is called while holding p's rq->lock. 2055 * This function is called while holding p's rq->lock.
2056 *
2057 * XXX we should delay bw change until the task's 0-lag point, see
2058 * __setparam_dl().
2052 */ 2059 */
2053static int dl_overflow(struct task_struct *p, int policy, 2060static int dl_overflow(struct task_struct *p, int policy,
2054 const struct sched_attr *attr) 2061 const struct sched_attr *attr)
@@ -3251,15 +3258,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3251{ 3258{
3252 struct sched_dl_entity *dl_se = &p->dl; 3259 struct sched_dl_entity *dl_se = &p->dl;
3253 3260
3254 init_dl_task_timer(dl_se);
3255 dl_se->dl_runtime = attr->sched_runtime; 3261 dl_se->dl_runtime = attr->sched_runtime;
3256 dl_se->dl_deadline = attr->sched_deadline; 3262 dl_se->dl_deadline = attr->sched_deadline;
3257 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3263 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3258 dl_se->flags = attr->sched_flags; 3264 dl_se->flags = attr->sched_flags;
3259 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3265 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3260 dl_se->dl_throttled = 0; 3266
3261 dl_se->dl_new = 1; 3267 /*
3262 dl_se->dl_yielded = 0; 3268 * Changing the parameters of a task is 'tricky' and we're not doing
3269 * the correct thing -- also see task_dead_dl() and switched_from_dl().
3270 *
3271 * What we SHOULD do is delay the bandwidth release until the 0-lag
3272 * point. This would include retaining the task_struct until that time
3273 * and change dl_overflow() to not immediately decrement the current
3274 * amount.
3275 *
3276 * Instead we retain the current runtime/deadline and let the new
3277 * parameters take effect after the current reservation period lapses.
3278 * This is safe (albeit pessimistic) because the 0-lag point is always
3279 * before the current scheduling deadline.
3280 *
3281 * We can still have temporary overloads because we do not delay the
3282 * change in bandwidth until that time; so admission control is
3283 * not on the safe side. It does however guarantee tasks will never
3284 * consume more than promised.
3285 */
3263} 3286}
3264 3287
3265/* 3288/*
@@ -4642,6 +4665,9 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
4642 struct dl_bw *cur_dl_b; 4665 struct dl_bw *cur_dl_b;
4643 unsigned long flags; 4666 unsigned long flags;
4644 4667
4668 if (!cpumask_weight(cur))
4669 return ret;
4670
4645 rcu_read_lock_sched(); 4671 rcu_read_lock_sched();
4646 cur_dl_b = dl_bw_of(cpumask_any(cur)); 4672 cur_dl_b = dl_bw_of(cpumask_any(cur));
4647 trial_cpus = cpumask_weight(trial); 4673 trial_cpus = cpumask_weight(trial);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b52092f2636d..726470d47f87 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1094,6 +1094,7 @@ static void task_dead_dl(struct task_struct *p)
1094 * Since we are TASK_DEAD we won't slip out of the domain! 1094 * Since we are TASK_DEAD we won't slip out of the domain!
1095 */ 1095 */
1096 raw_spin_lock_irq(&dl_b->lock); 1096 raw_spin_lock_irq(&dl_b->lock);
1097 /* XXX we should retain the bw until 0-lag */
1097 dl_b->total_bw -= p->dl.dl_bw; 1098 dl_b->total_bw -= p->dl.dl_bw;
1098 raw_spin_unlock_irq(&dl_b->lock); 1099 raw_spin_unlock_irq(&dl_b->lock);
1099 1100
@@ -1614,8 +1615,8 @@ static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
1614 1615
1615static void switched_from_dl(struct rq *rq, struct task_struct *p) 1616static void switched_from_dl(struct rq *rq, struct task_struct *p)
1616{ 1617{
1618 /* XXX we should retain the bw until 0-lag */
1617 cancel_dl_timer(rq, p); 1619 cancel_dl_timer(rq, p);
1618
1619 __dl_clear_params(p); 1620 __dl_clear_params(p);
1620 1621
1621 /* 1622 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40667cbf371b..fe331fc391f5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1730,7 +1730,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
1730 nodes = node_online_map; 1730 nodes = node_online_map;
1731 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { 1731 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1732 unsigned long max_faults = 0; 1732 unsigned long max_faults = 0;
1733 nodemask_t max_group; 1733 nodemask_t max_group = NODE_MASK_NONE;
1734 int a, b; 1734 int a, b;
1735 1735
1736 /* Are there nodes at this distance from each other? */ 1736 /* Are there nodes at this distance from each other? */
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index f032fb5284e3..40190f28db35 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -280,6 +280,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
280 unsigned int cpu; 280 unsigned int cpu;
281 int ret = 0; 281 int ret = 0;
282 282
283 get_online_cpus();
283 mutex_lock(&smpboot_threads_lock); 284 mutex_lock(&smpboot_threads_lock);
284 for_each_online_cpu(cpu) { 285 for_each_online_cpu(cpu) {
285 ret = __smpboot_create_thread(plug_thread, cpu); 286 ret = __smpboot_create_thread(plug_thread, cpu);
@@ -292,6 +293,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
292 list_add(&plug_thread->list, &hotplug_threads); 293 list_add(&plug_thread->list, &hotplug_threads);
293out: 294out:
294 mutex_unlock(&smpboot_threads_lock); 295 mutex_unlock(&smpboot_threads_lock);
296 put_online_cpus();
295 return ret; 297 return ret;
296} 298}
297EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread); 299EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b9ae81643f1d..479e4436f787 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -660,9 +660,8 @@ static void run_ksoftirqd(unsigned int cpu)
660 * in the task stack here. 660 * in the task stack here.
661 */ 661 */
662 __do_softirq(); 662 __do_softirq();
663 rcu_note_context_switch();
664 local_irq_enable(); 663 local_irq_enable();
665 cond_resched(); 664 cond_resched_rcu_qs();
666 return; 665 return;
667 } 666 }
668 local_irq_enable(); 667 local_irq_enable();
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 37e50aadd471..d8c724cda37b 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -122,7 +122,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
122 mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai); 122 mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
123 boot = ktime_add(mono, off_boot); 123 boot = ktime_add(mono, off_boot);
124 xtim = ktime_add(mono, off_real); 124 xtim = ktime_add(mono, off_real);
125 tai = ktime_add(xtim, off_tai); 125 tai = ktime_add(mono, off_tai);
126 126
127 base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; 127 base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
128 base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; 128 base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5f2ce616c046..a2ca213c71ca 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST
1215 tristate "torture tests for RCU" 1215 tristate "torture tests for RCU"
1216 depends on DEBUG_KERNEL 1216 depends on DEBUG_KERNEL
1217 select TORTURE_TEST 1217 select TORTURE_TEST
1218 select SRCU
1218 default n 1219 default n
1219 help 1220 help
1220 This option provides a kernel module that runs torture tests 1221 This option provides a kernel module that runs torture tests
@@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT
1257config RCU_CPU_STALL_INFO 1258config RCU_CPU_STALL_INFO
1258 bool "Print additional diagnostics on RCU CPU stall" 1259 bool "Print additional diagnostics on RCU CPU stall"
1259 depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL 1260 depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
1260 default n 1261 default y
1261 help 1262 help
1262 For each stalled CPU that is aware of the current RCU grace 1263 For each stalled CPU that is aware of the current RCU grace
1263 period, print out additional per-CPU diagnostic information 1264 period, print out additional per-CPU diagnostic information
diff --git a/lib/checksum.c b/lib/checksum.c
index 129775eb6de6..8b39e86dbab5 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
181EXPORT_SYMBOL(csum_partial_copy); 181EXPORT_SYMBOL(csum_partial_copy);
182 182
183#ifndef csum_tcpudp_nofold 183#ifndef csum_tcpudp_nofold
184static inline u32 from64to32(u64 x)
185{
186 /* add up 32-bit and 32-bit for 32+c bit */
187 x = (x & 0xffffffff) + (x >> 32);
188 /* add up carry.. */
189 x = (x & 0xffffffff) + (x >> 32);
190 return (u32)x;
191}
192
184__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 193__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
185 unsigned short len, 194 unsigned short len,
186 unsigned short proto, 195 unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
195#else 204#else
196 s += (proto + len) << 8; 205 s += (proto + len) << 8;
197#endif 206#endif
198 s += (s >> 32); 207 return (__force __wsum)from64to32(s);
199 return (__force __wsum)s;
200} 208}
201EXPORT_SYMBOL(csum_tcpudp_nofold); 209EXPORT_SYMBOL(csum_tcpudp_nofold);
202#endif 210#endif
diff --git a/mm/Kconfig b/mm/Kconfig
index 1d1ae6b078fd..4395b12869c8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -325,6 +325,7 @@ config VIRT_TO_BUS
325 325
326config MMU_NOTIFIER 326config MMU_NOTIFIER
327 bool 327 bool
328 select SRCU
328 329
329config KSM 330config KSM
330 bool "Enable KSM for page merging" 331 bool "Enable KSM for page merging"
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 683b4782019b..2f6893c2f01b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5773,7 +5773,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
5773 * mem_cgroup_migrate - migrate a charge to another page 5773 * mem_cgroup_migrate - migrate a charge to another page
5774 * @oldpage: currently charged page 5774 * @oldpage: currently charged page
5775 * @newpage: page to transfer the charge to 5775 * @newpage: page to transfer the charge to
5776 * @lrucare: both pages might be on the LRU already 5776 * @lrucare: either or both pages might be on the LRU already
5777 * 5777 *
5778 * Migrate the charge from @oldpage to @newpage. 5778 * Migrate the charge from @oldpage to @newpage.
5779 * 5779 *
diff --git a/mm/nommu.c b/mm/nommu.c
index b51eadf6d952..28bd8c4dff6f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -59,6 +59,7 @@
59#endif 59#endif
60 60
61void *high_memory; 61void *high_memory;
62EXPORT_SYMBOL(high_memory);
62struct page *mem_map; 63struct page *mem_map;
63unsigned long max_mapnr; 64unsigned long max_mapnr;
64unsigned long highest_memmap_pfn; 65unsigned long highest_memmap_pfn;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index ad83195521f2..b264bda46e1b 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end,
199 */ 199 */
200 if ((vma->vm_start <= addr) && 200 if ((vma->vm_start <= addr) &&
201 (vma->vm_flags & VM_PFNMAP)) { 201 (vma->vm_flags & VM_PFNMAP)) {
202 next = vma->vm_end; 202 if (walk->pte_hole)
203 err = walk->pte_hole(addr, next, walk);
204 if (err)
205 break;
203 pgd = pgd_offset(walk->mm, next); 206 pgd = pgd_offset(walk->mm, next);
204 continue; 207 continue;
205 } 208 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 73ba1df7c8ba..993e6ba689cc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1013,7 +1013,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1013 */ 1013 */
1014 oldpage = newpage; 1014 oldpage = newpage;
1015 } else { 1015 } else {
1016 mem_cgroup_migrate(oldpage, newpage, false); 1016 mem_cgroup_migrate(oldpage, newpage, true);
1017 lru_cache_add_anon(newpage); 1017 lru_cache_add_anon(newpage);
1018 *pagep = newpage; 1018 *pagep = newpage;
1019 } 1019 }
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index b0330aecbf97..3244aead0926 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -265,22 +265,12 @@ out:
265 data[NFT_REG_VERDICT].verdict = NF_DROP; 265 data[NFT_REG_VERDICT].verdict = NF_DROP;
266} 266}
267 267
268static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) 268static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
269 const struct nft_expr *expr,
270 const struct nft_data **data)
269{ 271{
270 struct nft_base_chain *basechain; 272 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
271 273 (1 << NF_BR_LOCAL_IN));
272 if (chain->flags & NFT_BASE_CHAIN) {
273 basechain = nft_base_chain(chain);
274
275 switch (basechain->ops[0].hooknum) {
276 case NF_BR_PRE_ROUTING:
277 case NF_BR_LOCAL_IN:
278 break;
279 default:
280 return -EOPNOTSUPP;
281 }
282 }
283 return 0;
284} 274}
285 275
286static int nft_reject_bridge_init(const struct nft_ctx *ctx, 276static int nft_reject_bridge_init(const struct nft_ctx *ctx,
@@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
290 struct nft_reject *priv = nft_expr_priv(expr); 280 struct nft_reject *priv = nft_expr_priv(expr);
291 int icmp_code, err; 281 int icmp_code, err;
292 282
293 err = nft_reject_bridge_validate_hooks(ctx->chain); 283 err = nft_reject_bridge_validate(ctx, expr, NULL);
294 if (err < 0) 284 if (err < 0)
295 return err; 285 return err;
296 286
@@ -341,13 +331,6 @@ nla_put_failure:
341 return -1; 331 return -1;
342} 332}
343 333
344static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
345 const struct nft_expr *expr,
346 const struct nft_data **data)
347{
348 return nft_reject_bridge_validate_hooks(ctx->chain);
349}
350
351static struct nft_expr_type nft_reject_bridge_type; 334static struct nft_expr_type nft_reject_bridge_type;
352static const struct nft_expr_ops nft_reject_bridge_ops = { 335static const struct nft_expr_ops nft_reject_bridge_ops = {
353 .type = &nft_reject_bridge_type, 336 .type = &nft_reject_bridge_type,
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4589ff67bfa9..67a4a36febd1 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
470 ASSERT_RTNL(); 470 ASSERT_RTNL();
471 caifdev = netdev_priv(dev); 471 caifdev = netdev_priv(dev);
472 caif_netlink_parms(data, &caifdev->conn_req); 472 caif_netlink_parms(data, &caifdev->conn_req);
473 dev_net_set(caifdev->netdev, src_net);
474 473
475 ret = register_netdevice(dev); 474 ret = register_netdevice(dev);
476 if (ret) 475 if (ret)
diff --git a/net/core/dev.c b/net/core/dev.c
index 171420e75b03..7fe82929f509 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2352,7 +2352,6 @@ EXPORT_SYMBOL(skb_checksum_help);
2352 2352
2353__be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2353__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2354{ 2354{
2355 unsigned int vlan_depth = skb->mac_len;
2356 __be16 type = skb->protocol; 2355 __be16 type = skb->protocol;
2357 2356
2358 /* Tunnel gso handlers can set protocol to ethernet. */ 2357 /* Tunnel gso handlers can set protocol to ethernet. */
@@ -2366,35 +2365,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2366 type = eth->h_proto; 2365 type = eth->h_proto;
2367 } 2366 }
2368 2367
2369 /* if skb->protocol is 802.1Q/AD then the header should already be 2368 return __vlan_get_protocol(skb, type, depth);
2370 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2371 * ETH_HLEN otherwise
2372 */
2373 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2374 if (vlan_depth) {
2375 if (WARN_ON(vlan_depth < VLAN_HLEN))
2376 return 0;
2377 vlan_depth -= VLAN_HLEN;
2378 } else {
2379 vlan_depth = ETH_HLEN;
2380 }
2381 do {
2382 struct vlan_hdr *vh;
2383
2384 if (unlikely(!pskb_may_pull(skb,
2385 vlan_depth + VLAN_HLEN)))
2386 return 0;
2387
2388 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2389 type = vh->h_vlan_encapsulated_proto;
2390 vlan_depth += VLAN_HLEN;
2391 } while (type == htons(ETH_P_8021Q) ||
2392 type == htons(ETH_P_8021AD));
2393 }
2394
2395 *depth = vlan_depth;
2396
2397 return type;
2398} 2369}
2399 2370
2400/** 2371/**
@@ -5323,7 +5294,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
5323} 5294}
5324EXPORT_SYMBOL(netdev_upper_dev_unlink); 5295EXPORT_SYMBOL(netdev_upper_dev_unlink);
5325 5296
5326void netdev_adjacent_add_links(struct net_device *dev) 5297static void netdev_adjacent_add_links(struct net_device *dev)
5327{ 5298{
5328 struct netdev_adjacent *iter; 5299 struct netdev_adjacent *iter;
5329 5300
@@ -5348,7 +5319,7 @@ void netdev_adjacent_add_links(struct net_device *dev)
5348 } 5319 }
5349} 5320}
5350 5321
5351void netdev_adjacent_del_links(struct net_device *dev) 5322static void netdev_adjacent_del_links(struct net_device *dev)
5352{ 5323{
5353 struct netdev_adjacent *iter; 5324 struct netdev_adjacent *iter;
5354 5325
@@ -6656,7 +6627,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6656 if (!queue) 6627 if (!queue)
6657 return NULL; 6628 return NULL;
6658 netdev_init_one_queue(dev, queue, NULL); 6629 netdev_init_one_queue(dev, queue, NULL);
6659 queue->qdisc = &noop_qdisc; 6630 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
6660 queue->qdisc_sleeping = &noop_qdisc; 6631 queue->qdisc_sleeping = &noop_qdisc;
6661 rcu_assign_pointer(dev->ingress_queue, queue); 6632 rcu_assign_pointer(dev->ingress_queue, queue);
6662#endif 6633#endif
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9cf6fe9ddc0c..446cbaf81185 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2895,12 +2895,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
2895 goto errout; 2895 goto errout;
2896 } 2896 }
2897 2897
2898 if (!skb->len)
2899 goto errout;
2900
2898 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 2901 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
2899 return 0; 2902 return 0;
2900errout: 2903errout:
2901 WARN_ON(err == -EMSGSIZE); 2904 WARN_ON(err == -EMSGSIZE);
2902 kfree_skb(skb); 2905 kfree_skb(skb);
2903 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 2906 if (err)
2907 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2904 return err; 2908 return err;
2905} 2909}
2906 2910
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b50861b22b6b..c373c0708d97 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1506,23 +1506,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1506/* 1506/*
1507 * Generic function to send a packet as reply to another packet. 1507 * Generic function to send a packet as reply to another packet.
1508 * Used to send some TCP resets/acks so far. 1508 * Used to send some TCP resets/acks so far.
1509 *
1510 * Use a fake percpu inet socket to avoid false sharing and contention.
1511 */ 1509 */
1512static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { 1510void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1513 .sk = {
1514 .__sk_common = {
1515 .skc_refcnt = ATOMIC_INIT(1),
1516 },
1517 .sk_wmem_alloc = ATOMIC_INIT(1),
1518 .sk_allocation = GFP_ATOMIC,
1519 .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
1520 },
1521 .pmtudisc = IP_PMTUDISC_WANT,
1522 .uc_ttl = -1,
1523};
1524
1525void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1526 const struct ip_options *sopt, 1511 const struct ip_options *sopt,
1527 __be32 daddr, __be32 saddr, 1512 __be32 daddr, __be32 saddr,
1528 const struct ip_reply_arg *arg, 1513 const struct ip_reply_arg *arg,
@@ -1532,9 +1517,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1532 struct ipcm_cookie ipc; 1517 struct ipcm_cookie ipc;
1533 struct flowi4 fl4; 1518 struct flowi4 fl4;
1534 struct rtable *rt = skb_rtable(skb); 1519 struct rtable *rt = skb_rtable(skb);
1520 struct net *net = sock_net(sk);
1535 struct sk_buff *nskb; 1521 struct sk_buff *nskb;
1536 struct sock *sk;
1537 struct inet_sock *inet;
1538 int err; 1522 int err;
1539 1523
1540 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) 1524 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1565,15 +1549,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1565 if (IS_ERR(rt)) 1549 if (IS_ERR(rt))
1566 return; 1550 return;
1567 1551
1568 inet = &get_cpu_var(unicast_sock); 1552 inet_sk(sk)->tos = arg->tos;
1569 1553
1570 inet->tos = arg->tos;
1571 sk = &inet->sk;
1572 sk->sk_priority = skb->priority; 1554 sk->sk_priority = skb->priority;
1573 sk->sk_protocol = ip_hdr(skb)->protocol; 1555 sk->sk_protocol = ip_hdr(skb)->protocol;
1574 sk->sk_bound_dev_if = arg->bound_dev_if; 1556 sk->sk_bound_dev_if = arg->bound_dev_if;
1575 sock_net_set(sk, net);
1576 __skb_queue_head_init(&sk->sk_write_queue);
1577 sk->sk_sndbuf = sysctl_wmem_default; 1557 sk->sk_sndbuf = sysctl_wmem_default;
1578 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1558 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1579 len, 0, &ipc, &rt, MSG_DONTWAIT); 1559 len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1589,13 +1569,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1589 arg->csumoffset) = csum_fold(csum_add(nskb->csum, 1569 arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1590 arg->csum)); 1570 arg->csum));
1591 nskb->ip_summed = CHECKSUM_NONE; 1571 nskb->ip_summed = CHECKSUM_NONE;
1592 skb_orphan(nskb);
1593 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); 1572 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
1594 ip_push_pending_frames(sk, &fl4); 1573 ip_push_pending_frames(sk, &fl4);
1595 } 1574 }
1596out: 1575out:
1597 put_cpu_var(unicast_sock);
1598
1599 ip_rt_put(rt); 1576 ip_rt_put(rt);
1600} 1577}
1601 1578
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d58dd0ec3e53..52e1f2bf0ca2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
966 if (dst->dev->mtu < mtu) 966 if (dst->dev->mtu < mtu)
967 return; 967 return;
968 968
969 if (rt->rt_pmtu && rt->rt_pmtu < mtu)
970 return;
971
969 if (mtu < ip_rt_min_pmtu) 972 if (mtu < ip_rt_min_pmtu)
970 mtu = ip_rt_min_pmtu; 973 mtu = ip_rt_min_pmtu;
971 974
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index bb395d46a389..c037644eafb7 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
150 tcp_slow_start(tp, acked); 150 tcp_slow_start(tp, acked);
151 else { 151 else {
152 bictcp_update(ca, tp->snd_cwnd); 152 bictcp_update(ca, tp->snd_cwnd);
153 tcp_cong_avoid_ai(tp, ca->cnt); 153 tcp_cong_avoid_ai(tp, ca->cnt, 1);
154 } 154 }
155} 155}
156 156
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 27ead0dd16bc..8670e68e2ce6 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,26 +291,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
291 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and 291 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
292 * returns the leftover acks to adjust cwnd in congestion avoidance mode. 292 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
293 */ 293 */
294void tcp_slow_start(struct tcp_sock *tp, u32 acked) 294u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
295{ 295{
296 u32 cwnd = tp->snd_cwnd + acked; 296 u32 cwnd = tp->snd_cwnd + acked;
297 297
298 if (cwnd > tp->snd_ssthresh) 298 if (cwnd > tp->snd_ssthresh)
299 cwnd = tp->snd_ssthresh + 1; 299 cwnd = tp->snd_ssthresh + 1;
300 acked -= cwnd - tp->snd_cwnd;
300 tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); 301 tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
302
303 return acked;
301} 304}
302EXPORT_SYMBOL_GPL(tcp_slow_start); 305EXPORT_SYMBOL_GPL(tcp_slow_start);
303 306
304/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */ 307/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
305void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w) 308 * for every packet that was ACKed.
309 */
310void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
306{ 311{
312 tp->snd_cwnd_cnt += acked;
307 if (tp->snd_cwnd_cnt >= w) { 313 if (tp->snd_cwnd_cnt >= w) {
308 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 314 u32 delta = tp->snd_cwnd_cnt / w;
309 tp->snd_cwnd++; 315
310 tp->snd_cwnd_cnt = 0; 316 tp->snd_cwnd_cnt -= delta * w;
311 } else { 317 tp->snd_cwnd += delta;
312 tp->snd_cwnd_cnt++;
313 } 318 }
319 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
314} 320}
315EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai); 321EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
316 322
@@ -329,11 +335,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
329 return; 335 return;
330 336
331 /* In "safe" area, increase. */ 337 /* In "safe" area, increase. */
332 if (tp->snd_cwnd <= tp->snd_ssthresh) 338 if (tp->snd_cwnd <= tp->snd_ssthresh) {
333 tcp_slow_start(tp, acked); 339 acked = tcp_slow_start(tp, acked);
340 if (!acked)
341 return;
342 }
334 /* In dangerous area, increase slowly. */ 343 /* In dangerous area, increase slowly. */
335 else 344 tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
336 tcp_cong_avoid_ai(tp, tp->snd_cwnd);
337} 345}
338EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 346EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
339 347
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 6b6002416a73..4b276d1ed980 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -93,9 +93,7 @@ struct bictcp {
93 u32 epoch_start; /* beginning of an epoch */ 93 u32 epoch_start; /* beginning of an epoch */
94 u32 ack_cnt; /* number of acks */ 94 u32 ack_cnt; /* number of acks */
95 u32 tcp_cwnd; /* estimated tcp cwnd */ 95 u32 tcp_cwnd; /* estimated tcp cwnd */
96#define ACK_RATIO_SHIFT 4 96 u16 unused;
97#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
98 u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
99 u8 sample_cnt; /* number of samples to decide curr_rtt */ 97 u8 sample_cnt; /* number of samples to decide curr_rtt */
100 u8 found; /* the exit point is found? */ 98 u8 found; /* the exit point is found? */
101 u32 round_start; /* beginning of each round */ 99 u32 round_start; /* beginning of each round */
@@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca)
114 ca->bic_K = 0; 112 ca->bic_K = 0;
115 ca->delay_min = 0; 113 ca->delay_min = 0;
116 ca->epoch_start = 0; 114 ca->epoch_start = 0;
117 ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
118 ca->ack_cnt = 0; 115 ca->ack_cnt = 0;
119 ca->tcp_cwnd = 0; 116 ca->tcp_cwnd = 0;
120 ca->found = 0; 117 ca->found = 0;
@@ -205,23 +202,30 @@ static u32 cubic_root(u64 a)
205/* 202/*
206 * Compute congestion window to use. 203 * Compute congestion window to use.
207 */ 204 */
208static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 205static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
209{ 206{
210 u32 delta, bic_target, max_cnt; 207 u32 delta, bic_target, max_cnt;
211 u64 offs, t; 208 u64 offs, t;
212 209
213 ca->ack_cnt++; /* count the number of ACKs */ 210 ca->ack_cnt += acked; /* count the number of ACKed packets */
214 211
215 if (ca->last_cwnd == cwnd && 212 if (ca->last_cwnd == cwnd &&
216 (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32) 213 (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
217 return; 214 return;
218 215
216 /* The CUBIC function can update ca->cnt at most once per jiffy.
217 * On all cwnd reduction events, ca->epoch_start is set to 0,
218 * which will force a recalculation of ca->cnt.
219 */
220 if (ca->epoch_start && tcp_time_stamp == ca->last_time)
221 goto tcp_friendliness;
222
219 ca->last_cwnd = cwnd; 223 ca->last_cwnd = cwnd;
220 ca->last_time = tcp_time_stamp; 224 ca->last_time = tcp_time_stamp;
221 225
222 if (ca->epoch_start == 0) { 226 if (ca->epoch_start == 0) {
223 ca->epoch_start = tcp_time_stamp; /* record beginning */ 227 ca->epoch_start = tcp_time_stamp; /* record beginning */
224 ca->ack_cnt = 1; /* start counting */ 228 ca->ack_cnt = acked; /* start counting */
225 ca->tcp_cwnd = cwnd; /* syn with cubic */ 229 ca->tcp_cwnd = cwnd; /* syn with cubic */
226 230
227 if (ca->last_max_cwnd <= cwnd) { 231 if (ca->last_max_cwnd <= cwnd) {
@@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
283 if (ca->last_max_cwnd == 0 && ca->cnt > 20) 287 if (ca->last_max_cwnd == 0 && ca->cnt > 20)
284 ca->cnt = 20; /* increase cwnd 5% per RTT */ 288 ca->cnt = 20; /* increase cwnd 5% per RTT */
285 289
290tcp_friendliness:
286 /* TCP Friendly */ 291 /* TCP Friendly */
287 if (tcp_friendliness) { 292 if (tcp_friendliness) {
288 u32 scale = beta_scale; 293 u32 scale = beta_scale;
@@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
301 } 306 }
302 } 307 }
303 308
304 ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
305 if (ca->cnt == 0) /* cannot be zero */ 309 if (ca->cnt == 0) /* cannot be zero */
306 ca->cnt = 1; 310 ca->cnt = 1;
307} 311}
@@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
317 if (tp->snd_cwnd <= tp->snd_ssthresh) { 321 if (tp->snd_cwnd <= tp->snd_ssthresh) {
318 if (hystart && after(ack, ca->end_seq)) 322 if (hystart && after(ack, ca->end_seq))
319 bictcp_hystart_reset(sk); 323 bictcp_hystart_reset(sk);
320 tcp_slow_start(tp, acked); 324 acked = tcp_slow_start(tp, acked);
321 } else { 325 if (!acked)
322 bictcp_update(ca, tp->snd_cwnd); 326 return;
323 tcp_cong_avoid_ai(tp, ca->cnt);
324 } 327 }
328 bictcp_update(ca, tp->snd_cwnd, acked);
329 tcp_cong_avoid_ai(tp, ca->cnt, acked);
325} 330}
326 331
327static u32 bictcp_recalc_ssthresh(struct sock *sk) 332static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay)
411 */ 416 */
412static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) 417static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
413{ 418{
414 const struct inet_connection_sock *icsk = inet_csk(sk);
415 const struct tcp_sock *tp = tcp_sk(sk); 419 const struct tcp_sock *tp = tcp_sk(sk);
416 struct bictcp *ca = inet_csk_ca(sk); 420 struct bictcp *ca = inet_csk_ca(sk);
417 u32 delay; 421 u32 delay;
418 422
419 if (icsk->icsk_ca_state == TCP_CA_Open) {
420 u32 ratio = ca->delayed_ack;
421
422 ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
423 ratio += cnt;
424
425 ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
426 }
427
428 /* Some calls are for duplicates without timetamps */ 423 /* Some calls are for duplicates without timetamps */
429 if (rtt_us < 0) 424 if (rtt_us < 0)
430 return; 425 return;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a3f72d7fc06c..d22f54482bab 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
683 arg.bound_dev_if = sk->sk_bound_dev_if; 683 arg.bound_dev_if = sk->sk_bound_dev_if;
684 684
685 arg.tos = ip_hdr(skb)->tos; 685 arg.tos = ip_hdr(skb)->tos;
686 ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, 686 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
687 skb, &TCP_SKB_CB(skb)->header.h4.opt,
687 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 688 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
688 &arg, arg.iov[0].iov_len); 689 &arg, arg.iov[0].iov_len);
689 690
@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
767 if (oif) 768 if (oif)
768 arg.bound_dev_if = oif; 769 arg.bound_dev_if = oif;
769 arg.tos = tos; 770 arg.tos = tos;
770 ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, 771 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
772 skb, &TCP_SKB_CB(skb)->header.h4.opt,
771 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 773 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
772 &arg, arg.iov[0].iov_len); 774 &arg, arg.iov[0].iov_len);
773 775
@@ -2428,14 +2430,39 @@ struct proto tcp_prot = {
2428}; 2430};
2429EXPORT_SYMBOL(tcp_prot); 2431EXPORT_SYMBOL(tcp_prot);
2430 2432
2433static void __net_exit tcp_sk_exit(struct net *net)
2434{
2435 int cpu;
2436
2437 for_each_possible_cpu(cpu)
2438 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2439 free_percpu(net->ipv4.tcp_sk);
2440}
2441
2431static int __net_init tcp_sk_init(struct net *net) 2442static int __net_init tcp_sk_init(struct net *net)
2432{ 2443{
2444 int res, cpu;
2445
2446 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2447 if (!net->ipv4.tcp_sk)
2448 return -ENOMEM;
2449
2450 for_each_possible_cpu(cpu) {
2451 struct sock *sk;
2452
2453 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2454 IPPROTO_TCP, net);
2455 if (res)
2456 goto fail;
2457 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2458 }
2433 net->ipv4.sysctl_tcp_ecn = 2; 2459 net->ipv4.sysctl_tcp_ecn = 2;
2434 return 0; 2460 return 0;
2435}
2436 2461
2437static void __net_exit tcp_sk_exit(struct net *net) 2462fail:
2438{ 2463 tcp_sk_exit(net);
2464
2465 return res;
2439} 2466}
2440 2467
2441static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) 2468static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 6824afb65d93..333bcb2415ff 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
25 if (tp->snd_cwnd <= tp->snd_ssthresh) 25 if (tp->snd_cwnd <= tp->snd_ssthresh)
26 tcp_slow_start(tp, acked); 26 tcp_slow_start(tp, acked);
27 else 27 else
28 tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)); 28 tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
29 1);
29} 30}
30 31
31static u32 tcp_scalable_ssthresh(struct sock *sk) 32static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index a4d2d2d88dca..112151eeee45 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
159 /* In the "non-congestive state", increase cwnd 159 /* In the "non-congestive state", increase cwnd
160 * every rtt. 160 * every rtt.
161 */ 161 */
162 tcp_cong_avoid_ai(tp, tp->snd_cwnd); 162 tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
163 } else { 163 } else {
164 /* In the "congestive state", increase cwnd 164 /* In the "congestive state", increase cwnd
165 * every other rtt. 165 * every other rtt.
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index cd7273218598..17d35662930d 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
92 92
93 } else { 93 } else {
94 /* Reno */ 94 /* Reno */
95 tcp_cong_avoid_ai(tp, tp->snd_cwnd); 95 tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
96 } 96 }
97 97
98 /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt. 98 /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 13cda4c6313b..01ccc28a686f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
417 if (code == ICMPV6_HDR_FIELD) 417 if (code == ICMPV6_HDR_FIELD)
418 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); 418 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
419 419
420 if (teli && teli == info - 2) { 420 if (teli && teli == be32_to_cpu(info) - 2) {
421 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 421 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
422 if (tel->encap_limit == 0) { 422 if (tel->encap_limit == 0) {
423 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 423 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
@@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
429 } 429 }
430 break; 430 break;
431 case ICMPV6_PKT_TOOBIG: 431 case ICMPV6_PKT_TOOBIG:
432 mtu = info - offset; 432 mtu = be32_to_cpu(info) - offset;
433 if (mtu < IPV6_MIN_MTU) 433 if (mtu < IPV6_MIN_MTU)
434 mtu = IPV6_MIN_MTU; 434 mtu = IPV6_MIN_MTU;
435 t->dev->mtu = mtu; 435 t->dev->mtu = mtu;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ce69a12ae48c..d28f2a2efb32 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
537 skb_copy_secmark(to, from); 537 skb_copy_secmark(to, from);
538} 538}
539 539
540static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
541{
542 static u32 ip6_idents_hashrnd __read_mostly;
543 u32 hash, id;
544
545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
546
547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
548 hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
549
550 id = ip_idents_reserve(hash, 1);
551 fhdr->identification = htonl(id);
552}
553
554int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 540int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
555{ 541{
556 struct sk_buff *frag; 542 struct sk_buff *frag;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 97f41a3e68d9..54520a0bd5e3 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -9,6 +9,24 @@
9#include <net/addrconf.h> 9#include <net/addrconf.h>
10#include <net/secure_seq.h> 10#include <net/secure_seq.h>
11 11
12u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
13{
14 u32 hash, id;
15
16 hash = __ipv6_addr_jhash(dst, hashrnd);
17 hash = __ipv6_addr_jhash(src, hash);
18
19 /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
20 * set the hight order instead thus minimizing possible future
21 * collisions.
22 */
23 id = ip_idents_reserve(hash, 1);
24 if (unlikely(!id))
25 id = 1 << 31;
26
27 return id;
28}
29
12/* This function exists only for tap drivers that must support broken 30/* This function exists only for tap drivers that must support broken
13 * clients requesting UFO without specifying an IPv6 fragment ID. 31 * clients requesting UFO without specifying an IPv6 fragment ID.
14 * 32 *
@@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
22 static u32 ip6_proxy_idents_hashrnd __read_mostly; 40 static u32 ip6_proxy_idents_hashrnd __read_mostly;
23 struct in6_addr buf[2]; 41 struct in6_addr buf[2];
24 struct in6_addr *addrs; 42 struct in6_addr *addrs;
25 u32 hash, id; 43 u32 id;
26 44
27 addrs = skb_header_pointer(skb, 45 addrs = skb_header_pointer(skb,
28 skb_network_offset(skb) + 46 skb_network_offset(skb) +
@@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
34 net_get_random_once(&ip6_proxy_idents_hashrnd, 52 net_get_random_once(&ip6_proxy_idents_hashrnd,
35 sizeof(ip6_proxy_idents_hashrnd)); 53 sizeof(ip6_proxy_idents_hashrnd));
36 54
37 hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); 55 id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
38 hash = __ipv6_addr_jhash(&addrs[0], hash); 56 &addrs[1], &addrs[0]);
39 57 skb_shinfo(skb)->ip6_frag_id = id;
40 id = ip_idents_reserve(hash, 1);
41 skb_shinfo(skb)->ip6_frag_id = htonl(id);
42} 58}
43EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); 59EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
44 60
61void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
62{
63 static u32 ip6_idents_hashrnd __read_mostly;
64 u32 id;
65
66 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
67
68 id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
69 &rt->rt6i_src.addr);
70 fhdr->identification = htonl(id);
71}
72EXPORT_SYMBOL(ipv6_select_ident);
73
45int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 74int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
46{ 75{
47 u16 offset = sizeof(struct ipv6hdr); 76 u16 offset = sizeof(struct ipv6hdr);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 213546bd6d5d..cdbfe5af6187 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[],
1506 1506
1507 if (data[IFLA_IPTUN_ENCAP_SPORT]) { 1507 if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1508 ret = true; 1508 ret = true;
1509 ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); 1509 ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1510 } 1510 }
1511 1511
1512 if (data[IFLA_IPTUN_ENCAP_DPORT]) { 1512 if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1513 ret = true; 1513 ret = true;
1514 ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); 1514 ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1515 } 1515 }
1516 1516
1517 return ret; 1517 return ret;
@@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
1707 1707
1708 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, 1708 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
1709 tunnel->encap.type) || 1709 tunnel->encap.type) ||
1710 nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, 1710 nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
1711 tunnel->encap.sport) || 1711 tunnel->encap.sport) ||
1712 nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, 1712 nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
1713 tunnel->encap.dport) || 1713 tunnel->encap.dport) ||
1714 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, 1714 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
1715 tunnel->encap.flags)) 1715 tunnel->encap.flags))
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b6aa8ed18257..a56276996b72 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
52 52
53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); 53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
54 54
55 /* Set the IPv6 fragment id if not set yet */
56 if (!skb_shinfo(skb)->ip6_frag_id)
57 ipv6_proxy_select_ident(skb);
58
55 segs = NULL; 59 segs = NULL;
56 goto out; 60 goto out;
57 } 61 }
@@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
108 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); 112 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
109 fptr->nexthdr = nexthdr; 113 fptr->nexthdr = nexthdr;
110 fptr->reserved = 0; 114 fptr->reserved = 0;
111 fptr->identification = skb_shinfo(skb)->ip6_frag_id; 115 if (skb_shinfo(skb)->ip6_frag_id)
116 fptr->identification = skb_shinfo(skb)->ip6_frag_id;
117 else
118 ipv6_select_ident(fptr,
119 (struct rt6_info *)skb_dst(skb));
112 120
113 /* Fragment the skb. ipv6 header and the remaining fields of the 121 /* Fragment the skb. ipv6 header and the remaining fields of the
114 * fragment header are updated in ipv6_gso_segment() 122 * fragment header are updated in ipv6_gso_segment()
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 990decba1fe4..b87ca32efa0b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
659 return err; 659 return err;
660} 660}
661 661
662static int ip_vs_route_me_harder(int af, struct sk_buff *skb) 662static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
663 unsigned int hooknum)
663{ 664{
665 if (!sysctl_snat_reroute(skb))
666 return 0;
667 /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
668 if (NF_INET_LOCAL_IN == hooknum)
669 return 0;
664#ifdef CONFIG_IP_VS_IPV6 670#ifdef CONFIG_IP_VS_IPV6
665 if (af == AF_INET6) { 671 if (af == AF_INET6) {
666 if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0) 672 struct dst_entry *dst = skb_dst(skb);
673
674 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
675 ip6_route_me_harder(skb) != 0)
667 return 1; 676 return 1;
668 } else 677 } else
669#endif 678#endif
670 if ((sysctl_snat_reroute(skb) || 679 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
671 skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
672 ip_route_me_harder(skb, RTN_LOCAL) != 0) 680 ip_route_me_harder(skb, RTN_LOCAL) != 0)
673 return 1; 681 return 1;
674 682
@@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
791 union nf_inet_addr *snet, 799 union nf_inet_addr *snet,
792 __u8 protocol, struct ip_vs_conn *cp, 800 __u8 protocol, struct ip_vs_conn *cp,
793 struct ip_vs_protocol *pp, 801 struct ip_vs_protocol *pp,
794 unsigned int offset, unsigned int ihl) 802 unsigned int offset, unsigned int ihl,
803 unsigned int hooknum)
795{ 804{
796 unsigned int verdict = NF_DROP; 805 unsigned int verdict = NF_DROP;
797 806
@@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
821#endif 830#endif
822 ip_vs_nat_icmp(skb, pp, cp, 1); 831 ip_vs_nat_icmp(skb, pp, cp, 1);
823 832
824 if (ip_vs_route_me_harder(af, skb)) 833 if (ip_vs_route_me_harder(af, skb, hooknum))
825 goto out; 834 goto out;
826 835
827 /* do the statistics and put it back */ 836 /* do the statistics and put it back */
@@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
916 925
917 snet.ip = iph->saddr; 926 snet.ip = iph->saddr;
918 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, 927 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
919 pp, ciph.len, ihl); 928 pp, ciph.len, ihl, hooknum);
920} 929}
921 930
922#ifdef CONFIG_IP_VS_IPV6 931#ifdef CONFIG_IP_VS_IPV6
@@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
981 snet.in6 = ciph.saddr.in6; 990 snet.in6 = ciph.saddr.in6;
982 writable = ciph.len; 991 writable = ciph.len;
983 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp, 992 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
984 pp, writable, sizeof(struct ipv6hdr)); 993 pp, writable, sizeof(struct ipv6hdr),
994 hooknum);
985} 995}
986#endif 996#endif
987 997
@@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
1040 */ 1050 */
1041static unsigned int 1051static unsigned int
1042handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 1052handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1043 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 1053 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1054 unsigned int hooknum)
1044{ 1055{
1045 struct ip_vs_protocol *pp = pd->pp; 1056 struct ip_vs_protocol *pp = pd->pp;
1046 1057
@@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1078 * if it came from this machine itself. So re-compute 1089 * if it came from this machine itself. So re-compute
1079 * the routing information. 1090 * the routing information.
1080 */ 1091 */
1081 if (ip_vs_route_me_harder(af, skb)) 1092 if (ip_vs_route_me_harder(af, skb, hooknum))
1082 goto drop; 1093 goto drop;
1083 1094
1084 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT"); 1095 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1181 cp = pp->conn_out_get(af, skb, &iph, 0); 1192 cp = pp->conn_out_get(af, skb, &iph, 0);
1182 1193
1183 if (likely(cp)) 1194 if (likely(cp))
1184 return handle_response(af, skb, pd, cp, &iph); 1195 return handle_response(af, skb, pd, cp, &iph, hooknum);
1185 if (sysctl_nat_icmp_send(net) && 1196 if (sysctl_nat_icmp_send(net) &&
1186 (pp->protocol == IPPROTO_TCP || 1197 (pp->protocol == IPPROTO_TCP ||
1187 pp->protocol == IPPROTO_UDP || 1198 pp->protocol == IPPROTO_UDP ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 3b3ddb4fb9ee..1ff04bcd4871 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1134,9 +1134,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
1134 /* Restore old counters on this cpu, no problem. Per-cpu statistics 1134 /* Restore old counters on this cpu, no problem. Per-cpu statistics
1135 * are not exposed to userspace. 1135 * are not exposed to userspace.
1136 */ 1136 */
1137 preempt_disable();
1137 stats = this_cpu_ptr(newstats); 1138 stats = this_cpu_ptr(newstats);
1138 stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); 1139 stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
1139 stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); 1140 stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
1141 preempt_enable();
1140 1142
1141 return newstats; 1143 return newstats;
1142} 1144}
@@ -1262,8 +1264,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1262 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1264 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
1263 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, 1265 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
1264 sizeof(struct nft_trans_chain)); 1266 sizeof(struct nft_trans_chain));
1265 if (trans == NULL) 1267 if (trans == NULL) {
1268 free_percpu(stats);
1266 return -ENOMEM; 1269 return -ENOMEM;
1270 }
1267 1271
1268 nft_trans_chain_stats(trans) = stats; 1272 nft_trans_chain_stats(trans) = stats;
1269 nft_trans_chain_update(trans) = true; 1273 nft_trans_chain_update(trans) = true;
@@ -1319,8 +1323,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1319 hookfn = type->hooks[hooknum]; 1323 hookfn = type->hooks[hooknum];
1320 1324
1321 basechain = kzalloc(sizeof(*basechain), GFP_KERNEL); 1325 basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
1322 if (basechain == NULL) 1326 if (basechain == NULL) {
1327 module_put(type->owner);
1323 return -ENOMEM; 1328 return -ENOMEM;
1329 }
1324 1330
1325 if (nla[NFTA_CHAIN_COUNTERS]) { 1331 if (nla[NFTA_CHAIN_COUNTERS]) {
1326 stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); 1332 stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
@@ -3753,6 +3759,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
3753} 3759}
3754EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); 3760EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
3755 3761
3762int nft_chain_validate_hooks(const struct nft_chain *chain,
3763 unsigned int hook_flags)
3764{
3765 struct nft_base_chain *basechain;
3766
3767 if (chain->flags & NFT_BASE_CHAIN) {
3768 basechain = nft_base_chain(chain);
3769
3770 if ((1 << basechain->ops[0].hooknum) & hook_flags)
3771 return 0;
3772
3773 return -EOPNOTSUPP;
3774 }
3775
3776 return 0;
3777}
3778EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
3779
3756/* 3780/*
3757 * Loop detection - walk through the ruleset beginning at the destination chain 3781 * Loop detection - walk through the ruleset beginning at the destination chain
3758 * of a new jump until either the source chain is reached (loop) or all 3782 * of a new jump until either the source chain is reached (loop) or all
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index d1ffd5eb3a9b..9aea747b43ea 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
21}; 21};
22EXPORT_SYMBOL_GPL(nft_masq_policy); 22EXPORT_SYMBOL_GPL(nft_masq_policy);
23 23
24int nft_masq_validate(const struct nft_ctx *ctx,
25 const struct nft_expr *expr,
26 const struct nft_data **data)
27{
28 int err;
29
30 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
31 if (err < 0)
32 return err;
33
34 return nft_chain_validate_hooks(ctx->chain,
35 (1 << NF_INET_POST_ROUTING));
36}
37EXPORT_SYMBOL_GPL(nft_masq_validate);
38
24int nft_masq_init(const struct nft_ctx *ctx, 39int nft_masq_init(const struct nft_ctx *ctx,
25 const struct nft_expr *expr, 40 const struct nft_expr *expr,
26 const struct nlattr * const tb[]) 41 const struct nlattr * const tb[])
@@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
28 struct nft_masq *priv = nft_expr_priv(expr); 43 struct nft_masq *priv = nft_expr_priv(expr);
29 int err; 44 int err;
30 45
31 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 46 err = nft_masq_validate(ctx, expr, NULL);
32 if (err < 0) 47 if (err)
33 return err; 48 return err;
34 49
35 if (tb[NFTA_MASQ_FLAGS] == NULL) 50 if (tb[NFTA_MASQ_FLAGS] == NULL)
@@ -60,12 +75,5 @@ nla_put_failure:
60} 75}
61EXPORT_SYMBOL_GPL(nft_masq_dump); 76EXPORT_SYMBOL_GPL(nft_masq_dump);
62 77
63int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
64 const struct nft_data **data)
65{
66 return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
67}
68EXPORT_SYMBOL_GPL(nft_masq_validate);
69
70MODULE_LICENSE("GPL"); 78MODULE_LICENSE("GPL");
71MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); 79MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index aff54fb1c8a0..a0837c6c9283 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
88 [NFTA_NAT_FLAGS] = { .type = NLA_U32 }, 88 [NFTA_NAT_FLAGS] = { .type = NLA_U32 },
89}; 89};
90 90
91static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 91static int nft_nat_validate(const struct nft_ctx *ctx,
92 const struct nlattr * const tb[]) 92 const struct nft_expr *expr,
93 const struct nft_data **data)
93{ 94{
94 struct nft_nat *priv = nft_expr_priv(expr); 95 struct nft_nat *priv = nft_expr_priv(expr);
95 u32 family;
96 int err; 96 int err;
97 97
98 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 98 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
99 if (err < 0) 99 if (err < 0)
100 return err; 100 return err;
101 101
102 switch (priv->type) {
103 case NFT_NAT_SNAT:
104 err = nft_chain_validate_hooks(ctx->chain,
105 (1 << NF_INET_POST_ROUTING) |
106 (1 << NF_INET_LOCAL_IN));
107 break;
108 case NFT_NAT_DNAT:
109 err = nft_chain_validate_hooks(ctx->chain,
110 (1 << NF_INET_PRE_ROUTING) |
111 (1 << NF_INET_LOCAL_OUT));
112 break;
113 }
114
115 return err;
116}
117
118static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
119 const struct nlattr * const tb[])
120{
121 struct nft_nat *priv = nft_expr_priv(expr);
122 u32 family;
123 int err;
124
102 if (tb[NFTA_NAT_TYPE] == NULL || 125 if (tb[NFTA_NAT_TYPE] == NULL ||
103 (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && 126 (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
104 tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) 127 tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
@@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
115 return -EINVAL; 138 return -EINVAL;
116 } 139 }
117 140
141 err = nft_nat_validate(ctx, expr, NULL);
142 if (err < 0)
143 return err;
144
118 if (tb[NFTA_NAT_FAMILY] == NULL) 145 if (tb[NFTA_NAT_FAMILY] == NULL)
119 return -EINVAL; 146 return -EINVAL;
120 147
@@ -219,13 +246,6 @@ nla_put_failure:
219 return -1; 246 return -1;
220} 247}
221 248
222static int nft_nat_validate(const struct nft_ctx *ctx,
223 const struct nft_expr *expr,
224 const struct nft_data **data)
225{
226 return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
227}
228
229static struct nft_expr_type nft_nat_type; 249static struct nft_expr_type nft_nat_type;
230static const struct nft_expr_ops nft_nat_ops = { 250static const struct nft_expr_ops nft_nat_ops = {
231 .type = &nft_nat_type, 251 .type = &nft_nat_type,
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 9e8093f28311..d7e9e93a4e90 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
23}; 23};
24EXPORT_SYMBOL_GPL(nft_redir_policy); 24EXPORT_SYMBOL_GPL(nft_redir_policy);
25 25
26int nft_redir_validate(const struct nft_ctx *ctx,
27 const struct nft_expr *expr,
28 const struct nft_data **data)
29{
30 int err;
31
32 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
33 if (err < 0)
34 return err;
35
36 return nft_chain_validate_hooks(ctx->chain,
37 (1 << NF_INET_PRE_ROUTING) |
38 (1 << NF_INET_LOCAL_OUT));
39}
40EXPORT_SYMBOL_GPL(nft_redir_validate);
41
26int nft_redir_init(const struct nft_ctx *ctx, 42int nft_redir_init(const struct nft_ctx *ctx,
27 const struct nft_expr *expr, 43 const struct nft_expr *expr,
28 const struct nlattr * const tb[]) 44 const struct nlattr * const tb[])
@@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
30 struct nft_redir *priv = nft_expr_priv(expr); 46 struct nft_redir *priv = nft_expr_priv(expr);
31 int err; 47 int err;
32 48
33 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 49 err = nft_redir_validate(ctx, expr, NULL);
34 if (err < 0) 50 if (err < 0)
35 return err; 51 return err;
36 52
@@ -88,12 +104,5 @@ nla_put_failure:
88} 104}
89EXPORT_SYMBOL_GPL(nft_redir_dump); 105EXPORT_SYMBOL_GPL(nft_redir_dump);
90 106
91int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
92 const struct nft_data **data)
93{
94 return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
95}
96EXPORT_SYMBOL_GPL(nft_redir_validate);
97
98MODULE_LICENSE("GPL"); 107MODULE_LICENSE("GPL");
99MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); 108MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 02fdde28dada..75532efa51cd 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1438,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups,
1438 1438
1439 for (undo = 0; undo < group; undo++) 1439 for (undo = 0; undo < group; undo++)
1440 if (test_bit(undo, &groups)) 1440 if (test_bit(undo, &groups))
1441 nlk->netlink_unbind(sock_net(sk), undo); 1441 nlk->netlink_unbind(sock_net(sk), undo + 1);
1442} 1442}
1443 1443
1444static int netlink_bind(struct socket *sock, struct sockaddr *addr, 1444static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1476,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1476 for (group = 0; group < nlk->ngroups; group++) { 1476 for (group = 0; group < nlk->ngroups; group++) {
1477 if (!test_bit(group, &groups)) 1477 if (!test_bit(group, &groups))
1478 continue; 1478 continue;
1479 err = nlk->netlink_bind(net, group); 1479 err = nlk->netlink_bind(net, group + 1);
1480 if (!err) 1480 if (!err)
1481 continue; 1481 continue;
1482 netlink_undo_bind(group, groups, sk); 1482 netlink_undo_bind(group, groups, sk);
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index c3b0cd43eb56..c173f69e1479 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
71 { 71 {
72 .procname = "max_unacked_packets", 72 .procname = "max_unacked_packets",
73 .data = &rds_sysctl_max_unacked_packets, 73 .data = &rds_sysctl_max_unacked_packets,
74 .maxlen = sizeof(unsigned long), 74 .maxlen = sizeof(int),
75 .mode = 0644, 75 .mode = 0644,
76 .proc_handler = proc_dointvec, 76 .proc_handler = proc_dointvec,
77 }, 77 },
78 { 78 {
79 .procname = "max_unacked_bytes", 79 .procname = "max_unacked_bytes",
80 .data = &rds_sysctl_max_unacked_bytes, 80 .data = &rds_sysctl_max_unacked_bytes,
81 .maxlen = sizeof(unsigned long), 81 .maxlen = sizeof(int),
82 .mode = 0644, 82 .mode = 0644,
83 .proc_handler = proc_dointvec, 83 .proc_handler = proc_dointvec,
84 }, 84 },
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index aad6a679fb13..baef987fe2c0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
556} 556}
557EXPORT_SYMBOL(tcf_exts_change); 557EXPORT_SYMBOL(tcf_exts_change);
558 558
559#define tcf_exts_first_act(ext) \ 559#define tcf_exts_first_act(ext) \
560 list_first_entry(&(exts)->actions, struct tc_action, list) 560 list_first_entry_or_null(&(exts)->actions, \
561 struct tc_action, list)
561 562
562int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 563int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
563{ 564{
@@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
603{ 604{
604#ifdef CONFIG_NET_CLS_ACT 605#ifdef CONFIG_NET_CLS_ACT
605 struct tc_action *a = tcf_exts_first_act(exts); 606 struct tc_action *a = tcf_exts_first_act(exts);
606 if (tcf_action_copy_stats(skb, a, 1) < 0) 607 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
607 return -1; 608 return -1;
608#endif 609#endif
609 return 0; 610 return 0;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 9b05924cc386..333cd94ba381 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -670,8 +670,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
670 if (tb[TCA_FQ_FLOW_PLIMIT]) 670 if (tb[TCA_FQ_FLOW_PLIMIT])
671 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); 671 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
672 672
673 if (tb[TCA_FQ_QUANTUM]) 673 if (tb[TCA_FQ_QUANTUM]) {
674 q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 674 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
675
676 if (quantum > 0)
677 q->quantum = quantum;
678 else
679 err = -EINVAL;
680 }
675 681
676 if (tb[TCA_FQ_INITIAL_QUANTUM]) 682 if (tb[TCA_FQ_INITIAL_QUANTUM])
677 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); 683 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e49e231cef52..06320c8c1c86 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2608,7 +2608,7 @@ do_addr_param:
2608 2608
2609 addr_param = param.v + sizeof(sctp_addip_param_t); 2609 addr_param = param.v + sizeof(sctp_addip_param_t);
2610 2610
2611 af = sctp_get_af_specific(param_type2af(param.p->type)); 2611 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
2612 if (af == NULL) 2612 if (af == NULL)
2613 break; 2613 break;
2614 2614
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 8eb779b9d77f..604e718d68d3 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -5,6 +5,7 @@ config SECURITY_TOMOYO
5 select SECURITYFS 5 select SECURITYFS
6 select SECURITY_PATH 6 select SECURITY_PATH
7 select SECURITY_NETWORK 7 select SECURITY_NETWORK
8 select SRCU
8 default n 9 default n
9 help 10 help
10 This selects TOMOYO Linux, pathname-based access control. 11 This selects TOMOYO Linux, pathname-based access control.
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 1a3a6fa27158..c6bba99a90b2 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
56 56
57static void snd_ak4113_free(struct ak4113 *chip) 57static void snd_ak4113_free(struct ak4113 *chip)
58{ 58{
59 chip->init = 1; /* don't schedule new work */ 59 atomic_inc(&chip->wq_processing); /* don't schedule new work */
60 mb();
61 cancel_delayed_work_sync(&chip->work); 60 cancel_delayed_work_sync(&chip->work);
62 kfree(chip); 61 kfree(chip);
63} 62}
@@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
89 chip->write = write; 88 chip->write = write;
90 chip->private_data = private_data; 89 chip->private_data = private_data;
91 INIT_DELAYED_WORK(&chip->work, ak4113_stats); 90 INIT_DELAYED_WORK(&chip->work, ak4113_stats);
91 atomic_set(&chip->wq_processing, 0);
92 92
93 for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++) 93 for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
94 chip->regmap[reg] = pgm[reg]; 94 chip->regmap[reg] = pgm[reg];
@@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip)
139 139
140void snd_ak4113_reinit(struct ak4113 *chip) 140void snd_ak4113_reinit(struct ak4113 *chip)
141{ 141{
142 chip->init = 1; 142 if (atomic_inc_return(&chip->wq_processing) == 1)
143 mb(); 143 cancel_delayed_work_sync(&chip->work);
144 flush_delayed_work(&chip->work);
145 ak4113_init_regs(chip); 144 ak4113_init_regs(chip);
146 /* bring up statistics / event queing */ 145 /* bring up statistics / event queing */
147 chip->init = 0; 146 if (atomic_dec_and_test(&chip->wq_processing))
148 if (chip->kctls[0])
149 schedule_delayed_work(&chip->work, HZ / 10); 147 schedule_delayed_work(&chip->work, HZ / 10);
150} 148}
151EXPORT_SYMBOL_GPL(snd_ak4113_reinit); 149EXPORT_SYMBOL_GPL(snd_ak4113_reinit);
@@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work)
632{ 630{
633 struct ak4113 *chip = container_of(work, struct ak4113, work.work); 631 struct ak4113 *chip = container_of(work, struct ak4113, work.work);
634 632
635 if (!chip->init) 633 if (atomic_inc_return(&chip->wq_processing) == 1)
636 snd_ak4113_check_rate_and_errors(chip, chip->check_flags); 634 snd_ak4113_check_rate_and_errors(chip, chip->check_flags);
637 635
638 schedule_delayed_work(&chip->work, HZ / 10); 636 if (atomic_dec_and_test(&chip->wq_processing))
637 schedule_delayed_work(&chip->work, HZ / 10);
639} 638}
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index c7f56339415d..b70e6eccbd03 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114)
66 66
67static void snd_ak4114_free(struct ak4114 *chip) 67static void snd_ak4114_free(struct ak4114 *chip)
68{ 68{
69 chip->init = 1; /* don't schedule new work */ 69 atomic_inc(&chip->wq_processing); /* don't schedule new work */
70 mb();
71 cancel_delayed_work_sync(&chip->work); 70 cancel_delayed_work_sync(&chip->work);
72 kfree(chip); 71 kfree(chip);
73} 72}
@@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card,
100 chip->write = write; 99 chip->write = write;
101 chip->private_data = private_data; 100 chip->private_data = private_data;
102 INIT_DELAYED_WORK(&chip->work, ak4114_stats); 101 INIT_DELAYED_WORK(&chip->work, ak4114_stats);
102 atomic_set(&chip->wq_processing, 0);
103 103
104 for (reg = 0; reg < 6; reg++) 104 for (reg = 0; reg < 6; reg++)
105 chip->regmap[reg] = pgm[reg]; 105 chip->regmap[reg] = pgm[reg];
@@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip)
152 152
153void snd_ak4114_reinit(struct ak4114 *chip) 153void snd_ak4114_reinit(struct ak4114 *chip)
154{ 154{
155 chip->init = 1; 155 if (atomic_inc_return(&chip->wq_processing) == 1)
156 mb(); 156 cancel_delayed_work_sync(&chip->work);
157 flush_delayed_work(&chip->work);
158 ak4114_init_regs(chip); 157 ak4114_init_regs(chip);
159 /* bring up statistics / event queing */ 158 /* bring up statistics / event queing */
160 chip->init = 0; 159 if (atomic_dec_and_test(&chip->wq_processing))
161 if (chip->kctls[0])
162 schedule_delayed_work(&chip->work, HZ / 10); 160 schedule_delayed_work(&chip->work, HZ / 10);
163} 161}
164 162
@@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work)
612{ 610{
613 struct ak4114 *chip = container_of(work, struct ak4114, work.work); 611 struct ak4114 *chip = container_of(work, struct ak4114, work.work);
614 612
615 if (!chip->init) 613 if (atomic_inc_return(&chip->wq_processing) == 1)
616 snd_ak4114_check_rate_and_errors(chip, chip->check_flags); 614 snd_ak4114_check_rate_and_errors(chip, chip->check_flags);
617 615 if (atomic_dec_and_test(&chip->wq_processing))
618 schedule_delayed_work(&chip->work, HZ / 10); 616 schedule_delayed_work(&chip->work, HZ / 10);
619} 617}
620 618
621EXPORT_SYMBOL(snd_ak4114_create); 619EXPORT_SYMBOL(snd_ak4114_create);
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 99ff35e2a25d..35e44e463cfe 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -348,7 +348,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
348 struct atmel_pcm_dma_params *dma_params; 348 struct atmel_pcm_dma_params *dma_params;
349 int dir, channels, bits; 349 int dir, channels, bits;
350 u32 tfmr, rfmr, tcmr, rcmr; 350 u32 tfmr, rfmr, tcmr, rcmr;
351 int start_event;
352 int ret; 351 int ret;
353 int fslen, fslen_ext; 352 int fslen, fslen_ext;
354 353
@@ -457,19 +456,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
457 * The SSC transmit clock is obtained from the BCLK signal on 456 * The SSC transmit clock is obtained from the BCLK signal on
458 * on the TK line, and the SSC receive clock is 457 * on the TK line, and the SSC receive clock is
459 * generated from the transmit clock. 458 * generated from the transmit clock.
460 *
461 * For single channel data, one sample is transferred
462 * on the falling edge of the LRC clock.
463 * For two channel data, one sample is
464 * transferred on both edges of the LRC clock.
465 */ 459 */
466 start_event = ((channels == 1)
467 ? SSC_START_FALLING_RF
468 : SSC_START_EDGE_RF);
469
470 rcmr = SSC_BF(RCMR_PERIOD, 0) 460 rcmr = SSC_BF(RCMR_PERIOD, 0)
471 | SSC_BF(RCMR_STTDLY, START_DELAY) 461 | SSC_BF(RCMR_STTDLY, START_DELAY)
472 | SSC_BF(RCMR_START, start_event) 462 | SSC_BF(RCMR_START, SSC_START_FALLING_RF)
473 | SSC_BF(RCMR_CKI, SSC_CKI_RISING) 463 | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
474 | SSC_BF(RCMR_CKO, SSC_CKO_NONE) 464 | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
475 | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ? 465 | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
@@ -478,14 +468,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
478 rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE) 468 rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
479 | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE) 469 | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
480 | SSC_BF(RFMR_FSLEN, 0) 470 | SSC_BF(RFMR_FSLEN, 0)
481 | SSC_BF(RFMR_DATNB, 0) 471 | SSC_BF(RFMR_DATNB, (channels - 1))
482 | SSC_BIT(RFMR_MSBF) 472 | SSC_BIT(RFMR_MSBF)
483 | SSC_BF(RFMR_LOOP, 0) 473 | SSC_BF(RFMR_LOOP, 0)
484 | SSC_BF(RFMR_DATLEN, (bits - 1)); 474 | SSC_BF(RFMR_DATLEN, (bits - 1));
485 475
486 tcmr = SSC_BF(TCMR_PERIOD, 0) 476 tcmr = SSC_BF(TCMR_PERIOD, 0)
487 | SSC_BF(TCMR_STTDLY, START_DELAY) 477 | SSC_BF(TCMR_STTDLY, START_DELAY)
488 | SSC_BF(TCMR_START, start_event) 478 | SSC_BF(TCMR_START, SSC_START_FALLING_RF)
489 | SSC_BF(TCMR_CKI, SSC_CKI_FALLING) 479 | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
490 | SSC_BF(TCMR_CKO, SSC_CKO_NONE) 480 | SSC_BF(TCMR_CKO, SSC_CKO_NONE)
491 | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ? 481 | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ?
@@ -495,7 +485,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
495 | SSC_BF(TFMR_FSDEN, 0) 485 | SSC_BF(TFMR_FSDEN, 0)
496 | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE) 486 | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
497 | SSC_BF(TFMR_FSLEN, 0) 487 | SSC_BF(TFMR_FSLEN, 0)
498 | SSC_BF(TFMR_DATNB, 0) 488 | SSC_BF(TFMR_DATNB, (channels - 1))
499 | SSC_BIT(TFMR_MSBF) 489 | SSC_BIT(TFMR_MSBF)
500 | SSC_BF(TFMR_DATDEF, 0) 490 | SSC_BF(TFMR_DATDEF, 0)
501 | SSC_BF(TFMR_DATLEN, (bits - 1)); 491 | SSC_BF(TFMR_DATLEN, (bits - 1));
@@ -512,7 +502,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
512 rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period) 502 rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
513 | SSC_BF(RCMR_STTDLY, 1) 503 | SSC_BF(RCMR_STTDLY, 1)
514 | SSC_BF(RCMR_START, SSC_START_RISING_RF) 504 | SSC_BF(RCMR_START, SSC_START_RISING_RF)
515 | SSC_BF(RCMR_CKI, SSC_CKI_RISING) 505 | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
516 | SSC_BF(RCMR_CKO, SSC_CKO_NONE) 506 | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
517 | SSC_BF(RCMR_CKS, SSC_CKS_DIV); 507 | SSC_BF(RCMR_CKS, SSC_CKS_DIV);
518 508
@@ -527,7 +517,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
527 tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period) 517 tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
528 | SSC_BF(TCMR_STTDLY, 1) 518 | SSC_BF(TCMR_STTDLY, 1)
529 | SSC_BF(TCMR_START, SSC_START_RISING_RF) 519 | SSC_BF(TCMR_START, SSC_START_RISING_RF)
530 | SSC_BF(TCMR_CKI, SSC_CKI_RISING) 520 | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
531 | SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS) 521 | SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
532 | SSC_BF(TCMR_CKS, SSC_CKS_DIV); 522 | SSC_BF(TCMR_CKS, SSC_CKS_DIV);
533 523
@@ -556,7 +546,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
556 rcmr = SSC_BF(RCMR_PERIOD, 0) 546 rcmr = SSC_BF(RCMR_PERIOD, 0)
557 | SSC_BF(RCMR_STTDLY, START_DELAY) 547 | SSC_BF(RCMR_STTDLY, START_DELAY)
558 | SSC_BF(RCMR_START, SSC_START_RISING_RF) 548 | SSC_BF(RCMR_START, SSC_START_RISING_RF)
559 | SSC_BF(RCMR_CKI, SSC_CKI_RISING) 549 | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
560 | SSC_BF(RCMR_CKO, SSC_CKO_NONE) 550 | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
561 | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ? 551 | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
562 SSC_CKS_PIN : SSC_CKS_CLOCK); 552 SSC_CKS_PIN : SSC_CKS_CLOCK);
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index c3f2decd643c..1ff726c29249 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2124,6 +2124,7 @@ MODULE_DEVICE_TABLE(of, rt5640_of_match);
2124static struct acpi_device_id rt5640_acpi_match[] = { 2124static struct acpi_device_id rt5640_acpi_match[] = {
2125 { "INT33CA", 0 }, 2125 { "INT33CA", 0 },
2126 { "10EC5640", 0 }, 2126 { "10EC5640", 0 },
2127 { "10EC5642", 0 },
2127 { }, 2128 { },
2128}; 2129};
2129MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match); 2130MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 29cf7ce610f4..aa98be32bb60 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -483,21 +483,21 @@ static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
483 /* setting i2s data format */ 483 /* setting i2s data format */
484 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 484 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
485 case SND_SOC_DAIFMT_DSP_A: 485 case SND_SOC_DAIFMT_DSP_A:
486 i2sctl |= SGTL5000_I2S_MODE_PCM; 486 i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
487 break; 487 break;
488 case SND_SOC_DAIFMT_DSP_B: 488 case SND_SOC_DAIFMT_DSP_B:
489 i2sctl |= SGTL5000_I2S_MODE_PCM; 489 i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
490 i2sctl |= SGTL5000_I2S_LRALIGN; 490 i2sctl |= SGTL5000_I2S_LRALIGN;
491 break; 491 break;
492 case SND_SOC_DAIFMT_I2S: 492 case SND_SOC_DAIFMT_I2S:
493 i2sctl |= SGTL5000_I2S_MODE_I2S_LJ; 493 i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
494 break; 494 break;
495 case SND_SOC_DAIFMT_RIGHT_J: 495 case SND_SOC_DAIFMT_RIGHT_J:
496 i2sctl |= SGTL5000_I2S_MODE_RJ; 496 i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT;
497 i2sctl |= SGTL5000_I2S_LRPOL; 497 i2sctl |= SGTL5000_I2S_LRPOL;
498 break; 498 break;
499 case SND_SOC_DAIFMT_LEFT_J: 499 case SND_SOC_DAIFMT_LEFT_J:
500 i2sctl |= SGTL5000_I2S_MODE_I2S_LJ; 500 i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
501 i2sctl |= SGTL5000_I2S_LRALIGN; 501 i2sctl |= SGTL5000_I2S_LRALIGN;
502 break; 502 break;
503 default: 503 default:
@@ -1462,6 +1462,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
1462 if (ret) 1462 if (ret)
1463 return ret; 1463 return ret;
1464 1464
1465 /* Need 8 clocks before I2C accesses */
1466 udelay(1);
1467
1465 /* read chip information */ 1468 /* read chip information */
1466 ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg); 1469 ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
1467 if (ret) 1470 if (ret)
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index b7ebce054b4e..dd222b10ce13 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1046,7 +1046,7 @@ static int aic3x_prepare(struct snd_pcm_substream *substream,
1046 delay += aic3x->tdm_delay; 1046 delay += aic3x->tdm_delay;
1047 1047
1048 /* Configure data delay */ 1048 /* Configure data delay */
1049 snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, aic3x->tdm_delay); 1049 snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay);
1050 1050
1051 return 0; 1051 return 0;
1052} 1052}
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index b9211b42f6e9..b115ed815db9 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -717,6 +717,8 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
717 if (wm8731 == NULL) 717 if (wm8731 == NULL)
718 return -ENOMEM; 718 return -ENOMEM;
719 719
720 mutex_init(&wm8731->lock);
721
720 wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap); 722 wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
721 if (IS_ERR(wm8731->regmap)) { 723 if (IS_ERR(wm8731->regmap)) {
722 ret = PTR_ERR(wm8731->regmap); 724 ret = PTR_ERR(wm8731->regmap);
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 3eddb18fefd1..5cc457ef8894 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -344,23 +344,27 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec)
344 struct snd_ac97 *ac97; 344 struct snd_ac97 *ac97;
345 int ret = 0; 345 int ret = 0;
346 346
347 ac97 = snd_soc_new_ac97_codec(codec); 347 ac97 = snd_soc_alloc_ac97_codec(codec);
348 if (IS_ERR(ac97)) { 348 if (IS_ERR(ac97)) {
349 ret = PTR_ERR(ac97); 349 ret = PTR_ERR(ac97);
350 dev_err(codec->dev, "Failed to register AC97 codec\n"); 350 dev_err(codec->dev, "Failed to register AC97 codec\n");
351 return ret; 351 return ret;
352 } 352 }
353 353
354 snd_soc_codec_set_drvdata(codec, ac97);
355
356 ret = wm9705_reset(codec); 354 ret = wm9705_reset(codec);
357 if (ret) 355 if (ret)
358 goto reset_err; 356 goto err_put_device;
357
358 ret = device_add(&ac97->dev);
359 if (ret)
360 goto err_put_device;
361
362 snd_soc_codec_set_drvdata(codec, ac97);
359 363
360 return 0; 364 return 0;
361 365
362reset_err: 366err_put_device:
363 snd_soc_free_ac97_codec(ac97); 367 put_device(&ac97->dev);
364 return ret; 368 return ret;
365} 369}
366 370
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index e04643d2bb24..9517571e820d 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -666,7 +666,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
666 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); 666 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
667 int ret = 0; 667 int ret = 0;
668 668
669 wm9712->ac97 = snd_soc_new_ac97_codec(codec); 669 wm9712->ac97 = snd_soc_alloc_ac97_codec(codec);
670 if (IS_ERR(wm9712->ac97)) { 670 if (IS_ERR(wm9712->ac97)) {
671 ret = PTR_ERR(wm9712->ac97); 671 ret = PTR_ERR(wm9712->ac97);
672 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret); 672 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -675,15 +675,19 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
675 675
676 ret = wm9712_reset(codec, 0); 676 ret = wm9712_reset(codec, 0);
677 if (ret < 0) 677 if (ret < 0)
678 goto reset_err; 678 goto err_put_device;
679
680 ret = device_add(&wm9712->ac97->dev);
681 if (ret)
682 goto err_put_device;
679 683
680 /* set alc mux to none */ 684 /* set alc mux to none */
681 ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000); 685 ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
682 686
683 return 0; 687 return 0;
684 688
685reset_err: 689err_put_device:
686 snd_soc_free_ac97_codec(wm9712->ac97); 690 put_device(&wm9712->ac97->dev);
687 return ret; 691 return ret;
688} 692}
689 693
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 71b9d5b0734d..6ab1122a3872 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1225,7 +1225,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
1225 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); 1225 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
1226 int ret = 0, reg; 1226 int ret = 0, reg;
1227 1227
1228 wm9713->ac97 = snd_soc_new_ac97_codec(codec); 1228 wm9713->ac97 = snd_soc_alloc_ac97_codec(codec);
1229 if (IS_ERR(wm9713->ac97)) 1229 if (IS_ERR(wm9713->ac97))
1230 return PTR_ERR(wm9713->ac97); 1230 return PTR_ERR(wm9713->ac97);
1231 1231
@@ -1234,7 +1234,11 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
1234 wm9713_reset(codec, 0); 1234 wm9713_reset(codec, 0);
1235 ret = wm9713_reset(codec, 1); 1235 ret = wm9713_reset(codec, 1);
1236 if (ret < 0) 1236 if (ret < 0)
1237 goto reset_err; 1237 goto err_put_device;
1238
1239 ret = device_add(&wm9713->ac97->dev);
1240 if (ret)
1241 goto err_put_device;
1238 1242
1239 /* unmute the adc - move to kcontrol */ 1243 /* unmute the adc - move to kcontrol */
1240 reg = ac97_read(codec, AC97_CD) & 0x7fff; 1244 reg = ac97_read(codec, AC97_CD) & 0x7fff;
@@ -1242,8 +1246,8 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
1242 1246
1243 return 0; 1247 return 0;
1244 1248
1245reset_err: 1249err_put_device:
1246 snd_soc_free_ac97_codec(wm9713->ac97); 1250 put_device(&wm9713->ac97->dev);
1247 return ret; 1251 return ret;
1248} 1252}
1249 1253
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 5bf14040c24a..8156cc1accb7 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -651,11 +651,11 @@ static void hsw_notification_work(struct work_struct *work)
651 } 651 }
652 652
653 /* tell DSP that notification has been handled */ 653 /* tell DSP that notification has been handled */
654 sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IPCD, 654 sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD,
655 SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE); 655 SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE);
656 656
657 /* unmask busy interrupt */ 657 /* unmask busy interrupt */
658 sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0); 658 sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
659} 659}
660 660
661static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header) 661static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header)
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 2ac72eb5e75d..b3360139c41a 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -350,7 +350,7 @@ static struct sst_machines sst_acpi_bytcr[] = {
350 350
351/* Cherryview-based platforms: CherryTrail and Braswell */ 351/* Cherryview-based platforms: CherryTrail and Braswell */
352static struct sst_machines sst_acpi_chv[] = { 352static struct sst_machines sst_acpi_chv[] = {
353 {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin", 353 {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin",
354 &chv_platform_data }, 354 &chv_platform_data },
355 {}, 355 {},
356}; 356};
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 2e10e9a38376..08d7259bbaab 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -48,15 +48,18 @@ static void soc_ac97_device_release(struct device *dev)
48} 48}
49 49
50/** 50/**
51 * snd_soc_new_ac97_codec - initailise AC97 device 51 * snd_soc_alloc_ac97_codec() - Allocate new a AC'97 device
52 * @codec: audio codec 52 * @codec: The CODEC for which to create the AC'97 device
53 * 53 *
54 * Initialises AC97 codec resources for use by ad-hoc devices only. 54 * Allocated a new snd_ac97 device and intializes it, but does not yet register
55 * it. The caller is responsible to either call device_add(&ac97->dev) to
56 * register the device, or to call put_device(&ac97->dev) to free the device.
57 *
58 * Returns: A snd_ac97 device or a PTR_ERR in case of an error.
55 */ 59 */
56struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) 60struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec)
57{ 61{
58 struct snd_ac97 *ac97; 62 struct snd_ac97 *ac97;
59 int ret;
60 63
61 ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL); 64 ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
62 if (ac97 == NULL) 65 if (ac97 == NULL)
@@ -73,7 +76,28 @@ struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
73 codec->component.card->snd_card->number, 0, 76 codec->component.card->snd_card->number, 0,
74 codec->component.name); 77 codec->component.name);
75 78
76 ret = device_register(&ac97->dev); 79 device_initialize(&ac97->dev);
80
81 return ac97;
82}
83EXPORT_SYMBOL(snd_soc_alloc_ac97_codec);
84
85/**
86 * snd_soc_new_ac97_codec - initailise AC97 device
87 * @codec: audio codec
88 *
89 * Initialises AC97 codec resources for use by ad-hoc devices only.
90 */
91struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
92{
93 struct snd_ac97 *ac97;
94 int ret;
95
96 ac97 = snd_soc_alloc_ac97_codec(codec);
97 if (IS_ERR(ac97))
98 return ac97;
99
100 ret = device_add(&ac97->dev);
77 if (ret) { 101 if (ret) {
78 put_device(&ac97->dev); 102 put_device(&ac97->dev);
79 return ERR_PTR(ret); 103 return ERR_PTR(ret);
diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore
new file mode 100644
index 000000000000..cc0e7a9f99e3
--- /dev/null
+++ b/tools/lib/lockdep/.gitignore
@@ -0,0 +1 @@
liblockdep.so.*
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index 52f9279c6c13..4b866c54f624 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -104,7 +104,7 @@ N =
104 104
105export Q VERBOSE 105export Q VERBOSE
106 106
107INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES) 107INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
108 108
109# Set compile option CFLAGS if not set elsewhere 109# Set compile option CFLAGS if not set elsewhere
110CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g 110CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
index abe14b7f36e9..bb99cde3f5f9 100755
--- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -24,7 +24,7 @@
24 24
25ncpus=`grep '^processor' /proc/cpuinfo | wc -l` 25ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
26idlecpus=`mpstat | tail -1 | \ 26idlecpus=`mpstat | tail -1 | \
27 awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'` 27 awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
28awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null ' 28awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
29BEGIN { 29BEGIN {
30 cpus2use = idlecpus; 30 cpus2use = idlecpus;
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index d6cc07fc137f..559e01ac86be 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -30,6 +30,7 @@ else
30 echo Unreadable results directory: $i 30 echo Unreadable results directory: $i
31 exit 1 31 exit 1
32fi 32fi
33. tools/testing/selftests/rcutorture/bin/functions.sh
33 34
34configfile=`echo $i | sed -e 's/^.*\///'` 35configfile=`echo $i | sed -e 's/^.*\///'`
35ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'` 36ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
@@ -48,4 +49,21 @@ else
48 title="$title ($ngpsps per second)" 49 title="$title ($ngpsps per second)"
49 fi 50 fi
50 echo $title 51 echo $title
52 nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
53 if test -z "$nclosecalls"
54 then
55 exit 0
56 fi
57 if test "$nclosecalls" -eq 0
58 then
59 exit 0
60 fi
61 # Compute number of close calls per tenth of an hour
62 nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null`
63 if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1
64 then
65 print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
66 else
67 print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
68 fi
51fi 69fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 8ca9f21f2efc..5236e073919d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -8,9 +8,9 @@
8# 8#
9# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args 9# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
10# 10#
11# qemu-args defaults to "-nographic", along with arguments specifying the 11# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
12# number of CPUs and other options generated from 12# arguments specifying the number of CPUs and other
13# the underlying CPU architecture. 13# options generated from the underlying CPU architecture.
14# boot_args defaults to value returned by the per_version_boot_params 14# boot_args defaults to value returned by the per_version_boot_params
15# shell function. 15# shell function.
16# 16#
@@ -138,7 +138,7 @@ then
138fi 138fi
139 139
140# Generate -smp qemu argument. 140# Generate -smp qemu argument.
141qemu_args="-nographic $qemu_args" 141qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
142cpu_count=`configNR_CPUS.sh $config_template` 142cpu_count=`configNR_CPUS.sh $config_template`
143cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"` 143cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
144vcpus=`identify_qemu_vcpus` 144vcpus=`identify_qemu_vcpus`
@@ -168,6 +168,7 @@ then
168 touch $resdir/buildonly 168 touch $resdir/buildonly
169 exit 0 169 exit 0
170fi 170fi
171echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
171echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd 172echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
172( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) & 173( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
173qemu_pid=$! 174qemu_pid=$!
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index 499d1e598e42..a6b57622c2e5 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -26,12 +26,15 @@
26# 26#
27# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 27# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
28 28
29T=$1 29F=$1
30title=$2 30title=$2
31T=/tmp/parse-build.sh.$$
32trap 'rm -rf $T' 0
33mkdir $T
31 34
32. functions.sh 35. functions.sh
33 36
34if grep -q CC < $T 37if grep -q CC < $F
35then 38then
36 : 39 :
37else 40else
@@ -39,18 +42,21 @@ else
39 exit 1 42 exit 1
40fi 43fi
41 44
42if grep -q "error:" < $T 45if grep -q "error:" < $F
43then 46then
44 print_bug $title build errors: 47 print_bug $title build errors:
45 grep "error:" < $T 48 grep "error:" < $F
46 exit 2 49 exit 2
47fi 50fi
48exit 0
49 51
50if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T 52grep warning: < $F > $T/warnings
53grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings
54grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings
55cat $T/hwarnings $T/cwarnings > $T/rcuwarnings
56if test -s $T/rcuwarnings
51then 57then
52 print_warning $title build errors: 58 print_warning $title build errors:
53 egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T 59 cat $T/rcuwarnings
54 exit 2 60 exit 2
55fi 61fi
56exit 0 62exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index f962ba4cf68b..d8f35cf116be 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file
36then 36then
37 print_warning Console output contains nul bytes, old qemu still running? 37 print_warning Console output contains nul bytes, old qemu still running?
38fi 38fi
39egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T 39egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
40if test -s $T 40if test -s $T
41then 41then
42 print_warning Assertion failure in $file $title 42 print_warning Assertion failure in $file $title