-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-event_source-devices-events | 6
-rw-r--r--  Documentation/RCU/stallwarn.txt | 16
-rw-r--r--  Documentation/RCU/trace.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-st.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/trivial-devices.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77686.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/regulator/da9211.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/regulator/isl9305.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/regulator/mt6397-regulator.txt | 217
-rw-r--r--  Documentation/devicetree/bindings/regulator/pfuze100.txt | 94
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-msiof.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-sirf.txt | 41
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-st-ssc.txt | 40
-rw-r--r--  Documentation/futex-requeue-pi.txt | 8
-rw-r--r--  Documentation/hwmon/ina2xx | 23
-rw-r--r--  Documentation/locking/lockdep-design.txt | 2
-rw-r--r--  Documentation/memory-barriers.txt | 46
-rw-r--r--  Documentation/networking/netlink_mmap.txt | 13
-rw-r--r--  MAINTAINERS | 22
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/mm/fault.c | 2
-rw-r--r--  arch/arc/mm/fault.c | 2
-rw-r--r--  arch/arm/boot/compressed/head.S | 39
-rw-r--r--  arch/arm/boot/dts/exynos4.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6sx-sdb.dts | 8
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts | 6
-rw-r--r--  arch/arm/boot/dts/sun5i-a10s.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/sun5i-a13-hsg-h702.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun5i-a13-olinuxino.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun5i-a13.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/sun6i-a31.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-bananapi.dts | 6
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-hummingbird.dts | 8
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts | 3
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun8i-a23.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/sun9i-a80-optimus.dts | 5
-rw-r--r--  arch/arm/boot/dts/sun9i-a80.dtsi | 10
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 10
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 77
-rw-r--r--  arch/arm/kernel/entry-v7m.S | 2
-rw-r--r--  arch/arm/kvm/Kconfig | 1
-rw-r--r--  arch/arm/kvm/arm.c | 10
-rw-r--r--  arch/arm/kvm/coproc.c | 70
-rw-r--r--  arch/arm/kvm/coproc.h | 6
-rw-r--r--  arch/arm/kvm/coproc_a15.c | 2
-rw-r--r--  arch/arm/kvm/coproc_a7.c | 2
-rw-r--r--  arch/arm/kvm/mmu.c | 164
-rw-r--r--  arch/arm/kvm/trace.h | 39
-rw-r--r--  arch/arm/mach-mvebu/coherency.c | 7
-rw-r--r--  arch/arm/mach-shmobile/board-ape6evm.c | 20
-rw-r--r--  arch/arm/mach-shmobile/board-lager.c | 13
-rw-r--r--  arch/arm/mach-shmobile/setup-rcar-gen2.c | 2
-rw-r--r--  arch/arm/mach-shmobile/timer.c | 12
-rw-r--r--  arch/arm/mm/Kconfig | 1
-rw-r--r--  arch/arm/mm/context.c | 26
-rw-r--r--  arch/arm/mm/dma-mapping.c | 56
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 10
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 34
-rw-r--r--  arch/arm64/kvm/Kconfig | 1
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 75
-rw-r--r--  arch/avr32/mm/fault.c | 2
-rw-r--r--  arch/cris/mm/fault.c | 2
-rw-r--r--  arch/frv/mm/fault.c | 2
-rw-r--r--  arch/ia64/mm/fault.c | 2
-rw-r--r--  arch/m32r/mm/fault.c | 2
-rw-r--r--  arch/m68k/mm/fault.c | 2
-rw-r--r--  arch/metag/mm/fault.c | 2
-rw-r--r--  arch/microblaze/mm/fault.c | 2
-rw-r--r--  arch/mips/Kconfig | 23
-rw-r--r--  arch/mips/boot/elf2ecoff.c | 64
-rw-r--r--  arch/mips/cavium-octeon/smp.c | 2
-rw-r--r--  arch/mips/configs/malta_defconfig | 16
-rw-r--r--  arch/mips/include/asm/fpu.h | 43
-rw-r--r--  arch/mips/include/asm/fw/arc/hinv.h | 6
-rw-r--r--  arch/mips/include/asm/mips-cm.h | 4
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 15
-rw-r--r--  arch/mips/include/asm/syscall.h | 8
-rw-r--r--  arch/mips/include/asm/thread_info.h | 1
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h | 15
-rw-r--r--  arch/mips/jz4740/irq.c | 3
-rw-r--r--  arch/mips/kernel/elf.c | 8
-rw-r--r--  arch/mips/kernel/irq_cpu.c | 4
-rw-r--r--  arch/mips/kernel/process.c | 36
-rw-r--r--  arch/mips/kernel/ptrace.c | 2
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 2
-rw-r--r--  arch/mips/kernel/scall64-64.S | 1
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 2
-rw-r--r--  arch/mips/kernel/smp-cmp.c | 4
-rw-r--r--  arch/mips/kernel/smp-mt.c | 3
-rw-r--r--  arch/mips/kernel/smp.c | 2
-rw-r--r--  arch/mips/kernel/traps.c | 3
-rw-r--r--  arch/mips/kvm/Kconfig | 1
-rw-r--r--  arch/mips/mm/fault.c | 2
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 2
-rw-r--r--  arch/mn10300/include/asm/cacheflush.h | 7
-rw-r--r--  arch/mn10300/mm/fault.c | 2
-rw-r--r--  arch/nios2/mm/fault.c | 10
-rw-r--r--  arch/openrisc/mm/fault.c | 2
-rw-r--r--  arch/parisc/mm/fault.c | 2
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h | 7
-rw-r--r--  arch/powerpc/kvm/Kconfig | 1
-rw-r--r--  arch/powerpc/mm/copro_fault.c | 2
-rw-r--r--  arch/powerpc/mm/fault.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 1
-rw-r--r--  arch/s390/include/asm/cacheflush.h | 4
-rw-r--r--  arch/s390/kvm/Kconfig | 1
-rw-r--r--  arch/s390/mm/fault.c | 6
-rw-r--r--  arch/score/mm/fault.c | 2
-rw-r--r--  arch/sh/mm/fault.c | 2
-rw-r--r--  arch/sparc/include/asm/cacheflush_64.h | 5
-rw-r--r--  arch/sparc/mm/fault_32.c | 2
-rw-r--r--  arch/sparc/mm/fault_64.c | 2
-rw-r--r--  arch/tile/kvm/Kconfig | 1
-rw-r--r--  arch/tile/mm/fault.c | 2
-rw-r--r--  arch/um/kernel/trap.c | 2
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 2
-rw-r--r--  arch/x86/include/asm/debugreg.h | 5
-rw-r--r--  arch/x86/include/asm/hw_breakpoint.h | 1
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 4
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 19
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 9
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.h | 18
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 45
-rw-r--r--  arch/x86/kvm/Kconfig | 1
-rw-r--r--  arch/x86/kvm/lapic.c | 3
-rw-r--r--  arch/x86/mm/fault.c | 2
-rw-r--r--  arch/x86/pci/common.c | 16
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 1
-rw-r--r--  arch/xtensa/mm/fault.c | 2
-rw-r--r--  block/blk-mq-sysfs.c | 25
-rw-r--r--  block/blk-mq.c | 23
-rw-r--r--  block/blk-mq.h | 2
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/acpi/acpi_lpss.c | 35
-rw-r--r--  drivers/base/regmap/internal.h | 10
-rw-r--r--  drivers/base/regmap/regmap-ac97.c | 4
-rw-r--r--  drivers/base/regmap/regmap-i2c.c | 46
-rw-r--r--  drivers/base/regmap/regmap.c | 7
-rw-r--r--  drivers/block/rbd.c | 25
-rw-r--r--  drivers/char/random.c | 8
-rw-r--r--  drivers/clk/Kconfig | 1
-rw-r--r--  drivers/cpufreq/Kconfig | 1
-rw-r--r--  drivers/devfreq/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 17
-rw-r--r--  drivers/gpio/gpio-omap.c | 39
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 78
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 18
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 9
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 30
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 5
-rw-r--r--  drivers/hwmon/abx500.c | 6
-rw-r--r--  drivers/hwmon/ad7314.c | 5
-rw-r--r--  drivers/hwmon/adc128d818.c | 3
-rw-r--r--  drivers/hwmon/ads7828.c | 102
-rw-r--r--  drivers/hwmon/ina2xx.c | 334
-rw-r--r--  drivers/hwmon/jc42.c | 15
-rw-r--r--  drivers/hwmon/nct7802.c | 2
-rw-r--r--  drivers/hwmon/tmp102.c | 15
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 23
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 12
-rw-r--r--  drivers/i2c/i2c-core.c | 2
-rw-r--r--  drivers/i2c/i2c-slave-eeprom.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs.h | 1
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 137
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 19
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 18
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 27
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 49
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 239
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 22
-rw-r--r--  drivers/input/mouse/elantech.c | 16
-rw-r--r--  drivers/input/mouse/synaptics.c | 7
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 8
-rw-r--r--  drivers/iommu/tegra-gart.c | 3
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 27
-rw-r--r--  drivers/isdn/hardware/eicon/message.c | 2
-rw-r--r--  drivers/md/Kconfig | 1
-rw-r--r--  drivers/md/bitmap.c | 13
-rw-r--r--  drivers/md/dm-cache-metadata.c | 9
-rw-r--r--  drivers/md/dm-thin.c | 6
-rw-r--r--  drivers/md/raid5.c | 5
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/caif/caif_hsi.c | 1
-rw-r--r--  drivers/net/can/c_can/c_can.c | 3
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 28
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 9
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 27
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 26
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 164
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 22
-rw-r--r--  drivers/net/hyperv/netvsc.c | 11
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 6
-rw-r--r--  drivers/net/macvtap.c | 16
-rw-r--r--  drivers/net/ppp/ppp_deflate.c | 2
-rw-r--r--  drivers/net/tun.c | 25
-rw-r--r--  drivers/net/usb/sr9700.c | 36
-rw-r--r--  drivers/net/usb/sr9700.h | 66
-rw-r--r--  drivers/net/virtio_net.c | 24
-rw-r--r--  drivers/net/vxlan.c | 10
-rw-r--r--  drivers/net/wan/Kconfig | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw-file.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 53
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 11
-rw-r--r--  drivers/net/xen-netback/interface.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 3
-rw-r--r--  drivers/pci/host/pcie-designware.c | 3
-rw-r--r--  drivers/pci/quirks.c | 40
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 108
-rw-r--r--  drivers/regulator/Kconfig | 17
-rw-r--r--  drivers/regulator/Makefile | 2
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 93
-rw-r--r--  drivers/regulator/core.c | 375
-rw-r--r--  drivers/regulator/da9211-regulator.c | 16
-rw-r--r--  drivers/regulator/fan53555.c | 4
-rw-r--r--  drivers/regulator/internal.h | 2
-rw-r--r--  drivers/regulator/isl9305.c | 6
-rw-r--r--  drivers/regulator/lp872x.c | 24
-rw-r--r--  drivers/regulator/max14577.c | 62
-rw-r--r--  drivers/regulator/max77686.c | 70
-rw-r--r--  drivers/regulator/max77843.c | 227
-rw-r--r--  drivers/regulator/max8649.c | 4
-rw-r--r--  drivers/regulator/mt6397-regulator.c | 332
-rw-r--r--  drivers/regulator/of_regulator.c | 11
-rw-r--r--  drivers/regulator/pfuze100-regulator.c | 134
-rw-r--r--  drivers/regulator/qcom_rpm-regulator.c | 15
-rw-r--r--  drivers/regulator/rk808-regulator.c | 6
-rw-r--r--  drivers/regulator/rt5033-regulator.c | 8
-rw-r--r--  drivers/regulator/tps65023-regulator.c | 6
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 117
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 220
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 50
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c | 3
-rw-r--r--  drivers/scsi/scsi.c | 13
-rw-r--r--  drivers/scsi/sd.c | 6
-rw-r--r--  drivers/spi/Kconfig | 24
-rw-r--r--  drivers/spi/Makefile | 2
-rw-r--r--  drivers/spi/spi-atmel.c | 12
-rw-r--r--  drivers/spi/spi-au1550.c | 4
-rw-r--r--  drivers/spi/spi-bcm2835.c | 4
-rw-r--r--  drivers/spi/spi-bcm63xx.c | 4
-rw-r--r--  drivers/spi/spi-bitbang.c | 4
-rw-r--r--  drivers/spi/spi-butterfly.c | 4
-rw-r--r--  drivers/spi/spi-coldfire-qspi.c | 5
-rw-r--r--  drivers/spi/spi-davinci.c | 4
-rw-r--r--  drivers/spi/spi-dln2.c | 881
-rw-r--r--  drivers/spi/spi-dw-mid.c | 15
-rw-r--r--  drivers/spi/spi-dw-pci.c | 38
-rw-r--r--  drivers/spi/spi-dw.c | 9
-rw-r--r--  drivers/spi/spi-falcon.c | 12
-rw-r--r--  drivers/spi/spi-fsl-cpm.c | 9
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 163
-rw-r--r--  drivers/spi/spi-fsl-lib.c | 16
-rw-r--r--  drivers/spi/spi-fsl-lib.h | 4
-rw-r--r--  drivers/spi/spi-gpio.c | 8
-rw-r--r--  drivers/spi/spi-img-spfi.c | 49
-rw-r--r--  drivers/spi/spi-imx.c | 32
-rw-r--r--  drivers/spi/spi-lm70llp.c | 4
-rw-r--r--  drivers/spi/spi-meson-spifc.c | 2
-rw-r--r--  drivers/spi/spi-mxs.c | 5
-rw-r--r--  drivers/spi/spi-omap-100k.c | 5
-rw-r--r--  drivers/spi/spi-omap-uwire.c | 4
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 5
-rw-r--r--  drivers/spi/spi-orion.c | 88
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c | 17
-rw-r--r--  drivers/spi/spi-pxa2xx-pxadma.c | 34
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 207
-rw-r--r--  drivers/spi/spi-pxa2xx.h | 34
-rw-r--r--  drivers/spi/spi-qup.c | 11
-rw-r--r--  drivers/spi/spi-rockchip.c | 6
-rw-r--r--  drivers/spi/spi-rspi.c | 5
-rw-r--r--  drivers/spi/spi-s3c64xx.c | 4
-rw-r--r--  drivers/spi/spi-sc18is602.c | 4
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 5
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 91
-rw-r--r--  drivers/spi/spi-sh.c | 5
-rw-r--r--  drivers/spi/spi-sirf.c | 1
-rw-r--r--  drivers/spi/spi-st-ssc4.c | 504
-rw-r--r--  drivers/spi/spi-ti-qspi.c | 14
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 4
-rw-r--r--  drivers/spi/spi-xilinx.c | 298
-rw-r--r--  drivers/spi/spi.c | 120
-rw-r--r--  drivers/spi/spidev.c | 125
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_io.c | 2
-rw-r--r--  drivers/staging/nvec/nvec.c | 9
-rw-r--r--  drivers/usb/core/otg_whitelist.h | 5
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/dwc2/core_intr.c | 6
-rw-r--r--  drivers/usb/phy/phy.c | 2
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 9
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/vhost/net.c | 4
-rw-r--r--  fs/aio.c | 7
-rw-r--r--  fs/btrfs/Kconfig | 1
-rw-r--r--  fs/btrfs/scrub.c | 2
-rw-r--r--  fs/btrfs/tree-log.c | 1
-rw-r--r--  fs/cifs/cifs_debug.c | 6
-rw-r--r--  fs/cifs/file.c | 6
-rw-r--r--  fs/cifs/smbencrypt.c | 2
-rw-r--r--  fs/gfs2/quota.c | 49
-rw-r--r--  fs/nfs/direct.c | 6
-rw-r--r--  fs/nfs/inode.c | 5
-rw-r--r--  fs/nfs/internal.h | 2
-rw-r--r--  fs/nfs/nfs4client.c | 2
-rw-r--r--  fs/nilfs2/nilfs.h | 2
-rw-r--r--  fs/nilfs2/segment.c | 44
-rw-r--r--  fs/nilfs2/segment.h | 5
-rw-r--r--  fs/notify/Kconfig | 1
-rw-r--r--  fs/quota/Kconfig | 1
-rw-r--r--  fs/quota/dquot.c | 83
-rw-r--r--  fs/quota/quota.c | 162
-rw-r--r--  fs/udf/file.c | 2
-rw-r--r--  fs/xfs/xfs_qm.h | 4
-rw-r--r--  fs/xfs/xfs_qm_syscalls.c | 156
-rw-r--r--  fs/xfs/xfs_quotaops.c | 8
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/ftrace_event.h | 2
-rw-r--r--  include/linux/i2c.h | 6
-rw-r--r--  include/linux/if_vlan.h | 60
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/mlx4/device.h | 2
-rw-r--r--  include/linux/mm.h | 6
-rw-r--r--  include/linux/osq_lock.h | 12
-rw-r--r--  include/linux/perf_event.h | 36
-rw-r--r--  include/linux/pxa2xx_ssp.h | 1
-rw-r--r--  include/linux/quota.h | 47
-rw-r--r--  include/linux/quotaops.h | 4
-rw-r--r--  include/linux/rculist.h | 16
-rw-r--r--  include/linux/rcupdate.h | 13
-rw-r--r--  include/linux/rcutiny.h | 45
-rw-r--r--  include/linux/rcutree.h | 11
-rw-r--r--  include/linux/regmap.h | 2
-rw-r--r--  include/linux/regulator/da9211.h | 2
-rw-r--r--  include/linux/regulator/driver.h | 13
-rw-r--r--  include/linux/regulator/machine.h | 13
-rw-r--r--  include/linux/regulator/mt6397-regulator.h | 49
-rw-r--r--  include/linux/regulator/pfuze100.h | 14
-rw-r--r--  include/linux/spi/at86rf230.h | 4
-rw-r--r--  include/linux/spi/l4f00242t03.h | 4
-rw-r--r--  include/linux/spi/lms283gf05.h | 4
-rw-r--r--  include/linux/spi/mxs-spi.h | 4
-rw-r--r--  include/linux/spi/pxa2xx_spi.h | 5
-rw-r--r--  include/linux/spi/rspi.h | 5
-rw-r--r--  include/linux/spi/sh_hspi.h | 4
-rw-r--r--  include/linux/spi/sh_msiof.h | 2
-rw-r--r--  include/linux/spi/spi.h | 6
-rw-r--r--  include/linux/spi/tle62x0.h | 4
-rw-r--r--  include/linux/spi/tsc2005.h | 5
-rw-r--r--  include/linux/srcu.h | 14
-rw-r--r--  include/linux/tracepoint.h | 2
-rw-r--r--  include/net/flow_keys.h | 6
-rw-r--r--  include/net/ip.h | 13
-rw-r--r--  include/net/ipv6.h | 7
-rw-r--r--  include/net/netfilter/nf_tables.h | 2
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/sch_generic.h | 13
-rw-r--r--  include/net/tcp.h | 4
-rw-r--r--  include/rdma/ib_verbs.h | 5
-rw-r--r--  include/sound/ak4113.h | 2
-rw-r--r--  include/sound/ak4114.h | 2
-rw-r--r--  include/sound/soc.h | 1
-rw-r--r--  include/trace/events/tlb.h | 4
-rw-r--r--  include/trace/ftrace.h | 7
-rw-r--r--  include/uapi/rdma/ib_user_verbs.h | 27
-rw-r--r--  init/Kconfig | 18
-rw-r--r--  kernel/Kconfig.locks | 4
-rw-r--r--  kernel/bpf/syscall.c | 25
-rw-r--r--  kernel/cpu.c | 56
-rw-r--r--  kernel/events/core.c | 479
-rw-r--r--  kernel/events/ring_buffer.c | 3
-rw-r--r--  kernel/futex.c | 6
-rw-r--r--  kernel/locking/Makefile | 3
-rw-r--r--  kernel/locking/mcs_spinlock.h | 16
-rw-r--r--  kernel/locking/mutex.c | 60
-rw-r--r--  kernel/locking/osq_lock.c (renamed from kernel/locking/mcs_spinlock.c) | 9
-rw-r--r--  kernel/locking/rtmutex.c | 7
-rw-r--r--  kernel/locking/rwsem-spinlock.c | 2
-rw-r--r--  kernel/locking/rwsem-xadd.c | 3
-rw-r--r--  kernel/notifier.c | 3
-rw-r--r--  kernel/power/Kconfig | 1
-rw-r--r--  kernel/rcu/Makefile | 3
-rw-r--r--  kernel/rcu/rcu.h | 6
-rw-r--r--  kernel/rcu/rcutorture.c | 66
-rw-r--r--  kernel/rcu/srcu.c | 2
-rw-r--r--  kernel/rcu/tiny.c | 113
-rw-r--r--  kernel/rcu/tiny_plugin.h | 9
-rw-r--r--  kernel/rcu/tree.c | 355
-rw-r--r--  kernel/rcu/tree.h | 62
-rw-r--r--  kernel/rcu/tree_plugin.h | 271
-rw-r--r--  kernel/rcu/tree_trace.c | 8
-rw-r--r--  kernel/sched/completion.c | 18
-rw-r--r--  kernel/sched/core.c | 7
-rw-r--r--  kernel/smpboot.c | 2
-rw-r--r--  kernel/softirq.c | 9
-rw-r--r--  kernel/time/hrtimer.c | 2
-rw-r--r--  kernel/trace/trace_event_perf.c | 4
-rw-r--r--  kernel/trace/trace_kprobe.c | 4
-rw-r--r--  kernel/trace/trace_syscalls.c | 4
-rw-r--r--  kernel/trace/trace_uprobe.c | 2
-rw-r--r--  lib/Kconfig.debug | 3
-rw-r--r--  lib/checksum.c | 12
-rw-r--r--  mm/Kconfig | 1
-rw-r--r--  mm/gup.c | 4
-rw-r--r--  mm/ksm.c | 2
-rw-r--r--  mm/memcontrol.c | 2
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  mm/nommu.c | 1
-rw-r--r--  mm/pagewalk.c | 5
-rw-r--r--  mm/shmem.c | 2
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c | 29
-rw-r--r--  net/caif/chnl_net.c | 1
-rw-r--r--  net/core/dev.c | 37
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/dsa/slave.c | 1
-rw-r--r--  net/ipv4/ip_forward.c | 3
-rw-r--r--  net/ipv4/ip_output.c | 29
-rw-r--r--  net/ipv4/ping.c | 5
-rw-r--r--  net/ipv4/route.c | 12
-rw-r--r--  net/ipv4/tcp_bic.c | 2
-rw-r--r--  net/ipv4/tcp_cong.c | 32
-rw-r--r--  net/ipv4/tcp_cubic.c | 39
-rw-r--r--  net/ipv4/tcp_ipv4.c | 37
-rw-r--r--  net/ipv4/tcp_scalable.c | 3
-rw-r--r--  net/ipv4/tcp_veno.c | 2
-rw-r--r--  net/ipv4/tcp_yeah.c | 2
-rw-r--r--  net/ipv4/udp_diag.c | 4
-rw-r--r--  net/ipv6/ip6_fib.c | 45
-rw-r--r--  net/ipv6/ip6_gre.c | 4
-rw-r--r--  net/ipv6/ip6_output.c | 14
-rw-r--r--  net/ipv6/output_core.c | 41
-rw-r--r--  net/ipv6/route.c | 6
-rw-r--r--  net/ipv6/sit.c | 8
-rw-r--r--  net/ipv6/udp_offload.c | 10
-rw-r--r--  net/ipv6/xfrm6_policy.c | 10
-rw-r--r--  net/llc/sysctl_net_llc.c | 8
-rw-r--r--  net/mac80211/pm.c | 29
-rw-r--r--  net/mac80211/rx.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 33
-rw-r--r--  net/netfilter/nf_tables_api.c | 28
-rw-r--r--  net/netfilter/nft_masq.c | 26
-rw-r--r--  net/netfilter/nft_nat.c | 40
-rw-r--r--  net/netfilter/nft_redir.c | 25
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/rds/sysctl.c | 4
-rw-r--r--  net/sched/cls_api.c | 7
-rw-r--r--  net/sched/cls_bpf.c | 15
-rw-r--r--  net/sched/sch_fq.c | 10
-rw-r--r--  net/sctp/associola.c | 1
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  net/socket.c | 3
-rw-r--r--  net/wireless/nl80211.c | 9
-rw-r--r--  net/wireless/util.c | 6
-rw-r--r--  samples/bpf/test_maps.c | 4
-rw-r--r--  security/tomoyo/Kconfig | 1
-rw-r--r--  sound/core/seq/seq_dummy.c | 31
-rw-r--r--  sound/i2c/other/ak4113.c | 17
-rw-r--r--  sound/i2c/other/ak4114.c | 18
-rw-r--r--  sound/soc/adi/axi-i2s.c | 2
-rw-r--r--  sound/soc/atmel/atmel_ssc_dai.c | 24
-rw-r--r--  sound/soc/codecs/pcm512x.c | 2
-rw-r--r--  sound/soc/codecs/rt286.c | 6
-rw-r--r--  sound/soc/codecs/rt5640.c | 1
-rw-r--r--  sound/soc/codecs/rt5677.c | 18
-rw-r--r--  sound/soc/codecs/sgtl5000.c | 13
-rw-r--r--  sound/soc/codecs/tlv320aic3x.c | 2
-rw-r--r--  sound/soc/codecs/ts3a227e.c | 6
-rw-r--r--  sound/soc/codecs/wm8731.c | 2
-rw-r--r--  sound/soc/codecs/wm8904.c | 23
-rw-r--r--  sound/soc/codecs/wm8960.c | 2
-rw-r--r--  sound/soc/codecs/wm9705.c | 16
-rw-r--r--  sound/soc/codecs/wm9712.c | 12
-rw-r--r--  sound/soc/codecs/wm9713.c | 12
-rw-r--r--  sound/soc/fsl/fsl_esai.h | 2
-rw-r--r--  sound/soc/fsl/fsl_ssi.c | 4
-rw-r--r--  sound/soc/fsl/imx-wm8962.c | 1
-rw-r--r--  sound/soc/generic/simple-card.c | 7
-rw-r--r--  sound/soc/intel/sst-firmware.c | 13
-rw-r--r--  sound/soc/intel/sst-haswell-ipc.c | 34
-rw-r--r--  sound/soc/intel/sst/sst_acpi.c | 2
-rw-r--r--  sound/soc/omap/omap-mcbsp.c | 2
-rw-r--r--  sound/soc/rockchip/rockchip_i2s.c | 1
-rw-r--r--  sound/soc/soc-ac97.c | 36
-rw-r--r--  sound/soc/soc-compress.c | 9
-rw-r--r--  tools/lib/api/fs/debugfs.c | 43
-rw-r--r--  tools/lib/api/fs/debugfs.h | 3
-rw-r--r--  tools/lib/lockdep/.gitignore | 1
-rw-r--r--  tools/lib/lockdep/Makefile | 2
-rw-r--r--  tools/lib/traceevent/event-parse.c | 328
-rw-r--r--  tools/perf/Documentation/perf-buildid-cache.txt | 2
-rw-r--r--  tools/perf/Documentation/perf-list.txt | 13
-rw-r--r--  tools/perf/Documentation/perf-mem.txt | 9
-rw-r--r--  tools/perf/Documentation/perf-record.txt | 19
-rw-r--r--  tools/perf/Documentation/perf-script.txt | 28
-rw-r--r--  tools/perf/Documentation/perf-stat.txt | 20
-rw-r--r--  tools/perf/bench/futex.h | 13
-rw-r--r--  tools/perf/builtin-buildid-cache.c | 4
-rw-r--r--  tools/perf/builtin-diff.c | 248
-rw-r--r--  tools/perf/builtin-inject.c | 5
-rw-r--r--  tools/perf/builtin-mem.c | 131
-rw-r--r--  tools/perf/builtin-record.c | 70
-rw-r--r--  tools/perf/builtin-report.c | 16
-rw-r--r--  tools/perf/builtin-stat.c | 2
-rw-r--r--  tools/perf/builtin-top.c | 2
-rw-r--r--  tools/perf/builtin-trace.c | 106
-rw-r--r--  tools/perf/config/Makefile | 6
-rw-r--r--  tools/perf/config/feature-checks/Makefile | 4
-rw-r--r--  tools/perf/config/feature-checks/test-all.c | 5
-rw-r--r--  tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c | 14
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Context.c | 5
-rw-r--r--  tools/perf/tests/attr.py | 1
-rw-r--r--  tools/perf/tests/hists_cumulate.c | 2
-rw-r--r--  tools/perf/tests/hists_output.c | 2
-rw-r--r--  tools/perf/tests/make | 1
-rw-r--r--  tools/perf/tests/parse-events.c | 60
-rw-r--r--  tools/perf/tests/sample-parsing.c | 2
-rw-r--r--  tools/perf/ui/browsers/annotate.c | 3
-rw-r--r--  tools/perf/ui/hist.c | 12
-rw-r--r--  tools/perf/ui/progress.h | 4
-rw-r--r--  tools/perf/ui/tui/helpline.c | 3
-rw-r--r--  tools/perf/ui/tui/setup.c | 3
-rw-r--r--  tools/perf/util/annotate.c | 20
-rw-r--r--  tools/perf/util/color.c | 126
-rw-r--r--  tools/perf/util/color.h | 2
-rw-r--r--  tools/perf/util/dso.c | 6
-rw-r--r--  tools/perf/util/dso.h | 1
-rw-r--r--  tools/perf/util/evlist.c | 27
-rw-r--r--  tools/perf/util/evlist.h | 1
-rw-r--r--  tools/perf/util/evsel.c | 4
-rw-r--r--  tools/perf/util/header.c | 2
-rw-r--r--  tools/perf/util/hist.c | 48
-rw-r--r--  tools/perf/util/hist.h | 11
-rw-r--r--  tools/perf/util/map.h | 16
-rw-r--r--  tools/perf/util/parse-events.c | 27
-rw-r--r--  tools/perf/util/parse-events.h | 3
-rw-r--r--  tools/perf/util/parse-events.l | 1
-rw-r--r--  tools/perf/util/parse-events.y | 26
-rw-r--r--  tools/perf/util/parse-options.c | 2
-rw-r--r--  tools/perf/util/pmu.c | 102
-rw-r--r--  tools/perf/util/probe-event.c | 34
-rw-r--r--  tools/perf/util/python.c | 2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 2
-rw-r--r--  tools/perf/util/session.c | 8
-rw-r--r--  tools/perf/util/session.h | 3
-rw-r--r--  tools/perf/util/sort.c | 37
-rw-r--r--  tools/perf/util/symbol-elf.c | 13
-rw-r--r--  tools/perf/util/symbol.c | 33
-rw-r--r--  tools/perf/util/symbol.h | 1
-rw-r--r--  tools/perf/util/unwind-libunwind.c | 31
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/cpus2use.sh | 2
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh | 18
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh | 9
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/parse-build.sh | 20
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/parse-console.sh | 2
630 files changed, 10199 insertions, 4934 deletions
diff --git a/.mailmap b/.mailmap
index d357e1bd2a43..0d971cfb0772 100644
--- a/.mailmap
+++ b/.mailmap
@@ -73,6 +73,7 @@ Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
 Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
+Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
index 20979f8b3edb..505f080d20a1 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
@@ -52,12 +52,18 @@ Description: Per-pmu performance monitoring events specific to the running syste
 		event=0x2abc
 		event=0x423,inv,cmask=0x3
 		domain=0x1,offset=0x8,starting_index=0xffff
+		domain=0x1,offset=0x8,core=?
 
 		Each of the assignments indicates a value to be assigned to a
 		particular set of bits (as defined by the format file
 		corresponding to the <term>) in the perf_event structure passed
 		to the perf_open syscall.
 
+		In the case of the last example, a value replacing "?" would
+		need to be provided by the user selecting the particular event.
+		This is referred to as "event parameterization". Event
+		parameters have the format 'param=?'.
+
 What:		/sys/bus/event_source/devices/<pmu>/events/<event>.unit
 Date:		2014/02/24
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
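
[Illustrative sketch, not part of the patch above.] Once the user has
resolved the "?" parameter, the event string is encoded into
perf_event_attr.config according to the PMU's format files and handed to
perf_event_open(). The encoding itself is PMU-specific, so the sketch below
leaves it to the caller and simply takes the already-resolved type and
config values as hypothetical command-line arguments:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* perf_event_open() has no glibc wrapper; call it via syscall(2). */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(int argc, char **argv)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	/* attr.type comes from /sys/bus/event_source/devices/<pmu>/type */
	attr.type = argc > 1 ? atoi(argv[1]) : 0;
	/* attr.config carries the fully resolved event encoding, i.e. the
	 * sysfs event string with the user's value substituted for "?". */
	attr.config = argc > 2 ? strtoull(argv[2], NULL, 0) : 0;

	fd = perf_event_open(&attr, -1, 0, -1, 0);	/* cpu 0, any pid */
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}

With the perf tool the same resolution typically happens on the command
line, as event strings of the form pmu/<event>,param=value/.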
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index ed186a902d31..b57c0c1cdac6 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -15,7 +15,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
 21 seconds.
 
 This configuration parameter may be changed at runtime via the
-/sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however
+/sys/module/rcupdate/parameters/rcu_cpu_stall_timeout, however
 this parameter is checked only at the beginning of a cycle.
 So if you are 10 seconds into a 40-second stall, setting this
 sysfs parameter to (say) five will shorten the timeout for the
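
[Illustrative sketch, not part of the patch.] Updating the parameter at
runtime is an ordinary write to the corrected sysfs path; the value of 60
seconds below is an arbitrary example and requires root:

#include <stdio.h>

int main(void)
{
	/* Path as given by the corrected documentation text above. */
	FILE *f = fopen("/sys/module/rcupdate/parameters/rcu_cpu_stall_timeout", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "60\n");	/* new stall timeout, in seconds */
	fclose(f);
	return 0;
}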
@@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and
152"D" indicates that dyntick-idle processing is enabled ("." is printed 152"D" indicates that dyntick-idle processing is enabled ("." is printed
153otherwise, for example, if disabled via the "nohz=" kernel boot parameter). 153otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
154 154
155If the relevant grace-period kthread has been unable to run prior to
156the stall warning, the following additional line is printed:
157
158 rcu_preempt kthread starved for 2023 jiffies!
159
160Starving the grace-period kthreads of CPU time can of course result in
161RCU CPU stall warnings even when all CPUs and tasks have passed through
162the required quiescent states.
163
155 164
156Multiple Warnings From One Stall 165Multiple Warnings From One Stall
157 166
@@ -187,6 +196,11 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
 	behavior, you might need to replace some of the cond_resched()
 	calls with calls to cond_resched_rcu_qs().
 
+o	Anything that prevents RCU's grace-period kthreads from running.
+	This can result in the "All QSes seen" console-log message.
+	This message will include information on when the kthread last
+	ran and how often it should be expected to run.
+
 o	A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
 	happen to preempt a low-priority task in the middle of an RCU
 	read-side critical section.  This is especially damaging if
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index b63b9bb3bc0c..08651da15448 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -56,14 +56,14 @@ rcuboost:
 
 The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
 
-  0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
-  1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
-  2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
-  3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
-  4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
-  5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
-  6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
-  7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
+  0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
+  1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
+  2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
+  3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
+  4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
+  5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
+  6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
+  7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
 
 This file has one line per CPU, or eight for this 8-CPU system.
 The fields are as follows:
@@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this
 Kernels compiled with CONFIG_RCU_BOOST=y display the following from
 /debug/rcu/rcu_preempt/rcudata:
 
-  0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
-  1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
-  2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
-  3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
-  4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
-  5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
-  6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
-  7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
+  0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
+  1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
+  2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
+  3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
+  4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
+  5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
+  6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
+  7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
 
 This is similar to the output discussed above, but contains the following
 additional fields:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st.txt b/Documentation/devicetree/bindings/i2c/i2c-st.txt
index 437e0db3823c..4c26fda3844a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-st.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-st.txt
@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
 	compatible	= "st,comms-ssc4-i2c";
 	reg		= <0xfed40000 0x110>;
 	interrupts	= <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-	clocks		= <&CLK_S_ICN_REG_0>;
+	clocks		= <&clk_s_a0_ls CLK_ICN_REG>;
 	clock-names	= "ssc";
 	clock-frequency	= <400000>;
 	pinctrl-names	= "default";
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 9f4e3824e71e..9f41d05be3be 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -47,6 +47,7 @@ dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM
 dallas,ds4510		CPU Supervisor with Nonvolatile Memory and Programmable I/O
 dallas,ds75		Digital Thermometer and Thermostat
 dlg,da9053		DA9053: flexible system level PMIC with multicore support
+dlg,da9063		DA9063: system PMIC for quad-core application processors
 epson,rx8025		High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110		MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 75fdfaf41831..e39f0bc1f55e 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -39,6 +39,12 @@ to get matched with their hardware counterparts as follow:
 	-BUCKn	:	1-4.
 	Use standard regulator bindings for it ('regulator-off-in-suspend').
 
+	LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
+	control. To turn this feature on this property must be added to the regulator
+	sub-node:
+		- maxim,ena-gpios : one GPIO specifier enable control (the gpio
+				    flags are actually ignored and always
+				    ACTIVE_HIGH is used)
 
 Example:
 
@@ -65,4 +71,12 @@ Example:
 			regulator-always-on;
 			regulator-boot-on;
 		};
+
+		buck9_reg {
+			regulator-compatible = "BUCK9";
+			regulator-name = "CAM_ISP_CORE_1.2V";
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1200000>;
+			maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
+		};
 	}
diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt
index 240019a82f9a..eb618907c7de 100644
--- a/Documentation/devicetree/bindings/regulator/da9211.txt
+++ b/Documentation/devicetree/bindings/regulator/da9211.txt
@@ -11,6 +11,7 @@ Required properties:
   BUCKA and BUCKB.
 
 Optional properties:
+- enable-gpios: platform gpio for control of BUCKA/BUCKB.
 - Any optional property defined in regulator.txt
 
 Example 1) DA9211
@@ -27,6 +28,7 @@ Example 1) DA9211
 			regulator-max-microvolt = <1570000>;
 			regulator-min-microamp	= <2000000>;
 			regulator-max-microamp	= <5000000>;
+			enable-gpios = <&gpio 27 0>;
 		};
 		BUCKB {
 			regulator-name = "VBUCKB";
@@ -34,11 +36,12 @@ Example 1) DA9211
 			regulator-max-microvolt = <1570000>;
 			regulator-min-microamp	= <2000000>;
 			regulator-max-microamp	= <5000000>;
+			enable-gpios = <&gpio 17 0>;
 		};
 	};
 };
 
-Example 2) DA92113
+Example 2) DA9213
 	pmic: da9213@68 {
 		compatible = "dlg,da9213";
 		reg = <0x68>;
@@ -51,6 +54,7 @@ Example 2) DA92113
 			regulator-max-microvolt = <1570000>;
 			regulator-min-microamp	= <3000000>;
 			regulator-max-microamp	= <6000000>;
+			enable-gpios = <&gpio 27 0>;
 		};
 		BUCKB {
 			regulator-name = "VBUCKB";
@@ -58,6 +62,7 @@ Example 2) DA92113
 			regulator-max-microvolt = <1570000>;
 			regulator-min-microamp	= <3000000>;
 			regulator-max-microamp	= <6000000>;
+			enable-gpios = <&gpio 17 0>;
 		};
 	};
 };
diff --git a/Documentation/devicetree/bindings/regulator/isl9305.txt b/Documentation/devicetree/bindings/regulator/isl9305.txt
index a626fc1bbf0d..d6e7c9ec9413 100644
--- a/Documentation/devicetree/bindings/regulator/isl9305.txt
+++ b/Documentation/devicetree/bindings/regulator/isl9305.txt
@@ -2,7 +2,7 @@ Intersil ISL9305/ISL9305H voltage regulator
 
 Required properties:
 
-- compatible: "isl,isl9305" or "isl,isl9305h"
+- compatible: "isil,isl9305" or "isil,isl9305h"
 - reg: I2C slave address, usually 0x68.
 - regulators: A node that houses a sub-node for each regulator within the
   device. Each sub-node is identified using the node's name, with valid
@@ -19,7 +19,7 @@ Optional properties:
 Example
 
 	pmic: isl9305@68 {
-		compatible = "isl,isl9305";
+		compatible = "isil,isl9305";
 		reg = <0x68>;
 
 		VINDCD1-supply = <&system_power>;
diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
new file mode 100644
index 000000000000..a42b1d6e9863
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
@@ -0,0 +1,217 @@
+Mediatek MT6397 Regulator Driver
+
+Required properties:
+- compatible: "mediatek,mt6397-regulator"
+- mt6397regulator: List of regulators provided by this controller. It is named
+  according to its regulator type, buck_<name> and ldo_<name>.
+  The definition for each of these nodes is defined using the standard binding
+  for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are::
+BUCK:
+  buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu,
+  buck_vdrm, buck_vio18
+LDO:
+  ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch,
+  ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6,
+  ldo_vibr
+
+Example:
+	pmic {
+		compatible = "mediatek,mt6397";
+
+		mt6397regulator: mt6397regulator {
+			compatible = "mediatek,mt6397-regulator";
+
+			mt6397_vpca15_reg: buck_vpca15 {
+				regulator-compatible = "buck_vpca15";
+				regulator-name = "vpca15";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <200>;
+			};
+
+			mt6397_vpca7_reg: buck_vpca7 {
+				regulator-compatible = "buck_vpca7";
+				regulator-name = "vpca7";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <115>;
+			};
+
+			mt6397_vsramca15_reg: buck_vsramca15 {
+				regulator-compatible = "buck_vsramca15";
+				regulator-name = "vsramca15";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <115>;
+
+			};
+
+			mt6397_vsramca7_reg: buck_vsramca7 {
+				regulator-compatible = "buck_vsramca7";
+				regulator-name = "vsramca7";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <115>;
+
+			};
+
+			mt6397_vcore_reg: buck_vcore {
+				regulator-compatible = "buck_vcore";
+				regulator-name = "vcore";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <115>;
+			};
+
+			mt6397_vgpu_reg: buck_vgpu {
+				regulator-compatible = "buck_vgpu";
+				regulator-name = "vgpu";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <115>;
+			};
+
+			mt6397_vdrm_reg: buck_vdrm {
+				regulator-compatible = "buck_vdrm";
+				regulator-name = "vdrm";
+				regulator-min-microvolt = < 800000>;
+				regulator-max-microvolt = <1400000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <500>;
+			};
+
+			mt6397_vio18_reg: buck_vio18 {
+				regulator-compatible = "buck_vio18";
+				regulator-name = "vio18";
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <2120000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <500>;
+			};
+
+			mt6397_vtcxo_reg: ldo_vtcxo {
+				regulator-compatible = "ldo_vtcxo";
+				regulator-name = "vtcxo";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <90>;
+			};
+
+			mt6397_va28_reg: ldo_va28 {
+				regulator-compatible = "ldo_va28";
+				regulator-name = "va28";
+				/* fixed output 2.8 V */
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vcama_reg: ldo_vcama {
+				regulator-compatible = "ldo_vcama";
+				regulator-name = "vcama";
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vio28_reg: ldo_vio28 {
+				regulator-compatible = "ldo_vio28";
+				regulator-name = "vio28";
+				/* fixed output 2.8 V */
+				regulator-enable-ramp-delay = <240>;
+			};
+
+			mt6397_usb_reg: ldo_vusb {
+				regulator-compatible = "ldo_vusb";
+				regulator-name = "vusb";
+				/* fixed output 3.3 V */
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vmc_reg: ldo_vmc {
+				regulator-compatible = "ldo_vmc";
+				regulator-name = "vmc";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vmch_reg: ldo_vmch {
+				regulator-compatible = "ldo_vmch";
+				regulator-name = "vmch";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+				regulator-compatible = "ldo_vemc3v3";
+				regulator-name = "vemc_3v3";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp1_reg: ldo_vgp1 {
+				regulator-compatible = "ldo_vgp1";
+				regulator-name = "vcamd";
+				regulator-min-microvolt = <1220000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <240>;
+			};
+
+			mt6397_vgp2_reg: ldo_vgp2 {
+				regulator-compatible = "ldo_vgp2";
+				regulator-name = "vcamio";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp3_reg: ldo_vgp3 {
+				regulator-compatible = "ldo_vgp3";
+				regulator-name = "vcamaf";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp4_reg: ldo_vgp4 {
+				regulator-compatible = "ldo_vgp4";
+				regulator-name = "vgp4";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp5_reg: ldo_vgp5 {
+				regulator-compatible = "ldo_vgp5";
+				regulator-name = "vgp5";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp6_reg: ldo_vgp6 {
+				regulator-compatible = "ldo_vgp6";
+				regulator-name = "vgp6";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vibr_reg: ldo_vibr {
+				regulator-compatible = "ldo_vibr";
+				regulator-name = "vibr";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index 34ef5d16d0f1..9b40db88f637 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -1,7 +1,7 @@
 PFUZE100 family of regulators
 
 Required properties:
-- compatible: "fsl,pfuze100" or "fsl,pfuze200"
+- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000"
 - reg: I2C slave address
 
 Required child node:
@@ -14,6 +14,8 @@ Required child node:
   sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
   --PFUZE200
   sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6
+  --PFUZE3000
+  sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4
 
 Each regulator is defined using the standard binding for regulators.
 
@@ -205,3 +207,93 @@ Example 2: PFUZE200
 		};
 	};
 };
+
+Example 3: PFUZE3000
+
+	pmic: pfuze3000@08 {
+		compatible = "fsl,pfuze3000";
+		reg = <0x08>;
+
+		regulators {
+			sw1a_reg: sw1a {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1475000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+			/* use sw1c_reg to align with pfuze100/pfuze200 */
+			sw1c_reg: sw1b {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1475000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			sw2_reg: sw2 {
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			sw3a_reg: sw3 {
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1650000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			swbst_reg: swbst {
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5150000>;
+			};
+
+			snvs_reg: vsnvs {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vref_reg: vrefddr {
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vgen1_reg: vldo1 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen2_reg: vldo2 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1550000>;
+			};
+
+			vgen3_reg: vccsd {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen4_reg: v33 {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			vgen5_reg: vldo3 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen6_reg: vldo4 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index d11c3721e7cd..4c388bb2f0a2 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -30,6 +30,22 @@ Optional properties:
 			specifiers, one for transmission, and one for
 			reception.
 - dma-names	: Must contain a list of two DMA names, "tx" and "rx".
+- renesas,dtdl	: delay sync signal (setup) in transmit mode.
+		  Must contain one of the following values:
+		  0   (no bit delay)
+		  50  (0.5-clock-cycle delay)
+		  100 (1-clock-cycle delay)
+		  150 (1.5-clock-cycle delay)
+		  200 (2-clock-cycle delay)
+
+- renesas,syncdl : delay sync signal (hold) in transmit mode.
+		  Must contain one of the following values:
+		  0   (no bit delay)
+		  50  (0.5-clock-cycle delay)
+		  100 (1-clock-cycle delay)
+		  150 (1.5-clock-cycle delay)
+		  200 (2-clock-cycle delay)
+		  300 (3-clock-cycle delay)
 
 Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size	: Overrides the default tx fifo size given in words
diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt
new file mode 100644
index 000000000000..4c7adb8f777c
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sirf.txt
@@ -0,0 +1,41 @@
+* CSR SiRFprimaII Serial Peripheral Interface
+
+Required properties:
+- compatible : Should be "sirf,prima2-spi"
+- reg : Offset and length of the register set for the device
+- interrupts : Should contain SPI interrupt
+- resets: phandle to the reset controller asserting this device in
+          reset
+  See ../reset/reset.txt for details.
+- dmas : Must contain an entry for each entry in clock-names.
+  See ../dma/dma.txt for details.
+- dma-names : Must include the following entries:
+  - rx
+  - tx
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+
+- #address-cells: Number of cells required to define a chip select
+  address on the SPI bus. Should be set to 1.
+- #size-cells: Should be zero.
+
+Optional properties:
+- spi-max-frequency: Specifies maximum SPI clock frequency,
+  Units - Hz. Definition as per
+  Documentation/devicetree/bindings/spi/spi-bus.txt
+- cs-gpios: should specify GPIOs used for chipselects.
+
+Example:
+
+spi0: spi@b00d0000 {
+	compatible = "sirf,prima2-spi";
+	reg = <0xb00d0000 0x10000>;
+	interrupts = <15>;
+	dmas = <&dmac1 9>,
+	       <&dmac1 4>;
+	dma-names = "rx", "tx";
+	#address-cells = <1>;
+	#size-cells = <0>;
+	clocks = <&clks 19>;
+	resets = <&rstc 26>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
new file mode 100644
index 000000000000..fe54959ec957
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
@@ -0,0 +1,40 @@
+STMicroelectronics SSC (SPI) Controller
+---------------------------------------
+
+Required properties:
+- compatible	: "st,comms-ssc4-spi"
+- reg		: Offset and length of the device's register set
+- interrupts	: The interrupt specifier
+- clock-names	: Must contain "ssc"
+- clocks	: Must contain an entry for each name in clock-names
+		    See ../clk/*
+- pinctrl-names	: Uses "default", can use "sleep" if provided
+		    See ../pinctrl/pinctrl-binding.txt
+
+Optional properties:
+- cs-gpios	: List of GPIO chip selects
+		    See ../spi/spi-bus.txt
+
+Child nodes represent devices on the SPI bus
+		    See ../spi/spi-bus.txt
+
+Example:
+	spi@9840000 {
+		compatible	= "st,comms-ssc4-spi";
+		reg		= <0x9840000 0x110>;
+		interrupts	= <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+		clocks		= <&clk_s_c0_flexgen CLK_EXT2F_A9>;
+		clock-names	= "ssc";
+		pinctrl-0	= <&pinctrl_spi0_default>;
+		pinctrl-names	= "default";
+		cs-gpios	= <&pio17 5 0>;
+		#address-cells	= <1>;
+		#size-cells	= <0>;
+
+		st95hf@0 {
+			compatible		= "st,st95hf";
+			reg			= <0>;
+			spi-max-frequency	= <1000000>;
+			interrupts		= <2 IRQ_TYPE_EDGE_FALLING>;
+		};
+	};
diff --git a/Documentation/futex-requeue-pi.txt b/Documentation/futex-requeue-pi.txt
index 31b16610c416..77b36f59d16b 100644
--- a/Documentation/futex-requeue-pi.txt
+++ b/Documentation/futex-requeue-pi.txt
@@ -98,7 +98,7 @@ rt_mutex_start_proxy_lock() and rt_mutex_finish_proxy_lock(), which
 allow the requeue code to acquire an uncontended rt_mutex on behalf
 of the waiter and to enqueue the waiter on a contended rt_mutex.
 Two new system calls provide the kernel<->user interface to
-requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_REQUEUE_CMP_PI.
+requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI.
 
 FUTEX_WAIT_REQUEUE_PI is called by the waiter (pthread_cond_wait()
 and pthread_cond_timedwait()) to block on the initial futex and wait
@@ -107,7 +107,7 @@ result of a high-speed collision between futex_wait() and
 futex_lock_pi(), with some extra logic to check for the additional
 wake-up scenarios.
 
-FUTEX_REQUEUE_CMP_PI is called by the waker
+FUTEX_CMP_REQUEUE_PI is called by the waker
 (pthread_cond_broadcast() and pthread_cond_signal()) to requeue and
 possibly wake the waiting tasks. Internally, this system call is
 still handled by futex_requeue (by passing requeue_pi=1). Before
@@ -120,12 +120,12 @@ task as a waiter on the underlying rt_mutex. It is possible that
 the lock can be acquired at this stage as well, if so, the next
 waiter is woken to finish the acquisition of the lock.
 
-FUTEX_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but
+FUTEX_CMP_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but
 their sum is all that really matters. futex_requeue() will wake or
 requeue up to nr_wake + nr_requeue tasks. It will wake only as many
 tasks as it can acquire the lock for, which in the majority of cases
 should be 0 as good programming practice dictates that the caller of
 either pthread_cond_broadcast() or pthread_cond_signal() acquire the
-mutex prior to making the call. FUTEX_REQUEUE_PI requires that
+mutex prior to making the call. FUTEX_CMP_REQUEUE_PI requires that
 nr_wake=1. nr_requeue should be INT_MAX for broadcast and 0 for
 signal.
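
For readers mapping the corrected names onto the raw system call, here is a
minimal user-space sketch of the waker side; the futex words, their setup, and
the wrapper are illustrative assumptions, not the glibc condvar implementation:

	#include <limits.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	static long futex_cmp_requeue_pi(uint32_t *cond, uint32_t *mutex,
					 int nr_requeue, uint32_t expected)
	{
		/* nr_wake must be 1 for FUTEX_CMP_REQUEUE_PI; the last
		 * argument carries the expected condvar value (the "CMP"
		 * part of the operation) */
		return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
			       nr_requeue, mutex, expected);
	}

	/* broadcast: wake one waiter, requeue the rest onto the PI mutex */
	static long cond_broadcast(uint32_t *cond, uint32_t *mutex)
	{
		return futex_cmp_requeue_pi(cond, mutex, INT_MAX, *cond);
	}

	/* signal: wake one waiter, requeue none */
	static long cond_signal(uint32_t *cond, uint32_t *mutex)
	{
		return futex_cmp_requeue_pi(cond, mutex, 0, *cond);
	}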
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index 4223c2d3b508..cfd31d94c872 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -26,6 +26,12 @@ Supported chips:
 	Datasheet: Publicly available at the Texas Instruments website
 	           http://www.ti.com/
 
+  * Texas Instruments INA231
+    Prefix: 'ina231'
+    Addresses: I2C 0x40 - 0x4f
+    Datasheet: Publicly available at the Texas Instruments website
+               http://www.ti.com/
+
 Author: Lothar Felten <l-felten@ti.com>
 
 Description
@@ -41,9 +47,18 @@ interface. The INA220 monitors both shunt drop and supply voltage.
 The INA226 is a current shunt and power monitor with an I2C interface.
 The INA226 monitors both a shunt voltage drop and bus supply voltage.
 
-The INA230 is a high or low side current shunt and power monitor with an I2C
-interface. The INA230 monitors both a shunt voltage drop and bus supply voltage.
+The INA230 and INA231 are high- or low-side current shunt and power monitors
+with an I2C interface. The chips monitor both a shunt voltage drop and
+bus supply voltage.
 
-The shunt value in micro-ohms can be set via platform data or device tree.
-Please refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
+The shunt value in micro-ohms can be set via platform data or device tree at
+compile-time, or via the shunt_resistor attribute in sysfs at run-time. Please
+refer to Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
 if the device tree is used.
+
+Additionally, the INA226 supports the update_interval attribute as described in
+Documentation/hwmon/sysfs-interface. Internally the interval is the sum of
+bus and shunt voltage conversion times multiplied by the averaging rate. We
+don't touch the conversion times and only modify the number of averages. The
+lower limit of the update_interval is 2 ms, the upper limit is 2253 ms.
+The actual programmed interval may vary from the desired value.
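
A small sketch of the run-time knobs just described; the hwmon index and the
10 milliohm shunt are assumptions for illustration, only the attribute names
come from the text above:

	#include <stdio.h>

	static int write_attr(const char *path, long val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%ld\n", val);
		return fclose(f);
	}

	int main(void)
	{
		/* shunt_resistor takes micro-ohms: 10 mOhm = 10000 uOhm */
		write_attr("/sys/class/hwmon/hwmon0/shunt_resistor", 10000);

		/* update_interval takes milliseconds, clamped to [2, 2253];
		 * the driver rounds to the nearest realizable value */
		write_attr("/sys/class/hwmon/hwmon0/update_interval", 100);
		return 0;
	}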
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 5dbc99c04f6e..5001280e9d82 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -34,7 +34,7 @@ The validator tracks lock-class usage history into 4n + 1 separate state bits:
 - 'ever held with STATE enabled'
 - 'ever held as readlock with STATE enabled'
 
-Where STATE can be either one of (kernel/lockdep_states.h)
+Where STATE can be either one of (kernel/locking/lockdep_states.h)
  - hardirq
  - softirq
  - reclaim_fs
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 70a09f8a0383..ca2387ef27ab 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -269,6 +269,50 @@ And there are a number of things that _must_ or _must_not_ be assumed:
 	STORE *(A + 4) = Y; STORE *A = X;
 	STORE {*A, *(A + 4) } = {X, Y};
 
+And there are anti-guarantees:
+
+ (*) These guarantees do not apply to bitfields, because compilers often
+     generate code to modify these using non-atomic read-modify-write
+     sequences.  Do not attempt to use bitfields to synchronize parallel
+     algorithms.
+
+ (*) Even in cases where bitfields are protected by locks, all fields
+     in a given bitfield must be protected by one lock.  If two fields
+     in a given bitfield are protected by different locks, the compiler's
+     non-atomic read-modify-write sequences can cause an update to one
+     field to corrupt the value of an adjacent field.
+
+ (*) These guarantees apply only to properly aligned and sized scalar
+     variables.  "Properly sized" currently means variables that are
+     the same size as "char", "short", "int" and "long".  "Properly
+     aligned" means the natural alignment, thus no constraints for
+     "char", two-byte alignment for "short", four-byte alignment for
+     "int", and either four-byte or eight-byte alignment for "long",
+     on 32-bit and 64-bit systems, respectively.  Note that these
+     guarantees were introduced into the C11 standard, so beware when
+     using older pre-C11 compilers (for example, gcc 4.6).  The portion
+     of the standard containing this guarantee is Section 3.14, which
+     defines "memory location" as follows:
+
+	memory location
+		either an object of scalar type, or a maximal sequence
+		of adjacent bit-fields all having nonzero width
+
+		NOTE 1: Two threads of execution can update and access
+		separate memory locations without interfering with
+		each other.
+
+		NOTE 2: A bit-field and an adjacent non-bit-field member
+		are in separate memory locations. The same applies
+		to two bit-fields, if one is declared inside a nested
+		structure declaration and the other is not, or if the two
+		are separated by a zero-length bit-field declaration,
+		or if they are separated by a non-bit-field member
+		declaration. It is not safe to concurrently update two
+		bit-fields in the same structure if all members declared
+		between them are also bit-fields, no matter what the
+		sizes of those intervening bit-fields happen to be.
+
 
 =========================
 WHAT ARE MEMORY BARRIERS?
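
The first two anti-guarantees are easy to trip over in practice; the following
stand-alone sketch (the struct and the two locks are illustrative) shows how
two lock-protected bitfields sharing one memory location can corrupt each
other:

	#include <pthread.h>

	/* a and b occupy one "memory location" per the C11 definition
	 * above, so the compiler may update either one with a
	 * read-modify-write of the whole word */
	struct flags {
		unsigned int a : 1;
		unsigned int b : 1;
	};

	static struct flags f;
	static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

	static void *set_a(void *unused)
	{
		pthread_mutex_lock(&lock_a);
		f.a = 1;	/* load word, set bit, store word back */
		pthread_mutex_unlock(&lock_a);
		return NULL;
	}

	static void *set_b(void *unused)
	{
		pthread_mutex_lock(&lock_b);
		f.b = 1;	/* can overwrite a concurrent update of f.a */
		pthread_mutex_unlock(&lock_b);
		return NULL;
	}

	int main(void)
	{
		pthread_t ta, tb;

		/* distinct locks do not help: both threads still hit the
		 * same memory location; use one lock, or split a and b
		 * into separate ints */
		pthread_create(&ta, NULL, set_a, NULL);
		pthread_create(&tb, NULL, set_b, NULL);
		pthread_join(ta, NULL);
		pthread_join(tb, NULL);
		return 0;
	}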
@@ -750,7 +794,7 @@ In summary:
      However, they do -not- guarantee any other sort of ordering:
      Not prior loads against later loads, nor prior stores against
      later anything.  If you need these other forms of ordering,
-     use smb_rmb(), smp_wmb(), or, in the case of prior stores and
+     use smp_rmb(), smp_wmb(), or, in the case of prior stores and
      later loads, smp_mb().
 
  (*) If both legs of the "if" statement begin with identical stores
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index c6af4bac5aa8..54f10478e8e3 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -199,16 +199,9 @@ frame header.
 TX limitations
 --------------
 
-Kernel processing usually involves validation of the message received by
-user-space, then processing its contents. The kernel must assure that
-userspace is not able to modify the message contents after they have been
-validated. In order to do so, the message is copied from the ring frame
-to an allocated buffer if either of these conditions is false:
-
-- only a single mapping of the ring exists
-- the file descriptor is not shared between processes
-
-This means that for threaded programs, the kernel will fall back to copying.
+As of Jan 2015 the message is always copied from the ring frame to an
+allocated buffer due to unresolved security concerns.
+See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
 
 Example
 -------
diff --git a/MAINTAINERS b/MAINTAINERS
index 2ebb056cbe0a..d66a97dd3a12 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -708,6 +708,16 @@ X: drivers/iio/*/adjd*
 F:	drivers/staging/iio/*/ad*
 F:	staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANDROID DRIVERS
+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:	Arve Hjønnevåg <arve@android.com>
+M:	Riley Andrews <riandrews@android.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+L:	devel@driverdev.osuosl.org
+S:	Supported
+F:	drivers/android/
+F:	drivers/staging/android/
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linuxppc-dev@lists.ozlabs.org
@@ -4943,6 +4953,16 @@ F: Documentation/input/multi-touch-protocol.txt
 F:	drivers/input/input-mt.c
 K:	\b(ABS|SYN)_MT_
 
+INTEL ASoC BDW/HSW DRIVERS
+M:	Jie Yang <yang.jie@linux.intel.com>
+L:	alsa-devel@alsa-project.org
+S:	Supported
+F:	sound/soc/intel/sst-haswell*
+F:	sound/soc/intel/sst-dsp*
+F:	sound/soc/intel/sst-firmware.c
+F:	sound/soc/intel/broadwell.c
+F:	sound/soc/intel/haswell.c
+
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:	Intel SCU Linux support <intel-linux-scu@intel.com>
 M:	Artur Paszkiewicz <artur.paszkiewicz@intel.com>
@@ -9231,7 +9251,6 @@ F: drivers/net/ethernet/dlink/sundance.c
 
 SUPERH
 L:	linux-sh@vger.kernel.org
-W:	http://www.linux-sh.org
 Q:	http://patchwork.kernel.org/project/linux-sh/list/
 S:	Orphan
 F:	Documentation/sh/
@@ -10166,6 +10185,7 @@ USERSPACE I/O (UIO)
10166M: "Hans J. Koch" <hjk@hansjkoch.de> 10185M: "Hans J. Koch" <hjk@hansjkoch.de>
10167M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 10186M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
10168S: Maintained 10187S: Maintained
10188T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
10169F: Documentation/DocBook/uio-howto.tmpl 10189F: Documentation/DocBook/uio-howto.tmpl
10170F: drivers/uio/ 10190F: drivers/uio/
10171F: include/linux/uio*.h 10191F: include/linux/uio*.h
diff --git a/Makefile b/Makefile
index 95a0e827ecd3..b15036b1890c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6d..9d0ac091a52a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6f7e3a68803a..563cb27e37f5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -161,6 +161,8 @@ good_area:
 
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;
 
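
The same dispatch pattern recurs in every architecture's page-fault handler
touched by this series; condensed, the intended flag handling looks like the
fragment below (the goto labels are whatever each arch names them, and
handle_mm_fault()'s exact signature varies by kernel version):

	/* sketch of the per-arch fault dispatch these hunks extend */
	int fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* defer to the OOM killer */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new: deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();				/* unknown error flag */
	}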
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 68be9017593d..132c70e2d2f1 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -263,16 +263,37 @@ restart: adr r0, LC0
 		 * OK... Let's do some funky business here.
 		 * If we do have a DTB appended to zImage, and we do have
 		 * an ATAG list around, we want the latter to be translated
-		 * and folded into the former here. To be on the safe side,
-		 * let's temporarily move the stack away into the malloc
-		 * area. No GOT fixup has occurred yet, but none of the
-		 * code we're about to call uses any global variable.
+		 * and folded into the former here. No GOT fixup has occurred
+		 * yet, but none of the code we're about to call uses any
+		 * global variable.
 		 */
-		add	sp, sp, #0x10000
+
+		/* Get the initial DTB size */
+		ldr	r5, [r6, #4]
+#ifndef __ARMEB__
+		/* convert to little endian */
+		eor	r1, r5, r5, ror #16
+		bic	r1, r1, #0x00ff0000
+		mov	r5, r5, ror #8
+		eor	r5, r5, r1, lsr #8
+#endif
+		/* 50% DTB growth should be good enough */
+		add	r5, r5, r5, lsr #1
+		/* preserve 64-bit alignment */
+		add	r5, r5, #7
+		bic	r5, r5, #7
+		/* clamp to 32KB min and 1MB max */
+		cmp	r5, #(1 << 15)
+		movlo	r5, #(1 << 15)
+		cmp	r5, #(1 << 20)
+		movhi	r5, #(1 << 20)
+		/* temporarily relocate the stack past the DTB work space */
+		add	sp, sp, r5
+
 		stmfd	sp!, {r0-r3, ip, lr}
 		mov	r0, r8
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bl	atags_to_fdt
 
 		/*
@@ -285,11 +306,11 @@ restart: adr r0, LC0
 		bic	r0, r0, #1
 		add	r0, r0, #0x100
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bleq	atags_to_fdt
 
 		ldmfd	sp!, {r0-r3, ip, lr}
-		sub	sp, sp, #0x10000
+		sub	sp, sp, r5
 #endif
 
 		mov	r8, r6			@ use the appended device tree
@@ -306,7 +327,7 @@ restart: adr r0, LC0
 		subs	r1, r5, r1
 		addhi	r9, r9, r1
 
-		/* Get the dtb's size */
+		/* Get the current DTB size */
 		ldr	r5, [r6, #4]
 #ifndef __ARMEB__
 		/* convert r5 (dtb size) to little endian */
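
The sizing policy added in the first hunk is easier to follow in C; a
hypothetical equivalent of what the assembly computes for the DTB work space:

	#include <stdint.h>

	/* Take the DTB's big-endian totalsize field, add 50% growth for
	 * the folded ATAGs, keep 64-bit alignment, and clamp the result
	 * to [32 KiB, 1 MiB], exactly as the assembly above does. */
	static uint32_t dtb_workspace(uint32_t totalsize_be)
	{
		uint32_t size = __builtin_bswap32(totalsize_be); /* LE CPU */

		size += size >> 1;	  /* 50% growth should be enough */
		size = (size + 7) & ~7u;  /* preserve 64-bit alignment */

		if (size < (1u << 15))	  /* clamp to 32KB min ... */
			size = 1u << 15;
		if (size > (1u << 20))	  /* ... and 1MB max */
			size = 1u << 20;
		return size;
	}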
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index b8168f1f8139..24ff27049ce0 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -368,7 +368,7 @@
 	};
 
 	i2s1: i2s@13960000 {
-		compatible = "samsung,s5pv210-i2s";
+		compatible = "samsung,s3c6410-i2s";
 		reg = <0x13960000 0x100>;
 		clocks = <&clock CLK_I2S1>;
 		clock-names = "iis";
@@ -378,7 +378,7 @@
 	};
 
 	i2s2: i2s@13970000 {
-		compatible = "samsung,s5pv210-i2s";
+		compatible = "samsung,s3c6410-i2s";
 		reg = <0x13970000 0x100>;
 		clocks = <&clock CLK_I2S2>;
 		clock-names = "iis";
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 8c1febd7e3f2..c108bb451337 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -166,12 +166,12 @@
 	#address-cells = <1>;
 	#size-cells = <0>;
 
-	ethphy1: ethernet-phy@0 {
-		reg = <0>;
+	ethphy1: ethernet-phy@1 {
+		reg = <1>;
 	};
 
-	ethphy2: ethernet-phy@1 {
-		reg = <1>;
+	ethphy2: ethernet-phy@2 {
+		reg = <2>;
 	};
 };
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 7b4099fcf817..d5c4669224b1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -17,14 +17,6 @@
 
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {
@@ -39,6 +31,14 @@
 				 <&ahb_gates 44>;
 			status = "disabled";
 		};
+
+		framebuffer@1 {
+			compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+			allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+			clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+				 <&ahb_gates 44>, <&ahb_gates 46>;
+			status = "disabled";
+		};
 	};
 
 	cpus {
@@ -438,8 +438,8 @@
 			reg-names = "phy_ctrl", "pmu1", "pmu2";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>, <&usb_clk 2>;
-			reset-names = "usb1_reset", "usb2_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+			reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index fe3c559ca6a8..bfa742817690 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -55,6 +55,12 @@
 	model = "Olimex A10s-Olinuxino Micro";
 	compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 1b76667f3182..2e7d8263799d 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -18,10 +18,6 @@
 
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
 	};
 
 	chosen {
@@ -390,8 +386,8 @@
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index eeed1f236ee8..c7be3abd9fcc 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -53,6 +53,10 @@
 	model = "HSG H702";
 	compatible = "hsg,h702", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index 916ee8bb826f..3decefb3c37a 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -54,6 +54,10 @@
 	model = "Olimex A13-Olinuxino Micro";
 	compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index e31d291d14cb..b421f7fa197b 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -55,6 +55,10 @@
 	model = "Olimex A13-Olinuxino";
 	compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index c35217ea1f64..c556688f8b8b 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -16,11 +16,6 @@
 / {
 	interrupt-parent = <&intc>;
 
-	aliases {
-		serial0 = &uart1;
-		serial1 = &uart3;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -349,8 +344,8 @@
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index f47156b6572b..1e7e7bcf8307 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -53,12 +53,6 @@
 	interrupt-parent = <&gic>;
 
 	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
 		ethernet0 = &gmac;
 	};
 
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 1cf1214cc068..bd7b15add697 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -55,6 +55,12 @@
 	model = "LeMaker Banana Pi";
 	compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart3;
+		serial2 = &uart7;
+	};
+
 	soc@01c00000 {
 		spi0: spi@01c05000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
index 0e4bfa3b2b85..0bcefcbbb756 100644
--- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
+++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
@@ -19,6 +19,14 @@
 	model = "Merrii A20 Hummingbird";
 	compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+		serial3 = &uart4;
+		serial4 = &uart5;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 9d669cdf031d..66cc77707198 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -20,6 +20,9 @@
 	compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
 
 	aliases {
+		serial0 = &uart0;
+		serial1 = &uart6;
+		serial2 = &uart7;
 		spi0 = &spi1;
 		spi1 = &spi2;
 	};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e21ce5992d56..89749ce34a84 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -54,14 +54,6 @@
 
 	aliases {
 		ethernet0 = &gmac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {
diff --git a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
index 7f2117ce6985..32ad80804dbb 100644
--- a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
+++ b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
@@ -55,6 +55,10 @@
 	model = "Ippo Q8H Dual Core Tablet (v5)";
 	compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
 
+	aliases {
+		serial0 = &r_uart;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index 0746cd1024d7..86584fcf5e32 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -52,15 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 506948f582ee..11ec71072e81 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -54,6 +54,11 @@
 	model = "Merrii A80 Optimus Board";
 	compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart4;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 494714f67b57..9ef4438206a9 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -52,16 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 66ce17655bb9..7b0152321b20 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 254e0650e48b..04b4ea0b550a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest on this vcpu */
 	bool pause;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 63e0ecc04901..1bca8f8af442 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size,
-					     bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+					       unsigned long size,
+					       bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
-
 	/*
 	 * If we are going to insert an instruction page and the icache is
 	 * either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 	 *
 	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 *
+	 * We need to do this through a kernel mapping (using the
+	 * user-space mapping has proved to be the wrong
+	 * solution). For that, we need to kmap one page at a time,
+	 * and iterate over the range.
 	 */
-	if (icache_is_pipt()) {
-		__cpuc_coherent_user_range(hva, hva + size);
-	} else if (!icache_is_vivt_asid_tagged()) {
+
+	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+	VM_BUG_ON(size & ~PAGE_MASK);
+
+	if (!need_flush && !icache_is_pipt())
+		goto vipt_cache;
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		if (need_flush)
+			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		if (icache_is_pipt())
+			__cpuc_coherent_user_range((unsigned long)va,
+						   (unsigned long)va + PAGE_SIZE);
+
+		size -= PAGE_SIZE;
+		pfn++;
+
+		kunmap_atomic(va);
+	}
+
+vipt_cache:
+	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	void *va = kmap_atomic(pte_page(pte));
+
+	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+	kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	unsigned long size = PMD_SIZE;
+	pfn_t pfn = pmd_pfn(pmd);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		pfn++;
+		size -= PAGE_SIZE;
+
+		kunmap_atomic(va);
+	}
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 2260f1855820..8944f4991c3c 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -22,10 +22,12 @@
 
 __invalid_entry:
 	v7m_exception_entry
+#ifdef CONFIG_PRINTK
 	adr	r0, strerr
 	mrs	r1, ipsr
 	mov	r2, lr
 	bl	printk
+#endif
 	mov	r0, sp
 	bl	show_regs
 1:	b	1b
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8..3afee5f40f4f 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
+	select SRCU
 	depends on ARM_VIRT_EXT && ARM_LPAE
 	---help---
 	  Support hosting virtualized guest machines. You will also
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2d6d91001062..0b0d58a905c4 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-	/*
-	 * Check whether this vcpu requires the cache to be flushed on
-	 * this physical CPU. This is a consequence of doing dcache
-	 * operations by set/way on this vcpu. We do it here to be in
-	 * a non-preemptible section.
-	 */
-	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
 	kvm_arm_set_running_vcpu(vcpu);
 }
 
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
-		vcpu->arch.last_pcpu = smp_processor_id();
 		kvm_guest_exit();
 		trace_kvm_exit(*vcpu_pc(vcpu));
 		/*
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7928dbdf2102..f3d88dc388bc 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt1);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-		break;
-
-	case 10:		/* DCCSW */
-		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set.  If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct coproc_params *p,
-			  const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+		   const struct coproc_params *p,
+		   const struct coproc_reg *r)
 {
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
 	BUG_ON(!p->is_write);
 
 	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
 		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-	return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 1a44bbe39643..88d24a3a9778 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64		.is_64 = true
 #define is32		.is_64 = false
 
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r);
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+		   const struct coproc_params *p,
+		   const struct coproc_reg *r);
 
 #endif	/* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index e6f4ae48bda9..a7136757d373 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -34,7 +34,7 @@
 static const struct coproc_reg a15_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
index 17fc7cd479d3..b19e46d1b2c0 100644
--- a/arch/arm/kvm/coproc_a7.c
+++ b/arch/arm/kvm/coproc_a7.c
@@ -37,7 +37,7 @@
 static const struct coproc_reg a7_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
 };
 
 static struct kvm_coproc_target_table a7_target_table = {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1dc9778a00af..136662547ca6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+	__kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	__kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+	__kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  int min, int max)
 {
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM.  However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 		       phys_addr_t addr, phys_addr_t end)
 {
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 	start_pte = pte = pte_offset_kernel(pmd, addr);
 	do {
 		if (!pte_none(*pte)) {
+			pte_t old_pte = *pte;
+
 			kvm_set_pte(pte, __pte(0));
-			put_page(virt_to_page(pte));
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+			/* No need to invalidate the cache for device mappings */
+			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+				kvm_flush_dcache_pte(old_pte);
+
+			put_page(virt_to_page(pte));
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
 			if (kvm_pmd_huge(*pmd)) {
+				pmd_t old_pmd = *pmd;
+
 				pmd_clear(pmd);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pmd(old_pmd);
+
 				put_page(virt_to_page(pmd));
 			} else {
 				unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
 			if (pud_huge(*pud)) {
+				pud_t old_pud = *pud;
+
 				pud_clear(pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pud(old_pud);
+
 				put_page(virt_to_page(pud));
 			} else {
 				unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte)) {
-			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-		}
+		if (!pte_none(*pte) &&
+		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-			} else {
+			if (kvm_pmd_huge(*pmd))
+				kvm_flush_dcache_pmd(*pmd);
+			else
 				stage2_flush_ptes(kvm, pmd, addr, next);
-			}
 		}
 	} while (pmd++, addr = next, addr != end);
 }
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
-			if (pud_huge(*pud)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-			} else {
+			if (pud_huge(*pud))
+				kvm_flush_dcache_pud(*pud);
+			else
 				stage2_flush_pmds(kvm, pud, addr, next);
-			}
 		}
 	} while (pud++, addr = next, addr != end);
 }
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
  * Go through the stage 2 page tables and invalidate any cache lines
  * backing memory already mapped to the VM.
  */
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 	return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+				      unsigned long size, bool uncached)
+{
+	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	unmap_stage2_range(kvm, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+	unsigned long hcr = vcpu_get_hcr(vcpu);
+
+	/*
+	 * If this is the first time we do a S/W operation
+	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
+	 * VM trapping.
+	 *
+	 * Otherwise, rely on the VM trapping to wait for the MMU +
+	 * Caches to be turned off. At that point, we'll be able to
+	 * clean the caches again.
+	 */
+	if (!(hcr & HCR_TVM)) {
+		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+					vcpu_has_cache_enabled(vcpu));
+		stage2_flush_vm(vcpu->kvm);
+		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+	}
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+	bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+	/*
+	 * If switching the MMU+caches on, need to invalidate the caches.
+	 * If switching it off, need to clean the caches.
+	 * Clean + invalidate does the trick always.
+	 */
+	if (now_enabled != was_enabled)
+		stage2_flush_vm(vcpu->kvm);
+
+	/* Caches are now on, stop trapping VM ops (until a S/W op) */
+	if (now_enabled)
+		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
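
Read together, the two hooks implement a small state machine; a schematic
walkthrough of the intended trap sequence (hypothetical annotation, not code
from the patch, though the function names are the real ones):

	/*
	 *  1. guest issues DC CISW (set/way)  -> trap -> kvm_set_way_flush():
	 *         HCR.TVM not yet set: stage2_flush_vm() (full clean),
	 *         then set HCR.TVM so SCTLR writes trap from now on.
	 *  2. guest disables MMU+caches       -> trap -> kvm_toggle_cache():
	 *         cache state toggled: stage2_flush_vm() again.
	 *  3. guest re-enables MMU+caches     -> trap -> kvm_toggle_cache():
	 *         flush once more, then clear HCR.TVM; no further VM-reg
	 *         trapping until the next set/way operation.
	 */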
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index b1d640f78623..b6a6e7102201 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_set_way_flush,
+	    TP_PROTO(unsigned long vcpu_pc, bool cache),
+	    TP_ARGS(vcpu_pc, cache),
+
+	    TP_STRUCT__entry(
+		    __field(	unsigned long,	vcpu_pc	)
+		    __field(	bool,		cache	)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->vcpu_pc	= vcpu_pc;
+		    __entry->cache	= cache;
+	    ),
+
+	    TP_printk("S/W flush at 0x%016lx (cache %s)",
+		      __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+	    TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+	    TP_ARGS(vcpu_pc, was, now),
+
+	    TP_STRUCT__entry(
+		    __field(	unsigned long,	vcpu_pc	)
+		    __field(	bool,		was	)
+		    __field(	bool,		now	)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->vcpu_pc	= vcpu_pc;
+		    __entry->was	= was;
+		    __entry->now	= now;
+	    ),
+
+	    TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+		      __entry->vcpu_pc, __entry->was ? "on" : "off",
+		      __entry->now ? "on" : "off")
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index caa21e9b8cd9..ccef8806bb58 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -190,6 +190,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
 	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
 
 	/*
+	 * We should switch the PL310 to I/O coherency mode only if
+	 * I/O coherency is actually enabled.
+	 */
+	if (!coherency_available())
+		return;
+
+	/*
 	 * Add the PL310 property "arm,io-coherent". This makes sure the
 	 * outer sync operation is not used, which allows to
 	 * workaround the system erratum that causes deadlocks when
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
index 66f67816a844..444f22d370f0 100644
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ b/arch/arm/mach-shmobile/board-ape6evm.c
@@ -18,6 +18,8 @@
18#include <linux/gpio_keys.h> 18#include <linux/gpio_keys.h>
19#include <linux/input.h> 19#include <linux/input.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/irqchip.h>
22#include <linux/irqchip/arm-gic.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/mfd/tmio.h> 24#include <linux/mfd/tmio.h>
23#include <linux/mmc/host.h> 25#include <linux/mmc/host.h>
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
273 sizeof(ape6evm_leds_pdata)); 275 sizeof(ape6evm_leds_pdata));
274} 276}
275 277
278static void __init ape6evm_legacy_init_time(void)
279{
280 /* Do not invoke DT-based timers via clocksource_of_init() */
281}
282
283static void __init ape6evm_legacy_init_irq(void)
284{
285 void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
286 void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
287
288 gic_init(0, 29, gic_dist_base, gic_cpu_base);
289
290 /* Do not invoke DT-based interrupt code via irqchip_init() */
291}
292
293
276static const char *ape6evm_boards_compat_dt[] __initdata = { 294static const char *ape6evm_boards_compat_dt[] __initdata = {
277 "renesas,ape6evm", 295 "renesas,ape6evm",
278 NULL, 296 NULL,
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
280 298
281DT_MACHINE_START(APE6EVM_DT, "ape6evm") 299DT_MACHINE_START(APE6EVM_DT, "ape6evm")
282 .init_early = shmobile_init_delay, 300 .init_early = shmobile_init_delay,
301 .init_irq = ape6evm_legacy_init_irq,
283 .init_machine = ape6evm_add_standard_devices, 302 .init_machine = ape6evm_add_standard_devices,
284 .init_late = shmobile_init_late, 303 .init_late = shmobile_init_late,
285 .dt_compat = ape6evm_boards_compat_dt, 304 .dt_compat = ape6evm_boards_compat_dt,
305 .init_time = ape6evm_legacy_init_time,
286MACHINE_END 306MACHINE_END
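
The board file overrides the generic .init_irq and .init_time hooks of the machine descriptor so that neither irqchip_init() nor clocksource_of_init() runs on this legacy platform; the lager hunk below does the same for .init_irq. A toy model of that hook-override pattern (struct machine_desc here is a simplified stand-in, not the kernel's):

#include <stdio.h>

/* Toy machine descriptor in the spirit of DT_MACHINE_START: each hook
 * falls back to a generic default when the board leaves it NULL. */
struct machine_desc {
	void (*init_irq)(void);
	void (*init_time)(void);
	void (*init_machine)(void);
};

static void generic_init_irq(void)  { puts("irqchip_init() from DT"); }
static void generic_init_time(void) { puts("clocksource_of_init() from DT"); }

static void board_init_irq(void)  { puts("gic_init() on fixed MMIO bases"); }
static void board_init_time(void) { puts("skip DT-based timers entirely"); }
static void board_init_machine(void) { puts("register board devices"); }

static const struct machine_desc ape6evm_like = {
	.init_irq	= board_init_irq,	/* overrides DT probing */
	.init_time	= board_init_time,	/* overrides DT probing */
	.init_machine	= board_init_machine,
};

static void boot(const struct machine_desc *m)
{
	(m->init_irq  ? m->init_irq  : generic_init_irq)();
	(m->init_time ? m->init_time : generic_init_time)();
	m->init_machine();
}

int main(void)
{
	boot(&ape6evm_like);
	return 0;
}
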
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index f8197eb6e566..65b128dd4072 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -21,6 +21,8 @@
21#include <linux/input.h> 21#include <linux/input.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/irqchip.h>
25#include <linux/irqchip/arm-gic.h>
24#include <linux/kernel.h> 26#include <linux/kernel.h>
25#include <linux/leds.h> 27#include <linux/leds.h>
26#include <linux/mfd/tmio.h> 28#include <linux/mfd/tmio.h>
@@ -811,6 +813,16 @@ static void __init lager_init(void)
811 lager_ksz8041_fixup); 813 lager_ksz8041_fixup);
812} 814}
813 815
816static void __init lager_legacy_init_irq(void)
817{
818 void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
819 void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
820
821 gic_init(0, 29, gic_dist_base, gic_cpu_base);
822
823 /* Do not invoke DT-based interrupt code via irqchip_init() */
824}
825
814static const char * const lager_boards_compat_dt[] __initconst = { 826static const char * const lager_boards_compat_dt[] __initconst = {
815 "renesas,lager", 827 "renesas,lager",
816 NULL, 828 NULL,
@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
819DT_MACHINE_START(LAGER_DT, "lager") 831DT_MACHINE_START(LAGER_DT, "lager")
820 .smp = smp_ops(r8a7790_smp_ops), 832 .smp = smp_ops(r8a7790_smp_ops),
821 .init_early = shmobile_init_delay, 833 .init_early = shmobile_init_delay,
834 .init_irq = lager_legacy_init_irq,
822 .init_time = rcar_gen2_timer_init, 835 .init_time = rcar_gen2_timer_init,
823 .init_machine = lager_init, 836 .init_machine = lager_init,
824 .init_late = shmobile_init_late, 837 .init_late = shmobile_init_late,
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 3dd6edd9bd1d..cc9470dfb1ce 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
133#ifdef CONFIG_COMMON_CLK 133#ifdef CONFIG_COMMON_CLK
134 rcar_gen2_clocks_init(mode); 134 rcar_gen2_clocks_init(mode);
135#endif 135#endif
136#ifdef CONFIG_ARCH_SHMOBILE_MULTI
136 clocksource_of_init(); 137 clocksource_of_init();
138#endif
137} 139}
138 140
139struct memory_reserve_config { 141struct memory_reserve_config {
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index f1d027aa7a81..0edf2a6d2bbe 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
70 if (!max_freq) 70 if (!max_freq)
71 return; 71 return;
72 72
73#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
74 /* Non-multiplatform r8a73a4 SoC cannot use the arch timer, as
75 * the GIC is initialized from C and the arch timer via DT */
76 if (of_machine_is_compatible("renesas,r8a73a4"))
77 has_arch_timer = false;
78
79 /* Non-multiplatform r8a7790 SoC cannot use the arch timer, as
80 * the GIC is initialized from C and the arch timer via DT */
81 if (of_machine_is_compatible("renesas,r8a7790"))
82 has_arch_timer = false;
83#endif
84
73 if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { 85 if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
74 if (is_a7_a8_a9) 86 if (is_a7_a8_a9)
75 shmobile_setup_delay_hz(max_freq, 1, 3); 87 shmobile_setup_delay_hz(max_freq, 1, 3);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 03823e784f63..c43c71455566 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
1012 1012
1013config ARM_KERNMEM_PERMS 1013config ARM_KERNMEM_PERMS
1014 bool "Restrict kernel memory permissions" 1014 bool "Restrict kernel memory permissions"
1015 depends on MMU
1015 help 1016 help
1016 If this is set, kernel memory other than kernel text (and rodata) 1017 If this is set, kernel memory other than kernel text (and rodata)
1017 will be made non-executable. The tradeoff is that each region is 1018 will be made non-executable. The tradeoff is that each region is
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 91892569710f..845769e41332 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
144 /* Update the list of reserved ASIDs and the ASID bitmap. */ 144 /* Update the list of reserved ASIDs and the ASID bitmap. */
145 bitmap_clear(asid_map, 0, NUM_USER_ASIDS); 145 bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
146 for_each_possible_cpu(i) { 146 for_each_possible_cpu(i) {
147 if (i == cpu) { 147 asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
148 asid = 0; 148 /*
149 } else { 149 * If this CPU has already been through a
150 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); 150 * rollover, but hasn't run another task in
151 /* 151 * the meantime, we must preserve its reserved
152 * If this CPU has already been through a 152 * ASID, as this is the only trace we have of
153 * rollover, but hasn't run another task in 153 * the process it is still running.
154 * the meantime, we must preserve its reserved 154 */
155 * ASID, as this is the only trace we have of 155 if (asid == 0)
156 * the process it is still running. 156 asid = per_cpu(reserved_asids, i);
157 */ 157 __set_bit(asid & ~ASID_MASK, asid_map);
158 if (asid == 0)
159 asid = per_cpu(reserved_asids, i);
160 __set_bit(asid & ~ASID_MASK, asid_map);
161 }
162 per_cpu(reserved_asids, i) = asid; 158 per_cpu(reserved_asids, i) = asid;
163 } 159 }
164 160
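
The refactored loop now treats the rollover-initiating CPU like every other CPU: exchange the active ASID for zero, fall back to the reserved ASID if the CPU has been idle since the last rollover, mark that ASID allocated in the bitmap, and record it as reserved. A single-threaded toy of that sequence (plain arrays replace the per-CPU atomics; all names and sizes are illustrative):

#include <stdio.h>

#define NR_CPUS		4
#define NUM_USER_ASIDS	256

static unsigned long active_asids[NR_CPUS];
static unsigned long reserved_asids[NR_CPUS];
static unsigned char asid_map[NUM_USER_ASIDS];	/* toy bitmap */

/* Single-threaded stand-in for atomic64_xchg(&active_asids[i], 0). */
static unsigned long xchg_zero(unsigned long *p)
{
	unsigned long old = *p;
	*p = 0;
	return old;
}

static void flush_context(void)
{
	unsigned long asid;
	int i;

	for (i = 0; i < NUM_USER_ASIDS; i++)
		asid_map[i] = 0;

	/* Same sequence for every CPU, including the one rolling over:
	 * grab the active ASID; if the CPU already went through a
	 * rollover without running a new task, keep its reserved ASID;
	 * then mark it allocated so no new task can steal it. */
	for (i = 0; i < NR_CPUS; i++) {
		asid = xchg_zero(&active_asids[i]);
		if (asid == 0)
			asid = reserved_asids[i];
		asid_map[asid % NUM_USER_ASIDS] = 1;	/* ~ASID_MASK stand-in */
		reserved_asids[i] = asid;
	}
}

int main(void)
{
	active_asids[0] = 42;	/* CPU0: task currently running */
	reserved_asids[2] = 99;	/* CPU2: idle since the last rollover */

	flush_context();
	printf("reserved: cpu0=%lu cpu2=%lu\n",
	       reserved_asids[0], reserved_asids[2]);	/* 42 and 99 */
	return 0;
}
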
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7864797609b3..903dba064a03 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
1940} 1940}
1941EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 1941EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
1942 1942
1943static int __arm_iommu_attach_device(struct device *dev,
1944 struct dma_iommu_mapping *mapping)
1945{
1946 int err;
1947
1948 err = iommu_attach_device(mapping->domain, dev);
1949 if (err)
1950 return err;
1951
1952 kref_get(&mapping->kref);
1953 dev->archdata.mapping = mapping;
1954
1955 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
1956 return 0;
1957}
1958
1943/** 1959/**
1944 * arm_iommu_attach_device 1960 * arm_iommu_attach_device
1945 * @dev: valid struct device pointer 1961 * @dev: valid struct device pointer
1946 * @mapping: io address space mapping structure (returned from 1962 * @mapping: io address space mapping structure (returned from
1947 * arm_iommu_create_mapping) 1963 * arm_iommu_create_mapping)
1948 * 1964 *
1949 * Attaches specified io address space mapping to the provided device, 1965 * Attaches specified io address space mapping to the provided device.
1966 * This replaces the dma operations (dma_map_ops pointer) with the
1967 * IOMMU aware version.
1968 *
1950 * More than one client might be attached to the same io address space 1969 * More than one client might be attached to the same io address space
1951 * mapping. 1970 * mapping.
1952 */ 1971 */
@@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev,
1955{ 1974{
1956 int err; 1975 int err;
1957 1976
1958 err = iommu_attach_device(mapping->domain, dev); 1977 err = __arm_iommu_attach_device(dev, mapping);
1959 if (err) 1978 if (err)
1960 return err; 1979 return err;
1961 1980
1962 kref_get(&mapping->kref); 1981 set_dma_ops(dev, &iommu_ops);
1963 dev->archdata.mapping = mapping;
1964
1965 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
1966 return 0; 1982 return 0;
1967} 1983}
1968EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 1984EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
1969 1985
1970/** 1986static void __arm_iommu_detach_device(struct device *dev)
1971 * arm_iommu_detach_device
1972 * @dev: valid struct device pointer
1973 *
1974 * Detaches the provided device from a previously attached map.
1975 */
1976void arm_iommu_detach_device(struct device *dev)
1977{ 1987{
1978 struct dma_iommu_mapping *mapping; 1988 struct dma_iommu_mapping *mapping;
1979 1989
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
1989 1999
1990 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 2000 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
1991} 2001}
2002
2003/**
2004 * arm_iommu_detach_device
2005 * @dev: valid struct device pointer
2006 *
2007 * Detaches the provided device from a previously attached map.
2008 * This voids the dma operations (dma_map_ops pointer).
2009 */
2010void arm_iommu_detach_device(struct device *dev)
2011{
2012 __arm_iommu_detach_device(dev);
2013 set_dma_ops(dev, NULL);
2014}
1992EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 2015EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
1993 2016
1994static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 2017static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2011 return false; 2034 return false;
2012 } 2035 }
2013 2036
2014 if (arm_iommu_attach_device(dev, mapping)) { 2037 if (__arm_iommu_attach_device(dev, mapping)) {
2015 pr_warn("Failed to attached device %s to IOMMU_mapping\n", 2038 pr_warn("Failed to attached device %s to IOMMU_mapping\n",
2016 dev_name(dev)); 2039 dev_name(dev));
2017 arm_iommu_release_mapping(mapping); 2040 arm_iommu_release_mapping(mapping);
@@ -2025,7 +2048,10 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
2025{ 2048{
2026 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 2049 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
2027 2050
2028 arm_iommu_detach_device(dev); 2051 if (!mapping)
2052 return;
2053
2054 __arm_iommu_detach_device(dev);
2029 arm_iommu_release_mapping(mapping); 2055 arm_iommu_release_mapping(mapping);
2030} 2056}
2031 2057
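
The split into __arm_iommu_attach_device()/__arm_iommu_detach_device() separates attaching to the IOMMU domain from installing the IOMMU-aware dma_map_ops, so the arch setup/teardown paths keep control of the ops themselves and teardown can bail out early when no mapping exists. A toy model of that layering (all names here are stand-ins, not the kernel API):

#include <stdio.h>

struct dma_map_ops { const char *name; };

static const struct dma_map_ops iommu_ops = { "iommu" };

struct device {
	const struct dma_map_ops *dma_ops;	/* NULL = default ops */
	int attached;
};

/* Core work only: attach to the domain, take a mapping reference. */
static int __attach(struct device *dev)
{
	dev->attached = 1;
	return 0;
}

static void __detach(struct device *dev)
{
	dev->attached = 0;
}

/* Public API: core work plus the dma_map_ops switch. */
static int attach_device(struct device *dev)
{
	int err = __attach(dev);

	if (err)
		return err;
	dev->dma_ops = &iommu_ops;
	return 0;
}

static void detach_device(struct device *dev)
{
	__detach(dev);
	dev->dma_ops = NULL;
}

/* Arch path: uses the internal helper so it decides for itself which
 * dma_map_ops end up installed, and skips devices never attached. */
static void arch_teardown(struct device *dev)
{
	if (!dev->attached)
		return;
	__detach(dev);
}

int main(void)
{
	struct device dev = { 0 };

	attach_device(&dev);
	printf("ops after attach: %s\n", dev.dma_ops->name);
	arch_teardown(&dev);	/* detached; ops swap left to the caller */
	detach_device(&dev);	/* public path would also clear the ops */
	return 0;
}
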
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 865a7e28ea2d..3cb4c856b10d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
45 vcpu->arch.hcr_el2 &= ~HCR_RW; 45 vcpu->arch.hcr_el2 &= ~HCR_RW;
46} 46}
47 47
48static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
49{
50 return vcpu->arch.hcr_el2;
51}
52
53static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
54{
55 vcpu->arch.hcr_el2 = hcr;
56}
57
48static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) 58static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
49{ 59{
50 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; 60 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0b7dfdb931df..acd101a9014d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
116 * Anything that is not used directly from assembly code goes 116 * Anything that is not used directly from assembly code goes
117 * here. 117 * here.
118 */ 118 */
119 /* dcache set/way operation pending */
120 int last_pcpu;
121 cpumask_t require_dcache_flush;
122 119
123 /* Don't run the guest */ 120 /* Don't run the guest */
124 bool pause; 121 bool pause;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..adcf49547301 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
243 return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; 243 return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
244} 244}
245 245
246static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, 246static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
247 unsigned long size, 247 unsigned long size,
248 bool ipa_uncached) 248 bool ipa_uncached)
249{ 249{
250 void *va = page_address(pfn_to_page(pfn));
251
250 if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) 252 if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
251 kvm_flush_dcache_to_poc((void *)hva, size); 253 kvm_flush_dcache_to_poc(va, size);
252 254
253 if (!icache_is_aliasing()) { /* PIPT */ 255 if (!icache_is_aliasing()) { /* PIPT */
254 flush_icache_range(hva, hva + size); 256 flush_icache_range((unsigned long)va,
257 (unsigned long)va + size);
255 } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ 258 } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
256 /* any kind of VIPT cache */ 259 /* any kind of VIPT cache */
257 __flush_icache_all(); 260 __flush_icache_all();
258 } 261 }
259} 262}
260 263
264static inline void __kvm_flush_dcache_pte(pte_t pte)
265{
266 struct page *page = pte_page(pte);
267 kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
268}
269
270static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
271{
272 struct page *page = pmd_page(pmd);
273 kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
274}
275
276static inline void __kvm_flush_dcache_pud(pud_t pud)
277{
278 struct page *page = pud_page(pud);
279 kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
280}
281
261#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) 282#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
262 283
263void stage2_flush_vm(struct kvm *kvm); 284void kvm_set_way_flush(struct kvm_vcpu *vcpu);
285void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
264 286
265#endif /* __ASSEMBLY__ */ 287#endif /* __ASSEMBLY__ */
266#endif /* __ARM64_KVM_MMU_H__ */ 288#endif /* __ARM64_KVM_MMU_H__ */
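
Separate flush helpers per page-table level make sense because each level spans a different amount of memory. For a typical arm64 4K-granule configuration (an assumption; the spans depend on the configured granule and level count) the sizes work out as below:

#include <stdio.h>

int main(void)
{
	/* Assumed arm64 4K-granule shifts: PTE 12, PMD 21, PUD 30. */
	unsigned long page_size = 1UL << 12;	/* PTE:  4 KiB */
	unsigned long pmd_size  = 1UL << 21;	/* PMD:  2 MiB */
	unsigned long pud_size  = 1UL << 30;	/* PUD:  1 GiB */

	printf("flush per PTE: %lu KiB\n", page_size >> 10);
	printf("flush per PMD: %lu MiB\n", pmd_size >> 20);
	printf("flush per PUD: %lu MiB\n", pud_size >> 20);
	return 0;
}
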
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 8ba85e9ea388..b334084d3675 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
26 select KVM_ARM_HOST 26 select KVM_ARM_HOST
27 select KVM_ARM_VGIC 27 select KVM_ARM_VGIC
28 select KVM_ARM_TIMER 28 select KVM_ARM_TIMER
29 select SRCU
29 ---help--- 30 ---help---
30 Support hosting virtualized guest machines. 31 Support hosting virtualized guest machines.
31 32
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..f31e8bb2bc5b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
69 return ccsidr; 69 return ccsidr;
70} 70}
71 71
72static void do_dc_cisw(u32 val) 72/*
73{ 73 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
74 asm volatile("dc cisw, %x0" : : "r" (val)); 74 */
75 dsb(ish);
76}
77
78static void do_dc_csw(u32 val)
79{
80 asm volatile("dc csw, %x0" : : "r" (val));
81 dsb(ish);
82}
83
84/* See note at ARM ARM B1.14.4 */
85static bool access_dcsw(struct kvm_vcpu *vcpu, 75static bool access_dcsw(struct kvm_vcpu *vcpu,
86 const struct sys_reg_params *p, 76 const struct sys_reg_params *p,
87 const struct sys_reg_desc *r) 77 const struct sys_reg_desc *r)
88{ 78{
89 unsigned long val;
90 int cpu;
91
92 if (!p->is_write) 79 if (!p->is_write)
93 return read_from_write_only(vcpu, p); 80 return read_from_write_only(vcpu, p);
94 81
95 cpu = get_cpu(); 82 kvm_set_way_flush(vcpu);
96
97 cpumask_setall(&vcpu->arch.require_dcache_flush);
98 cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
99
100 /* If we were already preempted, take the long way around */
101 if (cpu != vcpu->arch.last_pcpu) {
102 flush_cache_all();
103 goto done;
104 }
105
106 val = *vcpu_reg(vcpu, p->Rt);
107
108 switch (p->CRm) {
109 case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
110 case 14: /* DCCISW */
111 do_dc_cisw(val);
112 break;
113
114 case 10: /* DCCSW */
115 do_dc_csw(val);
116 break;
117 }
118
119done:
120 put_cpu();
121
122 return true; 83 return true;
123} 84}
124 85
125/* 86/*
126 * Generic accessor for VM registers. Only called as long as HCR_TVM 87 * Generic accessor for VM registers. Only called as long as HCR_TVM
127 * is set. 88 * is set. If the guest enables the MMU, we stop trapping the VM
89 * sys_regs and leave it in complete control of the caches.
128 */ 90 */
129static bool access_vm_reg(struct kvm_vcpu *vcpu, 91static bool access_vm_reg(struct kvm_vcpu *vcpu,
130 const struct sys_reg_params *p, 92 const struct sys_reg_params *p,
131 const struct sys_reg_desc *r) 93 const struct sys_reg_desc *r)
132{ 94{
133 unsigned long val; 95 unsigned long val;
96 bool was_enabled = vcpu_has_cache_enabled(vcpu);
134 97
135 BUG_ON(!p->is_write); 98 BUG_ON(!p->is_write);
136 99
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
143 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; 106 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
144 } 107 }
145 108
146 return true; 109 kvm_toggle_cache(vcpu, was_enabled);
147}
148
149/*
150 * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
151 * guest enables the MMU, we stop trapping the VM sys_regs and leave
152 * it in complete control of the caches.
153 */
154static bool access_sctlr(struct kvm_vcpu *vcpu,
155 const struct sys_reg_params *p,
156 const struct sys_reg_desc *r)
157{
158 access_vm_reg(vcpu, p, r);
159
160 if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
161 vcpu->arch.hcr_el2 &= ~HCR_TVM;
162 stage2_flush_vm(vcpu->kvm);
163 }
164
165 return true; 110 return true;
166} 111}
167 112
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
377 NULL, reset_mpidr, MPIDR_EL1 }, 322 NULL, reset_mpidr, MPIDR_EL1 },
378 /* SCTLR_EL1 */ 323 /* SCTLR_EL1 */
379 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), 324 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
380 access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 }, 325 access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
381 /* CPACR_EL1 */ 326 /* CPACR_EL1 */
382 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), 327 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
383 NULL, reset_val, CPACR_EL1, 0 }, 328 NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
657 * register). 602 * register).
658 */ 603 */
659static const struct sys_reg_desc cp15_regs[] = { 604static const struct sys_reg_desc cp15_regs[] = {
660 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, 605 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
661 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 606 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
662 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, 607 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
663 { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, 608 { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 0eca93327195..d223a8b57c1e 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -142,6 +142,8 @@ good_area:
142 if (unlikely(fault & VM_FAULT_ERROR)) { 142 if (unlikely(fault & VM_FAULT_ERROR)) {
143 if (fault & VM_FAULT_OOM) 143 if (fault & VM_FAULT_OOM)
144 goto out_of_memory; 144 goto out_of_memory;
145 else if (fault & VM_FAULT_SIGSEGV)
146 goto bad_area;
145 else if (fault & VM_FAULT_SIGBUS) 147 else if (fault & VM_FAULT_SIGBUS)
146 goto do_sigbus; 148 goto do_sigbus;
147 BUG(); 149 BUG();
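
This hunk is the first of an identical series repeated across the fault handlers below: VM_FAULT_SIGSEGV is checked before VM_FAULT_SIGBUS, so a fault that must become a segmentation fault goes to bad_area instead of falling through to BUG(). A condensed, runnable model of the dispatch order (the flag values are illustrative, and VM_FAULT_ERROR is trimmed to the three bits shown):

#include <stdio.h>

#define VM_FAULT_OOM		0x0001
#define VM_FAULT_SIGBUS		0x0002
#define VM_FAULT_SIGSEGV	0x0040
#define VM_FAULT_ERROR		(VM_FAULT_OOM | VM_FAULT_SIGBUS | \
				 VM_FAULT_SIGSEGV)

static const char *dispatch(unsigned int fault)
{
	if (fault & VM_FAULT_ERROR) {
		if (fault & VM_FAULT_OOM)
			return "out_of_memory";
		else if (fault & VM_FAULT_SIGSEGV)	/* the new branch */
			return "bad_area";
		else if (fault & VM_FAULT_SIGBUS)
			return "do_sigbus";
		return "BUG()";		/* unhandled error bit */
	}
	return "handled";
}

int main(void)
{
	printf("%s\n", dispatch(VM_FAULT_SIGSEGV));	/* bad_area */
	printf("%s\n", dispatch(VM_FAULT_SIGBUS));	/* do_sigbus */
	printf("%s\n", dispatch(0));			/* handled */
	return 0;
}
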
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 1790f22e71a2..2686a7aa8ec8 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -176,6 +176,8 @@ retry:
176 if (unlikely(fault & VM_FAULT_ERROR)) { 176 if (unlikely(fault & VM_FAULT_ERROR)) {
177 if (fault & VM_FAULT_OOM) 177 if (fault & VM_FAULT_OOM)
178 goto out_of_memory; 178 goto out_of_memory;
179 else if (fault & VM_FAULT_SIGSEGV)
180 goto bad_area;
179 else if (fault & VM_FAULT_SIGBUS) 181 else if (fault & VM_FAULT_SIGBUS)
180 goto do_sigbus; 182 goto do_sigbus;
181 BUG(); 183 BUG();
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 9a66372fc7c7..ec4917ddf678 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
168 if (unlikely(fault & VM_FAULT_ERROR)) { 168 if (unlikely(fault & VM_FAULT_ERROR)) {
169 if (fault & VM_FAULT_OOM) 169 if (fault & VM_FAULT_OOM)
170 goto out_of_memory; 170 goto out_of_memory;
171 else if (fault & VM_FAULT_SIGSEGV)
172 goto bad_area;
171 else if (fault & VM_FAULT_SIGBUS) 173 else if (fault & VM_FAULT_SIGBUS)
172 goto do_sigbus; 174 goto do_sigbus;
173 BUG(); 175 BUG();
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7225dad87094..ba5ba7accd0d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -172,6 +172,8 @@ retry:
172 */ 172 */
173 if (fault & VM_FAULT_OOM) { 173 if (fault & VM_FAULT_OOM) {
174 goto out_of_memory; 174 goto out_of_memory;
175 } else if (fault & VM_FAULT_SIGSEGV) {
176 goto bad_area;
175 } else if (fault & VM_FAULT_SIGBUS) { 177 } else if (fault & VM_FAULT_SIGBUS) {
176 signal = SIGBUS; 178 signal = SIGBUS;
177 goto bad_area; 179 goto bad_area;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e9c6a8014bd6..e3d4d4890104 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -200,6 +200,8 @@ good_area:
200 if (unlikely(fault & VM_FAULT_ERROR)) { 200 if (unlikely(fault & VM_FAULT_ERROR)) {
201 if (fault & VM_FAULT_OOM) 201 if (fault & VM_FAULT_OOM)
202 goto out_of_memory; 202 goto out_of_memory;
203 else if (fault & VM_FAULT_SIGSEGV)
204 goto bad_area;
203 else if (fault & VM_FAULT_SIGBUS) 205 else if (fault & VM_FAULT_SIGBUS)
204 goto do_sigbus; 206 goto do_sigbus;
205 BUG(); 207 BUG();
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2bd7487440c4..b2f04aee46ec 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -145,6 +145,8 @@ good_area:
145 if (unlikely(fault & VM_FAULT_ERROR)) { 145 if (unlikely(fault & VM_FAULT_ERROR)) {
146 if (fault & VM_FAULT_OOM) 146 if (fault & VM_FAULT_OOM)
147 goto out_of_memory; 147 goto out_of_memory;
148 else if (fault & VM_FAULT_SIGSEGV)
149 goto map_err;
148 else if (fault & VM_FAULT_SIGBUS) 150 else if (fault & VM_FAULT_SIGBUS)
149 goto bus_err; 151 goto bus_err;
150 BUG(); 152 BUG();
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 332680e5ebf2..2de5dc695a87 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -141,6 +141,8 @@ good_area:
141 if (unlikely(fault & VM_FAULT_ERROR)) { 141 if (unlikely(fault & VM_FAULT_ERROR)) {
142 if (fault & VM_FAULT_OOM) 142 if (fault & VM_FAULT_OOM)
143 goto out_of_memory; 143 goto out_of_memory;
144 else if (fault & VM_FAULT_SIGSEGV)
145 goto bad_area;
144 else if (fault & VM_FAULT_SIGBUS) 146 else if (fault & VM_FAULT_SIGBUS)
145 goto do_sigbus; 147 goto do_sigbus;
146 BUG(); 148 BUG();
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index fa4cf52aa7a6..d46a5ebb7570 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -224,6 +224,8 @@ good_area:
224 if (unlikely(fault & VM_FAULT_ERROR)) { 224 if (unlikely(fault & VM_FAULT_ERROR)) {
225 if (fault & VM_FAULT_OOM) 225 if (fault & VM_FAULT_OOM)
226 goto out_of_memory; 226 goto out_of_memory;
227 else if (fault & VM_FAULT_SIGSEGV)
228 goto bad_area;
227 else if (fault & VM_FAULT_SIGBUS) 229 else if (fault & VM_FAULT_SIGBUS)
228 goto do_sigbus; 230 goto do_sigbus;
229 BUG(); 231 BUG();
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3289969ee423..843713c05b79 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2656,27 +2656,21 @@ config TRAD_SIGNALS
2656 bool 2656 bool
2657 2657
2658config MIPS32_COMPAT 2658config MIPS32_COMPAT
2659 bool "Kernel support for Linux/MIPS 32-bit binary compatibility" 2659 bool
2660 depends on 64BIT
2661 help
2662 Select this option if you want Linux/MIPS 32-bit binary
2663 compatibility. Since all software available for Linux/MIPS is
2664 currently 32-bit you should say Y here.
2665 2660
2666config COMPAT 2661config COMPAT
2667 bool 2662 bool
2668 depends on MIPS32_COMPAT
2669 select ARCH_WANT_OLD_COMPAT_IPC
2670 default y
2671 2663
2672config SYSVIPC_COMPAT 2664config SYSVIPC_COMPAT
2673 bool 2665 bool
2674 depends on COMPAT && SYSVIPC
2675 default y
2676 2666
2677config MIPS32_O32 2667config MIPS32_O32
2678 bool "Kernel support for o32 binaries" 2668 bool "Kernel support for o32 binaries"
2679 depends on MIPS32_COMPAT 2669 depends on 64BIT
2670 select ARCH_WANT_OLD_COMPAT_IPC
2671 select COMPAT
2672 select MIPS32_COMPAT
2673 select SYSVIPC_COMPAT if SYSVIPC
2680 help 2674 help
2681 Select this option if you want to run o32 binaries. These are pure 2675 Select this option if you want to run o32 binaries. These are pure
2682 32-bit binaries as used by the 32-bit Linux/MIPS port. Most of 2676 32-bit binaries as used by the 32-bit Linux/MIPS port. Most of
@@ -2686,7 +2680,10 @@ config MIPS32_O32
2686 2680
2687config MIPS32_N32 2681config MIPS32_N32
2688 bool "Kernel support for n32 binaries" 2682 bool "Kernel support for n32 binaries"
2689 depends on MIPS32_COMPAT 2683 depends on 64BIT
2684 select COMPAT
2685 select MIPS32_COMPAT
2686 select SYSVIPC_COMPAT if SYSVIPC
2690 help 2687 help
2691 Select this option if you want to run n32 binaries. These are 2688 Select this option if you want to run n32 binaries. These are
2692 64-bit binaries using 32-bit quantities for addressing and certain 2689 64-bit binaries using 32-bit quantities for addressing and certain
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 8585078ae50e..2a4c52e27f41 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -49,7 +49,8 @@
49/* 49/*
50 * Some extra ELF definitions 50 * Some extra ELF definitions
51 */ 51 */
52#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ 52#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
53#define PT_MIPS_ABIFLAGS 0x70000003 /* Records ABI related flags */
53 54
54/* -------------------------------------------------------------------- */ 55/* -------------------------------------------------------------------- */
55 56
@@ -349,39 +350,46 @@ int main(int argc, char *argv[])
349 350
350 for (i = 0; i < ex.e_phnum; i++) { 351 for (i = 0; i < ex.e_phnum; i++) {
351 /* Section types we can ignore... */ 352 /* Section types we can ignore... */
352 if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE || 353 switch (ph[i].p_type) {
353 ph[i].p_type == PT_PHDR 354 case PT_NULL:
354 || ph[i].p_type == PT_MIPS_REGINFO) 355 case PT_NOTE:
356 case PT_PHDR:
357 case PT_MIPS_REGINFO:
358 case PT_MIPS_ABIFLAGS:
355 continue; 359 continue;
356 /* Section types we can't handle... */
357 else if (ph[i].p_type != PT_LOAD) {
358 fprintf(stderr,
359 "Program header %d type %d can't be converted.\n",
360 ex.e_phnum, ph[i].p_type);
361 exit(1);
362 }
363 /* Writable (data) segment? */
364 if (ph[i].p_flags & PF_W) {
365 struct sect ndata, nbss;
366 360
367 ndata.vaddr = ph[i].p_vaddr; 361 case PT_LOAD:
368 ndata.len = ph[i].p_filesz; 362 /* Writable (data) segment? */
369 nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz; 363 if (ph[i].p_flags & PF_W) {
370 nbss.len = ph[i].p_memsz - ph[i].p_filesz; 364 struct sect ndata, nbss;
365
366 ndata.vaddr = ph[i].p_vaddr;
367 ndata.len = ph[i].p_filesz;
368 nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
369 nbss.len = ph[i].p_memsz - ph[i].p_filesz;
371 370
372 combine(&data, &ndata, 0); 371 combine(&data, &ndata, 0);
373 combine(&bss, &nbss, 1); 372 combine(&bss, &nbss, 1);
374 } else { 373 } else {
375 struct sect ntxt; 374 struct sect ntxt;
376 375
377 ntxt.vaddr = ph[i].p_vaddr; 376 ntxt.vaddr = ph[i].p_vaddr;
378 ntxt.len = ph[i].p_filesz; 377 ntxt.len = ph[i].p_filesz;
379 378
380 combine(&text, &ntxt, 0); 379 combine(&text, &ntxt, 0);
380 }
381 /* Remember the lowest segment start address. */
382 if (ph[i].p_vaddr < cur_vma)
383 cur_vma = ph[i].p_vaddr;
384 break;
385
386 default:
387 /* Section types we can't handle... */
388 fprintf(stderr,
389 "Program header %d type %d can't be converted.\n",
390 ex.e_phnum, ph[i].p_type);
391 exit(1);
381 } 392 }
382 /* Remember the lowest segment start address. */
383 if (ph[i].p_vaddr < cur_vma)
384 cur_vma = ph[i].p_vaddr;
385 } 393 }
386 394
387 /* Sections must be in order to be converted... */ 395 /* Sections must be in order to be converted... */
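
The switch-based rewrite lets the new PT_MIPS_ABIFLAGS type join the ignore list and keeps all PT_LOAD handling in one case. A standalone sketch of the classification logic over toy program headers (struct toy_phdr and the sample data are fabricated for illustration; the PT_* constants match the ELF values used above):

#include <stdio.h>
#include <stdlib.h>

#define PT_NULL			0
#define PT_LOAD			1
#define PT_NOTE			4
#define PT_PHDR			6
#define PT_MIPS_REGINFO		0x70000000
#define PT_MIPS_ABIFLAGS	0x70000003

#define PF_W 0x2

struct toy_phdr { unsigned type; unsigned flags; };

static void classify(const struct toy_phdr *ph, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		switch (ph[i].type) {
		case PT_NULL:
		case PT_NOTE:
		case PT_PHDR:
		case PT_MIPS_REGINFO:
		case PT_MIPS_ABIFLAGS:
			continue;	/* section types we can ignore */

		case PT_LOAD:
			printf("segment %d: %s\n", i,
			       (ph[i].flags & PF_W) ? "data+bss" : "text");
			break;

		default:		/* section types we can't handle */
			fprintf(stderr,
				"Program header %d type %u can't be converted.\n",
				i, ph[i].type);
			exit(1);
		}
	}
}

int main(void)
{
	const struct toy_phdr ph[] = {
		{ PT_MIPS_ABIFLAGS, 0 },	/* now silently skipped */
		{ PT_LOAD, 0 },			/* text */
		{ PT_LOAD, PF_W },		/* data + bss */
	};

	classify(ph, 3);
	return 0;
}
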
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index ecd903dd1c45..8b1eeffa12ed 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -240,9 +240,7 @@ static int octeon_cpu_disable(void)
240 240
241 set_cpu_online(cpu, false); 241 set_cpu_online(cpu, false);
242 cpu_clear(cpu, cpu_callin_map); 242 cpu_clear(cpu, cpu_callin_map);
243 local_irq_disable();
244 octeon_fixup_irqs(); 243 octeon_fixup_irqs();
245 local_irq_enable();
246 244
247 flush_cache_all(); 245 flush_cache_all();
248 local_flush_tlb_all(); 246 local_flush_tlb_all();
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index f57b96dcf7df..61a4460d67d3 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -132,7 +132,6 @@ CONFIG_IP_NF_MATCH_ECN=m
132CONFIG_IP_NF_MATCH_TTL=m 132CONFIG_IP_NF_MATCH_TTL=m
133CONFIG_IP_NF_FILTER=m 133CONFIG_IP_NF_FILTER=m
134CONFIG_IP_NF_TARGET_REJECT=m 134CONFIG_IP_NF_TARGET_REJECT=m
135CONFIG_IP_NF_TARGET_ULOG=m
136CONFIG_IP_NF_MANGLE=m 135CONFIG_IP_NF_MANGLE=m
137CONFIG_IP_NF_TARGET_CLUSTERIP=m 136CONFIG_IP_NF_TARGET_CLUSTERIP=m
138CONFIG_IP_NF_TARGET_ECN=m 137CONFIG_IP_NF_TARGET_ECN=m
@@ -175,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
175CONFIG_BRIDGE_EBT_REDIRECT=m 174CONFIG_BRIDGE_EBT_REDIRECT=m
176CONFIG_BRIDGE_EBT_SNAT=m 175CONFIG_BRIDGE_EBT_SNAT=m
177CONFIG_BRIDGE_EBT_LOG=m 176CONFIG_BRIDGE_EBT_LOG=m
178CONFIG_BRIDGE_EBT_ULOG=m
179CONFIG_BRIDGE_EBT_NFLOG=m 177CONFIG_BRIDGE_EBT_NFLOG=m
180CONFIG_IP_SCTP=m 178CONFIG_IP_SCTP=m
181CONFIG_BRIDGE=m 179CONFIG_BRIDGE=m
@@ -220,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m
220CONFIG_NET_CLS_IND=y 218CONFIG_NET_CLS_IND=y
221CONFIG_CFG80211=m 219CONFIG_CFG80211=m
222CONFIG_MAC80211=m 220CONFIG_MAC80211=m
223CONFIG_MAC80211_RC_PID=y
224CONFIG_MAC80211_RC_DEFAULT_PID=y
225CONFIG_MAC80211_MESH=y 221CONFIG_MAC80211_MESH=y
226CONFIG_RFKILL=m 222CONFIG_RFKILL=m
227CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 223CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -248,19 +244,13 @@ CONFIG_ATA_OVER_ETH=m
248CONFIG_IDE=y 244CONFIG_IDE=y
249CONFIG_BLK_DEV_IDECD=y 245CONFIG_BLK_DEV_IDECD=y
250CONFIG_IDE_GENERIC=y 246CONFIG_IDE_GENERIC=y
251CONFIG_BLK_DEV_GENERIC=y
252CONFIG_BLK_DEV_PIIX=y
253CONFIG_BLK_DEV_IT8213=m
254CONFIG_BLK_DEV_TC86C001=m
255CONFIG_RAID_ATTRS=m 247CONFIG_RAID_ATTRS=m
256CONFIG_SCSI=m 248CONFIG_BLK_DEV_SD=y
257CONFIG_BLK_DEV_SD=m
258CONFIG_CHR_DEV_ST=m 249CONFIG_CHR_DEV_ST=m
259CONFIG_CHR_DEV_OSST=m 250CONFIG_CHR_DEV_OSST=m
260CONFIG_BLK_DEV_SR=m 251CONFIG_BLK_DEV_SR=m
261CONFIG_BLK_DEV_SR_VENDOR=y 252CONFIG_BLK_DEV_SR_VENDOR=y
262CONFIG_CHR_DEV_SG=m 253CONFIG_CHR_DEV_SG=m
263CONFIG_SCSI_MULTI_LUN=y
264CONFIG_SCSI_CONSTANTS=y 254CONFIG_SCSI_CONSTANTS=y
265CONFIG_SCSI_LOGGING=y 255CONFIG_SCSI_LOGGING=y
266CONFIG_SCSI_SCAN_ASYNC=y 256CONFIG_SCSI_SCAN_ASYNC=y
@@ -273,6 +263,8 @@ CONFIG_SCSI_AACRAID=m
273CONFIG_SCSI_AIC7XXX=m 263CONFIG_SCSI_AIC7XXX=m
274CONFIG_AIC7XXX_RESET_DELAY_MS=15000 264CONFIG_AIC7XXX_RESET_DELAY_MS=15000
275# CONFIG_AIC7XXX_DEBUG_ENABLE is not set 265# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
266CONFIG_ATA=y
267CONFIG_ATA_PIIX=y
276CONFIG_MD=y 268CONFIG_MD=y
277CONFIG_BLK_DEV_MD=m 269CONFIG_BLK_DEV_MD=m
278CONFIG_MD_LINEAR=m 270CONFIG_MD_LINEAR=m
@@ -340,6 +332,7 @@ CONFIG_UIO=m
340CONFIG_UIO_CIF=m 332CONFIG_UIO_CIF=m
341CONFIG_EXT2_FS=y 333CONFIG_EXT2_FS=y
342CONFIG_EXT3_FS=y 334CONFIG_EXT3_FS=y
335CONFIG_EXT4_FS=y
343CONFIG_REISERFS_FS=m 336CONFIG_REISERFS_FS=m
344CONFIG_REISERFS_PROC_INFO=y 337CONFIG_REISERFS_PROC_INFO=y
345CONFIG_REISERFS_FS_XATTR=y 338CONFIG_REISERFS_FS_XATTR=y
@@ -441,4 +434,3 @@ CONFIG_CRYPTO_SERPENT=m
441CONFIG_CRYPTO_TEA=m 434CONFIG_CRYPTO_TEA=m
442CONFIG_CRYPTO_TWOFISH=m 435CONFIG_CRYPTO_TWOFISH=m
443# CONFIG_CRYPTO_ANSI_CPRNG is not set 436# CONFIG_CRYPTO_ANSI_CPRNG is not set
444CONFIG_CRC16=m
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 994d21939676..affebb78f5d6 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -64,7 +64,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
64 return SIGFPE; 64 return SIGFPE;
65 65
66 /* set FRE */ 66 /* set FRE */
67 write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE); 67 set_c0_config5(MIPS_CONF5_FRE);
68 goto fr_common; 68 goto fr_common;
69 69
70 case FPU_64BIT: 70 case FPU_64BIT:
@@ -74,8 +74,10 @@ static inline int __enable_fpu(enum fpu_mode mode)
74#endif 74#endif
75 /* fall through */ 75 /* fall through */
76 case FPU_32BIT: 76 case FPU_32BIT:
77 /* clear FRE */ 77 if (cpu_has_fre) {
78 write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE); 78 /* clear FRE */
79 clear_c0_config5(MIPS_CONF5_FRE);
80 }
79fr_common: 81fr_common:
80 /* set CU1 & change FR appropriately */ 82 /* set CU1 & change FR appropriately */
81 fr = (int)mode & FPU_FR_MASK; 83 fr = (int)mode & FPU_FR_MASK;
@@ -182,25 +184,32 @@ static inline int init_fpu(void)
182 int ret = 0; 184 int ret = 0;
183 185
184 if (cpu_has_fpu) { 186 if (cpu_has_fpu) {
187 unsigned int config5;
188
185 ret = __own_fpu(); 189 ret = __own_fpu();
186 if (!ret) { 190 if (ret)
187 unsigned int config5 = read_c0_config5(); 191 return ret;
188
189 /*
190 * Ensure FRE is clear whilst running _init_fpu, since
191 * single precision FP instructions are used. If FRE
192 * was set then we'll just end up initialising all 32
193 * 64b registers.
194 */
195 write_c0_config5(config5 & ~MIPS_CONF5_FRE);
196 enable_fpu_hazard();
197 192
193 if (!cpu_has_fre) {
198 _init_fpu(); 194 _init_fpu();
199 195
200 /* Restore FRE */ 196 return 0;
201 write_c0_config5(config5);
202 enable_fpu_hazard();
203 } 197 }
198
199 /*
200 * Ensure FRE is clear whilst running _init_fpu, since
201 * single precision FP instructions are used. If FRE
202 * was set then we'll just end up initialising all 32
203 * 64b registers.
204 */
205 config5 = clear_c0_config5(MIPS_CONF5_FRE);
206 enable_fpu_hazard();
207
208 _init_fpu();
209
210 /* Restore FRE */
211 write_c0_config5(config5);
212 enable_fpu_hazard();
204 } else 213 } else
205 fpu_emulator_init_fpu(); 214 fpu_emulator_init_fpu();
206 215
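
set_c0_config5()/clear_c0_config5() follow the usual MIPS read-modify-write helper convention of returning the previous register value, which is what lets init_fpu() restore FRE exactly as it found it. A userspace toy of that convention (the register is simulated, and the MIPS_CONF5_FRE bit position is an assumption for illustration):

#include <stdio.h>

#define MIPS_CONF5_FRE (1u << 8)	/* assumed bit position */

static unsigned int config5 = MIPS_CONF5_FRE;	/* pretend FRE is set */

/* Stand-ins for set_c0_config5()/clear_c0_config5(): modify the
 * register and hand back the old value. */
static unsigned int clear_config5(unsigned int bits)
{
	unsigned int old = config5;

	config5 &= ~bits;
	return old;
}

static void init_fpu_like(void)
{
	/* Clear FRE around _init_fpu(), then put the old value back. */
	unsigned int old = clear_config5(MIPS_CONF5_FRE);

	printf("FRE while initialising: %u\n", config5 & MIPS_CONF5_FRE);
	config5 = old;	/* restore the saved value */
	printf("FRE afterwards: %s\n",
	       (config5 & MIPS_CONF5_FRE) ? "set" : "clear");
}

int main(void)
{
	init_fpu_like();
	return 0;
}
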
diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h
index f8d37d1df5de..9fac64a26353 100644
--- a/arch/mips/include/asm/fw/arc/hinv.h
+++ b/arch/mips/include/asm/fw/arc/hinv.h
@@ -119,7 +119,7 @@ union key_u {
119#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */ 119#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */
120#endif 120#endif
121 121
122typedef struct component { 122typedef struct {
123 CONFIGCLASS Class; 123 CONFIGCLASS Class;
124 CONFIGTYPE Type; 124 CONFIGTYPE Type;
125 IDENTIFIERFLAG Flags; 125 IDENTIFIERFLAG Flags;
@@ -140,7 +140,7 @@ struct cfgdata {
140}; 140};
141 141
142/* System ID */ 142/* System ID */
143typedef struct systemid { 143typedef struct {
144 CHAR VendorId[8]; 144 CHAR VendorId[8];
145 CHAR ProductId[8]; 145 CHAR ProductId[8];
146} SYSTEMID; 146} SYSTEMID;
@@ -166,7 +166,7 @@ typedef enum memorytype {
166#endif /* _NT_PROM */ 166#endif /* _NT_PROM */
167} MEMORYTYPE; 167} MEMORYTYPE;
168 168
169typedef struct memorydescriptor { 169typedef struct {
170 MEMORYTYPE Type; 170 MEMORYTYPE Type;
171 LONG BasePage; 171 LONG BasePage;
172 LONG PageCount; 172 LONG PageCount;
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b95a827d763e..59c0901bdd84 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -89,9 +89,9 @@ static inline bool mips_cm_has_l2sync(void)
89 89
90/* Macros to ease the creation of register access functions */ 90/* Macros to ease the creation of register access functions */
91#define BUILD_CM_R_(name, off) \ 91#define BUILD_CM_R_(name, off) \
92static inline u32 *addr_gcr_##name(void) \ 92static inline u32 __iomem *addr_gcr_##name(void) \
93{ \ 93{ \
94 return (u32 *)(mips_cm_base + (off)); \ 94 return (u32 __iomem *)(mips_cm_base + (off)); \
95} \ 95} \
96 \ 96 \
97static inline u32 read_gcr_##name(void) \ 97static inline u32 read_gcr_##name(void) \
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 5e4aef304b02..5b720d8c2745 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1386,12 +1386,27 @@ do { \
1386 __res; \ 1386 __res; \
1387}) 1387})
1388 1388
1389#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \
1390do { \
1391 __asm__ __volatile__( \
1392 " .set push \n" \
1393 " .set reorder \n" \
1394 " "STR(gas_hardfloat)" \n" \
1395 " ctc1 %0,"STR(dest)" \n" \
1396 " .set pop \n" \
1397 : : "r" (val)); \
1398} while (0)
1399
1389#ifdef GAS_HAS_SET_HARDFLOAT 1400#ifdef GAS_HAS_SET_HARDFLOAT
1390#define read_32bit_cp1_register(source) \ 1401#define read_32bit_cp1_register(source) \
1391 _read_32bit_cp1_register(source, .set hardfloat) 1402 _read_32bit_cp1_register(source, .set hardfloat)
1403#define write_32bit_cp1_register(dest, val) \
1404 _write_32bit_cp1_register(dest, val, .set hardfloat)
1392#else 1405#else
1393#define read_32bit_cp1_register(source) \ 1406#define read_32bit_cp1_register(source) \
1394 _read_32bit_cp1_register(source, ) 1407 _read_32bit_cp1_register(source, )
1408#define write_32bit_cp1_register(dest, val) \
1409 _write_32bit_cp1_register(dest, val, )
1395#endif 1410#endif
1396 1411
1397#ifdef HAVE_AS_DSP 1412#ifdef HAVE_AS_DSP
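
The interesting part of _write_32bit_cp1_register() is the stringification trick: STR(gas_hardfloat) expands to either ".set hardfloat" or an empty string depending on what the caller passes, so one macro body serves both assembler generations. A runnable demonstration of just that expansion, printing the asm template instead of emitting it (relies on C99 empty macro arguments; the template text mirrors the macro above):

#include <stdio.h>

#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)

/* Passing '.set hardfloat' or nothing as the second argument decides
 * whether the directive appears in the generated template. */
#define EMIT_TEMPLATE(dest, gas_hardfloat)				\
	printf(".set push\n.set reorder\n%s\nctc1 %%0,$%s\n.set pop\n\n", \
	       STR(gas_hardfloat), STR(dest))

int main(void)
{
	EMIT_TEMPLATE(31, .set hardfloat);	/* assembler knows the directive */
	EMIT_TEMPLATE(31, );			/* older assembler: leave it out */
	return 0;
}
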
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index bb7963753730..6499d93ae68d 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -29,13 +29,7 @@
29static inline long syscall_get_nr(struct task_struct *task, 29static inline long syscall_get_nr(struct task_struct *task,
30 struct pt_regs *regs) 30 struct pt_regs *regs)
31{ 31{
32 /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ 32 return current_thread_info()->syscall;
33 if ((config_enabled(CONFIG_32BIT) ||
34 test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
35 (regs->regs[2] == __NR_syscall))
36 return regs->regs[4];
37 else
38 return regs->regs[2];
39} 33}
40 34
41static inline unsigned long mips_get_syscall_arg(unsigned long *arg, 35static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 99eea59604e9..e4440f92b366 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -36,6 +36,7 @@ struct thread_info {
36 */ 36 */
37 struct restart_block restart_block; 37 struct restart_block restart_block;
38 struct pt_regs *regs; 38 struct pt_regs *regs;
39 long syscall; /* syscall number */
39}; 40};
40 41
41/* 42/*
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index d001bb1ad177..c03088f9f514 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -376,16 +376,17 @@
376#define __NR_getrandom (__NR_Linux + 353) 376#define __NR_getrandom (__NR_Linux + 353)
377#define __NR_memfd_create (__NR_Linux + 354) 377#define __NR_memfd_create (__NR_Linux + 354)
378#define __NR_bpf (__NR_Linux + 355) 378#define __NR_bpf (__NR_Linux + 355)
379#define __NR_execveat (__NR_Linux + 356)
379 380
380/* 381/*
381 * Offset of the last Linux o32 flavoured syscall 382 * Offset of the last Linux o32 flavoured syscall
382 */ 383 */
383#define __NR_Linux_syscalls 355 384#define __NR_Linux_syscalls 356
384 385
385#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 386#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
386 387
387#define __NR_O32_Linux 4000 388#define __NR_O32_Linux 4000
388#define __NR_O32_Linux_syscalls 355 389#define __NR_O32_Linux_syscalls 356
389 390
390#if _MIPS_SIM == _MIPS_SIM_ABI64 391#if _MIPS_SIM == _MIPS_SIM_ABI64
391 392
@@ -709,16 +710,17 @@
709#define __NR_getrandom (__NR_Linux + 313) 710#define __NR_getrandom (__NR_Linux + 313)
710#define __NR_memfd_create (__NR_Linux + 314) 711#define __NR_memfd_create (__NR_Linux + 314)
711#define __NR_bpf (__NR_Linux + 315) 712#define __NR_bpf (__NR_Linux + 315)
713#define __NR_execveat (__NR_Linux + 316)
712 714
713/* 715/*
714 * Offset of the last Linux 64-bit flavoured syscall 716 * Offset of the last Linux 64-bit flavoured syscall
715 */ 717 */
716#define __NR_Linux_syscalls 315 718#define __NR_Linux_syscalls 316
717 719
718#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 720#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
719 721
720#define __NR_64_Linux 5000 722#define __NR_64_Linux 5000
721#define __NR_64_Linux_syscalls 315 723#define __NR_64_Linux_syscalls 316
722 724
723#if _MIPS_SIM == _MIPS_SIM_NABI32 725#if _MIPS_SIM == _MIPS_SIM_NABI32
724 726
@@ -1046,15 +1048,16 @@
1046#define __NR_getrandom (__NR_Linux + 317) 1048#define __NR_getrandom (__NR_Linux + 317)
1047#define __NR_memfd_create (__NR_Linux + 318) 1049#define __NR_memfd_create (__NR_Linux + 318)
1048#define __NR_bpf (__NR_Linux + 319) 1050#define __NR_bpf (__NR_Linux + 319)
1051#define __NR_execveat (__NR_Linux + 320)
1049 1052
1050/* 1053/*
1051 * Offset of the last N32 flavoured syscall 1054 * Offset of the last N32 flavoured syscall
1052 */ 1055 */
1053#define __NR_Linux_syscalls 319 1056#define __NR_Linux_syscalls 320
1054 1057
1055#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1058#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1056 1059
1057#define __NR_N32_Linux 6000 1060#define __NR_N32_Linux 6000
1058#define __NR_N32_Linux_syscalls 319 1061#define __NR_N32_Linux_syscalls 320
1059 1062
1060#endif /* _UAPI_ASM_UNISTD_H */ 1063#endif /* _UAPI_ASM_UNISTD_H */
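
The absolute execveat numbers follow directly from the per-ABI bases and the new offsets in the hunks above; a quick check:

#include <stdio.h>

int main(void)
{
	/* __NR_Linux base + new execveat offset, per the hunks above: */
	printf("o32 execveat: %d\n", 4000 + 356);	/* 4356 */
	printf("n64 execveat: %d\n", 5000 + 316);	/* 5316 */
	printf("n32 execveat: %d\n", 6000 + 320);	/* 6320 */
	return 0;
}
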
diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c
index 2531da1d3add..97206b3deb97 100644
--- a/arch/mips/jz4740/irq.c
+++ b/arch/mips/jz4740/irq.c
@@ -30,6 +30,9 @@
30#include <asm/irq_cpu.h> 30#include <asm/irq_cpu.h>
31 31
32#include <asm/mach-jz4740/base.h> 32#include <asm/mach-jz4740/base.h>
33#include <asm/mach-jz4740/irq.h>
34
35#include "irq.h"
33 36
34static void __iomem *jz_intc_base; 37static void __iomem *jz_intc_base;
35 38
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index c92b15df6893..a5b5b56485c1 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -19,8 +19,8 @@ enum {
19int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, 19int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
20 bool is_interp, struct arch_elf_state *state) 20 bool is_interp, struct arch_elf_state *state)
21{ 21{
22 struct elfhdr *ehdr = _ehdr; 22 struct elf32_hdr *ehdr = _ehdr;
23 struct elf_phdr *phdr = _phdr; 23 struct elf32_phdr *phdr = _phdr;
24 struct mips_elf_abiflags_v0 abiflags; 24 struct mips_elf_abiflags_v0 abiflags;
25 int ret; 25 int ret;
26 26
@@ -48,7 +48,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
48 return 0; 48 return 0;
49} 49}
50 50
51static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) 51static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
52{ 52{
53 /* If the ABI requirement is provided, simply return that */ 53 /* If the ABI requirement is provided, simply return that */
54 if (in_abi != -1) 54 if (in_abi != -1)
@@ -65,7 +65,7 @@ static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
65int arch_check_elf(void *_ehdr, bool has_interpreter, 65int arch_check_elf(void *_ehdr, bool has_interpreter,
66 struct arch_elf_state *state) 66 struct arch_elf_state *state)
67{ 67{
68 struct elfhdr *ehdr = _ehdr; 68 struct elf32_hdr *ehdr = _ehdr;
69 unsigned fp_abi, interp_fp_abi, abi0, abi1; 69 unsigned fp_abi, interp_fp_abi, abi0, abi1;
70 70
71 /* Ignore non-O32 binaries */ 71 /* Ignore non-O32 binaries */
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 590c2c980fd3..6eb7a3f515fc 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -57,6 +57,8 @@ static struct irq_chip mips_cpu_irq_controller = {
57 .irq_mask_ack = mask_mips_irq, 57 .irq_mask_ack = mask_mips_irq,
58 .irq_unmask = unmask_mips_irq, 58 .irq_unmask = unmask_mips_irq,
59 .irq_eoi = unmask_mips_irq, 59 .irq_eoi = unmask_mips_irq,
60 .irq_disable = mask_mips_irq,
61 .irq_enable = unmask_mips_irq,
60}; 62};
61 63
62/* 64/*
@@ -93,6 +95,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
93 .irq_mask_ack = mips_mt_cpu_irq_ack, 95 .irq_mask_ack = mips_mt_cpu_irq_ack,
94 .irq_unmask = unmask_mips_irq, 96 .irq_unmask = unmask_mips_irq,
95 .irq_eoi = unmask_mips_irq, 97 .irq_eoi = unmask_mips_irq,
98 .irq_disable = mask_mips_irq,
99 .irq_enable = unmask_mips_irq,
96}; 100};
97 101
98asmlinkage void __weak plat_irq_dispatch(void) 102asmlinkage void __weak plat_irq_dispatch(void)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eb76434828e8..85bff5d513e5 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -82,6 +82,30 @@ void flush_thread(void)
82{ 82{
83} 83}
84 84
85int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
86{
87 /*
88 * Save any process state which is live in hardware registers to the
89 * parent context prior to duplication. This prevents the new child
90 * state becoming stale if the parent is preempted before copy_thread()
91 * gets a chance to save the parent's live hardware registers to the
92 * child context.
93 */
94 preempt_disable();
95
96 if (is_msa_enabled())
97 save_msa(current);
98 else if (is_fpu_owner())
99 _save_fp(current);
100
101 save_dsp(current);
102
103 preempt_enable();
104
105 *dst = *src;
106 return 0;
107}
108
85int copy_thread(unsigned long clone_flags, unsigned long usp, 109int copy_thread(unsigned long clone_flags, unsigned long usp,
86 unsigned long arg, struct task_struct *p) 110 unsigned long arg, struct task_struct *p)
87{ 111{
@@ -92,18 +116,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
92 116
93 childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; 117 childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
94 118
95 preempt_disable();
96
97 if (is_msa_enabled())
98 save_msa(p);
99 else if (is_fpu_owner())
100 save_fp(p);
101
102 if (cpu_has_dsp)
103 save_dsp(p);
104
105 preempt_enable();
106
107 /* set up new TSS. */ 119 /* set up new TSS. */
108 childregs = (struct pt_regs *) childksp - 1; 120 childregs = (struct pt_regs *) childksp - 1;
109 /* Put the stack after the struct pt_regs. */ 121 /* Put the stack after the struct pt_regs. */
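
Moving the register spill into arch_dup_task_struct() guarantees it runs before the generic fork code copies the task struct, so the child can no longer inherit stale FPU/MSA/DSP state when the parent is preempted before copy_thread(). A sequential toy of the new ordering (names and the single "live register" are illustrative):

#include <stdio.h>

struct toy_task {
	int fpu_regs;		/* stands in for saved FPU/MSA/DSP state */
};

static int live_hw_value = 7;	/* state living only in hardware regs */

static void save_live_state(struct toy_task *t)
{
	t->fpu_regs = live_hw_value;	/* like save_msa()/_save_fp() */
}

/* New ordering: save the parent's live state first, then duplicate. */
static int dup_task_struct(struct toy_task *dst, struct toy_task *src)
{
	save_live_state(src);
	*dst = *src;		/* the copy now sees fresh state */
	return 0;
}

int main(void)
{
	struct toy_task parent = { .fpu_regs = 0 }, child;

	dup_task_struct(&child, &parent);
	printf("child sees fpu_regs = %d\n", child.fpu_regs);	/* 7 */
	return 0;
}
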
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9d1487d83293..510452812594 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -770,6 +770,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
770 long ret = 0; 770 long ret = 0;
771 user_exit(); 771 user_exit();
772 772
773 current_thread_info()->syscall = syscall;
774
773 if (secure_computing() == -1) 775 if (secure_computing() == -1)
774 return -1; 776 return -1;
775 777
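
With the number recorded at syscall entry, syscall_get_nr() no longer has to guess whether the task entered through the o32 syscall() indirection. A toy of the record-then-read pattern (a plain global stands in for current_thread_info(); single-threaded for illustration):

#include <stdio.h>

struct thread_info {
	long syscall;	/* new field: number of the current syscall */
};

static struct thread_info current_ti;

/* Entry path (cf. syscall_trace_enter): remember the real number,
 * whether it arrived directly or via the o32 syscall() wrapper. */
static void trace_enter(long nr)
{
	current_ti.syscall = nr;
}

/* syscall_get_nr() reduces to a plain field read. */
static long get_nr(void)
{
	return current_ti.syscall;
}

int main(void)
{
	trace_enter(4356);	/* e.g. o32 execveat, per the table above */
	printf("syscall_get_nr() -> %ld\n", get_nr());
	return 0;
}
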
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 00cad1005a16..6e8de80bb446 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -181,6 +181,7 @@ illegal_syscall:
181 sll t1, t0, 2 181 sll t1, t0, 2
182 beqz v0, einval 182 beqz v0, einval
183 lw t2, sys_call_table(t1) # syscall routine 183 lw t2, sys_call_table(t1) # syscall routine
184 sw a0, PT_R2(sp) # call routine directly on restart
184 185
185 /* Some syscalls like execve get their arguments from struct pt_regs 186 /* Some syscalls like execve get their arguments from struct pt_regs
186 and claim zero arguments in the syscall table. Thus we have to 187 and claim zero arguments in the syscall table. Thus we have to
@@ -580,3 +581,4 @@ EXPORT(sys_call_table)
580 PTR sys_getrandom 581 PTR sys_getrandom
581 PTR sys_memfd_create 582 PTR sys_memfd_create
582 PTR sys_bpf /* 4355 */ 583 PTR sys_bpf /* 4355 */
584 PTR sys_execveat
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 5251565e344b..ad4d44635c76 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -435,4 +435,5 @@ EXPORT(sys_call_table)
435 PTR sys_getrandom 435 PTR sys_getrandom
436 PTR sys_memfd_create 436 PTR sys_memfd_create
437 PTR sys_bpf /* 5315 */ 437 PTR sys_bpf /* 5315 */
438 PTR sys_execveat
438 .size sys_call_table,.-sys_call_table 439 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 77e74398b828..446cc654da56 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -428,4 +428,5 @@ EXPORT(sysn32_call_table)
428 PTR sys_getrandom 428 PTR sys_getrandom
429 PTR sys_memfd_create 429 PTR sys_memfd_create
430 PTR sys_bpf 430 PTR sys_bpf
431 PTR compat_sys_execveat /* 6320 */
431 .size sysn32_call_table,.-sysn32_call_table 432 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6f8db9f728e8..d07b210fbeff 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -186,6 +186,7 @@ LEAF(sys32_syscall)
186 dsll t1, t0, 3 186 dsll t1, t0, 3
187 beqz v0, einval 187 beqz v0, einval
188 ld t2, sys32_call_table(t1) # syscall routine 188 ld t2, sys32_call_table(t1) # syscall routine
189 sd a0, PT_R2(sp) # call routine directly on restart
189 190
190 move a0, a1 # shift argument registers 191 move a0, a1 # shift argument registers
191 move a1, a2 192 move a1, a2
@@ -565,4 +566,5 @@ EXPORT(sys32_call_table)
565 PTR sys_getrandom 566 PTR sys_getrandom
566 PTR sys_memfd_create 567 PTR sys_memfd_create
567 PTR sys_bpf /* 4355 */ 568 PTR sys_bpf /* 4355 */
569 PTR compat_sys_execveat
568 .size sys32_call_table,.-sys32_call_table 570 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 1e0a93c5a3e7..e36a859af666 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -44,8 +44,8 @@ static void cmp_init_secondary(void)
44 struct cpuinfo_mips *c __maybe_unused = &current_cpu_data; 44 struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
45 45
46 /* Assume GIC is present */ 46 /* Assume GIC is present */
47 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | 47 change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
48 STATUSF_IP7); 48 STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
49 49
50 /* Enable per-cpu interrupts: platform specific */ 50 /* Enable per-cpu interrupts: platform specific */
51 51
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index ad86951b73bd..17ea705f6c40 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -161,7 +161,8 @@ static void vsmp_init_secondary(void)
161#ifdef CONFIG_MIPS_GIC 161#ifdef CONFIG_MIPS_GIC
162 /* This is Malta specific: IPI,performance and timer interrupts */ 162 /* This is Malta specific: IPI,performance and timer interrupts */
163 if (gic_present) 163 if (gic_present)
164 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 164 change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
165 STATUSF_IP4 | STATUSF_IP5 |
165 STATUSF_IP6 | STATUSF_IP7); 166 STATUSF_IP6 | STATUSF_IP7);
166 else 167 else
167#endif 168#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c94c4e92e17d..1c0d8c50b7e1 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -123,10 +123,10 @@ asmlinkage void start_secondary(void)
123 unsigned int cpu; 123 unsigned int cpu;
124 124
125 cpu_probe(); 125 cpu_probe();
126 cpu_report();
127 per_cpu_trap_init(false); 126 per_cpu_trap_init(false);
128 mips_clockevent_init(); 127 mips_clockevent_init();
129 mp_ops->init_secondary(); 128 mp_ops->init_secondary();
129 cpu_report();
130 130
131 /* 131 /*
132 * XXX parity protection should be folded in here when it's converted 132 * XXX parity protection should be folded in here when it's converted
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ad3d2031c327..c3b41e24c05a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1231,7 +1231,8 @@ static int enable_restore_fp_context(int msa)
1231 1231
1232 /* Restore the scalar FP control & status register */ 1232 /* Restore the scalar FP control & status register */
1233 if (!was_fpu_owner) 1233 if (!was_fpu_owner)
1234 asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31)); 1234 write_32bit_cp1_register(CP1_STATUS,
1235 current->thread.fpu.fcr31);
1235 } 1236 }
1236 1237
1237out: 1238out:
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 30e334e823bd..2ae12825529f 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
20 select PREEMPT_NOTIFIERS 20 select PREEMPT_NOTIFIERS
21 select ANON_INODES 21 select ANON_INODES
22 select KVM_MMIO 22 select KVM_MMIO
23 select SRCU
23 ---help--- 24 ---help---
24 Support for hosting Guest kernels. 25 Support for hosting Guest kernels.
25 Currently supported on MIPS32 processors. 26 Currently supported on MIPS32 processors.
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42bb1849..70ab5d664332 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -158,6 +158,8 @@ good_area:
158 if (unlikely(fault & VM_FAULT_ERROR)) { 158 if (unlikely(fault & VM_FAULT_ERROR)) {
159 if (fault & VM_FAULT_OOM) 159 if (fault & VM_FAULT_OOM)
160 goto out_of_memory; 160 goto out_of_memory;
161 else if (fault & VM_FAULT_SIGSEGV)
162 goto bad_area;
161 else if (fault & VM_FAULT_SIGBUS) 163 else if (fault & VM_FAULT_SIGBUS)
162 goto do_sigbus; 164 goto do_sigbus;
163 BUG(); 165 BUG();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index e90b2e899291..30639a6e9b8c 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -489,6 +489,8 @@ static void r4k_tlb_configure(void)
489#ifdef CONFIG_64BIT 489#ifdef CONFIG_64BIT
490 pg |= PG_ELPA; 490 pg |= PG_ELPA;
491#endif 491#endif
492 if (cpu_has_rixiex)
493 pg |= PG_IEC;
492 write_c0_pagegrain(pg); 494 write_c0_pagegrain(pg);
493 } 495 }
494 496
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index faed90240ded..6d6df839948f 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -159,13 +159,6 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
 
-/*
- * Internal debugging function
- */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_CACHEFLUSH_H */
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 3516cbdf1ee9..0c2cc5d39c8e 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -262,6 +262,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 15a0bb5fc06d..d194c0427b26 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -135,6 +135,8 @@ survive:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
@@ -157,9 +159,11 @@ bad_area:
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
-		pr_alert("%s: unhandled page fault (%d) at 0x%08lx, "
-			"cause %ld\n", current->comm, SIGSEGV, address, cause);
-		show_regs(regs);
+		if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
+			pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
+				"cause %ld\n", current->comm, SIGSEGV, address, cause);
+			show_regs(regs);
+		}
 		_exception(SIGSEGV, regs, code, address);
 		return;
 	}
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0703acf7d327..230ac20ae794 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -171,6 +171,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 3ca9c1131cfe..e5120e653240 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -256,6 +256,8 @@ good_area:
 	 */
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto bad_area;
 	BUG();
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 5b9312220e84..30b35fff2dea 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -60,13 +60,6 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
 
-
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-/* internal debugging function */
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_POWERPC_CACHEFLUSH_H */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f5769f19ae25..11850f310fb4 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select HAVE_KVM_EVENTFD
+	select SRCU
 
 config KVM_BOOK3S_HANDLER
 	bool
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 5a236f082c78..1b5305d4bdab 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	if (*flt & VM_FAULT_OOM) {
 		ret = -ENOMEM;
 		goto out_unlock;
-	} else if (*flt & VM_FAULT_SIGBUS) {
+	} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
 		ret = -EFAULT;
 		goto out_unlock;
 	}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index eb79907f34fa..6154b0a2b063 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -437,6 +437,8 @@ good_area:
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+		if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		rc = mm_fault_error(regs, address, fault);
 		if (rc >= MM_FAULT_RETURN)
 			goto bail;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index b700a329c31d..d2de7d5d7574 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
 	 * all cpus at boot. Get these reg values of current cpu and use the
 	 * same accross all cpus.
 	 */
-	uint64_t lpcr_val = mfspr(SPRN_LPCR);
+	uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
 	uint64_t hid0_val = mfspr(SPRN_HID0);
 	uint64_t hid1_val = mfspr(SPRN_HID1);
 	uint64_t hid4_val = mfspr(SPRN_HID4);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 5b150f0c5df9..13c6e200b24e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
 	args.token = rtas_token("set-indicator");
 	if (args.token == RTAS_UNKNOWN_SERVICE)
 		return;
+	args.token = cpu_to_be32(args.token);
 	args.nargs = cpu_to_be32(3);
 	args.nret = cpu_to_be32(1);
 	args.rets = &args.args[3];
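
The xmon fix above is an instance of a common endianness bug: one field of a firmware-visible structure left in CPU byte order while its neighbours are converted. A self-contained sketch of the pattern, with a hypothetical rtas_args_model struct and a portable cpu_to_be32() stand-in (the bswap builtin is a GCC/Clang assumption):

/* Model of the bug class: every field of a big-endian interface
 * block must be converted, including the token. */
#include <stdint.h>
#include <stdio.h>

static uint32_t cpu_to_be32(uint32_t v)
{
	/* detect host endianness at runtime; swap only on LE hosts */
	const union { uint16_t u; uint8_t b; } probe = { .u = 1 };

	return probe.b ? __builtin_bswap32(v) : v;
}

struct rtas_args_model {	/* hypothetical, for illustration only */
	uint32_t token;
	uint32_t nargs;
	uint32_t nret;
};

int main(void)
{
	struct rtas_args_model args;
	uint32_t token = 0x2a;	/* value returned by a token lookup */

	args.token = cpu_to_be32(token);	/* the previously missing swap */
	args.nargs = cpu_to_be32(3);
	args.nret  = cpu_to_be32(1);
	printf("token on the wire: 0x%08x\n", args.token);
	return 0;
}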
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 3e20383d0921..58fae7d098cf 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -4,10 +4,6 @@
 /* Caches aren't brain-dead on the s390. */
 #include <asm-generic/cacheflush.h>
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 646db9c467d1..5fce52cf0e57 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQFD
 	select HAVE_KVM_IRQ_ROUTING
+	select SRCU
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 811937bb90be..9065d5aa3932 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 			do_no_context(regs);
 		else
 			pagefault_out_of_memory();
+	} else if (fault & VM_FAULT_SIGSEGV) {
+		/* Kernel mode? Handle exceptions or die */
+		if (!user_mode(regs))
+			do_no_context(regs);
+		else
+			do_sigsegv(regs, SEGV_MAPERR);
 	} else if (fault & VM_FAULT_SIGBUS) {
 		/* Kernel mode? Handle exceptions or die */
 		if (!user_mode(regs))
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 52238983527d..6860beb2a280 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -114,6 +114,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 541dc6101508..a58fec9b55e0 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	} else {
 		if (fault & VM_FAULT_SIGBUS)
 			do_sigbus(regs, error_code, address);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area(regs, error_code, address);
 		else
 			BUG();
 	}
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index 38965379e350..68513c41e10d 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -74,11 +74,6 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-/* internal debugging function */
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _SPARC64_CACHEFLUSH_H */
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 908e8c17c902..70d817154fe8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -249,6 +249,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 18fcd7167095..479823249429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -446,6 +446,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index 2298cb1daff7..1e968f7550dc 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
 	depends on HAVE_KVM && MODULES
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select SRCU
 	---help---
 	  Support hosting paravirtualized guest machines.
 
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 565e25a98334..0f61a73534e6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -442,6 +442,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5678c3571e7c..209617302df8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -80,6 +80,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM) {
 			goto out_of_memory;
+		} else if (fault & VM_FAULT_SIGSEGV) {
+			goto out;
 		} else if (fault & VM_FAULT_SIGBUS) {
 			err = -EACCES;
 			goto out;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0dc9d0144a27..85588a891c06 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -138,6 +138,7 @@ config X86
 	select HAVE_ACPI_APEI_NMI		if ACPI
 	select ACPI_LEGACY_TABLES_LOOKUP	if ACPI
 	select X86_FEATURE_NAMES		if PROC_FS
+	select SRCU
 
 config INSTRUCTION_DECODER
 	def_bool y
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index aede2c347bde..90a54851aedc 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -174,6 +174,7 @@
 #define X86_FEATURE_TOPOEXT	( 6*32+22) /* topology extensions CPUID leafs */
 #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
 #define X86_FEATURE_PERFCTR_NB	( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT	(6*32+26) /* data breakpoint extension */
 #define X86_FEATURE_PERFCTR_L2	( 6*32+28) /* L2 performance counter extensions */
 
 /*
@@ -388,6 +389,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
 #define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 #define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)
+#define cpu_has_bpext		boot_cpu_has(X86_FEATURE_BPEXT)
 
 #if __GNUC__ >= 4
 extern void warn_pre_alternatives(void);
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 61fd18b83b6c..12cb66f6d3a5 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -114,5 +114,10 @@ static inline void debug_stack_usage_inc(void) { }
 static inline void debug_stack_usage_dec(void) { }
 #endif /* X86_64 */
 
+#ifdef CONFIG_CPU_SUP_AMD
+extern void set_dr_addr_mask(unsigned long mask, int dr);
+#else
+static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
+#endif
 
 #endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index ef1c4d2d41ec..6c98be864a75 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -12,6 +12,7 @@
  */
 struct arch_hw_breakpoint {
 	unsigned long address;
+	unsigned long mask;
 	u8 len;
 	u8 type;
 };
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c8aa65d56027..d979e5abae55 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -251,6 +251,10 @@
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL		0xc0010230
 #define MSR_F16H_L2I_PERF_CTR		0xc0010231
+#define MSR_F16H_DR1_ADDR_MASK		0xc0011019
+#define MSR_F16H_DR2_ADDR_MASK		0xc001101a
+#define MSR_F16H_DR3_ADDR_MASK		0xc001101b
+#define MSR_F16H_DR0_ADDR_MASK		0xc0011027
 
 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL		0xc0010200
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15c5df92f74e..a220239cea65 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -869,3 +869,22 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 
 	return false;
 }
+
+void set_dr_addr_mask(unsigned long mask, int dr)
+{
+	if (!cpu_has_bpext)
+		return;
+
+	switch (dr) {
+	case 0:
+		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
+		break;
+	case 1:
+	case 2:
+	case 3:
+		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
+		break;
+	default:
+		break;
+	}
+}
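
The MSR arithmetic above works because the DR1–DR3 mask MSRs are contiguous (0xc0011019–0xc001101b) while DR0's sits apart at 0xc0011027, hence the MSR_F16H_DR1_ADDR_MASK - 1 + dr computation. A userspace model of the selection logic, with wrmsr() replaced by printf():

/* Model of the MSR selection in set_dr_addr_mask(); MSR numbers come
 * from the hunk above, the privileged write is simulated. */
#include <stdio.h>
#include <stdint.h>

#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
#define MSR_F16H_DR1_ADDR_MASK 0xc0011019

static void model_set_dr_addr_mask(unsigned long mask, int dr)
{
	uint32_t msr;

	switch (dr) {
	case 0:
		msr = MSR_F16H_DR0_ADDR_MASK;	/* DR0's MSR sits apart */
		break;
	case 1: case 2: case 3:
		msr = MSR_F16H_DR1_ADDR_MASK - 1 + dr; /* DR1..DR3 contiguous */
		break;
	default:
		return;
	}
	printf("wrmsr(0x%x, 0x%lx)\n", (unsigned)msr, mask);
}

int main(void)
{
	for (int dr = 0; dr < 4; dr++)
		model_set_dr_addr_mask(0x3f, dr);
	return 0;
}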
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 15c29096136b..36a83617eb21 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -552,7 +552,7 @@ static int __init microcode_init(void)
 	int error;
 
 	if (paravirt_enabled() || dis_ucode_ldr)
-		return 0;
+		return -EINVAL;
 
 	if (c->x86_vendor == X86_VENDOR_INTEL)
 		microcode_ops = init_intel_microcode();
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 944bf019b74f..498b6d967138 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 55: /* 22nm Atom "Silvermont" */
+	case 76: /* 14nm Atom "Airmont" */
 	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 6e434f8e5fc8..c4bb8b8e5017 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
 	 * or use ldexp(count, -32).
 	 * Watts = Joules/Time delta
 	 */
-	return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+	return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
 }
 
 static u64 rapl_event_update(struct perf_event *event)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 10b8d3eaaf15..c635b8b49e93 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 	box->phys_id = phys_id;
 	box->pci_dev = pdev;
 	box->pmu = pmu;
-	uncore_box_init(box);
 	pci_set_drvdata(pdev, box);
 
 	raw_spin_lock(&uncore_box_lock);
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
 			pmu = &type->pmus[j];
 			box = *per_cpu_ptr(pmu->box, cpu);
 			/* called by uncore_cpu_init? */
-			if (box && box->phys_id >= 0) {
-				uncore_box_init(box);
+			if (box && box->phys_id >= 0)
 				continue;
-			}
 
 			for_each_online_cpu(k) {
 				exist = *per_cpu_ptr(pmu->box, k);
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
 			}
 		}
 
-		if (box) {
+		if (box)
 			box->phys_id = phys_id;
-			uncore_box_init(box);
-		}
 	}
 	}
 	return 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 863d9b02563e..6c8c1e7e69d8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
 	return box->pmu->type->num_counters;
 }
 
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+		if (box->pmu->type->ops->init_box)
+			box->pmu->type->ops->init_box(box);
+	}
+}
+
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
 	if (box->pmu->type->ops->disable_box)
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
 
 static inline void uncore_enable_box(struct intel_uncore_box *box)
 {
+	uncore_box_init(box);
+
 	if (box->pmu->type->ops->enable_box)
 		box->pmu->type->ops->enable_box(box);
 }
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
 	return box->pmu->type->ops->read_counter(box, event);
 }
 
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
-	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
-		if (box->pmu->type->ops->init_box)
-			box->pmu->type->ops->init_box(box);
-	}
-}
-
 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 {
 	return (box->phys_id < 0);
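
Moving uncore_box_init() into uncore_enable_box() makes box initialization lazy and idempotent: test_and_set_bit() on the flags word guarantees the init_box callback runs at most once, so the explicit init calls removed from the probe and CPU-starting paths are no longer needed. A toy C11 model of that once-only guard, using atomic_flag as the stand-in:

/* Once-only lazy init guarded by an atomic test-and-set, modelling
 * what uncore_box_init() does with the box flags. Build with -std=c11. */
#include <stdatomic.h>
#include <stdio.h>

struct box {
	atomic_flag initiated;
	int init_count;
};

static void box_init(struct box *b)
{
	/* the first caller wins; later callers see the flag already set */
	if (!atomic_flag_test_and_set(&b->initiated))
		b->init_count++;
}

static void enable_box(struct box *b)
{
	box_init(b);	/* safe to call on every enable */
	/* ...program the hardware... */
}

int main(void)
{
	struct box b = { ATOMIC_FLAG_INIT, 0 };

	enable_box(&b);
	enable_box(&b);
	printf("init ran %d time(s)\n", b.init_count);	/* prints 1 */
	return 0;
}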
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 3d5fb509bdeb..7114ba220fd4 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -126,6 +126,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 	*dr7 |= encode_dr7(i, info->len, info->type);
 
 	set_debugreg(*dr7, 7);
+	if (info->mask)
+		set_dr_addr_mask(info->mask, i);
 
 	return 0;
 }
@@ -161,29 +163,8 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	*dr7 &= ~__encode_dr7(i, info->len, info->type);
 
 	set_debugreg(*dr7, 7);
-}
-
-static int get_hbp_len(u8 hbp_len)
-{
-	unsigned int len_in_bytes = 0;
-
-	switch (hbp_len) {
-	case X86_BREAKPOINT_LEN_1:
-		len_in_bytes = 1;
-		break;
-	case X86_BREAKPOINT_LEN_2:
-		len_in_bytes = 2;
-		break;
-	case X86_BREAKPOINT_LEN_4:
-		len_in_bytes = 4;
-		break;
-#ifdef CONFIG_X86_64
-	case X86_BREAKPOINT_LEN_8:
-		len_in_bytes = 8;
-		break;
-#endif
-	}
-	return len_in_bytes;
+	if (info->mask)
+		set_dr_addr_mask(0, i);
 }
 
 /*
@@ -196,7 +177,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
 	va = info->address;
-	len = get_hbp_len(info->len);
+	len = bp->attr.bp_len;
 
 	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -277,6 +258,8 @@ static int arch_build_bp_info(struct perf_event *bp)
 	}
 
 	/* Len */
+	info->mask = 0;
+
 	switch (bp->attr.bp_len) {
 	case HW_BREAKPOINT_LEN_1:
 		info->len = X86_BREAKPOINT_LEN_1;
@@ -293,11 +276,17 @@ static int arch_build_bp_info(struct perf_event *bp)
 		break;
 #endif
 	default:
-		return -EINVAL;
+		if (!is_power_of_2(bp->attr.bp_len))
+			return -EINVAL;
+		if (!cpu_has_bpext)
+			return -EOPNOTSUPP;
+		info->mask = bp->attr.bp_len - 1;
+		info->len = X86_BREAKPOINT_LEN_1;
 	}
 
 	return 0;
 }
+
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
@@ -312,11 +301,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	if (ret)
 		return ret;
 
-	ret = -EINVAL;
-
 	switch (info->len) {
 	case X86_BREAKPOINT_LEN_1:
 		align = 0;
+		if (info->mask)
+			align = info->mask;
 		break;
 	case X86_BREAKPOINT_LEN_2:
 		align = 1;
@@ -330,7 +319,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 		break;
 #endif
 	default:
-		return ret;
+		WARN_ON_ONCE(1);
 	}
 
 	/*
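
With the BPEXT path above, any power-of-two bp_len beyond the fixed 1/2/4/8 sizes becomes a don't-care mask over the low address bits (mask = bp_len - 1) paired with a 1-byte base breakpoint, so one debug register can watch a whole aligned range. A short sketch of the mapping; the 4 KiB length is just an example:

/* Sketch of the bp_len -> address-mask mapping added in the default
 * branch above (power-of-two lengths only); purely illustrative. */
#include <stdio.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long bp_len = 4096;	/* watch a whole 4 KiB range */

	if (!is_power_of_2(bp_len))
		return 1;	/* the kernel code returns -EINVAL here */

	/* mask of low don't-care address bits; 4096 -> 0xfff */
	unsigned long mask = bp_len - 1;
	printf("mask = 0x%lx\n", mask);
	return 0;
}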
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f9d16ff56c6b..7dc7ba577ecd 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -40,6 +40,7 @@ config KVM
 	select HAVE_KVM_MSI
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_VFIO
+	select SRCU
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4f0c0b954686..d52dcf0776ea 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
 		u16 cid, lid;
 		u32 ldr, aid;
 
+		if (!kvm_apic_present(vcpu))
+			continue;
+
 		aid = kvm_apic_id(apic);
 		ldr = kvm_apic_get_reg(apic, APIC_LDR);
 		cid = apic_cluster_id(new, ldr);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 38dcec403b46..e3ff27a5b634 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
 			     VM_FAULT_HWPOISON_LARGE))
 			do_sigbus(regs, error_code, address, fault);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area_nosemaphore(regs, error_code, address);
 		else
 			BUG();
 	}
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7b20bccf3648..2fb384724ebb 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
 		},
 	},
+	{
+		.callback = set_scan_all,
+		.ident = "Stratus/NEC ftServer",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
+		},
+	},
+	{
+		.callback = set_scan_all,
+		.ident = "Stratus/NEC ftServer",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
+		},
+	},
 	{}
 };
 
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 44b9271580b5..852aa4c92da0 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -293,7 +293,6 @@ static void mrst_power_off_unused_dev(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
 
 /*
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b57c4f91f487..9e3571a6535c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -117,6 +117,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 6774a0e69867..1630a20d5dcf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -15,26 +15,6 @@
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
-	struct request_queue *q;
-
-	q = container_of(kobj, struct request_queue, mq_kobj);
-	free_percpu(q->queue_ctx);
-}
-
-static void blk_mq_ctx_release(struct kobject *kobj)
-{
-	struct blk_mq_ctx *ctx;
-
-	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-	kobject_put(&ctx->queue->mq_kobj);
-}
-
-static void blk_mq_hctx_release(struct kobject *kobj)
-{
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
-	kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {
@@ -338,13 +318,13 @@ static struct kobj_type blk_mq_ktype = {
 static struct kobj_type blk_mq_ctx_ktype = {
 	.sysfs_ops	= &blk_mq_sysfs_ops,
 	.default_attrs	= default_ctx_attrs,
-	.release	= blk_mq_ctx_release,
+	.release	= blk_mq_sysfs_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
 	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
 	.default_attrs	= default_hw_ctx_attrs,
-	.release	= blk_mq_hctx_release,
+	.release	= blk_mq_sysfs_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -375,7 +355,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 		return ret;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
-		kobject_get(&q->mq_kobj);
 		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
 		if (ret)
 			break;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9ee3b87c4498..2390c5541e71 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1867,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * It is the actual release handler for mq, but we do it from
+ * request queue's release handler for avoiding use-after-free
+ * and headache because q->mq_kobj shouldn't have been introduced,
+ * but we can't group ctx/kctx kobj without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	/* hctx kobj stays in hctx */
+	queue_for_each_hw_ctx(q, hctx, i)
+		kfree(hctx);
+
+	kfree(q->queue_hw_ctx);
+
+	/* ctx kobj stays in queue_ctx */
+	free_percpu(q->queue_ctx);
+}
+
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct blk_mq_hw_ctx **hctxs;
@@ -2000,10 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->mq_usage_counter);
 
-	kfree(q->queue_hw_ctx);
 	kfree(q->mq_map);
 
-	q->queue_hw_ctx = NULL;
 	q->mq_map = NULL;
 
 	mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 4f4f943c22c3..6a48c4c0d8a2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
 
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
+void blk_mq_release(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 935ea2aa0730..faaf36ade7eb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
 
 	if (!q->mq_ops)
 		blk_free_flush_queue(q->fq);
+	else
+		blk_mq_release(q);
 
 	blk_trace_shutdown(q);
 
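
The block-mq changes above all enforce one ordering rule: memory shared by the ctx/hctx kobjects may only be freed from the queue's own release handler, that is, after the last kobject reference is dropped. A toy refcount model of the rule; the names echo the kernel's but the logic is deliberately simplified:

/* Toy model of the fix: memory reachable through child objects is
 * freed only in the parent's release callback, after the final put. */
#include <stdio.h>
#include <stdlib.h>

struct queue {
	int refs;
	int *queue_ctx;		/* shared state, like q->queue_ctx */
};

static void queue_release(struct queue *q)
{
	free(q->queue_ctx);	/* safe: no user can still reach it */
	printf("released\n");
}

static void queue_put(struct queue *q)
{
	if (--q->refs == 0)
		queue_release(q);
}

int main(void)
{
	struct queue q = { .refs = 2, .queue_ctx = malloc(64) };

	queue_put(&q);	/* e.g. a sysfs user goes away */
	queue_put(&q);	/* the final put triggers the free */
	return 0;
}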
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 694d5a70d6ce..c70d6e45dc10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
 
-source "drivers/soc/Kconfig"
-
 source "drivers/clk/Kconfig"
 
 source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 4f3febf8a589..e75737fd7eef 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1,7 +1,7 @@
 /*
  * ACPI support for Intel Lynxpoint LPSS.
  *
- * Copyright (C) 2013, 2014, Intel Corporation
+ * Copyright (C) 2013, Intel Corporation
  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER		BIT(2)
 #define LPSS_LTR			BIT(3)
 #define LPSS_SAVE_CTX			BIT(4)
-#define LPSS_DEV_PROXY			BIT(5)
-#define LPSS_PROXY_REQ			BIT(6)
 
 struct lpss_private_data;
 
@@ -72,10 +70,8 @@ struct lpss_device_desc {
 	void (*setup)(struct lpss_private_data *pdata);
 };
 
-static struct device *proxy_device;
-
 static struct lpss_device_desc lpss_dma_desc = {
-	.flags = LPSS_CLK | LPSS_PROXY_REQ,
+	.flags = LPSS_CLK,
 };
 
 struct lpss_private_data {
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 };
 
 static struct lpss_device_desc byt_uart_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-			LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.prv_offset = 0x800,
 	.setup = lpss_uart_setup,
 };
 
 static struct lpss_device_desc byt_spi_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-			LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.prv_offset = 0x400,
 };
 
 static struct lpss_device_desc byt_sdio_dev_desc = {
-	.flags = LPSS_CLK | LPSS_DEV_PROXY,
+	.flags = LPSS_CLK,
 };
 
 static struct lpss_device_desc byt_i2c_dev_desc = {
-	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_SAVE_CTX,
 	.prv_offset = 0x800,
 	.setup = byt_i2c_setup,
 };
@@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 	adev->driver_data = pdata;
 	pdev = acpi_create_platform_device(adev);
 	if (!IS_ERR_OR_NULL(pdev)) {
-		if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
-			proxy_device = &pdev->dev;
 		return 1;
 	}
 
@@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_save_ctx(dev, pdata);
 
-	ret = acpi_dev_runtime_suspend(dev);
-	if (ret)
-		return ret;
-
-	if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
-		return pm_runtime_put_sync_suspend(proxy_device);
-
-	return 0;
+	return acpi_dev_runtime_suspend(dev);
 }
 
 static int acpi_lpss_runtime_resume(struct device *dev)
@@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;
 
-	if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
-		ret = pm_runtime_get_sync(proxy_device);
-		if (ret)
-			return ret;
-	}
-
 	ret = acpi_dev_runtime_resume(dev);
 	if (ret)
 		return ret;
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 0da5865df5b1..beb8b27d4621 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -51,9 +51,11 @@ struct regmap_async {
 struct regmap {
 	union {
 		struct mutex mutex;
-		spinlock_t spinlock;
+		struct {
+			spinlock_t spinlock;
+			unsigned long spinlock_flags;
+		};
 	};
-	unsigned long spinlock_flags;
 	regmap_lock lock;
 	regmap_unlock unlock;
 	void *lock_arg; /* This is passed to lock/unlock functions */
@@ -233,6 +235,10 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+					 const struct regmap_bus *bus,
+					 const struct regmap_config *config);
+
 extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index e4c45d2299c1..8d304e2a943d 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -74,8 +74,8 @@ static int regmap_ac97_reg_write(void *context, unsigned int reg,
 }
 
 static const struct regmap_bus ac97_regmap_bus = {
-	.reg_write = regmap_ac97_reg_write,
-	.reg_read = regmap_ac97_reg_read,
+	.reg_write	= regmap_ac97_reg_write,
+	.reg_read	= regmap_ac97_reg_read,
 };
 
 /**
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 053150a7f9f2..4b76e33110a2 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -14,6 +14,7 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 
+#include "internal.h"
 
 static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
 				      unsigned int *val)
@@ -87,6 +88,42 @@ static struct regmap_bus regmap_smbus_word = {
 	.reg_read = regmap_smbus_word_reg_read,
 };
 
+static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
+					  unsigned int *val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	if (reg > 0xff)
+		return -EINVAL;
+
+	ret = i2c_smbus_read_word_swapped(i2c, reg);
+	if (ret < 0)
+		return ret;
+
+	*val = ret;
+
+	return 0;
+}
+
+static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
+					   unsigned int val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	if (val > 0xffff || reg > 0xff)
+		return -EINVAL;
+
+	return i2c_smbus_write_word_swapped(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_word_swapped = {
+	.reg_write = regmap_smbus_word_write_swapped,
+	.reg_read = regmap_smbus_word_read_swapped,
+};
+
 static int regmap_i2c_write(void *context, const void *data, size_t count)
 {
 	struct device *dev = context;
@@ -180,7 +217,14 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
 	else if (config->val_bits == 16 && config->reg_bits == 8 &&
 		 i2c_check_functionality(i2c->adapter,
 					 I2C_FUNC_SMBUS_WORD_DATA))
-		return &regmap_smbus_word;
+		switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
+		case REGMAP_ENDIAN_LITTLE:
+			return &regmap_smbus_word;
+		case REGMAP_ENDIAN_BIG:
+			return &regmap_smbus_word_swapped;
+		default:		/* everything else is not supported */
+			break;
+		}
 	else if (config->val_bits == 8 && config->reg_bits == 8 &&
 		 i2c_check_functionality(i2c->adapter,
 					 I2C_FUNC_SMBUS_BYTE_DATA))
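
The new _swapped helpers exist because SMBus word transfers are little-endian on the wire, so a device with big-endian registers needs a byte swap on every access; the bus selection above picks the right variant from the declared value endianness. A sketch of the effect, with illustrative values:

/* Why a big-endian register map needs the _swapped SMBus helpers:
 * SMBus word data arrives low byte first. */
#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t wire = 0x3412;	/* device sent byte 0x12, then 0x34 */

	printf("little-endian device: 0x%04x\n", wire);		/* as-is */
	printf("big-endian device:    0x%04x\n", swab16(wire));	/* 0x1234 */
	return 0;
}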
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d2f8a818d200..f99b098ddabf 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -473,9 +473,9 @@ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
 		return REGMAP_ENDIAN_BIG;
 }
 
-static enum regmap_endian regmap_get_val_endian(struct device *dev,
-						const struct regmap_bus *bus,
-						const struct regmap_config *config)
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+					 const struct regmap_bus *bus,
+					 const struct regmap_config *config)
 {
 	struct device_node *np;
 	enum regmap_endian endian;
@@ -513,6 +513,7 @@ static enum regmap_endian regmap_get_val_endian(struct device *dev,
 	/* Use this if no other value was found */
 	return REGMAP_ENDIAN_BIG;
 }
+EXPORT_SYMBOL_GPL(regmap_get_val_endian);
 
 /**
  * regmap_init(): Initialise register map
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3ec85dfce124..8a86b62466f7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
  * If an image has a non-zero parent overlap, get a reference to its
  * parent.
  *
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened.  We
- * drop it again if there is no overlap.
- *
  * Returns true if the rbd device has a parent with a non-zero
  * overlap and a reference for it was successfully taken, or
  * false otherwise.
  */
 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
 {
-	int counter;
+	int counter = 0;
 
 	if (!rbd_dev->parent_spec)
 		return false;
 
-	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
-	if (counter > 0 && rbd_dev->parent_overlap)
-		return true;
-
-	/* Image was flattened, but parent is not yet torn down */
+	down_read(&rbd_dev->header_rwsem);
+	if (rbd_dev->parent_overlap)
+		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+	up_read(&rbd_dev->header_rwsem);
 
 	if (counter < 0)
 		rbd_warn(rbd_dev, "parent reference overflow");
 
-	return false;
+	return counter > 0;
 }
 
 /*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	 */
 	if (rbd_dev->parent_overlap) {
 		rbd_dev->parent_overlap = 0;
-		smp_mb();
 		rbd_dev_parent_put(rbd_dev);
 		pr_info("%s: clone image has been flattened\n",
 			rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	 * treat it specially.
 	 */
 	rbd_dev->parent_overlap = overlap;
-	smp_mb();
 	if (!overlap) {
 
 		/* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
 	struct rbd_image_header *header;
 
-	/* Drop parent reference unless it's already been done (or none) */
-
-	if (rbd_dev->parent_overlap)
-		rbd_dev_parent_put(rbd_dev);
+	rbd_dev_parent_put(rbd_dev);
 
 	/* Free dynamic fields from the header, then zero it out */
 
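
The rbd change replaces barrier-based coordination with plain reader/writer locking: the overlap test and the refcount bump now happen atomically with respect to flattening, which is why the smp_mb() calls and the racy re-check can go. A toy pthread model of the scheme; the names mirror the kernel's, the logic is deliberately simplified (build with -lpthread):

/* Reader takes the same rwlock the writer holds while zeroing the
 * overlap, instead of relying on barriers and a re-check. */
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t header_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long parent_overlap = 4096;
static int parent_ref;

static bool parent_get(void)
{
	int counter = 0;

	pthread_rwlock_rdlock(&header_rwsem);
	if (parent_overlap)	/* stable while the read lock is held */
		counter = ++parent_ref;
	pthread_rwlock_unlock(&header_rwsem);

	return counter > 0;
}

static void image_flattened(void)
{
	pthread_rwlock_wrlock(&header_rwsem);
	parent_overlap = 0;	/* no reader can observe a half state */
	pthread_rwlock_unlock(&header_rwsem);
}

int main(void)
{
	parent_get();		/* succeeds: overlap is non-zero */
	image_flattened();
	return parent_get();	/* 0: no reference after flatten */
}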
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 04645c09fe5e..9cd6968e2f92 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f)
 	__u32 c = f->pool[2], d = f->pool[3];
 
 	a += b;			c += d;
-	b = rol32(a, 6);	d = rol32(c, 27);
+	b = rol32(b, 6);	d = rol32(d, 27);
 	d ^= a;			b ^= c;
 
 	a += b;			c += d;
-	b = rol32(a, 16);	d = rol32(c, 14);
+	b = rol32(b, 16);	d = rol32(d, 14);
 	d ^= a;			b ^= c;
 
 	a += b;			c += d;
-	b = rol32(a, 6);	d = rol32(c, 27);
+	b = rol32(b, 6);	d = rol32(d, 27);
 	d ^= a;			b ^= c;
 
 	a += b;			c += d;
-	b = rol32(a, 16);	d = rol32(c, 14);
+	b = rol32(b, 16);	d = rol32(d, 14);
 	d ^= a;			b ^= c;
 
 	f->pool[0] = a;  f->pool[1] = b;
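
The fast_mix() fix matters because the old code rotated fresh copies of a and c into b and d, so the previous b and d values never fed back into the pool, weakening the mixing. One round of both variants side by side, as a standalone sketch:

/* One mixer round before and after the fix; with the old code the
 * prior values of b and d are simply discarded. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t w, unsigned s)
{
	return (w << s) | (w >> (32 - s));
}

int main(void)
{
	uint32_t a = 1, b = 2, c = 3, d = 4;

	/* buggy: b and d are overwritten with rotated copies of a and c */
	uint32_t a1 = a + b, c1 = c + d;
	uint32_t b1 = rol32(a1, 6), d1 = rol32(c1, 27);	/* old b, d lost */
	d1 ^= a1; b1 ^= c1;

	/* fixed: b and d are rotated in place, then mixed with a and c */
	uint32_t a2 = a + b, c2 = c + d;
	uint32_t b2 = rol32(b, 6), d2 = rol32(d, 27);
	d2 ^= a2; b2 ^= c2;

	printf("buggy: b=%08x d=%08x\nfixed: b=%08x d=%08x\n",
	       b1, d1, b2, d2);
	return 0;
}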
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 3f44f292d066..91f86131bb7a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -13,6 +13,7 @@ config COMMON_CLK
 	bool
 	select HAVE_CLK_PREPARE
 	select CLKDEV_LOOKUP
+	select SRCU
 	---help---
 	  The common clock framework is a single definition of struct
 	  clk, useful across many platforms, as well as an
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 29b2ef5a68b9..a171fef2c2b6 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling"
 
 config CPU_FREQ
 	bool "CPU Frequency scaling"
+	select SRCU
 	help
 	  CPU Frequency scaling allows you to change the clock speed of
 	  CPUs on the fly. This is a nice method to save power, because
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index faf4e70c42e0..3891f6781298 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,5 +1,6 @@
 menuconfig PM_DEVFREQ
 	bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+	select SRCU
 	help
 	  A device may have a list of frequencies and voltages available.
 	  devfreq, a generic DVFS framework can be registered for a device
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index da9c316059bc..eea5d7e578c9 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
 		client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
 	} else {
 		pdata = dev_get_platdata(&client->dev);
-		if (!pdata || !gpio_is_valid(pdata->base)) {
-			dev_dbg(&client->dev, "invalid platform data\n");
-			return -EINVAL;
+		if (!pdata) {
+			pdata = devm_kzalloc(&client->dev,
+					sizeof(struct mcp23s08_platform_data),
+					GFP_KERNEL);
+			pdata->base = -1;
 		}
 	}
 
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
 	} else {
 		type = spi_get_device_id(spi)->driver_data;
 		pdata = dev_get_platdata(&spi->dev);
-		if (!pdata || !gpio_is_valid(pdata->base)) {
-			dev_dbg(&spi->dev,
-				"invalid or missing platform data\n");
-			return -EINVAL;
+		if (!pdata) {
+			pdata = devm_kzalloc(&spi->dev,
+				       sizeof(struct mcp23s08_platform_data),
+				       GFP_KERNEL);
+			pdata->base = -1;
 		}
 
 		for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 30646cfe0efa..f476ae2eb0b3 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -88,6 +88,8 @@ struct gpio_bank {
 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
 #define LINE_USED(line, offset) (line & (BIT(offset)))
 
+static void omap_gpio_unmask_irq(struct irq_data *d);
+
 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
 {
 	return bank->chip.base + gpio_irq;
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
 	return readl_relaxed(reg) & mask;
 }
 
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
+			       unsigned offset)
+{
+	if (!LINE_USED(bank->mod_usage, offset)) {
+		omap_enable_gpio_module(bank, offset);
+		omap_set_gpio_direction(bank, offset, 1);
+	}
+	bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+}
+
 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 {
 	struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
506 spin_lock_irqsave(&bank->lock, flags); 518 spin_lock_irqsave(&bank->lock, flags);
507 offset = GPIO_INDEX(bank, gpio); 519 offset = GPIO_INDEX(bank, gpio);
508 retval = omap_set_gpio_triggering(bank, offset, type); 520 retval = omap_set_gpio_triggering(bank, offset, type);
509 if (!LINE_USED(bank->mod_usage, offset)) { 521 omap_gpio_init_irq(bank, gpio, offset);
510 omap_enable_gpio_module(bank, offset); 522 if (!omap_gpio_is_input(bank, BIT(offset))) {
511 omap_set_gpio_direction(bank, offset, 1);
512 } else if (!omap_gpio_is_input(bank, BIT(offset))) {
513 spin_unlock_irqrestore(&bank->lock, flags); 523 spin_unlock_irqrestore(&bank->lock, flags);
514 return -EINVAL; 524 return -EINVAL;
515 } 525 }
516
517 bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
518 spin_unlock_irqrestore(&bank->lock, flags); 526 spin_unlock_irqrestore(&bank->lock, flags);
519 527
520 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 528 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -792,6 +800,24 @@ exit:
792 pm_runtime_put(bank->dev); 800 pm_runtime_put(bank->dev);
793} 801}
794 802
803static unsigned int omap_gpio_irq_startup(struct irq_data *d)
804{
805 struct gpio_bank *bank = omap_irq_data_get_bank(d);
806 unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
807 unsigned long flags;
808 unsigned offset = GPIO_INDEX(bank, gpio);
809
810 if (!BANK_USED(bank))
811 pm_runtime_get_sync(bank->dev);
812
813 spin_lock_irqsave(&bank->lock, flags);
814 omap_gpio_init_irq(bank, gpio, offset);
815 spin_unlock_irqrestore(&bank->lock, flags);
816 omap_gpio_unmask_irq(d);
817
818 return 0;
819}
820
795static void omap_gpio_irq_shutdown(struct irq_data *d) 821static void omap_gpio_irq_shutdown(struct irq_data *d)
796{ 822{
797 struct gpio_bank *bank = omap_irq_data_get_bank(d); 823 struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
1181 if (!irqc) 1207 if (!irqc)
1182 return -ENOMEM; 1208 return -ENOMEM;
1183 1209
1210 irqc->irq_startup = omap_gpio_irq_startup,
1184 irqc->irq_shutdown = omap_gpio_irq_shutdown, 1211 irqc->irq_shutdown = omap_gpio_irq_shutdown,
1185 irqc->irq_ack = omap_gpio_ack_irq, 1212 irqc->irq_ack = omap_gpio_ack_irq,
1186 irqc->irq_mask = omap_gpio_mask_irq, 1213 irqc->irq_mask = omap_gpio_mask_irq,
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index f62aa115d79a..7722ed53bd65 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
 	if (tdev != NULL) {
 		status = sysfs_create_link(&dev->kobj, &tdev->kobj,
 					   name);
+		put_device(tdev);
 	} else {
 		status = -ENODEV;
 	}
@@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
 	}
 
 	status = sysfs_set_active_low(desc, dev, value);
-
+	put_device(dev);
 unlock:
 	mutex_unlock(&sysfs_lock);
 
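Both hunks above pair a device lookup with put_device() so the reference taken by the lookup is dropped once the sysfs work is done. A toy sketch of that get/put balance, assuming illustrative names in place of struct device:

    /* Sketch only: a toy refcount stands in for the kernel device model. */
    #include <assert.h>
    #include <stdio.h>

    struct toy_device {
        int refcount;
    };

    static struct toy_device *toy_get(struct toy_device *dev)
    {
        if (dev)
            dev->refcount++;    /* lookups return a referenced object */
        return dev;
    }

    static void toy_put(struct toy_device *dev)
    {
        if (dev)
            dev->refcount--;    /* every successful lookup needs a matching put */
    }

    int main(void)
    {
        struct toy_device dev = { .refcount = 1 };
        struct toy_device *found = toy_get(&dev);

        /* ... use the device: create a sysfs link, flip a flag ... */
        toy_put(found);             /* the fix: drop the lookup reference */

        assert(dev.refcount == 1);  /* balanced again, no leak */
        printf("refcount = %d\n", dev.refcount);
        return 0;
    }
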
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 633532a2e7ec..25bc47f3c1cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->shared_resources = *gpu_resources;
 
 	/* calculate max size of mqds needed for queues */
-	size = max_num_of_processes *
-		max_num_of_queues_per_process *
-		kfd->device_info->mqd_size_aligned;
+	size = max_num_of_queues_per_device *
+			kfd->device_info->mqd_size_aligned;
 
 	/* add another 512KB for all other allocations on gart */
 	size += 512 * 1024;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 30c8fda9622e..0fd592799d58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
 	mutex_lock(&dqm->lock);
 
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		mutex_unlock(&dqm->lock);
+		return -EPERM;
+	}
+
 	if (list_empty(&qpd->queues_list)) {
 		retval = allocate_vmid(dqm, qpd, q);
 		if (retval != 0) {
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	list_add(&q->list, &qpd->queues_list);
 	dqm->queue_count++;
 
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	mutex_unlock(&dqm->lock);
 	return 0;
 }
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 	if (list_empty(&qpd->queues_list))
 		deallocate_vmid(dqm, qpd, q);
 	dqm->queue_count--;
+
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 out:
 	mutex_unlock(&dqm->lock);
 	return retval;
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
 	for (i = 0; i < pipes_num; i++) {
 		inx = i + first_pipe;
+		/*
+		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
+		 * space in GTT for pipelines we don't initialize
+		 */
 		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
 		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
 		/* = log2(bytes/4)-1 */
-		kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
 				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
 	}
 
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
 
 	pr_debug("kfd: In %s\n", __func__);
 
-	retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 	if (retval != 0)
 		return retval;
 
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	pr_debug("kfd: In func %s\n", __func__);
 
 	mutex_lock(&dqm->lock);
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		mutex_unlock(&dqm->lock);
+		return -EPERM;
+	}
+
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	list_add(&kq->list, &qpd->priv_queue_list);
 	dqm->queue_count++;
 	qpd->is_debug = true;
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	dqm->queue_count--;
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, false);
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type.
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
 	mutex_unlock(&dqm->lock);
 }
 
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
 	mutex_lock(&dqm->lock);
 
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		retval = -EPERM;
+		goto out;
+	}
+
 	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
 	if (mqd == NULL) {
 		mutex_unlock(&dqm->lock);
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 		retval = execute_queues_cpsch(dqm, false);
 	}
 
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 out:
 	mutex_unlock(&dqm->lock);
 	return retval;
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	mutex_unlock(&dqm->lock);
 
 	return 0;
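The queue-manager hunks above all revolve around one device-wide counter: every create path checks it against max_num_of_queues_per_device under dqm->lock and fails with -EPERM at the limit, and every destroy path decrements it. A minimal sketch of that bounded-counter pattern, using pthreads in place of the kernel mutex and illustrative names:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    #define MAX_QUEUES_PER_DEVICE 4096

    static pthread_mutex_t dqm_lock = PTHREAD_MUTEX_INITIALIZER;
    static int total_queue_count;

    static int create_queue(void)
    {
        pthread_mutex_lock(&dqm_lock);
        if (total_queue_count >= MAX_QUEUES_PER_DEVICE) {
            pthread_mutex_unlock(&dqm_lock);
            return -EPERM;      /* refuse instead of over-allocating */
        }
        /* ... allocate the queue while still holding the lock ... */
        total_queue_count++;    /* counted whether or not it is active */
        pthread_mutex_unlock(&dqm_lock);
        return 0;
    }

    static void destroy_queue(void)
    {
        pthread_mutex_lock(&dqm_lock);
        total_queue_count--;    /* every destroy path must decrement */
        pthread_mutex_unlock(&dqm_lock);
    }

    int main(void)
    {
        if (create_queue() == 0) {
            printf("queues in flight: %d\n", total_queue_count);
            destroy_queue();
        }
        return 0;
    }
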
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8ae35..52035bf0c1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -130,6 +130,7 @@ struct device_queue_manager {
 	struct list_head	queues;
 	unsigned int		processes_count;
 	unsigned int		queue_count;
+	unsigned int		total_queue_count;
 	unsigned int		next_pipe_to_allocate;
 	unsigned int		*allocated_queues;
 	unsigned int		vmid_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 95d5af138e6e..1c385c23dd0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
 MODULE_PARM_DESC(sched_policy,
 	"Kernel cmdline parameter that defines the amdkfd scheduling policy");
 
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
-	"Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
-	"Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
 bool kgd2kfd_init(unsigned interface_version,
 		  const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
 	}
 
 	/* Verify module parameters */
-	if ((max_num_of_processes < 0) ||
-		(max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
-		pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
-		return -1;
-	}
-
-	if ((max_num_of_queues_per_process < 0) ||
-		(max_num_of_queues_per_process >
-		KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
-		pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+	if ((max_num_of_queues_per_device < 1) ||
+		(max_num_of_queues_per_device >
+		KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+		pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
 		return -1;
 	}
 
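The hunk above validates the new tunable once at module load and refuses out-of-range values. A small sketch of the same init-time range check, with illustrative names and limits:

    #include <stdio.h>

    #define QUEUES_MIN 1
    #define QUEUES_MAX (512 * 1024)

    static int max_queues = 4096;   /* module_param()-style tunable */

    static int module_init_check(void)
    {
        if (max_queues < QUEUES_MIN || max_queues > QUEUES_MAX) {
            fprintf(stderr, "max_queues must be between %d and %d\n",
                    QUEUES_MIN, QUEUES_MAX);
            return -1;  /* fail the load rather than limp along */
        }
        return 0;
    }

    int main(void)
    {
        return module_init_check() ? 1 : 0;
    }
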
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 4c25ef504f79..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
 
 int kfd_pasid_init(void)
 {
-	pasid_limit = max_num_of_processes;
+	pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
 
 	pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
 	if (!pasid_bitmap)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b3dc13c83169..96dc10e8904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
 #define kfd_alloc_struct(ptr_to_struct)	\
 	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
 
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
 #define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
 
 /*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
  */
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
 
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE	\
+	(KFD_MAX_NUM_OF_PROCESSES *		\
+			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 
 #define KFD_KERNEL_QUEUE_SIZE 2048
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 47526780d736..2fda1927bff7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
54 pr_debug("kfd: in %s\n", __func__); 54 pr_debug("kfd: in %s\n", __func__);
55 55
56 found = find_first_zero_bit(pqm->queue_slot_bitmap, 56 found = find_first_zero_bit(pqm->queue_slot_bitmap,
57 max_num_of_queues_per_process); 57 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
58 58
59 pr_debug("kfd: the new slot id %lu\n", found); 59 pr_debug("kfd: the new slot id %lu\n", found);
60 60
61 if (found >= max_num_of_queues_per_process) { 61 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
62 pr_info("amdkfd: Can not open more queues for process with pasid %d\n", 62 pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
63 pqm->process->pasid); 63 pqm->process->pasid);
64 return -ENOMEM; 64 return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
76 76
77 INIT_LIST_HEAD(&pqm->queues); 77 INIT_LIST_HEAD(&pqm->queues);
78 pqm->queue_slot_bitmap = 78 pqm->queue_slot_bitmap =
79 kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, 79 kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
80 BITS_PER_BYTE), GFP_KERNEL); 80 BITS_PER_BYTE), GFP_KERNEL);
81 if (pqm->queue_slot_bitmap == NULL) 81 if (pqm->queue_slot_bitmap == NULL)
82 return -ENOMEM; 82 return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
203 pqn->kq = NULL; 203 pqn->kq = NULL;
204 retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, 204 retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
205 &q->properties.vmid); 205 &q->properties.vmid);
206 pr_debug("DQM returned %d for create_queue\n", retval);
206 print_queue(q); 207 print_queue(q);
207 break; 208 break;
208 case KFD_QUEUE_TYPE_DIQ: 209 case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
222 } 223 }
223 224
224 if (retval != 0) { 225 if (retval != 0) {
225 pr_err("kfd: error dqm create queue\n"); 226 pr_debug("Error dqm create queue\n");
226 goto err_create_queue; 227 goto err_create_queue;
227 } 228 }
228 229
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
241err_create_queue: 242err_create_queue:
242 kfree(pqn); 243 kfree(pqn);
243err_allocate_pqn: 244err_allocate_pqn:
245 /* check if queues list is empty unregister process from device */
244 clear_bit(*qid, pqm->queue_slot_bitmap); 246 clear_bit(*qid, pqm->queue_slot_bitmap);
247 if (list_empty(&pqm->queues))
248 dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
245 return retval; 249 return retval;
246} 250}
247 251
@@ -311,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
311 BUG_ON(!pqm); 315 BUG_ON(!pqm);
312 316
313 pqn = get_queue_by_qid(pqm, qid); 317 pqn = get_queue_by_qid(pqm, qid);
314 BUG_ON(!pqn); 318 if (!pqn) {
319 pr_debug("amdkfd: No queue %d exists for update operation\n",
320 qid);
321 return -EFAULT;
322 }
315 323
316 pqn->q->properties.queue_address = p->queue_address; 324 pqn->q->properties.queue_address = p->queue_address;
317 pqn->q->properties.queue_size = p->queue_size; 325 pqn->q->properties.queue_size = p->queue_size;
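The last hunk above downgrades a BUG_ON() on a user-supplied queue id to a plain error return. A sketch of that change in shape, with illustrative names:

    #include <errno.h>
    #include <stdio.h>

    struct queue { int qid; };

    static struct queue queues[4];  /* toy lookup table */

    static struct queue *get_queue_by_qid(int qid)
    {
        if (qid < 0 || qid >= 4)
            return NULL;
        return &queues[qid];
    }

    static int update_queue(int qid, int new_size)
    {
        struct queue *q = get_queue_by_qid(qid);

        if (!q)             /* was: BUG_ON(!q), fatal on bad user input */
            return -EFAULT; /* now: report the error and keep running */
        /* ... apply new_size to *q ... */
        (void)new_size;
        return 0;
    }

    int main(void)
    {
        printf("update_queue(99, 0) = %d\n", update_queue(99, 0));
        return 0;
    }
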
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index c2a1cba1e984..b9140032962d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -16,9 +16,12 @@
16#include "cirrus_drv.h" 16#include "cirrus_drv.h"
17 17
18int cirrus_modeset = -1; 18int cirrus_modeset = -1;
19int cirrus_bpp = 24;
19 20
20MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); 21MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
21module_param_named(modeset, cirrus_modeset, int, 0400); 22module_param_named(modeset, cirrus_modeset, int, 0400);
23MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
24module_param_named(bpp, cirrus_bpp, int, 0400);
22 25
23/* 26/*
24 * This is the generic driver code. This binds the driver to the drm core, 27 * This is the generic driver code. This binds the driver to the drm core,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 693a4565c4ff..705061537a27 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
 
 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+
+extern int cirrus_bpp;
+
 #endif				/* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 4c2d68e9102d..e4b976658087 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
 	const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
 	const int max_size = cdev->mc.vram_size;
 
+	if (bpp > cirrus_bpp)
+		return false;
 	if (bpp > 32)
 		return false;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 99d4a74ffeaf..61385f2298bf 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
 	int count;
 
 	/* Just add a static list of modes */
-	count = drm_add_modes_noedid(connector, 1280, 1024);
-	drm_set_preferred_mode(connector, 1024, 768);
+	if (cirrus_bpp <= 24) {
+		count = drm_add_modes_noedid(connector, 1280, 1024);
+		drm_set_preferred_mode(connector, 1024, 768);
+	} else {
+		count = drm_add_modes_noedid(connector, 800, 600);
+		drm_set_preferred_mode(connector, 800, 600);
+	}
 	return count;
 }
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cf775a4449c1..dc386ebe5193 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
+static void remove_from_modeset(struct drm_mode_set *set,
+		struct drm_connector *connector)
+{
+	int i, j;
+
+	for (i = 0; i < set->num_connectors; i++) {
+		if (set->connectors[i] == connector)
+			break;
+	}
+
+	if (i == set->num_connectors)
+		return;
+
+	for (j = i + 1; j < set->num_connectors; j++) {
+		set->connectors[j - 1] = set->connectors[j];
+	}
+	set->num_connectors--;
+
+	/* because i915 is pissy about this..
+	 * TODO maybe need to makes sure we set it back to !=NULL somewhere?
+	 */
+	if (set->num_connectors == 0)
+		set->fb = NULL;
+}
+
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector)
 {
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 	}
 	fb_helper->connector_count--;
 	kfree(fb_helper_connector);
+
+	/* also cleanup dangling references to the connector: */
+	for (i = 0; i < fb_helper->crtc_count; i++)
+		remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
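remove_from_modeset() above deletes a connector pointer from a packed array by shifting the tail down one slot. The same compaction in a self-contained sketch, with illustrative names:

    #include <stdio.h>

    static int remove_entry(void **arr, int *count, void *victim)
    {
        int i, j;

        for (i = 0; i < *count; i++)
            if (arr[i] == victim)
                break;
        if (i == *count)
            return 0;               /* not present, nothing to do */

        for (j = i + 1; j < *count; j++)
            arr[j - 1] = arr[j];    /* close the gap */
        (*count)--;
        return 1;
    }

    int main(void)
    {
        int a = 1, b = 2, c = 3;
        void *arr[3] = { &a, &b, &c };
        int count = 3;

        remove_entry(arr, &count, &b);
        printf("%d entries left, first is %d\n", count, *(int *)arr[0]);
        return 0;
    }
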
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
 struct tda998x_priv {
 	struct i2c_client *cec;
 	struct i2c_client *hdmi;
+	struct mutex mutex;
+	struct delayed_work dwork;
 	uint16_t rev;
 	uint8_t current_page;
 	int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
 	uint8_t addr = REG2ADDR(reg);
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	ret = i2c_master_send(client, &addr, sizeof(addr));
 	if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
 	if (ret < 0)
 		goto fail;
 
-	return ret;
+	goto out;
 
 fail:
 	dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 	return ret;
 }
 
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
 	buf[0] = REG2ADDR(reg);
 	memcpy(&buf[1], p, cnt);
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, cnt + 1);
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
 	uint8_t buf[] = {REG2ADDR(reg), val};
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
 	uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
 	reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }
 
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct tda998x_priv *priv =
+			container_of(dwork, struct tda998x_priv, dwork);
+
+	if (priv->encoder && priv->encoder->dev)
+		drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
 /*
  * only 2 interrupts may occur: screen plug/unplug and EDID read
  */
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
 		priv->wq_edid_wait = 0;
 		wake_up(&priv->wq_edid);
 	} else if (cec != 0) {			/* HPD change */
-		if (priv->encoder && priv->encoder->dev)
-			drm_helper_hpd_irq_event(priv->encoder->dev);
+		schedule_delayed_work(&priv->dwork, HZ/10);
 	}
 	return IRQ_HANDLED;
 }
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
 	/* disable all IRQs and free the IRQ handler */
 	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
 	reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-	if (priv->hdmi->irq)
+	if (priv->hdmi->irq) {
 		free_irq(priv->hdmi->irq, priv);
+		cancel_delayed_work_sync(&priv->dwork);
+	}
 
 	i2c_unregister_device(priv->cec);
 }
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	struct device_node *np = client->dev.of_node;
 	u32 video;
 	int rev_lo, rev_hi, ret;
+	unsigned short cec_addr;
 
 	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
 	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
 	priv->current_page = 0xff;
 	priv->hdmi = client;
-	priv->cec = i2c_new_dummy(client->adapter, 0x34);
+	/* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+	cec_addr = 0x34 + (client->addr & 0x03);
+	priv->cec = i2c_new_dummy(client->adapter, cec_addr);
 	if (!priv->cec)
 		return -ENODEV;
 
 	priv->dpms = DRM_MODE_DPMS_OFF;
 
+	mutex_init(&priv->mutex);	/* protect the page access */
+
 	/* wake up the device: */
 	cec_write(priv, REG_CEC_ENAMODS,
 		  CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	if (client->irq) {
 		int irqf_trigger;
 
-		/* init read EDID waitqueue */
+		/* init read EDID waitqueue and HDP work */
 		init_waitqueue_head(&priv->wq_edid);
+		INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
 
 		/* clear pending interrupts */
 		reg_read(priv, REG_INT_FLAGS_0);
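The TDA998x accesses its registers through a page-select register, so the set_page() write and the register transfer that follows must execute as one atomic unit; that is what the new priv->mutex guarantees, while the HPD interrupt is deferred to a delayed work item instead of calling into DRM from IRQ context. A sketch of the page-select locking alone, using a pthread mutex in place of the kernel one, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
    static int current_page = -1;

    static void select_page(int page)
    {
        current_page = page;        /* step 1: bank/page select */
    }

    static int read_reg(int page, int reg)
    {
        int val;

        pthread_mutex_lock(&page_lock);
        select_page(page);          /* another thread must not repage here */
        val = current_page * 256 + reg; /* step 2: the actual access */
        pthread_mutex_unlock(&page_lock);
        return val;
    }

    int main(void)
    {
        printf("reg = %d\n", read_reg(2, 5));
        return 0;
    }
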
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 574057cd1d09..7643300828c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
 		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 			dev_priv->pch_type = PCH_LPT;
 			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-			WARN_ON(!IS_HASWELL(dev));
-			WARN_ON(IS_HSW_ULT(dev));
-		} else if (IS_BROADWELL(dev)) {
-			dev_priv->pch_type = PCH_LPT;
-			dev_priv->pch_id =
-				INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-			DRM_DEBUG_KMS("This is Broadwell, assuming "
-				      "LynxPoint LP PCH\n");
+			WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+			WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
 		} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 			dev_priv->pch_type = PCH_LPT;
 			DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-			WARN_ON(!IS_HASWELL(dev));
-			WARN_ON(!IS_HSW_ULT(dev));
+			WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+			WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
 		} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
 			dev_priv->pch_type = PCH_SPT;
 			DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e9f891c432f8..9d7a7155bf02 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table {
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
-			 ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
-			 (INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
+			 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
 			 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 76354d3ba925..5f614828d365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	u32 size = i915_gem_obj_ggtt_size(obj);
 	uint64_t val;
 
+	/* Adjust fence size to match tiled area */
+	if (obj->tiling_mode != I915_TILING_NONE) {
+		uint32_t row_size = obj->stride *
+			(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+		size = (size / row_size) * row_size;
+	}
+
 	val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
 			 0xfffff000) << 32;
 	val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
 	for (i = 0; i < NUM_L3_SLICES(dev); i++)
 		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
-	/*
-	 * XXX: Contexts should only be initialized once. Doing a switch to the
-	 * default context switch however is something we'd like to do after
-	 * reset or thaw (the latter may not actually be necessary for HW, but
-	 * goes with our code better). Context switching requires rings (for
-	 * the do_switch), but before enabling PPGTT. So don't move this.
-	 */
-	ret = i915_gem_context_enable(dev_priv);
+	ret = i915_ppgtt_init_hw(dev);
 	if (ret && ret != -EIO) {
-		DRM_ERROR("Context enable failed %d\n", ret);
+		DRM_ERROR("PPGTT enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
-
-		return ret;
 	}
 
-	ret = i915_ppgtt_init_hw(dev);
+	ret = i915_gem_context_enable(dev_priv);
 	if (ret && ret != -EIO) {
-		DRM_ERROR("PPGTT enable failed %d\n", ret);
+		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
+
+		return ret;
 	}
 
 	return ret;
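The first hunk above truncates the i965 fence to a whole number of tile rows before computing the register value. A worked example of the rounding arithmetic, using the row sizes from the diff (32 lines per Y-tiled row, 8 per X-tiled row) and example stride and size values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t stride = 512;              /* bytes per scanline (example) */
        uint32_t size = 1050000;            /* object size in bytes (example) */
        uint32_t row_size = stride * 32;    /* one Y-tiled row = 16384 bytes */

        size = (size / row_size) * row_size;    /* round down to a row multiple */
        printf("fenced size = %u\n", size);     /* 1048576, i.e. 64 full rows */
        return 0;
    }
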
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..dfb783a8f2c3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 
 	WARN_ON(panel->backlight.max == 0);
 
-	if (panel->backlight.level == 0) {
+	if (panel->backlight.level <= panel->backlight.min) {
 		panel->backlight.level = panel->backlight.max;
 		if (panel->backlight.device)
 			panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index a0133c74f4cf..42cd0cffe210 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
 	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 		if (flags & R600_PTE_SYSTEM) {
 			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
 		} else if (flags & R600_PTE_VALID) {
 			value = addr;
 		} else {
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 4be2bb7cbef3..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
 	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 		if (flags & R600_PTE_SYSTEM) {
 			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
 		} else if (flags & R600_PTE_VALID) {
 			value = addr;
 		} else {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 		return r;
 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
 	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
 	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
 	return radeon_gart_table_ram_alloc(rdev);
 }
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
 	WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+	return addr;
+}
+
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr, uint32_t flags)
+			    uint64_t entry)
 {
 	u32 *gtt = rdev->gart.ptr;
-	gtt[i] = cpu_to_le32(lower_32_bits(addr));
+	gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)
 
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-			      uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-	void __iomem *ptr = rdev->gart.ptr;
-
 	addr = (lower_32_bits(addr) >> 8) |
 		((upper_32_bits(addr) & 0xff) << 24);
 	if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
 		addr |= R300_PTE_WRITEABLE;
 	if (!(flags & RADEON_GART_PAGE_SNOOP))
 		addr |= R300_PTE_UNSNOOPED;
+	return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+			      uint64_t entry)
+{
+	void __iomem *ptr = rdev->gart.ptr;
+
 	/* on x86 we want this to be CPU endian, on powerpc
 	 * on powerpc without HW swappers, it'll get swapped on way
 	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
-	writel(addr, ((void __iomem *)ptr) + (i * 4));
+	writel(entry, ((void __iomem *)ptr) + (i * 4));
 }
 
 int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
 		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
 	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
 	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
 	return radeon_gart_table_vram_alloc(rdev);
 }
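The r100/rv370 changes above split GART PTE handling in two: get_page_entry() does the flag encoding once, and set_page() becomes a bare store of the precomputed entry, which the driver can later replay cheaply. A compact sketch of the split, with an invented PTE layout (not the radeon one):

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_READABLE (1u << 3)

    static uint64_t get_page_entry(uint64_t addr, uint32_t flags)
    {
        /* all encoding happens here, exactly once per page */
        return ((addr >> 8) & 0xffffffffu) | flags;
    }

    static void set_page(uint32_t *table, unsigned i, uint64_t entry)
    {
        /* the hot path is now a bare store of the precomputed entry */
        table[i] = (uint32_t)entry;
    }

    int main(void)
    {
        uint32_t table[4] = { 0 };
        uint64_t entry = get_page_entry(0x12345000, PTE_READABLE);

        set_page(table, 0, entry);  /* replayed cheaply, e.g. after reset */
        printf("pte[0] = 0x%08x\n", (unsigned int)table[0]);
        return 0;
    }
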
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b837afa..3f2a8d3febca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
  * Dummy page
  */
 struct radeon_dummy_page {
+	uint64_t	entry;
 	struct page	*page;
 	dma_addr_t	addr;
 };
@@ -645,7 +646,7 @@ struct radeon_gart {
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
 	struct page			**pages;
-	dma_addr_t			*pages_addr;
+	uint64_t			*pages_entry;
 	bool				ready;
 };
 
@@ -1847,8 +1848,9 @@ struct radeon_asic {
 	/* gart */
 	struct {
 		void (*tlb_flush)(struct radeon_device *rdev);
+		uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
 		void (*set_page)(struct radeon_device *rdev, unsigned i,
-				 uint64_t addr, uint32_t flags);
+				 uint64_t entry);
 	} gart;
 	struct {
 		int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 121aff6a3b41..ed0e10eee2dc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
159 DRM_INFO("Forcing AGP to PCIE mode\n"); 159 DRM_INFO("Forcing AGP to PCIE mode\n");
160 rdev->flags |= RADEON_IS_PCIE; 160 rdev->flags |= RADEON_IS_PCIE;
161 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; 161 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
162 rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
162 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; 163 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
163 } else { 164 } else {
164 DRM_INFO("Forcing AGP to PCI mode\n"); 165 DRM_INFO("Forcing AGP to PCI mode\n");
165 rdev->flags |= RADEON_IS_PCI; 166 rdev->flags |= RADEON_IS_PCI;
166 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 167 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
168 rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
167 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 169 rdev->asic->gart.set_page = &r100_pci_gart_set_page;
168 } 170 }
169 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 171 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
199 .mc_wait_for_idle = &r100_mc_wait_for_idle, 201 .mc_wait_for_idle = &r100_mc_wait_for_idle,
200 .gart = { 202 .gart = {
201 .tlb_flush = &r100_pci_gart_tlb_flush, 203 .tlb_flush = &r100_pci_gart_tlb_flush,
204 .get_page_entry = &r100_pci_gart_get_page_entry,
202 .set_page = &r100_pci_gart_set_page, 205 .set_page = &r100_pci_gart_set_page,
203 }, 206 },
204 .ring = { 207 .ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
265 .mc_wait_for_idle = &r100_mc_wait_for_idle, 268 .mc_wait_for_idle = &r100_mc_wait_for_idle,
266 .gart = { 269 .gart = {
267 .tlb_flush = &r100_pci_gart_tlb_flush, 270 .tlb_flush = &r100_pci_gart_tlb_flush,
271 .get_page_entry = &r100_pci_gart_get_page_entry,
268 .set_page = &r100_pci_gart_set_page, 272 .set_page = &r100_pci_gart_set_page,
269 }, 273 },
270 .ring = { 274 .ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
359 .mc_wait_for_idle = &r300_mc_wait_for_idle, 363 .mc_wait_for_idle = &r300_mc_wait_for_idle,
360 .gart = { 364 .gart = {
361 .tlb_flush = &r100_pci_gart_tlb_flush, 365 .tlb_flush = &r100_pci_gart_tlb_flush,
366 .get_page_entry = &r100_pci_gart_get_page_entry,
362 .set_page = &r100_pci_gart_set_page, 367 .set_page = &r100_pci_gart_set_page,
363 }, 368 },
364 .ring = { 369 .ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
425 .mc_wait_for_idle = &r300_mc_wait_for_idle, 430 .mc_wait_for_idle = &r300_mc_wait_for_idle,
426 .gart = { 431 .gart = {
427 .tlb_flush = &rv370_pcie_gart_tlb_flush, 432 .tlb_flush = &rv370_pcie_gart_tlb_flush,
433 .get_page_entry = &rv370_pcie_gart_get_page_entry,
428 .set_page = &rv370_pcie_gart_set_page, 434 .set_page = &rv370_pcie_gart_set_page,
429 }, 435 },
430 .ring = { 436 .ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
491 .mc_wait_for_idle = &r300_mc_wait_for_idle, 497 .mc_wait_for_idle = &r300_mc_wait_for_idle,
492 .gart = { 498 .gart = {
493 .tlb_flush = &rv370_pcie_gart_tlb_flush, 499 .tlb_flush = &rv370_pcie_gart_tlb_flush,
500 .get_page_entry = &rv370_pcie_gart_get_page_entry,
494 .set_page = &rv370_pcie_gart_set_page, 501 .set_page = &rv370_pcie_gart_set_page,
495 }, 502 },
496 .ring = { 503 .ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
557 .mc_wait_for_idle = &rs400_mc_wait_for_idle, 564 .mc_wait_for_idle = &rs400_mc_wait_for_idle,
558 .gart = { 565 .gart = {
559 .tlb_flush = &rs400_gart_tlb_flush, 566 .tlb_flush = &rs400_gart_tlb_flush,
567 .get_page_entry = &rs400_gart_get_page_entry,
560 .set_page = &rs400_gart_set_page, 568 .set_page = &rs400_gart_set_page,
561 }, 569 },
562 .ring = { 570 .ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
623 .mc_wait_for_idle = &rs600_mc_wait_for_idle, 631 .mc_wait_for_idle = &rs600_mc_wait_for_idle,
624 .gart = { 632 .gart = {
625 .tlb_flush = &rs600_gart_tlb_flush, 633 .tlb_flush = &rs600_gart_tlb_flush,
634 .get_page_entry = &rs600_gart_get_page_entry,
626 .set_page = &rs600_gart_set_page, 635 .set_page = &rs600_gart_set_page,
627 }, 636 },
628 .ring = { 637 .ring = {
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
691 .mc_wait_for_idle = &rs690_mc_wait_for_idle, 700 .mc_wait_for_idle = &rs690_mc_wait_for_idle,
692 .gart = { 701 .gart = {
693 .tlb_flush = &rs400_gart_tlb_flush, 702 .tlb_flush = &rs400_gart_tlb_flush,
703 .get_page_entry = &rs400_gart_get_page_entry,
694 .set_page = &rs400_gart_set_page, 704 .set_page = &rs400_gart_set_page,
695 }, 705 },
696 .ring = { 706 .ring = {
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
759 .mc_wait_for_idle = &rv515_mc_wait_for_idle, 769 .mc_wait_for_idle = &rv515_mc_wait_for_idle,
760 .gart = { 770 .gart = {
761 .tlb_flush = &rv370_pcie_gart_tlb_flush, 771 .tlb_flush = &rv370_pcie_gart_tlb_flush,
772 .get_page_entry = &rv370_pcie_gart_get_page_entry,
762 .set_page = &rv370_pcie_gart_set_page, 773 .set_page = &rv370_pcie_gart_set_page,
763 }, 774 },
764 .ring = { 775 .ring = {
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
825 .mc_wait_for_idle = &r520_mc_wait_for_idle, 836 .mc_wait_for_idle = &r520_mc_wait_for_idle,
826 .gart = { 837 .gart = {
827 .tlb_flush = &rv370_pcie_gart_tlb_flush, 838 .tlb_flush = &rv370_pcie_gart_tlb_flush,
839 .get_page_entry = &rv370_pcie_gart_get_page_entry,
828 .set_page = &rv370_pcie_gart_set_page, 840 .set_page = &rv370_pcie_gart_set_page,
829 }, 841 },
830 .ring = { 842 .ring = {
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
919 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 931 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
920 .gart = { 932 .gart = {
921 .tlb_flush = &r600_pcie_gart_tlb_flush, 933 .tlb_flush = &r600_pcie_gart_tlb_flush,
934 .get_page_entry = &rs600_gart_get_page_entry,
922 .set_page = &rs600_gart_set_page, 935 .set_page = &rs600_gart_set_page,
923 }, 936 },
924 .ring = { 937 .ring = {
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
1004 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1017 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1005 .gart = { 1018 .gart = {
1006 .tlb_flush = &r600_pcie_gart_tlb_flush, 1019 .tlb_flush = &r600_pcie_gart_tlb_flush,
1020 .get_page_entry = &rs600_gart_get_page_entry,
1007 .set_page = &rs600_gart_set_page, 1021 .set_page = &rs600_gart_set_page,
1008 }, 1022 },
1009 .ring = { 1023 .ring = {
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
1095 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1109 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1096 .gart = { 1110 .gart = {
1097 .tlb_flush = &r600_pcie_gart_tlb_flush, 1111 .tlb_flush = &r600_pcie_gart_tlb_flush,
1112 .get_page_entry = &rs600_gart_get_page_entry,
1098 .set_page = &rs600_gart_set_page, 1113 .set_page = &rs600_gart_set_page,
1099 }, 1114 },
1100 .ring = { 1115 .ring = {
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
1199 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1214 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1200 .gart = { 1215 .gart = {
1201 .tlb_flush = &r600_pcie_gart_tlb_flush, 1216 .tlb_flush = &r600_pcie_gart_tlb_flush,
1217 .get_page_entry = &rs600_gart_get_page_entry,
1202 .set_page = &rs600_gart_set_page, 1218 .set_page = &rs600_gart_set_page,
1203 }, 1219 },
1204 .ring = { 1220 .ring = {
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
1317 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1333 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1318 .gart = { 1334 .gart = {
1319 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1335 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1336 .get_page_entry = &rs600_gart_get_page_entry,
1320 .set_page = &rs600_gart_set_page, 1337 .set_page = &rs600_gart_set_page,
1321 }, 1338 },
1322 .ring = { 1339 .ring = {
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
1409 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1426 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1410 .gart = { 1427 .gart = {
1411 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1428 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1429 .get_page_entry = &rs600_gart_get_page_entry,
1412 .set_page = &rs600_gart_set_page, 1430 .set_page = &rs600_gart_set_page,
1413 }, 1431 },
1414 .ring = { 1432 .ring = {
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
1500 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1518 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1501 .gart = { 1519 .gart = {
1502 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1520 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1521 .get_page_entry = &rs600_gart_get_page_entry,
1503 .set_page = &rs600_gart_set_page, 1522 .set_page = &rs600_gart_set_page,
1504 }, 1523 },
1505 .ring = { 1524 .ring = {
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
1635 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1654 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1636 .gart = { 1655 .gart = {
1637 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1656 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1657 .get_page_entry = &rs600_gart_get_page_entry,
1638 .set_page = &rs600_gart_set_page, 1658 .set_page = &rs600_gart_set_page,
1639 }, 1659 },
1640 .vm = { 1660 .vm = {
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
 	.get_gpu_clock_counter = &si_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &si_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
 	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
 	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
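
Every ASIC table in the hunk series above gains the same callback split: .get_page_entry encodes a DMA address plus RADEON_GART_PAGE_* flags into a hardware PTE once, while .set_page now only writes an already-encoded entry into the table. A minimal sketch of the resulting bind path, assuming radeon_gart_get_page_entry()/radeon_gart_set_page() wrappers that dispatch through these tables:

    /* Sketch only: encode the PTE once, cache it, and write it to the
     * hardware table only while the table is actually mapped. */
    static void gart_bind_one(struct radeon_device *rdev, unsigned t,
                              uint64_t dma_addr, uint32_t flags)
    {
        uint64_t entry = radeon_gart_get_page_entry(dma_addr, flags);

        rdev->gart.pages_entry[t] = entry;    /* cached for later replay */
        if (rdev->gart.ptr)                   /* table currently mapped? */
            radeon_gart_set_page(rdev, t, entry);
    }
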
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2a45d548d5ec..8d787d115653 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr, uint32_t flags);
+			    uint64_t entry);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
 				struct radeon_fence *fence);
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-				     uint64_t addr, uint32_t flags);
+				     uint64_t entry);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
 extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr, uint32_t flags);
+			 uint64_t entry);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
 void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr, uint32_t flags);
+			 uint64_t entry);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 9e7f23dd14bd..87d5fb21cb61 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -34,7 +34,8 @@
 
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 				    uint64_t saddr, uint64_t daddr,
-				    int flag, int n)
+				    int flag, int n,
+				    struct reservation_object *resv)
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 	case RADEON_BENCHMARK_COPY_DMA:
 		fence = radeon_copy_dma(rdev, saddr, daddr,
 					size / RADEON_GPU_PAGE_SIZE,
-					NULL);
+					resv);
 		break;
 	case RADEON_BENCHMARK_COPY_BLIT:
 		fence = radeon_copy_blit(rdev, saddr, daddr,
 					 size / RADEON_GPU_PAGE_SIZE,
-					 NULL);
+					 resv);
 		break;
 	default:
 		DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
 	if (rdev->asic->copy.dma) {
 		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-						RADEON_BENCHMARK_COPY_DMA, n);
+						RADEON_BENCHMARK_COPY_DMA, n,
+						dobj->tbo.resv);
 		if (time < 0)
 			goto out_cleanup;
 		if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
 	if (rdev->asic->copy.blit) {
 		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-						RADEON_BENCHMARK_COPY_BLIT, n);
+						RADEON_BENCHMARK_COPY_BLIT, n,
+						dobj->tbo.resv);
 		if (time < 0)
 			goto out_cleanup;
 		if (time > 0)
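
The benchmark hunks stop passing NULL and instead hand the destination BO's reservation object to the copy functions, so the returned fence is ordered against other work queued on that buffer. A hedged sketch of the changed call shape, using only the names visible above:

    /* Sketch: the final argument is now the BO's reservation object
     * rather than NULL, as in the hunks above. */
    fence = radeon_copy_dma(rdev, saddr, daddr,
                            size / RADEON_GPU_PAGE_SIZE,
                            dobj->tbo.resv);
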
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
 		rdev->dummy_page.page = NULL;
 		return -ENOMEM;
 	}
+	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+							    RADEON_GART_PAGE_DUMMY);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 102116902a07..913fafa597ad 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
 	    pll->flags & RADEON_PLL_USE_REF_DIV)
 		ref_div_max = pll->reference_div;
+	else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+		/* fix for problems on RS880 */
+		ref_div_max = min(pll->max_ref_div, 7u);
 	else
 		ref_div_max = pll->max_ref_div;
 
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
 		radeon_bo_unpin(rdev->gart.robj);
 	radeon_bo_unreserve(rdev->gart.robj);
 	rdev->gart.table_addr = gpu_addr;
+
+	if (!r) {
+		int i;
+
+		/* We might have dropped some GART table updates while it wasn't
+		 * mapped, restore all entries
+		 */
+		for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+			radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
+
 	return r;
 }
 
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	unsigned t;
 	unsigned p;
 	int i, j;
-	u64 page_base;
 
 	if (!rdev->gart.ready) {
 		WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
 			rdev->gart.pages[p] = NULL;
-			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
-			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
 				if (rdev->gart.ptr) {
-					radeon_gart_set_page(rdev, t, page_base,
-							     RADEON_GART_PAGE_DUMMY);
+					radeon_gart_set_page(rdev, t,
+							     rdev->dummy_page.entry);
 				}
-				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
 	}
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 {
 	unsigned t;
 	unsigned p;
-	uint64_t page_base;
+	uint64_t page_base, page_entry;
 	int i, j;
 
 	if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		rdev->gart.pages_addr[p] = dma_addr[i];
 		rdev->gart.pages[p] = pagelist[i];
-		if (rdev->gart.ptr) {
-			page_base = rdev->gart.pages_addr[p];
-			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, page_base, flags);
-				page_base += RADEON_GPU_PAGE_SIZE;
+		page_base = dma_addr[i];
+		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+			page_entry = radeon_gart_get_page_entry(page_base, flags);
+			rdev->gart.pages_entry[t] = page_entry;
+			if (rdev->gart.ptr) {
+				radeon_gart_set_page(rdev, t, page_entry);
 			}
+			page_base += RADEON_GPU_PAGE_SIZE;
 		}
 	}
 	mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-					rdev->gart.num_cpu_pages);
-	if (rdev->gart.pages_addr == NULL) {
+	rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+					 rdev->gart.num_gpu_pages);
+	if (rdev->gart.pages_entry == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
 	/* set GART entry to point to the dummy page by default */
-	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
-		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
-	}
+	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
 	return 0;
 }
 
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
  */
 void radeon_gart_fini(struct radeon_device *rdev)
 {
-	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+	if (rdev->gart.ready) {
 		/* unbind pages */
 		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
 	}
 	rdev->gart.ready = false;
 	vfree(rdev->gart.pages);
-	vfree(rdev->gart.pages_addr);
+	vfree(rdev->gart.pages_entry);
 	rdev->gart.pages = NULL;
-	rdev->gart.pages_addr = NULL;
+	rdev->gart.pages_entry = NULL;
 
 	radeon_dummy_page_fini(rdev);
 }
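
radeon_gart.c now keeps a pages_entry[] shadow of the whole table. While the VRAM-backed table is unpinned there is nothing to write into, so updates land only in the shadow; the pin path above replays every cached entry and flushes the TLB. A sketch of that replay, using the names from the hunk:

    /* Sketch of the replay performed after pinning the GART table. */
    static void gart_table_replay(struct radeon_device *rdev)
    {
        int i;

        for (i = 0; i < rdev->gart.num_gpu_pages; i++)
            radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
        mb();                       /* order PTE writes before the flush */
        radeon_gart_tlb_flush(rdev);
    }
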
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d0b4f7d1140d..ac3c1310b953 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	if (rdev->family < CHIP_CAYMAN) {
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
 		return 0;
 	}
 
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	if (rdev->family < CHIP_CAYMAN) {
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
 		return;
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 8bf87f1203cc..bef9a0953284 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
 static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
 				uint32_t hpd_size, uint64_t hpd_gpu_addr)
 {
-	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
 	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
 
 	lock_srbm(kgd, mec, pipe, 0, 0);
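
The one-character fix above removes a pre-increment that skewed the MEC/pipe decomposition: pipe_id was bumped before the division, and the already-incremented value then fed the modulo as well. A worked example, assuming CIK_PIPE_PER_MEC is 4:

    /* With pipe_id = 3 and CIK_PIPE_PER_MEC = 4 (assumed):
     *   old: ++pipe_id makes pipe_id 4, so mec = 4/4 + 1 = 2, pipe = 4%4 = 0
     *        -> first pipe of the *second* MEC
     *   new: mec = 3/4 + 1 = 1, pipe = 3%4 = 3
     *        -> fourth pipe of the first MEC, as intended */
    uint32_t mec  = (pipe_id / CIK_PIPE_PER_MEC) + 1;
    uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
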
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3cf9c1fa6475..686411e4e4f6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		return -ENOMEM;
 	}
 
-	vm = &fpriv->vm;
-	r = radeon_vm_init(rdev, vm);
-	if (r) {
-		kfree(fpriv);
-		return r;
-	}
-
 	if (rdev->accel_working) {
+		vm = &fpriv->vm;
+		r = radeon_vm_init(rdev, vm);
+		if (r) {
+			kfree(fpriv);
+			return r;
+		}
+
 		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 		if (r) {
 			radeon_vm_fini(rdev, vm);
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 			radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
 			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		}
+		radeon_vm_fini(rdev, vm);
 	}
 
-	radeon_vm_fini(rdev, vm);
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 07b506b41008..791818165c76 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 		if (ring == R600_RING_TYPE_DMA_INDEX)
 			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
 						size / RADEON_GPU_PAGE_SIZE,
-						NULL);
+						vram_obj->tbo.resv);
 		else
 			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
 						 size / RADEON_GPU_PAGE_SIZE,
-						 NULL);
+						 vram_obj->tbo.resv);
 		if (IS_ERR(fence)) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
 			r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 		if (ring == R600_RING_TYPE_DMA_INDEX)
 			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
 						size / RADEON_GPU_PAGE_SIZE,
-						NULL);
+						vram_obj->tbo.resv);
 		else
 			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
 						 size / RADEON_GPU_PAGE_SIZE,
-						 vram_obj->tbo.resv);
 		if (IS_ERR(fence)) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
 			r = PTR_ERR(fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c42b30a..2a5a4a9e772d 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
 	uint64_t result;
 
 	/* page table offset */
-	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-	/* in case cpu page size != gpu page size*/
-	result |= addr & (~PAGE_MASK);
+	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+	result &= ~RADEON_GPU_PAGE_MASK;
 
 	return result;
 }
@@ -745,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
 	 */
 
 	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
-	uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+			       (rdev->family == CHIP_ARUBA)) ?
 			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
-	uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
 
 	uint64_t frag_start = ALIGN(pe_start, frag_align);
 	uint64_t frag_end = pe_end & ~(frag_align - 1);
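
Since pages_entry[] stores fully encoded PTEs, radeon_vm_map_gart() in the first radeon_vm.c hunk recovers the bus address by masking off the low flag bits of one GPU-page-sized entry instead of OR-ing in a CPU page offset. A sketch of the lookup as the hunk leaves it:

    /* Sketch: translate a GART offset to the bus address of its backing
     * page; RADEON_GPU_PAGE_SHIFT/_MASK describe the GPU page size. */
    uint64_t result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
    result &= ~RADEON_GPU_PAGE_MASK;    /* strip PTE flag bits */
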
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
 #define RS400_PTE_WRITEABLE (1 << 2)
 #define RS400_PTE_READABLE  (1 << 3)
 
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
 	uint32_t entry;
-	u32 *gtt = rdev->gart.ptr;
 
 	entry = (lower_32_bits(addr) & PAGE_MASK) |
 		((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
 		entry |= RS400_PTE_WRITEABLE;
 	if (!(flags & RADEON_GART_PAGE_SNOOP))
 		entry |= RS400_PTE_UNSNOOPED;
-	entry = cpu_to_le32(entry);
-	gtt[i] = entry;
+	return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t entry)
+{
+	u32 *gtt = rdev->gart.ptr;
+	gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9acb1c3c005b..74bce91aecc1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
 	radeon_gart_table_vram_free(rdev);
 }
 
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-	void __iomem *ptr = (void *)rdev->gart.ptr;
-
 	addr = addr & 0xFFFFFFFFFFFFF000ULL;
 	addr |= R600_PTE_SYSTEM;
 	if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
 		addr |= R600_PTE_WRITEABLE;
 	if (flags & RADEON_GART_PAGE_SNOOP)
 		addr |= R600_PTE_SNOOPED;
-	writeq(addr, ptr + (i * 8));
+	return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t entry)
+{
+	void __iomem *ptr = (void *)rdev->gart.ptr;
+	writeq(entry, ptr + (i * 8));
 }
 
 int rs600_irq_set(struct radeon_device *rdev)
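
With encoding split from the MMIO write, an rs600-family caller can precompute an entry once and store it at any table index. A small usage sketch under the signatures above; the flag combination is assumed from the RADEON_GART_PAGE_* tests visible in the hunk:

    /* Sketch: encode a valid, snooped system page, then write slot i. */
    uint64_t entry = rs600_gart_get_page_entry(dma_addr,
                                               RADEON_GART_PAGE_VALID |
                                               RADEON_GART_PAGE_SNOOP);
    rs600_gart_set_page(rdev, i, entry);
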
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index aa7b872b2c43..83207929fc62 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
 	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 		if (flags & R600_PTE_SYSTEM) {
 			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
 		} else if (flags & R600_PTE_VALID) {
 			value = addr;
 		} else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b5d22110f25..6c6b655defcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
 		if (unlikely(ret != 0))
 			--dev_priv->num_3d_resources;
 	} else if (unhide_svga) {
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
 			  ~SVGA_REG_ENABLE_HIDE);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
 	mutex_lock(&dev_priv->release_mutex);
 	if (unlikely(--dev_priv->num_3d_resources == 0))
 		vmw_release_device(dev_priv);
-	else if (hide_svga) {
-		mutex_lock(&dev_priv->hw_mutex);
+	else if (hide_svga)
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
 			  SVGA_REG_ENABLE_HIDE);
-		mutex_unlock(&dev_priv->hw_mutex);
-	}
 
 	n3d = (int32_t) dev_priv->num_3d_resources;
 	mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->dev = dev;
 	dev_priv->vmw_chipset = chipset;
 	dev_priv->last_read_seqno = (uint32_t) -100;
-	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
+	spin_lock_init(&dev_priv->hw_lock);
+	spin_lock_init(&dev_priv->waiter_lock);
+	spin_lock_init(&dev_priv->cap_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 		idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->enable_fb = enable_fbdev;
 
-	mutex_lock(&dev_priv->hw_mutex);
-
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 	if (svga_id != SVGA_ID_2) {
 		ret = -ENOSYS;
 		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
-		mutex_unlock(&dev_priv->hw_mutex);
 		goto out_err0;
 	}
 
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
 
 	ret = vmw_dma_masks(dev_priv);
-	if (unlikely(ret != 0)) {
-		mutex_unlock(&dev_priv->hw_mutex);
+	if (unlikely(ret != 0))
 		goto out_err0;
-	}
 
 	/*
 	 * Limit back buffer size to VRAM size.  Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (dev_priv->prim_bb_mem > dev_priv->vram_size)
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
 
-	mutex_unlock(&dev_priv->hw_mutex);
-
 	vmw_print_capabilities(dev_priv->capabilities);
 
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
 		if (unlikely(ret != 0))
 			return ret;
 		vmw_kms_save_vga(dev_priv);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
 	if (!dev_priv->enable_fb) {
 		vmw_kms_restore_vga(dev_priv);
 		vmw_3d_resource_dec(dev_priv, true);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 	return ret;
 }
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
 			DRM_ERROR("Unable to clean VRAM on master drop.\n");
 		vmw_kms_restore_vga(dev_priv);
 		vmw_3d_resource_dec(dev_priv, true);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	mutex_lock(&dev_priv->hw_mutex);
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	(void) vmw_read(dev_priv, SVGA_REG_ID);
-	mutex_unlock(&dev_priv->hw_mutex);
 
 	/**
 	 * Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
 	uint32_t memory_size;
 	bool has_gmr;
 	bool has_mob;
-	struct mutex hw_mutex;
+	spinlock_t hw_lock;
+	spinlock_t cap_lock;
 
 	/*
 	 * VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
 	atomic_t marker_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
-	int fence_queue_waiters; /* Protected by hw_mutex */
-	int goal_queue_waiters; /* Protected by hw_mutex */
+	spinlock_t waiter_lock;
+	int fence_queue_waiters; /* Protected by waiter_lock */
+	int goal_queue_waiters; /* Protected by waiter_lock */
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
 	return (struct vmw_master *) master->driver_priv;
 }
 
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
 static inline void vmw_write(struct vmw_private *dev_priv,
 			     unsigned int offset, uint32_t value)
 {
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 				unsigned int offset)
 {
-	uint32_t val;
+	unsigned long irq_flags;
+	u32 val;
 
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
 	return val;
 }
 
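
With the hw_lock folded into vmw_read()/vmw_write(), register access becomes safe from atomic context and call sites no longer bracket it with a mutex. A hedged sketch of what a caller looks like after this change (the vmw_du_connector_detect() hunk further down does exactly this):

    /* Sketch: no external locking needed around register accessors now. */
    static u32 vmw_num_displays(struct vmw_private *dev_priv)
    {
        return vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
    }
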
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
 	struct vmw_private *dev_priv;
 	spinlock_t lock;
 	struct list_head fence_list;
-	struct work_struct work, ping_work;
+	struct work_struct work;
 	u32 user_fence_size;
 	u32 fence_size;
 	u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
 	return "svga";
 }
 
-static void vmw_fence_ping_func(struct work_struct *work)
-{
-	struct vmw_fence_manager *fman =
-		container_of(work, struct vmw_fence_manager, ping_work);
-
-	vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
 static bool vmw_fence_enable_signaling(struct fence *f)
 {
 	struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 		return false;
 
-	if (mutex_trylock(&dev_priv->hw_mutex)) {
-		vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
-		mutex_unlock(&dev_priv->hw_mutex);
-	} else
-		schedule_work(&fman->ping_work);
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
 	return true;
 }
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 	INIT_LIST_HEAD(&fman->fence_list);
 	INIT_LIST_HEAD(&fman->cleanup_list);
 	INIT_WORK(&fman->work, &vmw_fence_work_func);
-	INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
 	fman->fifo_down = true;
 	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
 	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
 	bool lists_empty;
 
 	(void) cancel_work_sync(&fman->work);
-	(void) cancel_work_sync(&fman->ping_work);
 
 	spin_lock_irqsave(&fman->lock, irq_flags);
 	lists_empty = list_empty(&fman->fence_list) &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 		if (!dev_priv->has_mob)
 			return false;
 
-		mutex_lock(&dev_priv->hw_mutex);
+		spin_lock(&dev_priv->cap_lock);
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
 		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
-		mutex_unlock(&dev_priv->hw_mutex);
+		spin_unlock(&dev_priv->cap_lock);
 
 		return (result != 0);
 	}
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
 	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
 
-	mutex_lock(&dev_priv->hw_mutex);
 	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
 	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
 	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	mb();
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
-	mutex_unlock(&dev_priv->hw_mutex);
 
 	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	return vmw_fifo_send_fence(dev_priv, &dummy);
 }
 
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	static DEFINE_SPINLOCK(ping_lock);
+	unsigned long irq_flags;
 
+	/*
+	 * The ping_lock is needed because we don't have an atomic
+	 * test-and-set of the SVGA_FIFO_BUSY register.
+	 */
+	spin_lock_irqsave(&ping_lock, irq_flags);
 	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
 		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
 	}
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
-	mutex_lock(&dev_priv->hw_mutex);
-
-	vmw_fifo_ping_host_locked(dev_priv, reason);
-
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock_irqrestore(&ping_lock, irq_flags);
 }
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
-	mutex_lock(&dev_priv->hw_mutex);
-
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
 		;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	vmw_write(dev_priv, SVGA_REG_TRACES,
 		  dev_priv->traces_state);
 
-	mutex_unlock(&dev_priv->hw_mutex);
 	vmw_marker_queue_takedown(&fifo->marker_queue);
 
 	if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 		return vmw_fifo_wait_noirq(dev_priv, bytes,
 					   interruptible, timeout);
 
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
 		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
 		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 	else if (likely(ret > 0))
 		ret = 0;
 
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
 		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
 		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 37881ecf5d7a..69c8ce23123c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
 		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
 	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
 
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->cap_lock);
 	for (i = 0; i < max_size; ++i) {
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 		compat_cap->pairs[i][0] = i;
 		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->cap_lock);
 
 	return 0;
 }
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		if (num > SVGA3D_DEVCAP_MAX)
 			num = SVGA3D_DEVCAP_MAX;
 
-		mutex_lock(&dev_priv->hw_mutex);
+		spin_lock(&dev_priv->cap_lock);
 		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 		}
-		mutex_unlock(&dev_priv->hw_mutex);
+		spin_unlock(&dev_priv->cap_lock);
 	} else if (gb_objects) {
 		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
 		if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c423766c441..9fe9827ee499 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 
 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 {
-	uint32_t busy;
 
-	mutex_lock(&dev_priv->hw_mutex);
-	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
-	mutex_unlock(&dev_priv->hw_mutex);
-
-	return (busy == 0);
+	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
 }
 
 void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 {
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (dev_priv->fence_queue_waiters++ == 0) {
 		unsigned long irq_flags;
 
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 {
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (--dev_priv->fence_queue_waiters == 0) {
 		unsigned long irq_flags;
 
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 }
 
 
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (dev_priv->goal_queue_waiters++ == 0) {
 		unsigned long irq_flags;
 
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (--dev_priv->goal_queue_waiters == 0) {
 		unsigned long irq_flags;
 
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 }
 
 int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
 		return;
 
-	mutex_lock(&dev_priv->hw_mutex);
 	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
-	mutex_unlock(&dev_priv->hw_mutex);
 
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
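
All four waiter helpers above converge on the same two-lock pattern: waiter_lock serializes the 0-to-1 and 1-to-0 counter transitions, and irq_lock protects the nested IRQ mask register update. A sketch of the add side; the masked flag name is an assumption, since it is truncated out of the hunks:

    /* Sketch of the waiter-count pattern used by the helpers above. */
    spin_lock(&dev_priv->waiter_lock);
    if (dev_priv->fence_queue_waiters++ == 0) {
        unsigned long irq_flags;

        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;   /* assumed flag */
        vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
    }
    spin_unlock(&dev_priv->waiter_lock);
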
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3725b521d931..8725b79e7847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
 
-	mutex_lock(&dev_priv->hw_mutex);
 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
-	mutex_unlock(&dev_priv->hw_mutex);
 
 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
 		 du->pref_active) ?
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index a7de26d1ac80..d931cbbed240 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1389,6 +1389,7 @@ config SENSORS_ADS1015
 config SENSORS_ADS7828
 	tristate "Texas Instruments ADS7828 and compatibles"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for Texas Instruments ADS7828 and
 	  ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while
@@ -1430,8 +1431,8 @@ config SENSORS_INA2XX
 	tristate "Texas Instruments INA219 and compatibles"
 	depends on I2C
 	help
-	  If you say yes here you get support for INA219, INA220, INA226, and
-	  INA230 power monitor chips.
+	  If you say yes here you get support for INA219, INA220, INA226,
+	  INA230, and INA231 power monitor chips.
 
 	  The INA2xx driver is configured for the default configuration of
 	  the part as described in the datasheet.
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 13875968c844..6cb89c0ebab6 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev,
221 struct abx500_temp *data = dev_get_drvdata(dev); 221 struct abx500_temp *data = dev_get_drvdata(dev);
222 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 222 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
223 223
224 return sprintf(buf, "%ld\n", data->min[attr->index]); 224 return sprintf(buf, "%lu\n", data->min[attr->index]);
225} 225}
226 226
227static ssize_t show_max(struct device *dev, 227static ssize_t show_max(struct device *dev,
@@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev,
 	struct abx500_temp *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-	return sprintf(buf, "%ld\n", data->max[attr->index]);
+	return sprintf(buf, "%lu\n", data->max[attr->index]);
 }
 
 static ssize_t show_max_hyst(struct device *dev,
@@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev,
 	struct abx500_temp *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-	return sprintf(buf, "%ld\n", data->max_hyst[attr->index]);
+	return sprintf(buf, "%lu\n", data->max_hyst[attr->index]);
 }
 
 static ssize_t show_min_alarm(struct device *dev,
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index f4f9b219bf16..11955467fc0f 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
+#include <linux/bitops.h>
 
 /*
  * AD7314 temperature masks
@@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
 	switch (spi_get_device_id(chip->spi_dev)->driver_data) {
 	case ad7314:
 		data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
-		data = (data << 6) >> 6;
+		data = sign_extend32(data, 9);
 
 		return sprintf(buf, "%d\n", 250 * data);
 	case adt7301:
@@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
78 * register. 1lsb - 31.25 milli degrees centigrade 79 * register. 1lsb - 31.25 milli degrees centigrade
79 */ 80 */
80 data = ret & ADT7301_TEMP_MASK; 81 data = ret & ADT7301_TEMP_MASK;
81 data = (data << 2) >> 2; 82 data = sign_extend32(data, 13);
82 83
83 return sprintf(buf, "%d\n", 84 return sprintf(buf, "%d\n",
84 DIV_ROUND_CLOSEST(data * 3125, 100)); 85 DIV_ROUND_CLOSEST(data * 3125, 100));
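
Both ad7314 hunks replace the shift-up/shift-down idiom with sign_extend32() from <linux/bitops.h> (hence the added include): the old "(data << 6) >> 6" silently depended on the variable being exactly 32 bits wide, while the helper names the sign bit explicitly. A sketch of what the helper computes, assuming arithmetic right shift as the kernel does; the real definition lives in <linux/bitops.h>:

/* sign_extend32(value, index): treat bit 'index' as the sign bit. */
static inline int sign_extend32_sketch(unsigned int value, int index)
{
	int shift = 31 - index;

	/* Shift the sign bit up to bit 31, then arithmetic-shift back. */
	return (int)(value << shift) >> shift;
}

So sign_extend32(data, 9) decodes the ad7314's 10-bit two's-complement sample, and sign_extend32(data, 13) the adt7301's 14-bit one.
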
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 0625e50d7a6e..ad2b47e40345 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
 #include <linux/mutex.h>
+#include <linux/bitops.h>
 
 /* Addresses to scan
  * The chip also supports addresses 0x35..0x37. Don't scan those addresses
@@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev,
 	if (IS_ERR(data))
 		return PTR_ERR(data);
 
-	temp = (data->temp[index] << 7) >> 7;	/* sign extend */
+	temp = sign_extend32(data->temp[index], 8);
 	return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */
 }
 
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index a622d40eec17..bce4e9ff21bf 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -30,14 +30,12 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/i2c.h>
 #include <linux/init.h>
-#include <linux/jiffies.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/platform_data/ads7828.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
 /* The ADS7828 registers */
-#define ADS7828_NCH		8	/* 8 channels supported */
 #define ADS7828_CMD_SD_SE	0x80	/* Single ended inputs */
 #define ADS7828_CMD_PD1		0x04	/* Internal vref OFF && A/D ON */
 #define ADS7828_CMD_PD3		0x0C	/* Internal vref ON && A/D ON */
@@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 };
 
 /* Client specific data */
 struct ads7828_data {
-	struct i2c_client *client;
-	struct mutex update_lock;	/* Mutex protecting updates */
-	unsigned long last_updated;	/* Last updated time (in jiffies) */
-	u16 adc_input[ADS7828_NCH];	/* ADS7828_NCH samples */
-	bool valid;			/* Validity flag */
-	bool diff_input;		/* Differential input */
-	bool ext_vref;			/* External voltage reference */
-	unsigned int vref_mv;		/* voltage reference value */
+	struct regmap *regmap;
 	u8 cmd_byte;			/* Command byte without channel bits */
 	unsigned int lsb_resol;		/* Resolution of the ADC sample LSB */
-	s32 (*read_channel)(const struct i2c_client *client, u8 command);
 };
 
 /* Command byte C2,C1,C0 - see datasheet */
@@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch)
 	return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
 }
 
-/* Update data for the device (all 8 channels) */
-static struct ads7828_data *ads7828_update_device(struct device *dev)
-{
-	struct ads7828_data *data = dev_get_drvdata(dev);
-	struct i2c_client *client = data->client;
-
-	mutex_lock(&data->update_lock);
-
-	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
-			|| !data->valid) {
-		unsigned int ch;
-		dev_dbg(&client->dev, "Starting ads7828 update\n");
-
-		for (ch = 0; ch < ADS7828_NCH; ch++) {
-			u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch);
-			data->adc_input[ch] = data->read_channel(client, cmd);
-		}
-		data->last_updated = jiffies;
-		data->valid = true;
-	}
-
-	mutex_unlock(&data->update_lock);
-
-	return data;
-}
-
 /* sysfs callback function */
 static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da,
 			       char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-	struct ads7828_data *data = ads7828_update_device(dev);
-	unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] *
-					       data->lsb_resol, 1000);
+	struct ads7828_data *data = dev_get_drvdata(dev);
+	u8 cmd = ads7828_cmd_byte(data->cmd_byte, attr->index);
+	unsigned int regval;
+	int err;
 
-	return sprintf(buf, "%d\n", value);
+	err = regmap_read(data->regmap, cmd, &regval);
+	if (err < 0)
+		return err;
+
+	return sprintf(buf, "%d\n",
+		       DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000));
 }
 
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0);
@@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = {
 
 ATTRIBUTE_GROUPS(ads7828);
 
+static const struct regmap_config ads2828_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+};
+
+static const struct regmap_config ads2830_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
 static int ads7828_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
@@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client,
 	struct ads7828_platform_data *pdata = dev_get_platdata(dev);
 	struct ads7828_data *data;
 	struct device *hwmon_dev;
+	unsigned int vref_mv = ADS7828_INT_VREF_MV;
+	bool diff_input = false;
+	bool ext_vref = false;
 
 	data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
 	if (pdata) {
-		data->diff_input = pdata->diff_input;
-		data->ext_vref = pdata->ext_vref;
-		if (data->ext_vref)
-			data->vref_mv = pdata->vref_mv;
+		diff_input = pdata->diff_input;
+		ext_vref = pdata->ext_vref;
+		if (ext_vref && pdata->vref_mv)
+			vref_mv = pdata->vref_mv;
 	}
 
-	/* Bound Vref with min/max values if it was provided */
-	if (data->vref_mv)
-		data->vref_mv = clamp_val(data->vref_mv,
-					  ADS7828_EXT_VREF_MV_MIN,
-					  ADS7828_EXT_VREF_MV_MAX);
-	else
-		data->vref_mv = ADS7828_INT_VREF_MV;
+	/* Bound Vref with min/max values */
+	vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN,
+			    ADS7828_EXT_VREF_MV_MAX);
 
 	/* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */
 	if (id->driver_data == ads7828) {
-		data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096);
-		data->read_channel = i2c_smbus_read_word_swapped;
+		data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096);
+		data->regmap = devm_regmap_init_i2c(client,
+						    &ads2828_regmap_config);
 	} else {
-		data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256);
-		data->read_channel = i2c_smbus_read_byte_data;
+		data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256);
+		data->regmap = devm_regmap_init_i2c(client,
+						    &ads2830_regmap_config);
 	}
 
-	data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
-	if (!data->diff_input)
+	data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
+	if (!diff_input)
 		data->cmd_byte |= ADS7828_CMD_SD_SE;
 
-	data->client = client;
-	mutex_init(&data->update_lock);
-
 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
							   data,
							   ads7828_groups);
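
The ads7828 conversion above drops the jiffies-based result cache and the read_channel function pointer: regmap selects byte or word I2C accessors from val_bits (8 for the ads7830, 16 for the ads7828), and each sysfs read now issues one fresh conversion. A hedged sketch of the resulting read path; the helper name is illustrative and not part of the patch:

/* Hypothetical helper mirroring ads7828_show_in() above: one regmap
 * read per request, scaled to millivolts via the per-chip LSB size.
 */
static int ads7828_read_channel_mv(struct ads7828_data *data, int ch)
{
	u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch);
	unsigned int regval;
	int err;

	err = regmap_read(data->regmap, cmd, &regval);
	if (err < 0)
		return err;

	return DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000);
}
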
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e01feba909c3..d1542b7d4bc3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -35,6 +35,7 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/jiffies.h>
 #include <linux/of.h>
+#include <linux/delay.h>
 
 #include <linux/platform_data/ina2xx.h>
 
@@ -51,7 +52,6 @@
 #define INA226_ALERT_LIMIT		0x07
 #define INA226_DIE_ID			0xFF
 
-
 /* register count */
 #define INA219_REGISTERS		6
 #define INA226_REGISTERS		8
@@ -64,6 +64,24 @@
 
 /* worst case is 68.10 ms (~14.6Hz, ina219) */
 #define INA2XX_CONVERSION_RATE		15
+#define INA2XX_MAX_DELAY		69 /* worst case delay in ms */
+
+#define INA2XX_RSHUNT_DEFAULT		10000
+
+/* bit mask for reading the averaging setting in the configuration register */
+#define INA226_AVG_RD_MASK		0x0E00
+
+#define INA226_READ_AVG(reg)		(((reg) & INA226_AVG_RD_MASK) >> 9)
+#define INA226_SHIFT_AVG(val)		((val) << 9)
+
+/* common attrs, ina226 attrs and NULL */
+#define INA2XX_MAX_ATTRIBUTE_GROUPS	3
+
+/*
+ * Both bus voltage and shunt voltage conversion times for ina226 are set
+ * to 0b0100 on POR, which translates to 2200 microseconds in total.
+ */
+#define INA226_TOTAL_CONV_TIME_DEFAULT	2200
 
 enum ina2xx_ids { ina219, ina226 };
 
@@ -81,11 +99,16 @@ struct ina2xx_data {
 	struct i2c_client *client;
 	const struct ina2xx_config *config;
 
+	long rshunt;
+	u16 curr_config;
+
 	struct mutex update_lock;
 	bool valid;
 	unsigned long last_updated;
+	int update_interval; /* in jiffies */
 
 	int kind;
+	const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
 	u16 regs[INA2XX_MAX_REGISTERS];
 };
 
@@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = {
 	},
 };
 
-static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+/*
+ * Available averaging rates for ina226. The indices correspond with
+ * the bit values expected by the chip (according to the ina226 datasheet,
+ * table 3 AVG bit settings, found at
+ * http://www.ti.com/lit/ds/symlink/ina226.pdf.
+ */
+static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
+
+static int ina226_avg_bits(int avg)
+{
+	int i;
+
+	/* Get the closest average from the tab. */
+	for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) {
+		if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
+			break;
+	}
+
+	return i; /* Return 0b0111 for values greater than 1024. */
+}
+
+static int ina226_reg_to_interval(u16 config)
+{
+	int avg = ina226_avg_tab[INA226_READ_AVG(config)];
+
+	/*
+	 * Multiply the total conversion time by the number of averages.
+	 * Return the result in milliseconds.
+	 */
+	return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
+}
+
+static u16 ina226_interval_to_reg(int interval, u16 config)
+{
+	int avg, avg_bits;
+
+	avg = DIV_ROUND_CLOSEST(interval * 1000,
+				INA226_TOTAL_CONV_TIME_DEFAULT);
+	avg_bits = ina226_avg_bits(avg);
+
+	return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
+}
+
+static void ina226_set_update_interval(struct ina2xx_data *data)
+{
+	int ms;
+
+	ms = ina226_reg_to_interval(data->curr_config);
+	data->update_interval = msecs_to_jiffies(ms);
+}
+
+static int ina2xx_calibrate(struct ina2xx_data *data)
+{
+	u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+				    data->rshunt);
+
+	return i2c_smbus_write_word_swapped(data->client,
+					    INA2XX_CALIBRATION, val);
+}
+
+/*
+ * Initialize the configuration and calibration registers.
+ */
+static int ina2xx_init(struct ina2xx_data *data)
 {
-	struct ina2xx_data *data = dev_get_drvdata(dev);
 	struct i2c_client *client = data->client;
-	struct ina2xx_data *ret = data;
+	int ret;
 
-	mutex_lock(&data->update_lock);
+	/* device configuration */
+	ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
+					   data->curr_config);
+	if (ret < 0)
+		return ret;
 
-	if (time_after(jiffies, data->last_updated +
-		       HZ / INA2XX_CONVERSION_RATE) || !data->valid) {
+	/*
+	 * Set current LSB to 1mA, shunt is in uOhms
+	 * (equation 13 in datasheet).
+	 */
+	return ina2xx_calibrate(data);
+}
 
-		int i;
+static int ina2xx_do_update(struct device *dev)
+{
+	struct ina2xx_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+	int i, rv, retry;
 
-		dev_dbg(&client->dev, "Starting ina2xx update\n");
+	dev_dbg(&client->dev, "Starting ina2xx update\n");
 
+	for (retry = 5; retry; retry--) {
 		/* Read all registers */
 		for (i = 0; i < data->config->registers; i++) {
-			int rv = i2c_smbus_read_word_swapped(client, i);
-			if (rv < 0) {
-				ret = ERR_PTR(rv);
-				goto abort;
-			}
+			rv = i2c_smbus_read_word_swapped(client, i);
+			if (rv < 0)
+				return rv;
 			data->regs[i] = rv;
 		}
+
+		/*
+		 * If the current value in the calibration register is 0, the
+		 * power and current registers will also remain at 0. In case
+		 * the chip has been reset let's check the calibration
+		 * register and reinitialize if needed.
+		 */
+		if (data->regs[INA2XX_CALIBRATION] == 0) {
+			dev_warn(dev, "chip not calibrated, reinitializing\n");
+
+			rv = ina2xx_init(data);
+			if (rv < 0)
+				return rv;
+
+			/*
+			 * Let's make sure the power and current registers
+			 * have been updated before trying again.
+			 */
+			msleep(INA2XX_MAX_DELAY);
+			continue;
+		}
+
 		data->last_updated = jiffies;
 		data->valid = 1;
+
+		return 0;
 	}
-abort:
+
+	/*
+	 * If we're here then although all write operations succeeded, the
+	 * chip still returns 0 in the calibration register. Nothing more we
+	 * can do here.
+	 */
+	dev_err(dev, "unable to reinitialize the chip\n");
+	return -ENODEV;
+}
+
+static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+{
+	struct ina2xx_data *data = dev_get_drvdata(dev);
+	struct ina2xx_data *ret = data;
+	unsigned long after;
+	int rv;
+
+	mutex_lock(&data->update_lock);
+
+	after = data->last_updated + data->update_interval;
+	if (time_after(jiffies, after) || !data->valid) {
+		rv = ina2xx_do_update(dev);
+		if (rv < 0)
+			ret = ERR_PTR(rv);
+	}
+
 	mutex_unlock(&data->update_lock);
 	return ret;
 }
@@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
 		/* signed register, LSB=1mA (selected), in mA */
 		val = (s16)data->regs[reg];
 		break;
+	case INA2XX_CALIBRATION:
+		val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+					data->regs[reg]);
+		break;
 	default:
 		/* programmer goofed */
 		WARN_ON_ONCE(1);
@@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev,
 			ina2xx_get_value(data, attr->index));
 }
 
+static ssize_t ina2xx_set_shunt(struct device *dev,
+				struct device_attribute *da,
+				const char *buf, size_t count)
+{
+	struct ina2xx_data *data = ina2xx_update_device(dev);
+	unsigned long val;
+	int status;
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	status = kstrtoul(buf, 10, &val);
+	if (status < 0)
+		return status;
+
+	if (val == 0 ||
+	    /* Values greater than the calibration factor make no sense. */
+	    val > data->config->calibration_factor)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	data->rshunt = val;
+	status = ina2xx_calibrate(data);
+	mutex_unlock(&data->update_lock);
+	if (status < 0)
+		return status;
+
+	return count;
+}
+
+static ssize_t ina226_set_interval(struct device *dev,
+				   struct device_attribute *da,
+				   const char *buf, size_t count)
+{
+	struct ina2xx_data *data = dev_get_drvdata(dev);
+	unsigned long val;
+	int status;
+
+	status = kstrtoul(buf, 10, &val);
+	if (status < 0)
+		return status;
+
+	if (val > INT_MAX || val == 0)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	data->curr_config = ina226_interval_to_reg(val,
+						   data->regs[INA2XX_CONFIG]);
+	status = i2c_smbus_write_word_swapped(data->client,
+					      INA2XX_CONFIG,
+					      data->curr_config);
+
+	ina226_set_update_interval(data);
+	/* Make sure the next access re-reads all registers. */
+	data->valid = 0;
+	mutex_unlock(&data->update_lock);
+	if (status < 0)
+		return status;
+
+	return count;
+}
+
+static ssize_t ina226_show_interval(struct device *dev,
+				    struct device_attribute *da, char *buf)
+{
+	struct ina2xx_data *data = ina2xx_update_device(dev);
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	/*
+	 * We don't use data->update_interval here as we want to display
+	 * the actual interval used by the chip and jiffies_to_msecs()
+	 * doesn't seem to be accurate enough.
+	 */
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			ina226_reg_to_interval(data->regs[INA2XX_CONFIG]));
+}
+
 /* shunt voltage */
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL,
 			  INA2XX_SHUNT_VOLTAGE);
@@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL,
 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
 			  INA2XX_POWER);
 
+/* shunt resistance */
+static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
+			  ina2xx_show_value, ina2xx_set_shunt,
+			  INA2XX_CALIBRATION);
+
+/* update interval (ina226 only) */
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+			  ina226_show_interval, ina226_set_interval, 0);
+
 /* pointers to created device attributes */
 static struct attribute *ina2xx_attrs[] = {
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in1_input.dev_attr.attr,
 	&sensor_dev_attr_curr1_input.dev_attr.attr,
 	&sensor_dev_attr_power1_input.dev_attr.attr,
+	&sensor_dev_attr_shunt_resistor.dev_attr.attr,
 	NULL,
 };
-ATTRIBUTE_GROUPS(ina2xx);
+
+static const struct attribute_group ina2xx_group = {
+	.attrs = ina2xx_attrs,
+};
+
+static struct attribute *ina226_attrs[] = {
+	&sensor_dev_attr_update_interval.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ina226_group = {
+	.attrs = ina226_attrs,
+};
 
 static int ina2xx_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
@@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client,
 	struct device *dev = &client->dev;
 	struct ina2xx_data *data;
 	struct device *hwmon_dev;
-	long shunt = 10000; /* default shunt value 10mOhms */
 	u32 val;
-	int ret;
+	int ret, group = 0;
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
 		return -ENODEV;
@@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client,
 
 	if (dev_get_platdata(dev)) {
 		pdata = dev_get_platdata(dev);
-		shunt = pdata->shunt_uohms;
+		data->rshunt = pdata->shunt_uohms;
 	} else if (!of_property_read_u32(dev->of_node,
 					 "shunt-resistor", &val)) {
-		shunt = val;
+		data->rshunt = val;
+	} else {
+		data->rshunt = INA2XX_RSHUNT_DEFAULT;
 	}
 
-	if (shunt <= 0)
-		return -ENODEV;
-
 	/* set the device type */
 	data->kind = id->driver_data;
 	data->config = &ina2xx_config[data->kind];
-
-	/* device configuration */
-	ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
-					   data->config->config_default);
-	if (ret < 0) {
-		dev_err(dev,
-			"error writing to the config register: %d", ret);
-		return -ENODEV;
-	}
+	data->curr_config = data->config->config_default;
+	data->client = client;
 
 	/*
-	 * Set current LSB to 1mA, shunt is in uOhms
-	 * (equation 13 in datasheet).
+	 * Ina226 has a variable update_interval. For ina219 we
+	 * use a constant value.
 	 */
-	ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
-					   data->config->calibration_factor / shunt);
+	if (data->kind == ina226)
+		ina226_set_update_interval(data);
+	else
+		data->update_interval = HZ / INA2XX_CONVERSION_RATE;
+
+	if (data->rshunt <= 0 ||
+	    data->rshunt > data->config->calibration_factor)
+		return -ENODEV;
+
+	ret = ina2xx_init(data);
 	if (ret < 0) {
-		dev_err(dev,
-			"error writing to the calibration register: %d", ret);
+		dev_err(dev, "error configuring the device: %d\n", ret);
 		return -ENODEV;
 	}
 
-	data->client = client;
 	mutex_init(&data->update_lock);
 
+	data->groups[group++] = &ina2xx_group;
+	if (data->kind == ina226)
+		data->groups[group++] = &ina226_group;
+
 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
-							   data, ina2xx_groups);
+							   data, data->groups);
 	if (IS_ERR(hwmon_dev))
 		return PTR_ERR(hwmon_dev);
 
 	dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
-		 id->name, shunt);
+		 id->name, data->rshunt);
 
 	return 0;
 }
@@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = {
 	{ "ina220", ina219 },
 	{ "ina226", ina226 },
 	{ "ina230", ina226 },
+	{ "ina231", ina226 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, ina2xx_id);
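
The ina226 update_interval plumbing above rounds any requested interval to the nearest supported averaging count (1..1024 samples at 2.2 ms of total conversion time each). A worked example in plain C: asking for 100 ms lands on 64 averages, so the attribute reads back as 141 ms.

#include <stdio.h>

static const int avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };

int main(void)
{
	int interval_ms = 100;
	/* DIV_ROUND_CLOSEST(interval * 1000, 2200) open-coded */
	int avg = (interval_ms * 1000 + 1100) / 2200;	/* = 45 */
	int i;

	/* Pick the nearest table entry, as ina226_avg_bits() does. */
	for (i = 0; i < 7; i++)
		if (avg <= (avg_tab[i] + avg_tab[i + 1]) / 2)
			break;

	/* Prints "avg=64 -> interval=141 ms". */
	printf("avg=%d -> interval=%d ms\n", avg_tab[i],
	       (avg_tab[i] * 2200 + 500) / 1000);
	return 0;
}
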
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 388f8bcd898e..996bdfd5cf25 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -201,7 +201,7 @@ struct jc42_data {
 #define JC42_TEMP_MIN		0
 #define JC42_TEMP_MAX		125000
 
-static u16 jc42_temp_to_reg(int temp, bool extended)
+static u16 jc42_temp_to_reg(long temp, bool extended)
 {
 	int ntemp = clamp_val(temp,
 			      extended ? JC42_TEMP_MIN_EXTENDED :
@@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended)
 
 static int jc42_temp_from_reg(s16 reg)
 {
-	reg &= 0x1fff;
-
-	/* sign extend register */
-	if (reg & 0x1000)
-		reg |= 0xf000;
+	reg = sign_extend32(reg, 12);
 
 	/* convert from 0.0625 to 0.001 resolution */
 	return reg * 125 / 2;
@@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
 				  const char *buf, size_t count)
 {
 	struct jc42_data *data = dev_get_drvdata(dev);
-	unsigned long val;
+	long val;
 	int diff, hyst;
 	int err;
 	int ret = count;
 
-	if (kstrtoul(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 
+	val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED :
+			      JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX);
 	diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
+
 	hyst = 0;
 	if (diff > 0) {
 		if (diff < 2250)
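
The jc42 register format is a 13-bit two's-complement value in 0.0625 degC steps, which "reg * 125 / 2" converts to millidegrees. A self-contained check of the conversion; like the kernel helper, it relies on arithmetic right shift of signed values:

#include <stdio.h>
#include <stdint.h>

static int jc42_temp_from_reg(int16_t reg)
{
	/* Sign-extend bit 12, equivalent to sign_extend32(reg, 12). */
	reg = (int16_t)((uint16_t)reg << 3) >> 3;
	return reg * 125 / 2;		/* 0.0625 degC -> 0.001 degC */
}

int main(void)
{
	printf("%d\n", jc42_temp_from_reg(0x01F4));	/* 31250 = 31.25 degC */
	printf("%d\n", jc42_temp_from_reg(0x1FFF));	/* -62 ~= -0.0625 degC */
	return 0;
}
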
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index ec5678289e4a..55765790907b 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
 	return reg != REG_BANK && reg <= 0x20;
 }
 
-static struct regmap_config nct7802_regmap_config = {
+static const struct regmap_config nct7802_regmap_config = {
 	.reg_bits = 8,
 	.val_bits = 8,
 	.cache_type = REGCACHE_RBTREE,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index ba9f478f64ee..9da2735f1424 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int tmp102_suspend(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
@@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev)
 	config &= ~TMP102_CONF_SD;
 	return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
 }
-
-static const struct dev_pm_ops tmp102_dev_pm_ops = {
-	.suspend	= tmp102_suspend,
-	.resume		= tmp102_resume,
-};
-
-#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
-#else
-#define TMP102_DEV_PM_OPS NULL
 #endif /* CONFIG_PM */
 
+static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
+
 static const struct i2c_device_id tmp102_id[] = {
 	{ "tmp102", 0 },
 	{ }
@@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id);
 
 static struct i2c_driver tmp102_driver = {
 	.driver.name	= DRIVER_NAME,
-	.driver.pm	= TMP102_DEV_PM_OPS,
+	.driver.pm	= &tmp102_dev_pm_ops,
 	.probe		= tmp102_probe,
 	.remove		= tmp102_remove,
 	.id_table	= tmp102_id,
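
SIMPLE_DEV_PM_OPS replaces the hand-rolled dev_pm_ops and its #else fallback: the macro always defines the ops table, and the callbacks are only referenced when CONFIG_PM_SLEEP is set, so the driver can point .driver.pm at it unconditionally. Roughly equivalent to the following simplified sketch (the real macro in <linux/pm.h> also wires freeze/thaw/poweroff/restore to the same pair):

static const struct dev_pm_ops tmp102_dev_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = tmp102_suspend,	/* compiled out without PM_SLEEP */
	.resume  = tmp102_resume,
#endif
};
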
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 31e8308ba899..ab838d9e28b6 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -881,6 +881,7 @@ config I2C_XLR
 config I2C_RCAR
 	tristate "Renesas R-Car I2C Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
+	select I2C_SLAVE
 	help
 	  If you say yes to this option, support will be included for the
 	  R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bff20a589621..958c8db4ec30 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
 	int ret;
 
 	pm_runtime_get_sync(&adap->dev);
-	clk_prepare_enable(i2c->clk);
+	ret = clk_enable(i2c->clk);
+	if (ret)
+		return ret;
 
 	for (retry = 0; retry < adap->retries; retry++) {
 
 		ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
 
 		if (ret != -EAGAIN) {
-			clk_disable_unprepare(i2c->clk);
+			clk_disable(i2c->clk);
 			pm_runtime_put(&adap->dev);
 			return ret;
 		}
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
 		udelay(100);
 	}
 
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	pm_runtime_put(&adap->dev);
 	return -EREMOTEIO;
 }
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 
 	clk_prepare_enable(i2c->clk);
 	ret = s3c24xx_i2c_init(i2c);
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "I2C controller init failed\n");
 		return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 	i2c->irq = ret = platform_get_irq(pdev, 0);
 	if (ret <= 0) {
 		dev_err(&pdev->dev, "cannot find IRQ\n");
+		clk_unprepare(i2c->clk);
 		return ret;
 	}
 
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 
 		if (ret != 0) {
 			dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+			clk_unprepare(i2c->clk);
 			return ret;
 		}
 	}
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 	ret = s3c24xx_i2c_register_cpufreq(i2c);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+		clk_unprepare(i2c->clk);
 		return ret;
 	}
 
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to add bus to i2c core\n");
 		s3c24xx_i2c_deregister_cpufreq(i2c);
+		clk_unprepare(i2c->clk);
 		return ret;
 	}
 
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
 {
 	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
+	clk_unprepare(i2c->clk);
+
 	pm_runtime_disable(&i2c->adap.dev);
 	pm_runtime_disable(&pdev->dev);
 
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+	int ret;
 
 	if (!IS_ERR(i2c->sysreg))
 		regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
 
-	clk_prepare_enable(i2c->clk);
+	ret = clk_enable(i2c->clk);
+	if (ret)
+		return ret;
 	s3c24xx_i2c_init(i2c);
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	i2c->suspended = 0;
 
 	return 0;
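
The s3c2410 changes split clk_prepare_enable() into its halves: prepare once at probe time (it may sleep), then the cheap clk_enable()/clk_disable() pair around each transfer, with clk_unprepare() added to every probe error path and to remove(). The pattern in isolation, with illustrative function names rather than the driver's:

/* Sketch of the prepare/enable split, not the driver itself. */
static int bus_probe(struct clk *clk)
{
	return clk_prepare(clk);	/* once; may sleep */
}

static int bus_xfer(struct clk *clk)
{
	int ret = clk_enable(clk);	/* per transfer; atomic-safe */

	if (ret)
		return ret;
	/* ... do the transfer ... */
	clk_disable(clk);
	return 0;
}

static void bus_remove(struct clk *clk)
{
	clk_unprepare(clk);		/* balance the probe-time prepare */
}
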
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 440d5dbc8b5f..007818b3e174 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
 	int pos;
 	int sr;
 	bool send_stop;
+	bool stop_after_dma;
 
 	struct resource *res;
 	struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
 
 	if (pd->pos == pd->msg->len) {
 		/* Send stop if we haven't yet (DMA case) */
-		if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+		if (pd->send_stop && pd->stop_after_dma)
 			i2c_op(pd, OP_TX_STOP, 0);
 		return 1;
 	}
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
 		real_pos = pd->pos - 2;
 
 		if (pd->pos == pd->msg->len) {
+			if (pd->stop_after_dma) {
+				/* Simulate PIO end condition after DMA transfer */
+				i2c_op(pd, OP_RX_STOP, 0);
+				pd->pos++;
+				break;
+			}
+
 			if (real_pos < 0) {
 				i2c_op(pd, OP_RX_STOP, 0);
 				break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
 
 	sh_mobile_i2c_dma_unmap(pd);
 	pd->pos = pd->msg->len;
+	pd->stop_after_dma = true;
 
 	iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
 }
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
 		bool do_start = pd->send_stop || !i;
 		msg = &msgs[i];
 		pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+		pd->stop_after_dma = false;
 
 		err = start_ch(pd, msg, do_start);
 		if (err)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 39d25a8cb1ad..e9eae57a2b50 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2972,6 +2972,7 @@ trace:
 }
 EXPORT_SYMBOL(i2c_smbus_xfer);
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
 {
 	int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
 
 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C-Bus main module");
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 6631400b5f02..cf9b09db092f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
 	struct eeprom_data *eeprom;
 	unsigned long flags;
 
-	if (off + count >= attr->size)
+	if (off + count > attr->size)
 		return -EFBIG;
 
 	eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
 	struct eeprom_data *eeprom;
 	unsigned long flags;
 
-	if (off + count >= attr->size)
+	if (off + count > attr->size)
 		return -EFBIG;
 
 	eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
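
The eeprom bounds fix is a classic off-by-one: with ">=", a transfer ending exactly at the last byte (off + count == attr->size) was rejected as too big. Checked in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long size = 1024, off = 1000, count = 24;

	/* off + count == 1024 is a valid access of the final bytes. */
	printf("old check rejects: %d\n", off + count >= size);	/* 1: bug */
	printf("new check rejects: %d\n", off + count > size);	/* 0: ok  */
	return 0;
}
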
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b716b0815644..643c08a025a5 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
-IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 532d8eba8b02..b7943ff16ed3 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -400,52 +400,6 @@ err:
 	return ret;
 }
 
-static void copy_query_dev_fields(struct ib_uverbs_file *file,
-				  struct ib_uverbs_query_device_resp *resp,
-				  struct ib_device_attr *attr)
-{
-	resp->fw_ver = attr->fw_ver;
-	resp->node_guid = file->device->ib_dev->node_guid;
-	resp->sys_image_guid = attr->sys_image_guid;
-	resp->max_mr_size = attr->max_mr_size;
-	resp->page_size_cap = attr->page_size_cap;
-	resp->vendor_id = attr->vendor_id;
-	resp->vendor_part_id = attr->vendor_part_id;
-	resp->hw_ver = attr->hw_ver;
-	resp->max_qp = attr->max_qp;
-	resp->max_qp_wr = attr->max_qp_wr;
-	resp->device_cap_flags = attr->device_cap_flags;
-	resp->max_sge = attr->max_sge;
-	resp->max_sge_rd = attr->max_sge_rd;
-	resp->max_cq = attr->max_cq;
-	resp->max_cqe = attr->max_cqe;
-	resp->max_mr = attr->max_mr;
-	resp->max_pd = attr->max_pd;
-	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
-	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
-	resp->max_res_rd_atom = attr->max_res_rd_atom;
-	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
-	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
-	resp->atomic_cap = attr->atomic_cap;
-	resp->max_ee = attr->max_ee;
-	resp->max_rdd = attr->max_rdd;
-	resp->max_mw = attr->max_mw;
-	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
-	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
-	resp->max_mcast_grp = attr->max_mcast_grp;
-	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
-	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
-	resp->max_ah = attr->max_ah;
-	resp->max_fmr = attr->max_fmr;
-	resp->max_map_per_fmr = attr->max_map_per_fmr;
-	resp->max_srq = attr->max_srq;
-	resp->max_srq_wr = attr->max_srq_wr;
-	resp->max_srq_sge = attr->max_srq_sge;
-	resp->max_pkeys = attr->max_pkeys;
-	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
-	resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
-}
-
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 			       const char __user *buf,
 			       int in_len, int out_len)
@@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 		return ret;
 
 	memset(&resp, 0, sizeof resp);
-	copy_query_dev_fields(file, &resp, &attr);
+
+	resp.fw_ver = attr.fw_ver;
+	resp.node_guid = file->device->ib_dev->node_guid;
+	resp.sys_image_guid = attr.sys_image_guid;
+	resp.max_mr_size = attr.max_mr_size;
+	resp.page_size_cap = attr.page_size_cap;
+	resp.vendor_id = attr.vendor_id;
+	resp.vendor_part_id = attr.vendor_part_id;
+	resp.hw_ver = attr.hw_ver;
+	resp.max_qp = attr.max_qp;
+	resp.max_qp_wr = attr.max_qp_wr;
+	resp.device_cap_flags = attr.device_cap_flags;
+	resp.max_sge = attr.max_sge;
+	resp.max_sge_rd = attr.max_sge_rd;
+	resp.max_cq = attr.max_cq;
+	resp.max_cqe = attr.max_cqe;
+	resp.max_mr = attr.max_mr;
+	resp.max_pd = attr.max_pd;
+	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
+	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
+	resp.max_res_rd_atom = attr.max_res_rd_atom;
+	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
+	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
+	resp.atomic_cap = attr.atomic_cap;
+	resp.max_ee = attr.max_ee;
+	resp.max_rdd = attr.max_rdd;
+	resp.max_mw = attr.max_mw;
+	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
+	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
+	resp.max_mcast_grp = attr.max_mcast_grp;
+	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
+	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
+	resp.max_ah = attr.max_ah;
+	resp.max_fmr = attr.max_fmr;
+	resp.max_map_per_fmr = attr.max_map_per_fmr;
+	resp.max_srq = attr.max_srq;
+	resp.max_srq_wr = attr.max_srq_wr;
+	resp.max_srq_sge = attr.max_srq_sge;
+	resp.max_pkeys = attr.max_pkeys;
+	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
+	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp))
@@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
 	return ret ? ret : in_len;
 }
-
-int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
-			      struct ib_udata *ucore,
-			      struct ib_udata *uhw)
-{
-	struct ib_uverbs_ex_query_device_resp resp;
-	struct ib_uverbs_ex_query_device cmd;
-	struct ib_device_attr attr;
-	struct ib_device *device;
-	int err;
-
-	device = file->device->ib_dev;
-	if (ucore->inlen < sizeof(cmd))
-		return -EINVAL;
-
-	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
-	if (err)
-		return err;
-
-	if (cmd.reserved)
-		return -EINVAL;
-
-	err = device->query_device(device, &attr);
-	if (err)
-		return err;
-
-	memset(&resp, 0, sizeof(resp));
-	copy_query_dev_fields(file, &resp.base, &attr);
-	resp.comp_mask = 0;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
-		resp.odp_caps.general_caps = attr.odp_caps.general_caps;
-		resp.odp_caps.per_transport_caps.rc_odp_caps =
-			attr.odp_caps.per_transport_caps.rc_odp_caps;
-		resp.odp_caps.per_transport_caps.uc_odp_caps =
-			attr.odp_caps.per_transport_caps.uc_odp_caps;
-		resp.odp_caps.per_transport_caps.ud_odp_caps =
-			attr.odp_caps.per_transport_caps.ud_odp_caps;
-		resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
-	}
-#endif
-
-	err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
-	if (err)
-		return err;
-
-	return 0;
-}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e6c23b9eab33..5db1a8cc388d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
 				    struct ib_udata *uhw) = {
 	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
 	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow,
-	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE]	= ib_uverbs_ex_query_device
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8a87404e9c76..03bf81211a54 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
-	dev->ib_dev.uverbs_ex_cmd_mask =
-		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
 	dev->ib_dev.query_device	= mlx5_ib_query_device;
 	dev->ib_dev.query_port		= mlx5_ib_query_port;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 8ba80a6d3a46..d7562beb5423 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -98,15 +98,9 @@ enum {
 
 	IPOIB_MCAST_FLAG_FOUND	  = 0,	/* used in set_multicast_list */
 	IPOIB_MCAST_FLAG_SENDONLY = 1,
-	/*
-	 * For IPOIB_MCAST_FLAG_BUSY
-	 * When set, in flight join and mcast->mc is unreliable
-	 * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or
-	 *   haven't started yet
-	 * When clear and mcast->mc is valid pointer, join was successful
-	 */
-	IPOIB_MCAST_FLAG_BUSY	  = 2,
+	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
 	IPOIB_MCAST_FLAG_ATTACHED = 3,
+	IPOIB_MCAST_JOIN_STARTED  = 4,
 
 	MAX_SEND_CQE		  = 16,
 	IPOIB_CM_COPYBREAK	  = 256,
@@ -323,7 +317,6 @@ struct ipoib_dev_priv {
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct workqueue_struct *wq;
 	struct delayed_work mcast_task;
 	struct work_struct carrier_on_task;
 	struct work_struct flush_light;
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
 void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
-int ipoib_ib_dev_open(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev, int flush);
 int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_down(struct net_device *dev, int flush);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
 void ipoib_pkey_dev_check_presence(struct net_device *dev);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959adb6c7d..933efcea0d03 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 	}
 
 	spin_lock_irq(&priv->lock);
-	queue_delayed_work(priv->wq,
+	queue_delayed_work(ipoib_workqueue,
 			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	/* Add this entry to passive ids list head, but do not re-add it
 	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		spin_lock_irqsave(&priv->lock, flags);
 		list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
 		ipoib_cm_start_rx_drain(priv);
-		queue_work(priv->wq, &priv->cm.rx_reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	} else
 		ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&p->list, &priv->cm.rx_reap_list);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		queue_work(priv->wq, &priv->cm.rx_reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
 	}
 	return;
 }
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 			list_move(&tx->list, &priv->cm.reap_list);
-			queue_work(priv->wq, &priv->cm.reap_task);
+			queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		}
 
 		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 			list_move(&tx->list, &priv->cm.reap_list);
-			queue_work(priv->wq, &priv->cm.reap_task);
+			queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		}
 
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 	tx->dev = dev;
 	list_add(&tx->list, &priv->cm.start_list);
 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
-	queue_work(priv->wq, &priv->cm.start_task);
+	queue_work(ipoib_workqueue, &priv->cm.start_task);
 	return tx;
 }
 
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&tx->list, &priv->cm.reap_list);
-		queue_work(priv->wq, &priv->cm.reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
 			  tx->neigh->daddr + 4);
 		tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 
 	skb_queue_tail(&priv->cm.skb_queue, skb);
 	if (e)
-		queue_work(priv->wq, &priv->cm.skb_task);
+		queue_work(ipoib_workqueue, &priv->cm.skb_task);
 }
 
 static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 	}
 
 	if (!list_empty(&priv->cm.passive_ids))
-		queue_delayed_work(priv->wq,
+		queue_delayed_work(ipoib_workqueue,
 				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	spin_unlock_irq(&priv->lock);
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fe65abb5150c..72626c348174 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
655 __ipoib_reap_ah(dev); 655 __ipoib_reap_ah(dev);
656 656
657 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) 657 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
658 queue_delayed_work(priv->wq, &priv->ah_reap_task, 658 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
659 round_jiffies_relative(HZ)); 659 round_jiffies_relative(HZ));
660} 660}
661 661
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
664 drain_tx_cq((struct net_device *)ctx); 664 drain_tx_cq((struct net_device *)ctx);
665} 665}
666 666
667int ipoib_ib_dev_open(struct net_device *dev) 667int ipoib_ib_dev_open(struct net_device *dev, int flush)
668{ 668{
669 struct ipoib_dev_priv *priv = netdev_priv(dev); 669 struct ipoib_dev_priv *priv = netdev_priv(dev);
670 int ret; 670 int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	}
 
 	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-	queue_delayed_work(priv->wq, &priv->ah_reap_task,
+	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 			   round_jiffies_relative(HZ));
 
 	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
 dev_stop:
 	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
 		napi_enable(&priv->napi);
-	ipoib_ib_dev_stop(dev);
+	ipoib_ib_dev_stop(dev, flush);
 	return -1;
 }
 
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
 	return ipoib_mcast_start_thread(dev);
 }
 
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
 	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
 	netif_carrier_off(dev);
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, flush);
 	ipoib_mcast_dev_flush(dev);
 
 	ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
 	local_bh_enable();
 }
 
-int ipoib_ib_dev_stop(struct net_device *dev)
+int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
@@ -880,7 +880,8 @@ timeout:
 	/* Wait for all AHs to be reaped */
 	set_bit(IPOIB_STOP_REAPER, &priv->flags);
 	cancel_delayed_work(&priv->ah_reap_task);
-	flush_workqueue(priv->wq);
+	if (flush)
+		flush_workqueue(ipoib_workqueue);
 
 	begin = jiffies;
 
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		    (unsigned long) dev);
 
 	if (dev->flags & IFF_UP) {
-		if (ipoib_ib_dev_open(dev)) {
+		if (ipoib_ib_dev_open(dev, 1)) {
 			ipoib_transport_dev_cleanup(dev);
 			return -ENODEV;
 		}
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 	}
 
 	if (level >= IPOIB_FLUSH_NORMAL)
-		ipoib_ib_dev_down(dev);
+		ipoib_ib_dev_down(dev, 0);
 
 	if (level == IPOIB_FLUSH_HEAVY) {
 		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
-			ipoib_ib_dev_stop(dev);
-		if (ipoib_ib_dev_open(dev) != 0)
+			ipoib_ib_dev_stop(dev, 0);
+		if (ipoib_ib_dev_open(dev, 0) != 0)
 			return;
 		if (netif_queue_stopped(dev))
 			netif_start_queue(dev);
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 	 */
 	ipoib_flush_paths(dev);
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 1);
 	ipoib_mcast_dev_flush(dev);
 
 	ipoib_transport_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6bad17d4d588..58b5aa3b6f2d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
 
 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
-	if (ipoib_ib_dev_open(dev)) {
+	if (ipoib_ib_dev_open(dev, 1)) {
 		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 			return 0;
 		goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
 	return 0;
 
 err_stop:
-	ipoib_ib_dev_stop(dev);
+	ipoib_ib_dev_stop(dev, 1);
 
 err_disable:
 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
 
 	netif_stop_queue(dev);
 
-	ipoib_ib_dev_down(dev);
-	ipoib_ib_dev_stop(dev);
+	ipoib_ib_dev_down(dev, 1);
+	ipoib_ib_dev_stop(dev, 0);
 
 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 		struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
 		return;
 	}
 
-	queue_work(priv->wq, &priv->restart_task);
+	queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
 	__ipoib_reap_neigh(priv);
 
 	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+		queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
 				   arp_tbl.gc_interval);
 }
 
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
 	/* start garbage collection */
 	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
-	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+	queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
 			   arp_tbl.gc_interval);
 
 	return 0;
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	if (ipoib_neigh_hash_init(priv) < 0)
+		goto out;
 	/* Allocate RX/TX "rings" to hold queued skbs */
 	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
 				GFP_KERNEL);
 	if (!priv->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
 		       ca->name, ipoib_recvq_size);
-		goto out;
+		goto out_neigh_hash_cleanup;
 	}
 
 	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	if (ipoib_ib_dev_init(dev, ca, port))
 		goto out_tx_ring_cleanup;
 
-	/*
-	 * Must be after ipoib_ib_dev_init so we can allocate a per
-	 * device wq there and use it here
-	 */
-	if (ipoib_neigh_hash_init(priv) < 0)
-		goto out_dev_uninit;
-
 	return 0;
 
-out_dev_uninit:
-	ipoib_ib_dev_cleanup(dev);
-
 out_tx_ring_cleanup:
 	vfree(priv->tx_ring);
 
 out_rx_ring_cleanup:
 	kfree(priv->rx_ring);
 
+out_neigh_hash_cleanup:
+	ipoib_neigh_hash_uninit(dev);
 out:
 	return -ENOMEM;
 }
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
 	}
 	unregister_netdevice_many(&head);
 
-	/*
-	 * Must be before ipoib_ib_dev_cleanup or we delete an in use
-	 * work queue
-	 */
-	ipoib_neigh_hash_uninit(dev);
-
 	ipoib_ib_dev_cleanup(dev);
 
 	kfree(priv->rx_ring);
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
 	priv->rx_ring = NULL;
 	priv->tx_ring = NULL;
+
+	ipoib_neigh_hash_uninit(dev);
 }
 
 static const struct header_ops ipoib_header_ops = {
@@ -1646,7 +1636,7 @@ register_failed:
 	/* Stop GC if started before flush */
 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 	cancel_delayed_work(&priv->neigh_reap_task);
-	flush_workqueue(priv->wq);
+	flush_workqueue(ipoib_workqueue);
 
 event_failed:
 	ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device)
 		/* Stop GC */
 		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 		cancel_delayed_work(&priv->neigh_reap_task);
-		flush_workqueue(priv->wq);
+		flush_workqueue(ipoib_workqueue);
 
 		unregister_netdev(priv->dev);
 		free_netdev(priv->dev);
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void)
 	 * unregister_netdev() and linkwatch_event take the rtnl lock,
 	 * so flush_scheduled_work() can deadlock during device
 	 * removal.
-	 *
-	 * In addition, bringing one device up and another down at the
-	 * same time can deadlock a single workqueue, so we have this
-	 * global fallback workqueue, but we also attempt to open a
-	 * per device workqueue each time we bring an interface up
 	 */
-	ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+	ipoib_workqueue = create_singlethread_workqueue("ipoib");
 	if (!ipoib_workqueue) {
 		ret = -ENOMEM;
 		goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index bc50dd0d0e4d..ffb83b5f7e80 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 		spin_unlock_irq(&priv->lock);
 		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
 		set_qkey = 1;
+
+		if (!ipoib_cm_admin_enabled(dev)) {
+			rtnl_lock();
+			dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+			rtnl_unlock();
+		}
 	}
 
 	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status,
 	struct ipoib_mcast *mcast = multicast->context;
 	struct net_device *dev = mcast->dev;
 
-	/*
-	 * We have to take the mutex to force mcast_sendonly_join to
-	 * return from ib_sa_multicast_join and set mcast->mc to a
-	 * valid value. Otherwise we were racing with ourselves in
-	 * that we might fail here, but get a valid return from
-	 * ib_sa_multicast_join after we had cleared mcast->mc here,
-	 * resulting in mis-matched joins and leaves and a deadlock
-	 */
-	mutex_lock(&mcast_mutex);
-
 	/* We trap for port events ourselves. */
 	if (status == -ENETRESET)
-		goto out;
+		return 0;
 
 	if (!status)
 		status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
 	if (status) {
 		if (mcast->logcount++ < 20)
-			ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
-					"join failed for %pI6, status %d\n",
+			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
 					mcast->mcmember.mgid.raw, status);
 
 		/* Flush out any queued packets */
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status,
 			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 		}
 		netif_tx_unlock_bh(dev);
+
+		/* Clear the busy flag so we try again */
+		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
+					    &mcast->flags);
 	}
-out:
-	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-	if (status)
-		mcast->mc = NULL;
-	complete(&mcast->done);
-	if (status == -ENETRESET)
-		status = 0;
-	mutex_unlock(&mcast_mutex);
 	return status;
 }
 
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 	int ret = 0;
 
 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
-		ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
-				"multicast joins\n");
+		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
 		return -ENODEV;
 	}
 
-	if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
-		ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
-				"sendonly join\n");
+	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
 		return -EBUSY;
 	}
 
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 	rec.port_gid = priv->local_gid;
 	rec.pkey     = cpu_to_be16(priv->pkey);
 
-	mutex_lock(&mcast_mutex);
-	init_completion(&mcast->done);
-	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
 	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
 					 priv->port, &rec,
 					 IB_SA_MCMEMBER_REC_MGID |
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 	if (IS_ERR(mcast->mc)) {
 		ret = PTR_ERR(mcast->mc);
 		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-		complete(&mcast->done);
-		ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
-			   "failed (ret = %d)\n", ret);
+		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
+			   ret);
 	} else {
-		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
-				"sendonly join\n", mcast->mcmember.mgid.raw);
+		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
+				mcast->mcmember.mgid.raw);
 	}
-	mutex_unlock(&mcast_mutex);
 
 	return ret;
 }
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
 					   carrier_on_task);
 	struct ib_port_attr attr;
 
+	/*
+	 * Take rtnl_lock to avoid racing with ipoib_stop() and
+	 * turning the carrier back on while a device is being
+	 * removed.
+	 */
 	if (ib_query_port(priv->ca, priv->port, &attr) ||
 	    attr.state != IB_PORT_ACTIVE) {
 		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
 		return;
 	}
 
-	/*
-	 * Take rtnl_lock to avoid racing with ipoib_stop() and
-	 * turning the carrier back on while a device is being
-	 * removed. However, ipoib_stop() will attempt to flush
-	 * the workqueue while holding the rtnl lock, so loop
-	 * on trylock until either we get the lock or we see
-	 * FLAG_ADMIN_UP go away as that signals that we are bailing
-	 * and can safely ignore the carrier on work.
-	 */
-	while (!rtnl_trylock()) {
-		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-			return;
-		else
-			msleep(20);
-	}
-	if (!ipoib_cm_admin_enabled(priv->dev))
-		dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+	rtnl_lock();
 	netif_carrier_on(priv->dev);
 	rtnl_unlock();
 }
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status,
 	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
 			mcast->mcmember.mgid.raw, status);
 
-	/*
-	 * We have to take the mutex to force mcast_join to
-	 * return from ib_sa_multicast_join and set mcast->mc to a
-	 * valid value. Otherwise we were racing with ourselves in
-	 * that we might fail here, but get a valid return from
-	 * ib_sa_multicast_join after we had cleared mcast->mc here,
-	 * resulting in mis-matched joins and leaves and a deadlock
-	 */
-	mutex_lock(&mcast_mutex);
-
 	/* We trap for port events ourselves. */
-	if (status == -ENETRESET)
+	if (status == -ENETRESET) {
+		status = 0;
 		goto out;
+	}
 
 	if (!status)
 		status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
 	if (!status) {
 		mcast->backoff = 1;
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task, 0);
+		mutex_unlock(&mcast_mutex);
 
 		/*
-		 * Defer carrier on work to priv->wq to avoid a
+		 * Defer carrier on work to ipoib_workqueue to avoid a
 		 * deadlock on rtnl_lock here.
 		 */
 		if (mcast == priv->broadcast)
-			queue_work(priv->wq, &priv->carrier_on_task);
-	} else {
-		if (mcast->logcount++ < 20) {
-			if (status == -ETIMEDOUT || status == -EAGAIN) {
-				ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
-						mcast->mcmember.mgid.raw, status);
-			} else {
-				ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
-					   mcast->mcmember.mgid.raw, status);
-			}
-		}
+			queue_work(ipoib_workqueue, &priv->carrier_on_task);
 
-		mcast->backoff *= 2;
-		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
-			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+		status = 0;
+		goto out;
 	}
-out:
+
+	if (mcast->logcount++ < 20) {
+		if (status == -ETIMEDOUT || status == -EAGAIN) {
+			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+					mcast->mcmember.mgid.raw, status);
+		} else {
+			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+				   mcast->mcmember.mgid.raw, status);
+		}
+	}
+
+	mcast->backoff *= 2;
+	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+
+	/* Clear the busy flag so we try again */
+	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+	mutex_lock(&mcast_mutex);
 	spin_lock_irq(&priv->lock);
-	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-	if (status)
-		mcast->mc = NULL;
-	complete(&mcast->done);
-	if (status == -ENETRESET)
-		status = 0;
-	if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->mcast_task,
+	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
+		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
 				   mcast->backoff * HZ);
 	spin_unlock_irq(&priv->lock);
 	mutex_unlock(&mcast_mutex);
-
+out:
+	complete(&mcast->done);
 	return status;
 }
 
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
 	}
 
-	mutex_lock(&mcast_mutex);
-	init_completion(&mcast->done);
 	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+	init_completion(&mcast->done);
+	set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
 	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
 					 &rec, comp_mask, GFP_KERNEL,
 					 ipoib_mcast_join_complete, mcast);
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
 			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
 
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task,
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task,
 					   mcast->backoff * HZ);
+		mutex_unlock(&mcast_mutex);
 	}
-	mutex_unlock(&mcast_mutex);
 }
 
 void ipoib_mcast_join_task(struct work_struct *work)
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
 			ipoib_warn(priv, "failed to allocate broadcast group\n");
 			mutex_lock(&mcast_mutex);
 			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-				queue_delayed_work(priv->wq, &priv->mcast_task,
-						   HZ);
+				queue_delayed_work(ipoib_workqueue,
+						   &priv->mcast_task, HZ);
 			mutex_unlock(&mcast_mutex);
 			return;
 		}
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
 	}
 
 	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-		if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
-		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
 			ipoib_mcast_join(dev, priv->broadcast, 0);
 		return;
 	}
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
 	while (1) {
 		struct ipoib_mcast *mcast = NULL;
 
-		/*
-		 * Need the mutex so our flags are consistent, need the
-		 * priv->lock so we don't race with list removals in either
-		 * mcast_dev_flush or mcast_restart_task
-		 */
-		mutex_lock(&mcast_mutex);
 		spin_lock_irq(&priv->lock);
 		list_for_each_entry(mcast, &priv->multicast_list, list) {
-			if (IS_ERR_OR_NULL(mcast->mc) &&
-			    !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
-			    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
+			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
+			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
 				/* Found the next unjoined group */
 				break;
 			}
 		}
 		spin_unlock_irq(&priv->lock);
-		mutex_unlock(&mcast_mutex);
 
 		if (&mcast->list == &priv->multicast_list) {
 			/* All done */
 			break;
 		}
 
-		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-			ipoib_mcast_sendonly_join(mcast);
-		else
-			ipoib_mcast_join(dev, mcast, 1);
+		ipoib_mcast_join(dev, mcast, 1);
 		return;
 	}
 
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
 	mutex_lock(&mcast_mutex);
 	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
 	mutex_unlock(&mcast_mutex);
 
 	return 0;
 }
 
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
 	cancel_delayed_work(&priv->mcast_task);
 	mutex_unlock(&mcast_mutex);
 
-	flush_workqueue(priv->wq);
+	if (flush)
+		flush_workqueue(ipoib_workqueue);
 
 	return 0;
 }
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 	int ret = 0;
 
 	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-		ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
-
-	if (!IS_ERR_OR_NULL(mcast->mc))
 		ib_sa_free_multicast(mcast->mc);
 
 	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
 		__ipoib_mcast_add(dev, mcast);
 		list_add_tail(&mcast->list, &priv->multicast_list);
-		if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task, 0);
 	}
 
 	if (!mcast->ah) {
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
 			ipoib_dbg_mcast(priv, "no address vector, "
 					"but multicast join already started\n");
+		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+			ipoib_mcast_sendonly_join(mcast);
 
 		/*
 		 * If lookup completes between here and out:, don't
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
+	/* seperate between the wait to the leave*/
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+		if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
 			wait_for_completion(&mcast->done);
 
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 
 	ipoib_dbg_mcast(priv, "restarting multicast task\n");
 
+	ipoib_mcast_stop_thread(dev, 0);
+
 	local_irq_save(flags);
 	netif_addr_lock(dev);
 	spin_lock(&priv->lock);
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	netif_addr_unlock(dev);
 	local_irq_restore(flags);
 
-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
-	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-			wait_for_completion(&mcast->done);
-
-	/*
-	 * We have to cancel outside of the spinlock, but we have to
-	 * take the rtnl lock or else we race with the removal of
-	 * entries from the remove list in mcast_dev_flush as part
-	 * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
-	 * to signal that we have hit this particular race, and we
-	 * return since we know we don't need to do anything else
-	 * anyway.
-	 */
-	while (!rtnl_trylock()) {
-		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-			return;
-		else
-			msleep(20);
-	}
+	/* We have to cancel outside of the spinlock */
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
 		ipoib_mcast_leave(mcast->dev, mcast);
 		ipoib_mcast_free(mcast);
 	}
-	/*
-	 * Restart our join task if needed
-	 */
-	ipoib_mcast_start_thread(dev);
-	rtnl_unlock();
+
+	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+		ipoib_mcast_start_thread(dev);
 }
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b72a753eb41d..c56d5d44c53b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	int ret, size;
 	int i;
 
-	/*
-	 * the various IPoIB tasks assume they will never race against
-	 * themselves, so always use a single thread workqueue
-	 */
-	priv->wq = create_singlethread_workqueue("ipoib_wq");
-	if (!priv->wq) {
-		printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
-		return -ENODEV;
-	}
-
 	priv->pd = ib_alloc_pd(priv->ca);
 	if (IS_ERR(priv->pd)) {
 		printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
-		goto out_free_wq;
+		return -ENODEV;
 	}
 
 	priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
 
 out_free_pd:
 	ib_dealloc_pd(priv->pd);
-
-out_free_wq:
-	destroy_workqueue(priv->wq);
-	priv->wq = NULL;
 	return -ENODEV;
 }
 
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
 
 	if (ib_dealloc_pd(priv->pd))
 		ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
-	if (priv->wq) {
-		flush_workqueue(priv->wq);
-		destroy_workqueue(priv->wq);
-		priv->wq = NULL;
-	}
 }
 
 void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 77ecf6d32237..6e22682c8255 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX31               0x361f00        20, 15, 0e      clickpad
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+ * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
 		},
 	},
+	{
+		/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+		},
+	},
+	{
+		/* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+		},
+	},
 #endif
 	{ }
 };
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f9472920d986..23e26e0768b5 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
 		1232, 5710, 1156, 4696
 	},
 	{
-		(const char * const []){"LEN0034", "LEN0036", "LEN0039",
-					"LEN2002", "LEN2004", NULL},
+		(const char * const []){"LEN0034", "LEN0036", "LEN0037",
+					"LEN0039", "LEN2002", "LEN2004",
+					NULL},
 		1024, 5112, 2024, 4832
 	},
 	{
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
165 "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ 166 "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
166 "LEN0035", /* X240 */ 167 "LEN0035", /* X240 */
167 "LEN0036", /* T440 */ 168 "LEN0036", /* T440 */
168 "LEN0037", 169 "LEN0037", /* X1 Carbon 2nd */
169 "LEN0038", 170 "LEN0038",
170 "LEN0039", /* T440s */ 171 "LEN0039", /* T440s */
171 "LEN0041", 172 "LEN0041",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 764857b4e268..c11556563ef0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
 		},
 	},
 	{
+		/* Medion Akoya E7225 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+		},
+	},
+	{
 		/* Blue FB5601 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "blue"),
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index f722a0c466cf..c48da057dbb1 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
 	.attach_dev	= gart_iommu_attach_dev,
 	.detach_dev	= gart_iommu_detach_dev,
 	.map		= gart_iommu_map,
+	.map_sg		= default_iommu_map_sg,
 	.unmap		= gart_iommu_unmap,
 	.iova_to_phys	= gart_iommu_iova_to_phys,
 	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	do_gart_setup(gart, NULL);
 
 	gart_handle = gart;
-	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
 	return 0;
 }
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 2b0468e3df6a..56b96c63dc4b 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -37,6 +37,7 @@ static struct irq_domain *gic_irq_domain;
 static int gic_shared_intrs;
 static int gic_vpes;
 static unsigned int gic_cpu_pin;
+static unsigned int timer_cpu_pin;
 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
 
 static void __gic_irq_dispatch(void);
@@ -616,6 +617,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
 		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
 		break;
 	case GIC_LOCAL_INT_TIMER:
+		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
+		val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
 		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
 		break;
 	case GIC_LOCAL_INT_PERFCTR:
@@ -713,12 +716,36 @@ static void __init __gic_init(unsigned long gic_base_addr,
 	if (cpu_has_veic) {
 		/* Always use vector 1 in EIC mode */
 		gic_cpu_pin = 0;
+		timer_cpu_pin = gic_cpu_pin;
 		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
 			       __gic_irq_dispatch);
 	} else {
 		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
 		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
 					gic_irq_dispatch);
+		/*
+		 * With the CMP implementation of SMP (deprecated), other CPUs
+		 * are started by the bootloader and put into a timer based
+		 * waiting poll loop. We must not re-route those CPU's local
+		 * timer interrupts as the wait instruction will never finish,
+		 * so just handle whatever CPU interrupt it is routed to by
+		 * default.
+		 *
+		 * This workaround should be removed when CMP support is
+		 * dropped.
+		 */
+		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
+		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
+			timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
+							 GIC_VPE_TIMER_MAP)) &
+					GIC_MAP_MSK;
+			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
+						GIC_CPU_PIN_OFFSET +
+						timer_cpu_pin,
+						gic_irq_dispatch);
+		} else {
+			timer_cpu_pin = gic_cpu_pin;
+		}
 	}
 
 	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 0b380603a578..d7c286656a25 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
 			add_ai(plci, &parms[5]);
 			sig_req(plci, REJECT, 0);
 		}
-		else if (Reject == 1 || Reject > 9)
+		else if (Reject == 1 || Reject >= 9)
 		{
 			add_ai(plci, &parms[5]);
 			sig_req(plci, HANGUP, 0);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 5bdedf6df153..c355a226a024 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -5,6 +5,7 @@
 menuconfig MD
 	bool "Multiple devices driver support (RAID and LVM)"
 	depends on BLOCK
+	select SRCU
 	help
 	  Support multiple physical spindles through a single logical device.
 	  Required for RAID and logical volume management.
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index da3604e73e8a..1695ee5f3ffc 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -72,6 +72,19 @@ __acquires(bitmap->lock)
 		/* this page has not been allocated yet */
 
 		spin_unlock_irq(&bitmap->lock);
+		/* It is possible that this is being called inside a
+		 * prepare_to_wait/finish_wait loop from raid5c:make_request().
+		 * In general it is not permitted to sleep in that context as it
+		 * can cause the loop to spin freely.
+		 * That doesn't apply here as we can only reach this point
+		 * once with any loop.
+		 * When this function completes, either bp[page].map or
+		 * bp[page].hijacked. In either case, this function will
+		 * abort before getting to this point again. So there is
+		 * no risk of a free-spin, and so it is safe to assert
+		 * that sleeping here is allowed.
+		 */
+		sched_annotate_sleep();
 		mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
 		spin_lock_irq(&bitmap->lock);
 
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 21b156242e42..c1c010498a21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd) {
 		DMERR("could not allocate metadata struct");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	atomic_set(&cmd->ref_count, 1);
@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
 		return cmd;
 
 	cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
-	if (cmd) {
+	if (!IS_ERR(cmd)) {
 		mutex_lock(&table_lock);
 		cmd2 = lookup(bdev);
 		if (cmd2) {
@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
 {
 	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
 						       may_format_device, policy_hint_size);
-	if (cmd && !same_params(cmd, data_block_size)) {
+
+	if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
 		dm_cache_metadata_close(cmd);
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	return cmd;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 493478989dbd..07705ee181e3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
+	if (get_pool_mode(pool) >= PM_READ_ONLY) {
+		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+		      dm_device_name(pool->pool_md));
+		return -EINVAL;
+	}
+
 	if (!strcasecmp(argv[0], "create_thin"))
 		r = process_create_thin_mesg(argc, argv, pool);
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c1b0d52bfcb0..b98765f6f77f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 			  (unsigned long long)sh->sector,
 			  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
 	}
+
+	if (rcw > disks && rmw > disks &&
+	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+		set_bit(STRIPE_DELAYED, &sh->state);
+
 	/* now if nothing is locked, and if we have enough data,
 	 * we can start a write request
 	 */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d6607ee9c855..84673ebcf428 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC
 
 config NETPOLL
 	def_bool NETCONSOLE
+	select SRCU
 
 config NET_POLL_CONTROLLER
 	def_bool NETPOLL
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 5e40a8b68cbe..b3b922adc0e4 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
 
 	cfhsi = netdev_priv(dev);
 	cfhsi_netlink_parms(data, cfhsi);
-	dev_net_set(cfhsi->ndev, src_net);
 
 	get_ops = symbol_get(cfhsi_get_ops);
 	if (!get_ops) {
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f94a9fa60488..c672c4dcffac 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
 
 	c_can_irq_control(priv, false);
 
+	/* put ctrl to init on stop to end ongoing transmission */
+	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
 	/* deactivate pins */
 	pinctrl_pm_select_sleep_state(dev->dev.parent);
 	priv->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index c32cd61073bc..7af379ca861b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
 			  usb_sndbulkpipe(dev->udev,
 					  dev->bulk_out->bEndpointAddress),
 			  buf, msg->len,
-			  kvaser_usb_simple_msg_callback, priv);
+			  kvaser_usb_simple_msg_callback, netdev);
 	usb_anchor_urb(urb, &priv->tx_submitted);
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 	priv = dev->nets[channel];
 	stats = &priv->netdev->stats;
 
-	if (status & M16C_STATE_BUS_RESET) {
-		kvaser_usb_unlink_tx_urbs(priv);
-		return;
-	}
-
 	skb = alloc_can_err_skb(priv->netdev, &cf);
 	if (!skb) {
 		stats->rx_dropped++;
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
 	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
 
-	if (status & M16C_STATE_BUS_OFF) {
+	if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
 		cf->can_id |= CAN_ERR_BUSOFF;
 
 		priv->can.can_stats.bus_off++;
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 	}
 
 		new_state = CAN_STATE_ERROR_PASSIVE;
-	}
-
-	if (status == M16C_STATE_BUS_ERROR) {
+	} else if (status & M16C_STATE_BUS_ERROR) {
 		if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
 		    ((txerr >= 96) || (rxerr >= 96))) {
 			cf->can_id |= CAN_ERR_CRTL;
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
 			priv->can.can_stats.error_warning++;
 			new_state = CAN_STATE_ERROR_WARNING;
-		} else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+		} else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+			   ((txerr < 96) && (rxerr < 96))) {
 			cf->can_id |= CAN_ERR_PROT;
 			cf->data[2] = CAN_ERR_PROT_ACTIVE;
 
@@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
 	struct kvaser_usb *dev;
 	int err = -ENOMEM;
-	int i;
+	int i, retry = 3;
 
 	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
 	if (!dev)
@@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
 	usb_set_intfdata(intf, dev);
 
-	err = kvaser_usb_get_software_info(dev);
+	/* On some x86 laptops, plugging a Kvaser device again after
+	 * an unplug makes the firmware always ignore the very first
+	 * command. For such a case, provide some room for retries
+	 * instead of completely exiting the driver.
+	 */
+	do {
+		err = kvaser_usb_get_software_info(dev);
+	} while (--retry && err == -ETIMEDOUT);
+
 	if (err) {
 		dev_err(&intf->dev,
 			"Cannot get software infos, error %d\n", err);
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 7a5e4aa5415e..77f1f6048ddd 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -45,7 +45,7 @@ config AMD8111_ETH
 
 config LANCE
 	tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-	depends on ISA && ISA_DMA_API
+	depends on ISA && ISA_DMA_API && !ARM
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y and read
 	  the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
 
 config NI65
 	tristate "NI6510 support"
-	depends on ISA && ISA_DMA_API
+	depends on ISA && ISA_DMA_API && !ARM
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y and read
 	  the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 5b22764ba88d..27245efe9f50 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
   do {
     /* WARNING: MACE_IR is a READ/CLEAR port! */
     status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+    if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
+      return IRQ_NONE;
 
     pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 75b08c63d39f..29a09271b64a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -767,16 +767,17 @@
 #define MTL_Q_RQOMR			0x40
 #define MTL_Q_RQMPOCR			0x44
 #define MTL_Q_RQDR			0x4c
+#define MTL_Q_RQFCR			0x50
 #define MTL_Q_IER			0x70
 #define MTL_Q_ISR			0x74
 
 /* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX		1
+#define MTL_Q_RQFCR_RFA_WIDTH		6
+#define MTL_Q_RQFCR_RFD_INDEX		17
+#define MTL_Q_RQFCR_RFD_WIDTH		6
 #define MTL_Q_RQOMR_EHFC_INDEX		7
 #define MTL_Q_RQOMR_EHFC_WIDTH		1
-#define MTL_Q_RQOMR_RFA_INDEX		8
-#define MTL_Q_RQOMR_RFA_WIDTH		3
-#define MTL_Q_RQOMR_RFD_INDEX		13
-#define MTL_Q_RQOMR_RFD_WIDTH		3
 #define MTL_Q_RQOMR_RQS_INDEX		16
 #define MTL_Q_RQOMR_RQS_WIDTH		9
 #define MTL_Q_RQOMR_RSF_INDEX		5
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 53f5f66ec2ee..4c66cd1d1e60 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
2079 2079
2080 for (i = 0; i < pdata->rx_q_count; i++) { 2080 for (i = 0; i < pdata->rx_q_count; i++) {
2081 /* Activate flow control when less than 4k left in fifo */ 2081 /* Activate flow control when less than 4k left in fifo */
2082 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); 2082 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
2083 2083
2084 /* De-activate flow control when more than 6k left in fifo */ 2084 /* De-activate flow control when more than 6k left in fifo */
2085 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4); 2085 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
2086 } 2086 }
2087} 2087}
2088 2088
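
The two xgbe hunks above belong together: on this hardware the RFA/RFD flow-control thresholds live in the per-queue MTL_Q_RQFCR register (as wider, 6-bit fields), not in MTL_Q_RQOMR, so both the register definitions and the writes in xgbe_config_flow_control_threshold() move. The *_INDEX/*_WIDTH pairs feed a read-modify-write helper; a simplified sketch of what XGMAC_MTL_IOWRITE_BITS boils down to (the real macro reads and writes the queue's MMIO block):

static inline u32 xgmac_set_bits(u32 reg, unsigned int index,
				 unsigned int width, u32 val)
{
	u32 mask = (((u32)1 << width) - 1) << index;

	return (reg & ~mask) | ((val << index) & mask);
}

/* usage sketch: RFA = 2 lands in MTL_Q_RQFCR bits [6:1], e.g.
 * rqfcr = xgmac_set_bits(rqfcr, MTL_Q_RQFCR_RFA_INDEX,
 *			  MTL_Q_RQFCR_RFA_WIDTH, 2);
 */
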
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7bb5f07dbeef..e5ffb2ccb67d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
523 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); 523 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
524 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); 524 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
525 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); 525 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
526 hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
526 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); 527 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
527 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 528 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
528 HASHTBLSZ); 529 HASHTBLSZ);
@@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
552 break; 553 break;
553 } 554 }
554 555
555 /* The Queue and Channel counts are zero based so increment them 556 /* The Queue, Channel and TC counts are zero based so increment them
556 * to get the actual number 557 * to get the actual number
557 */ 558 */
558 hw_feat->rx_q_cnt++; 559 hw_feat->rx_q_cnt++;
559 hw_feat->tx_q_cnt++; 560 hw_feat->tx_q_cnt++;
560 hw_feat->rx_ch_cnt++; 561 hw_feat->rx_ch_cnt++;
561 hw_feat->tx_ch_cnt++; 562 hw_feat->tx_ch_cnt++;
563 hw_feat->tc_cnt++;
562 564
563 DBGPR("<--xgbe_get_all_hw_features\n"); 565 DBGPR("<--xgbe_get_all_hw_features\n");
564} 566}
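
As with the queue and channel counts, the NUMTC field in MAC_HWF1R is reported zero-based by the hardware (a raw value of 0 means one traffic class), so tc_cnt joins the post-read increments; the new rss capability flag is read from the same register. An equivalent one-step form, as a sketch:

	/* zero-based field: raw 0 means one traffic class */
	hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC) + 1;
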
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 83a50280bb70..793f3b73eeff 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -369,6 +369,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
369 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) 369 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
370 break; 370 break;
371 371
372 /* read fpqnum field after dataaddr field */
373 dma_rmb();
372 if (is_rx_desc(raw_desc)) 374 if (is_rx_desc(raw_desc))
373 ret = xgene_enet_rx_frame(ring, raw_desc); 375 ret = xgene_enet_rx_frame(ring, raw_desc);
374 else 376 else
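
The dma_rmb() added above enforces read ordering against a descriptor the NIC writes via coherent DMA: the CPU must observe the slot-occupied indication before it reads the other descriptor words (here, fpqnum after dataaddr), and dma_rmb() is the lightweight barrier for exactly this producer/consumer case. The general shape, with a hypothetical descriptor layout:

#include <linux/kernel.h>
#include <linux/printk.h>

struct demo_desc {		/* hypothetical device-written descriptor */
	u8  empty;		/* ownership/valid word */
	u64 fpqnum;
	u64 dataaddr;
};

static int demo_process_desc(const volatile struct demo_desc *desc)
{
	u64 fpqnum, addr;

	if (desc->empty)
		return -EAGAIN;		/* device hasn't filled the slot yet */
	dma_rmb();			/* ownership read before payload reads */
	fpqnum = desc->fpqnum;		/* now safe to consume */
	addr   = desc->dataaddr;
	pr_debug("desc: fpq %llu addr %llx\n", fpqnum, addr);
	return 0;
}
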
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1d1147c93d59..e468ed3f210f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
3175 } 3175 }
3176#endif 3176#endif
3177 if (!bnx2x_fp_lock_napi(fp)) 3177 if (!bnx2x_fp_lock_napi(fp))
3178 return work_done; 3178 return budget;
3179 3179
3180 for_each_cos_in_tx_queue(fp, cos) 3180 for_each_cos_in_tx_queue(fp, cos)
3181 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) 3181 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
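
This bnx2x change (and the matching ones in the enic, netxen and qlcnic hunks further down) restores the NAPI polling contract: a poll callback that could not actually do its work must return the full budget so the core schedules it again, whereas returning work_done (possibly 0) permits completion semantics and can leave the queue stalled with its interrupt still masked. A minimal sketch of the contract, with a hypothetical per-queue structure and a stubbed RX routine:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct demo_queue {			/* hypothetical */
	struct napi_struct napi;
	spinlock_t lock;
};

static int demo_process_rx(struct demo_queue *q, int budget);	/* elsewhere */

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_queue *q = container_of(napi, struct demo_queue, napi);
	int work_done;

	if (!spin_trylock(&q->lock))
		return budget;		/* made no progress: ask for a repoll */

	work_done = demo_process_rx(q, budget);
	if (work_done < budget)
		napi_complete(napi);	/* only when genuinely finished */

	spin_unlock(&q->lock);
	return work_done;
}
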
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 7403dff8f14a..905ac5f5d9a6 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -32,7 +32,8 @@ config CS89x0
32 will be called cs89x0. 32 will be called cs89x0.
33 33
34config CS89x0_PLATFORM 34config CS89x0_PLATFORM
35 bool "CS89x0 platform driver support" 35 bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
36 default !HAS_IOPORT_MAP
36 depends on CS89x0 37 depends on CS89x0
37 help 38 help
38 Say Y to compile the cs89x0 driver as a platform driver. This 39 Say Y to compile the cs89x0 driver as a platform driver. This
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b29e027c476e..e356afa44e7d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
1335 int err; 1335 int err;
1336 1336
1337 if (!enic_poll_lock_napi(&enic->rq[rq])) 1337 if (!enic_poll_lock_napi(&enic->rq[rq]))
1338 return work_done; 1338 return budget;
1339 /* Service RQ 1339 /* Service RQ
1340 */ 1340 */
1341 1341
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3e1a9c1a67a9..fda12fb32ec7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1586 return -EBUSY; 1586 return -EBUSY;
1587 1587
1588 /* Fill regular entries */ 1588 /* Fill regular entries */
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); 1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
1590 i++) 1590 i++)
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1592 /* Fill the rest with fall-troughs */ 1592 /* Fill the rest with fall-troughs */
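
The gianfar fix is a one-character copy-and-paste repair: the loop condition OR'd tab->fe[i].ctrl with itself, so an entry whose ctrl word happened to be zero terminated the table write early even when its prop word was populated.

	/* before: (tab->fe[i].ctrl | tab->fe[i].ctrl) - ctrl tested twice */
	/* after:  (tab->fe[i].ctrl | tab->fe[i].prop) - stop only at a
	 *	   genuinely empty slot, i.e. both words zero */
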
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 63c807c9b21c..edea13b0ee85 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
1907 1907
1908static int igbvf_tso(struct igbvf_adapter *adapter, 1908static int igbvf_tso(struct igbvf_adapter *adapter,
1909 struct igbvf_ring *tx_ring, 1909 struct igbvf_ring *tx_ring,
1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
1911 __be16 protocol)
1911{ 1912{
1912 struct e1000_adv_tx_context_desc *context_desc; 1913 struct e1000_adv_tx_context_desc *context_desc;
1913 struct igbvf_buffer *buffer_info; 1914 struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1927 l4len = tcp_hdrlen(skb); 1928 l4len = tcp_hdrlen(skb);
1928 *hdr_len += l4len; 1929 *hdr_len += l4len;
1929 1930
1930 if (skb->protocol == htons(ETH_P_IP)) { 1931 if (protocol == htons(ETH_P_IP)) {
1931 struct iphdr *iph = ip_hdr(skb); 1932 struct iphdr *iph = ip_hdr(skb);
1932 iph->tot_len = 0; 1933 iph->tot_len = 0;
1933 iph->check = 0; 1934 iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1958 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1959 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1959 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 1960 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1960 1961
1961 if (skb->protocol == htons(ETH_P_IP)) 1962 if (protocol == htons(ETH_P_IP))
1962 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1963 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1963 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1964 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1964 1965
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1984 1985
1985static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, 1986static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
1986 struct igbvf_ring *tx_ring, 1987 struct igbvf_ring *tx_ring,
1987 struct sk_buff *skb, u32 tx_flags) 1988 struct sk_buff *skb, u32 tx_flags,
1989 __be16 protocol)
1988{ 1990{
1989 struct e1000_adv_tx_context_desc *context_desc; 1991 struct e1000_adv_tx_context_desc *context_desc;
1990 unsigned int i; 1992 unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2011 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 2013 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2012 2014
2013 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2015 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2014 switch (skb->protocol) { 2016 switch (protocol) {
2015 case htons(ETH_P_IP): 2017 case htons(ETH_P_IP):
2016 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 2018 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2017 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2019 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2211 u8 hdr_len = 0; 2213 u8 hdr_len = 0;
2212 int count = 0; 2214 int count = 0;
2213 int tso = 0; 2215 int tso = 0;
2216 __be16 protocol = vlan_get_protocol(skb);
2214 2217
2215 if (test_bit(__IGBVF_DOWN, &adapter->state)) { 2218 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2216 dev_kfree_skb_any(skb); 2219 dev_kfree_skb_any(skb);
@@ -2239,13 +2242,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2239 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); 2242 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2240 } 2243 }
2241 2244
2242 if (skb->protocol == htons(ETH_P_IP)) 2245 if (protocol == htons(ETH_P_IP))
2243 tx_flags |= IGBVF_TX_FLAGS_IPV4; 2246 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2244 2247
2245 first = tx_ring->next_to_use; 2248 first = tx_ring->next_to_use;
2246 2249
2247 tso = skb_is_gso(skb) ? 2250 tso = skb_is_gso(skb) ?
2248 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; 2251 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
2249 if (unlikely(tso < 0)) { 2252 if (unlikely(tso < 0)) {
2250 dev_kfree_skb_any(skb); 2253 dev_kfree_skb_any(skb);
2251 return NETDEV_TX_OK; 2254 return NETDEV_TX_OK;
@@ -2253,7 +2256,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2253 2256
2254 if (tso) 2257 if (tso)
2255 tx_flags |= IGBVF_TX_FLAGS_TSO; 2258 tx_flags |= IGBVF_TX_FLAGS_TSO;
2256 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && 2259 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
2257 (skb->ip_summed == CHECKSUM_PARTIAL)) 2260 (skb->ip_summed == CHECKSUM_PARTIAL))
2258 tx_flags |= IGBVF_TX_FLAGS_CSUM; 2261 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2259 2262
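
The igbvf changes thread a protocol value, captured once with vlan_get_protocol(), through the TSO and checksum context setup. For frames the stack asks the driver to VLAN-tag, skb->protocol is ETH_P_8021Q and the real L3 ethertype sits behind the tag, so testing skb->protocol programmed the wrong offload context; vlan_get_protocol() returns the encapsulated type. The same correction appears in the ixgbe and ixgbevf hunks below. A sketch of the test:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static bool demo_is_ipv4(const struct sk_buff *skb)
{
	/* for sw-tagged frames skb->protocol is ETH_P_8021Q; this
	 * helper returns the ethertype behind the tag instead */
	return vlan_get_protocol(skb) == htons(ETH_P_IP);
}
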
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2ed2c7de2304..67b02bde179e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7227 if (!vhdr) 7227 if (!vhdr)
7228 goto out_drop; 7228 goto out_drop;
7229 7229
7230 protocol = vhdr->h_vlan_encapsulated_proto;
7231 tx_flags |= ntohs(vhdr->h_vlan_TCI) << 7230 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7232 IXGBE_TX_FLAGS_VLAN_SHIFT; 7231 IXGBE_TX_FLAGS_VLAN_SHIFT;
7233 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 7232 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7234 } 7233 }
7234 protocol = vlan_get_protocol(skb);
7235 7235
7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7237 adapter->ptp_clock && 7237 adapter->ptp_clock &&
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 62a0d8e0f17d..38c7a0be8197 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3101 3101
3102 if (skb->protocol == htons(ETH_P_IP)) { 3102 if (first->protocol == htons(ETH_P_IP)) {
3103 struct iphdr *iph = ip_hdr(skb); 3103 struct iphdr *iph = ip_hdr(skb);
3104 iph->tot_len = 0; 3104 iph->tot_len = 0;
3105 iph->check = 0; 3105 iph->check = 0;
@@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3156 3156
3157 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3157 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3158 u8 l4_hdr = 0; 3158 u8 l4_hdr = 0;
3159 switch (skb->protocol) { 3159 switch (first->protocol) {
3160 case htons(ETH_P_IP): 3160 case htons(ETH_P_IP):
3161 vlan_macip_lens |= skb_network_header_len(skb); 3161 vlan_macip_lens |= skb_network_header_len(skb);
3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a62fc38f045e..1c75829eb166 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
192#define IS_TSO_HEADER(txq, addr) \ 192#define IS_TSO_HEADER(txq, addr) \
193 ((addr >= txq->tso_hdrs_dma) && \ 193 ((addr >= txq->tso_hdrs_dma) && \
194 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) 194 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
195
196#define DESC_DMA_MAP_SINGLE 0
197#define DESC_DMA_MAP_PAGE 1
198
195/* 199/*
196 * RX/TX descriptors. 200 * RX/TX descriptors.
197 */ 201 */
@@ -362,6 +366,7 @@ struct tx_queue {
362 dma_addr_t tso_hdrs_dma; 366 dma_addr_t tso_hdrs_dma;
363 367
364 struct tx_desc *tx_desc_area; 368 struct tx_desc *tx_desc_area;
369 char *tx_desc_mapping; /* array to track the type of the dma mapping */
365 dma_addr_t tx_desc_dma; 370 dma_addr_t tx_desc_dma;
366 int tx_desc_area_size; 371 int tx_desc_area_size;
367 372
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
750 if (txq->tx_curr_desc == txq->tx_ring_size) 755 if (txq->tx_curr_desc == txq->tx_ring_size)
751 txq->tx_curr_desc = 0; 756 txq->tx_curr_desc = 0;
752 desc = &txq->tx_desc_area[tx_index]; 757 desc = &txq->tx_desc_area[tx_index];
758 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
753 759
754 desc->l4i_chk = 0; 760 desc->l4i_chk = 0;
755 desc->byte_cnt = length; 761 desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
879 skb_frag_t *this_frag; 885 skb_frag_t *this_frag;
880 int tx_index; 886 int tx_index;
881 struct tx_desc *desc; 887 struct tx_desc *desc;
882 void *addr;
883 888
884 this_frag = &skb_shinfo(skb)->frags[frag]; 889 this_frag = &skb_shinfo(skb)->frags[frag];
885 addr = page_address(this_frag->page.p) + this_frag->page_offset;
886 tx_index = txq->tx_curr_desc++; 890 tx_index = txq->tx_curr_desc++;
887 if (txq->tx_curr_desc == txq->tx_ring_size) 891 if (txq->tx_curr_desc == txq->tx_ring_size)
888 txq->tx_curr_desc = 0; 892 txq->tx_curr_desc = 0;
889 desc = &txq->tx_desc_area[tx_index]; 893 desc = &txq->tx_desc_area[tx_index];
894 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
890 895
891 /* 896 /*
892 * The last fragment will generate an interrupt 897 * The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
902 907
903 desc->l4i_chk = 0; 908 desc->l4i_chk = 0;
904 desc->byte_cnt = skb_frag_size(this_frag); 909 desc->byte_cnt = skb_frag_size(this_frag);
905 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr, 910 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
906 desc->byte_cnt, DMA_TO_DEVICE); 911 this_frag, 0, desc->byte_cnt,
912 DMA_TO_DEVICE);
907 } 913 }
908} 914}
909 915
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
936 if (txq->tx_curr_desc == txq->tx_ring_size) 942 if (txq->tx_curr_desc == txq->tx_ring_size)
937 txq->tx_curr_desc = 0; 943 txq->tx_curr_desc = 0;
938 desc = &txq->tx_desc_area[tx_index]; 944 desc = &txq->tx_desc_area[tx_index];
945 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
939 946
940 if (nr_frags) { 947 if (nr_frags) {
941 txq_submit_frag_skb(txq, skb); 948 txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1047 int tx_index; 1054 int tx_index;
1048 struct tx_desc *desc; 1055 struct tx_desc *desc;
1049 u32 cmd_sts; 1056 u32 cmd_sts;
1057 char desc_dma_map;
1050 1058
1051 tx_index = txq->tx_used_desc; 1059 tx_index = txq->tx_used_desc;
1052 desc = &txq->tx_desc_area[tx_index]; 1060 desc = &txq->tx_desc_area[tx_index];
1061 desc_dma_map = txq->tx_desc_mapping[tx_index];
1062
1053 cmd_sts = desc->cmd_sts; 1063 cmd_sts = desc->cmd_sts;
1054 1064
1055 if (cmd_sts & BUFFER_OWNED_BY_DMA) { 1065 if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1065 reclaimed++; 1075 reclaimed++;
1066 txq->tx_desc_count--; 1076 txq->tx_desc_count--;
1067 1077
1068 if (!IS_TSO_HEADER(txq, desc->buf_ptr)) 1078 if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
1069 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, 1079
1070 desc->byte_cnt, DMA_TO_DEVICE); 1080 if (desc_dma_map == DESC_DMA_MAP_PAGE)
1081 dma_unmap_page(mp->dev->dev.parent,
1082 desc->buf_ptr,
1083 desc->byte_cnt,
1084 DMA_TO_DEVICE);
1085 else
1086 dma_unmap_single(mp->dev->dev.parent,
1087 desc->buf_ptr,
1088 desc->byte_cnt,
1089 DMA_TO_DEVICE);
1090 }
1071 1091
1072 if (cmd_sts & TX_ENABLE_INTERRUPT) { 1092 if (cmd_sts & TX_ENABLE_INTERRUPT) {
1073 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); 1093 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1996 struct tx_queue *txq = mp->txq + index; 2016 struct tx_queue *txq = mp->txq + index;
1997 struct tx_desc *tx_desc; 2017 struct tx_desc *tx_desc;
1998 int size; 2018 int size;
2019 int ret;
1999 int i; 2020 int i;
2000 2021
2001 txq->index = index; 2022 txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
2048 nexti * sizeof(struct tx_desc); 2069 nexti * sizeof(struct tx_desc);
2049 } 2070 }
2050 2071
2072 txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
2073 GFP_KERNEL);
2074 if (!txq->tx_desc_mapping) {
2075 ret = -ENOMEM;
2076 goto err_free_desc_area;
2077 }
2078
2051 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 2079 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2052 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, 2080 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2053 txq->tx_ring_size * TSO_HEADER_SIZE, 2081 txq->tx_ring_size * TSO_HEADER_SIZE,
2054 &txq->tso_hdrs_dma, GFP_KERNEL); 2082 &txq->tso_hdrs_dma, GFP_KERNEL);
2055 if (txq->tso_hdrs == NULL) { 2083 if (txq->tso_hdrs == NULL) {
2056 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2084 ret = -ENOMEM;
2057 txq->tx_desc_area, txq->tx_desc_dma); 2085 goto err_free_desc_mapping;
2058 return -ENOMEM;
2059 } 2086 }
2060 skb_queue_head_init(&txq->tx_skb); 2087 skb_queue_head_init(&txq->tx_skb);
2061 2088
2062 return 0; 2089 return 0;
2090
2091err_free_desc_mapping:
2092 kfree(txq->tx_desc_mapping);
2093err_free_desc_area:
2094 if (index == 0 && size <= mp->tx_desc_sram_size)
2095 iounmap(txq->tx_desc_area);
2096 else
2097 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2098 txq->tx_desc_area, txq->tx_desc_dma);
2099 return ret;
2063} 2100}
2064 2101
2065static void txq_deinit(struct tx_queue *txq) 2102static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
2077 else 2114 else
2078 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2115 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2079 txq->tx_desc_area, txq->tx_desc_dma); 2116 txq->tx_desc_area, txq->tx_desc_dma);
2117 kfree(txq->tx_desc_mapping);
2118
2080 if (txq->tso_hdrs) 2119 if (txq->tso_hdrs)
2081 dma_free_coherent(mp->dev->dev.parent, 2120 dma_free_coherent(mp->dev->dev.parent,
2082 txq->tx_ring_size * TSO_HEADER_SIZE, 2121 txq->tx_ring_size * TSO_HEADER_SIZE,
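
The mv643xx_eth patch fixes a DMA API pairing bug: fragment buffers are page-backed and are now mapped with skb_frag_dma_map(), which is a page mapping, while the linear part and TSO headers remain dma_map_single() mappings. Since tx reclaim only sees a descriptor, the new tx_desc_mapping byte array records which kind each descriptor holds so the matching unmap call can be made (mismatched map/unmap pairs misbehave under swiotlb and IOMMUs); txq_init also gains proper error unwinding for the new allocation. A sketch of the pairing rule, assuming DMA_TO_DEVICE throughout and the DESC_DMA_MAP_* values defined above:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static dma_addr_t demo_map_frag(struct device *dev, const skb_frag_t *frag)
{
	/* page-backed fragment -> page mapping */
	return skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				DMA_TO_DEVICE);
}

static void demo_unmap(struct device *dev, dma_addr_t addr, size_t len,
		       char kind)
{
	if (kind == DESC_DMA_MAP_PAGE)	/* recorded at map time */
		dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}
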
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index bdd4eea2247c..210691c89b6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -235,7 +235,8 @@ do { \
235extern int mlx4_log_num_mgm_entry_size; 235extern int mlx4_log_num_mgm_entry_size;
236extern int log_mtts_per_seg; 236extern int log_mtts_per_seg;
237 237
238#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) 238#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
239 MLX4_MFUNC_MAX))
239#define ALL_SLAVES 0xff 240#define ALL_SLAVES 0xff
240 241
241struct mlx4_bitmap { 242struct mlx4_bitmap {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..c531c8ae1be4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
2388 2388
2389 work_done = netxen_process_rcv_ring(sds_ring, budget); 2389 work_done = netxen_process_rcv_ring(sds_ring, budget);
2390 2390
2391 if ((work_done < budget) && tx_complete) { 2391 if (!tx_complete)
2392 work_done = budget;
2393
2394 if (work_done < budget) {
2392 napi_complete(&sds_ring->napi); 2395 napi_complete(&sds_ring->napi);
2393 if (test_bit(__NX_DEV_UP, &adapter->state)) 2396 if (test_bit(__NX_DEV_UP, &adapter->state))
2394 netxen_nic_enable_int(sds_ring); 2397 netxen_nic_enable_int(sds_ring);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 18e5de72e9b4..4e1f58cf19ce 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -967,7 +967,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
967 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, 967 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
968 budget); 968 budget);
969 work_done = qlcnic_process_rcv_ring(sds_ring, budget); 969 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
970 if ((work_done < budget) && tx_complete) { 970
971 /* Check if we need a repoll */
972 if (!tx_complete)
973 work_done = budget;
974
975 if (work_done < budget) {
971 napi_complete(&sds_ring->napi); 976 napi_complete(&sds_ring->napi);
972 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 977 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
973 qlcnic_enable_sds_intr(adapter, sds_ring); 978 qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -992,6 +997,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
992 napi_complete(&tx_ring->napi); 997 napi_complete(&tx_ring->napi);
993 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 998 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
994 qlcnic_enable_tx_intr(adapter, tx_ring); 999 qlcnic_enable_tx_intr(adapter, tx_ring);
1000 } else {
1001 /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/
1002 work_done = budget;
995 } 1003 }
996 1004
997 return work_done; 1005 return work_done;
@@ -1950,7 +1958,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
1950 1958
1951 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1959 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1952 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1960 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1953 if ((work_done < budget) && tx_complete) { 1961
1962 /* Check if we need a repoll */
1963 if (!tx_complete)
1964 work_done = budget;
1965
1966 if (work_done < budget) {
1954 napi_complete(&sds_ring->napi); 1967 napi_complete(&sds_ring->napi);
1955 qlcnic_enable_sds_intr(adapter, sds_ring); 1968 qlcnic_enable_sds_intr(adapter, sds_ring);
1956 } 1969 }
@@ -1973,7 +1986,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1973 1986
1974 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1987 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1975 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1988 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1976 if ((work_done < budget) && tx_complete) { 1989
1990 /* Check if we need a repoll */
1991 if (!tx_complete)
1992 work_done = budget;
1993
1994 if (work_done < budget) {
1977 napi_complete(&sds_ring->napi); 1995 napi_complete(&sds_ring->napi);
1978 qlcnic_enable_sds_intr(adapter, sds_ring); 1996 qlcnic_enable_sds_intr(adapter, sds_ring);
1979 } 1997 }
@@ -1995,6 +2013,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
1995 napi_complete(&tx_ring->napi); 2013 napi_complete(&tx_ring->napi);
1996 if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) 2014 if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
1997 qlcnic_enable_tx_intr(adapter, tx_ring); 2015 qlcnic_enable_tx_intr(adapter, tx_ring);
2016 } else {
2017 /* need a repoll */
2018 work_done = budget;
1998 } 2019 }
1999 2020
2000 return work_done; 2021 return work_done;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 6c904a6cad2a..ef5aed3b1225 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
2351{ 2351{
2352 struct ql_adapter *qdev = netdev_priv(ndev); 2352 struct ql_adapter *qdev = netdev_priv(ndev);
2353 int status = 0; 2353 int status = 0;
2354 bool need_restart = netif_running(ndev);
2354 2355
2355 status = ql_adapter_down(qdev); 2356 if (need_restart) {
2356 if (status) { 2357 status = ql_adapter_down(qdev);
2357 netif_err(qdev, link, qdev->ndev, 2358 if (status) {
2358 "Failed to bring down the adapter\n"); 2359 netif_err(qdev, link, qdev->ndev,
2359 return status; 2360 "Failed to bring down the adapter\n");
2361 return status;
2362 }
2360 } 2363 }
2361 2364
2362 /* update the features with resent change */ 2365 /* update the features with resent change */
2363 ndev->features = features; 2366 ndev->features = features;
2364 2367
2365 status = ql_adapter_up(qdev); 2368 if (need_restart) {
2366 if (status) { 2369 status = ql_adapter_up(qdev);
2367 netif_err(qdev, link, qdev->ndev, 2370 if (status) {
2368 "Failed to bring up the adapter\n"); 2371 netif_err(qdev, link, qdev->ndev,
2369 return status; 2372 "Failed to bring up the adapter\n");
2373 return status;
2374 }
2370 } 2375 }
2376
2371 return status; 2377 return status;
2372} 2378}
2373 2379
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6576243222af..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
396 [TSU_ADRL31] = 0x01fc, 396 [TSU_ADRL31] = 0x01fc,
397}; 397};
398 398
399static void sh_eth_rcv_snd_disable(struct net_device *ndev);
400static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
401
399static bool sh_eth_is_gether(struct sh_eth_private *mdp) 402static bool sh_eth_is_gether(struct sh_eth_private *mdp)
400{ 403{
401 return mdp->reg_offset == sh_eth_offset_gigabit; 404 return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
1120 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; 1123 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1121 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 1124 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1122 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; 1125 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1126 dma_addr_t dma_addr;
1123 1127
1124 mdp->cur_rx = 0; 1128 mdp->cur_rx = 0;
1125 mdp->cur_tx = 0; 1129 mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
1133 /* skb */ 1137 /* skb */
1134 mdp->rx_skbuff[i] = NULL; 1138 mdp->rx_skbuff[i] = NULL;
1135 skb = netdev_alloc_skb(ndev, skbuff_size); 1139 skb = netdev_alloc_skb(ndev, skbuff_size);
1136 mdp->rx_skbuff[i] = skb;
1137 if (skb == NULL) 1140 if (skb == NULL)
1138 break; 1141 break;
1139 sh_eth_set_receive_align(skb); 1142 sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
1142 rxdesc = &mdp->rx_ring[i]; 1145 rxdesc = &mdp->rx_ring[i];
1143 /* The size of the buffer is a multiple of 16 bytes. */ 1146 /* The size of the buffer is a multiple of 16 bytes. */
1144 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1147 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1145 dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, 1148 dma_addr = dma_map_single(&ndev->dev, skb->data,
1146 DMA_FROM_DEVICE); 1149 rxdesc->buffer_length,
1147 rxdesc->addr = virt_to_phys(skb->data); 1150 DMA_FROM_DEVICE);
1151 if (dma_mapping_error(&ndev->dev, dma_addr)) {
1152 kfree_skb(skb);
1153 break;
1154 }
1155 mdp->rx_skbuff[i] = skb;
1156 rxdesc->addr = dma_addr;
1148 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1157 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1149 1158
1150 /* Rx descriptor address set */ 1159 /* Rx descriptor address set */
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1316 RFLR); 1325 RFLR);
1317 1326
1318 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 1327 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1319 if (start) 1328 if (start) {
1329 mdp->irq_enabled = true;
1320 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1330 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1331 }
1321 1332
1322 /* PAUSE Prohibition */ 1333 /* PAUSE Prohibition */
1323 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | 1334 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1356 return ret; 1367 return ret;
1357} 1368}
1358 1369
1370static void sh_eth_dev_exit(struct net_device *ndev)
1371{
1372 struct sh_eth_private *mdp = netdev_priv(ndev);
1373 int i;
1374
1375 /* Deactivate all TX descriptors, so DMA should stop at next
1376 * packet boundary if it's currently running
1377 */
1378 for (i = 0; i < mdp->num_tx_ring; i++)
1379 mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
1380
1381 /* Disable TX FIFO egress to MAC */
1382 sh_eth_rcv_snd_disable(ndev);
1383
1384 /* Stop RX DMA at next packet boundary */
1385 sh_eth_write(ndev, 0, EDRRR);
1386
1387 /* Aside from TX DMA, we can't tell when the hardware is
1388 * really stopped, so we need to reset to make sure.
1389 * Before doing that, wait for long enough to *probably*
1390 * finish transmitting the last packet and poll stats.
1391 */
1392 msleep(2); /* max frame time at 10 Mbps < 1250 us */
1393 sh_eth_get_stats(ndev);
1394 sh_eth_reset(ndev);
1395}
1396
1359/* free Tx skb function */ 1397/* free Tx skb function */
1360static int sh_eth_txfree(struct net_device *ndev) 1398static int sh_eth_txfree(struct net_device *ndev)
1361{ 1399{
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1400 u16 pkt_len = 0; 1438 u16 pkt_len = 0;
1401 u32 desc_status; 1439 u32 desc_status;
1402 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; 1440 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1441 dma_addr_t dma_addr;
1403 1442
1404 boguscnt = min(boguscnt, *quota); 1443 boguscnt = min(boguscnt, *quota);
1405 limit = boguscnt; 1444 limit = boguscnt;
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1447 mdp->rx_skbuff[entry] = NULL; 1486 mdp->rx_skbuff[entry] = NULL;
1448 if (mdp->cd->rpadir) 1487 if (mdp->cd->rpadir)
1449 skb_reserve(skb, NET_IP_ALIGN); 1488 skb_reserve(skb, NET_IP_ALIGN);
1450 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, 1489 dma_unmap_single(&ndev->dev, rxdesc->addr,
1451 ALIGN(mdp->rx_buf_sz, 16), 1490 ALIGN(mdp->rx_buf_sz, 16),
1452 DMA_FROM_DEVICE); 1491 DMA_FROM_DEVICE);
1453 skb_put(skb, pkt_len); 1492 skb_put(skb, pkt_len);
1454 skb->protocol = eth_type_trans(skb, ndev); 1493 skb->protocol = eth_type_trans(skb, ndev);
1455 netif_receive_skb(skb); 1494 netif_receive_skb(skb);
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1469 1508
1470 if (mdp->rx_skbuff[entry] == NULL) { 1509 if (mdp->rx_skbuff[entry] == NULL) {
1471 skb = netdev_alloc_skb(ndev, skbuff_size); 1510 skb = netdev_alloc_skb(ndev, skbuff_size);
1472 mdp->rx_skbuff[entry] = skb;
1473 if (skb == NULL) 1511 if (skb == NULL)
1474 break; /* Better luck next round. */ 1512 break; /* Better luck next round. */
1475 sh_eth_set_receive_align(skb); 1513 sh_eth_set_receive_align(skb);
1476 dma_map_single(&ndev->dev, skb->data, 1514 dma_addr = dma_map_single(&ndev->dev, skb->data,
1477 rxdesc->buffer_length, DMA_FROM_DEVICE); 1515 rxdesc->buffer_length,
1516 DMA_FROM_DEVICE);
1517 if (dma_mapping_error(&ndev->dev, dma_addr)) {
1518 kfree_skb(skb);
1519 break;
1520 }
1521 mdp->rx_skbuff[entry] = skb;
1478 1522
1479 skb_checksum_none_assert(skb); 1523 skb_checksum_none_assert(skb);
1480 rxdesc->addr = virt_to_phys(skb->data); 1524 rxdesc->addr = dma_addr;
1481 } 1525 }
1482 if (entry >= mdp->num_rx_ring - 1) 1526 if (entry >= mdp->num_rx_ring - 1)
1483 rxdesc->status |= 1527 rxdesc->status |=
@@ -1573,7 +1617,6 @@ ignore_link:
1573 if (intr_status & EESR_RFRMER) { 1617 if (intr_status & EESR_RFRMER) {
1574 /* Receive Frame Overflow int */ 1618 /* Receive Frame Overflow int */
1575 ndev->stats.rx_frame_errors++; 1619 ndev->stats.rx_frame_errors++;
1576 netif_err(mdp, rx_err, ndev, "Receive Abort\n");
1577 } 1620 }
1578 } 1621 }
1579 1622
@@ -1592,13 +1635,11 @@ ignore_link:
1592 if (intr_status & EESR_RDE) { 1635 if (intr_status & EESR_RDE) {
1593 /* Receive Descriptor Empty int */ 1636 /* Receive Descriptor Empty int */
1594 ndev->stats.rx_over_errors++; 1637 ndev->stats.rx_over_errors++;
1595 netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
1596 } 1638 }
1597 1639
1598 if (intr_status & EESR_RFE) { 1640 if (intr_status & EESR_RFE) {
1599 /* Receive FIFO Overflow int */ 1641 /* Receive FIFO Overflow int */
1600 ndev->stats.rx_fifo_errors++; 1642 ndev->stats.rx_fifo_errors++;
1601 netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
1602 } 1643 }
1603 1644
1604 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1645 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1653 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) 1694 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1654 ret = IRQ_HANDLED; 1695 ret = IRQ_HANDLED;
1655 else 1696 else
1656 goto other_irq; 1697 goto out;
1698
1699 if (!likely(mdp->irq_enabled)) {
1700 sh_eth_write(ndev, 0, EESIPR);
1701 goto out;
1702 }
1657 1703
1658 if (intr_status & EESR_RX_CHECK) { 1704 if (intr_status & EESR_RX_CHECK) {
1659 if (napi_schedule_prep(&mdp->napi)) { 1705 if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1684 sh_eth_error(ndev, intr_status); 1730 sh_eth_error(ndev, intr_status);
1685 } 1731 }
1686 1732
1687other_irq: 1733out:
1688 spin_unlock(&mdp->lock); 1734 spin_unlock(&mdp->lock);
1689 1735
1690 return ret; 1736 return ret;
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
1712 napi_complete(napi); 1758 napi_complete(napi);
1713 1759
1714 /* Reenable Rx interrupts */ 1760 /* Reenable Rx interrupts */
1715 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1761 if (mdp->irq_enabled)
1762 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1716out: 1763out:
1717 return budget - quota; 1764 return budget - quota;
1718} 1765}
@@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
1968 return -EINVAL; 2015 return -EINVAL;
1969 2016
1970 if (netif_running(ndev)) { 2017 if (netif_running(ndev)) {
2018 netif_device_detach(ndev);
1971 netif_tx_disable(ndev); 2019 netif_tx_disable(ndev);
1972 /* Disable interrupts by clearing the interrupt mask. */ 2020
1973 sh_eth_write(ndev, 0x0000, EESIPR); 2021 /* Serialise with the interrupt handler and NAPI, then
1974 /* Stop the chip's Tx and Rx processes. */ 2022 * disable interrupts. We have to clear the
1975 sh_eth_write(ndev, 0, EDTRR); 2023 * irq_enabled flag first to ensure that interrupts
1976 sh_eth_write(ndev, 0, EDRRR); 2024 * won't be re-enabled.
2025 */
2026 mdp->irq_enabled = false;
1977 synchronize_irq(ndev->irq); 2027 synchronize_irq(ndev->irq);
1978 } 2028 napi_synchronize(&mdp->napi);
2029 sh_eth_write(ndev, 0x0000, EESIPR);
1979 2030
1980 /* Free all the skbuffs in the Rx queue. */ 2031 sh_eth_dev_exit(ndev);
1981 sh_eth_ring_free(ndev); 2032
1982 /* Free DMA buffer */ 2033 /* Free all the skbuffs in the Rx queue. */
1983 sh_eth_free_dma_buffer(mdp); 2034 sh_eth_ring_free(ndev);
2035 /* Free DMA buffer */
2036 sh_eth_free_dma_buffer(mdp);
2037 }
1984 2038
1985 /* Set new parameters */ 2039 /* Set new parameters */
1986 mdp->num_rx_ring = ring->rx_pending; 2040 mdp->num_rx_ring = ring->rx_pending;
1987 mdp->num_tx_ring = ring->tx_pending; 2041 mdp->num_tx_ring = ring->tx_pending;
1988 2042
1989 ret = sh_eth_ring_init(ndev);
1990 if (ret < 0) {
1991 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
1992 return ret;
1993 }
1994 ret = sh_eth_dev_init(ndev, false);
1995 if (ret < 0) {
1996 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
1997 return ret;
1998 }
1999
2000 if (netif_running(ndev)) { 2043 if (netif_running(ndev)) {
2044 ret = sh_eth_ring_init(ndev);
2045 if (ret < 0) {
2046 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2047 __func__);
2048 return ret;
2049 }
2050 ret = sh_eth_dev_init(ndev, false);
2051 if (ret < 0) {
2052 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2053 __func__);
2054 return ret;
2055 }
2056
2057 mdp->irq_enabled = true;
2001 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 2058 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2002 /* Setting the Rx mode will start the Rx process. */ 2059 /* Setting the Rx mode will start the Rx process. */
2003 sh_eth_write(ndev, EDRRR_R, EDRRR); 2060 sh_eth_write(ndev, EDRRR_R, EDRRR);
2004 netif_wake_queue(ndev); 2061 netif_device_attach(ndev);
2005 } 2062 }
2006 2063
2007 return 0; 2064 return 0;
@@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2117 } 2174 }
2118 spin_unlock_irqrestore(&mdp->lock, flags); 2175 spin_unlock_irqrestore(&mdp->lock, flags);
2119 2176
2177 if (skb_padto(skb, ETH_ZLEN))
2178 return NETDEV_TX_OK;
2179
2120 entry = mdp->cur_tx % mdp->num_tx_ring; 2180 entry = mdp->cur_tx % mdp->num_tx_ring;
2121 mdp->tx_skbuff[entry] = skb; 2181 mdp->tx_skbuff[entry] = skb;
2122 txdesc = &mdp->tx_ring[entry]; 2182 txdesc = &mdp->tx_ring[entry];
@@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2126 skb->len + 2); 2186 skb->len + 2);
2127 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2187 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2128 DMA_TO_DEVICE); 2188 DMA_TO_DEVICE);
2129 if (skb->len < ETH_ZLEN) 2189 if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
2130 txdesc->buffer_length = ETH_ZLEN; 2190 kfree_skb(skb);
2131 else 2191 return NETDEV_TX_OK;
2132 txdesc->buffer_length = skb->len; 2192 }
2193 txdesc->buffer_length = skb->len;
2133 2194
2134 if (entry >= mdp->num_tx_ring - 1) 2195 if (entry >= mdp->num_tx_ring - 1)
2135 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2196 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev)
2181 2242
2182 netif_stop_queue(ndev); 2243 netif_stop_queue(ndev);
2183 2244
2184 /* Disable interrupts by clearing the interrupt mask. */ 2245 /* Serialise with the interrupt handler and NAPI, then disable
2246 * interrupts. We have to clear the irq_enabled flag first to
2247 * ensure that interrupts won't be re-enabled.
2248 */
2249 mdp->irq_enabled = false;
2250 synchronize_irq(ndev->irq);
2251 napi_disable(&mdp->napi);
2185 sh_eth_write(ndev, 0x0000, EESIPR); 2252 sh_eth_write(ndev, 0x0000, EESIPR);
2186 2253
2187 /* Stop the chip's Tx and Rx processes. */ 2254 sh_eth_dev_exit(ndev);
2188 sh_eth_write(ndev, 0, EDTRR);
2189 sh_eth_write(ndev, 0, EDRRR);
2190 2255
2191 sh_eth_get_stats(ndev);
2192 /* PHY Disconnect */ 2256 /* PHY Disconnect */
2193 if (mdp->phydev) { 2257 if (mdp->phydev) {
2194 phy_stop(mdp->phydev); 2258 phy_stop(mdp->phydev);
@@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev)
2198 2262
2199 free_irq(ndev->irq, ndev); 2263 free_irq(ndev->irq, ndev);
2200 2264
2201 napi_disable(&mdp->napi);
2202
2203 /* Free all the skbuffs in the Rx queue. */ 2265 /* Free all the skbuffs in the Rx queue. */
2204 sh_eth_ring_free(ndev); 2266 sh_eth_ring_free(ndev);
2205 2267
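
The sh_eth patch bundles several robustness fixes: every dma_map_single() result is now checked with dma_mapping_error() (an skb whose buffer failed to map is freed rather than handed to the EDMAC), receive completion unmaps instead of merely syncing, short frames are padded with skb_padto() before mapping, and a new irq_enabled flag lets close/set_ringparam serialise against the interrupt handler and NAPI before tearing the rings down via the new sh_eth_dev_exit(). The refill sequence at the core of the mapping fix, following the driver's own names, with the skb recorded in the ring only after the mapping succeeds:

	skb = netdev_alloc_skb(ndev, skbuff_size);
	if (!skb)
		break;				/* better luck next round */
	sh_eth_set_receive_align(skb);
	dma_addr = dma_map_single(&ndev->dev, skb->data,
				  rxdesc->buffer_length, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, dma_addr)) {
		kfree_skb(skb);			/* never publish an unmapped buffer */
		break;
	}
	mdp->rx_skbuff[entry] = skb;		/* record ownership on success only */
	rxdesc->addr = dma_addr;
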
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 71f5de1171bd..332d3c16d483 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -513,6 +513,7 @@ struct sh_eth_private {
513 u32 rx_buf_sz; /* Based on MTU+slack. */ 513 u32 rx_buf_sz; /* Based on MTU+slack. */
514 int edmac_endian; 514 int edmac_endian;
515 struct napi_struct napi; 515 struct napi_struct napi;
516 bool irq_enabled;
516 /* MII transceiver section. */ 517 /* MII transceiver section. */
517 u32 phy_id; /* PHY ID */ 518 u32 phy_id; /* PHY ID */
518 struct mii_bus *mii_bus; /* MDIO bus control */ 519 struct mii_bus *mii_bus; /* MDIO bus control */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8c6b7c1651e5..cf62ff4c8c56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2778 * @addr: iobase memory address 2778 * @addr: iobase memory address
2779 * Description: this is the main probe function used to 2779 * Description: this is the main probe function used to
2780 * call the alloc_etherdev, allocate the priv structure. 2780 * call the alloc_etherdev, allocate the priv structure.
2781 * Return:
2782 * on success the new private structure is returned, otherwise the error
2783 * pointer.
2781 */ 2784 */
2782struct stmmac_priv *stmmac_dvr_probe(struct device *device, 2785struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2783 struct plat_stmmacenet_data *plat_dat, 2786 struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2789 2792
2790 ndev = alloc_etherdev(sizeof(struct stmmac_priv)); 2793 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
2791 if (!ndev) 2794 if (!ndev)
2792 return NULL; 2795 return ERR_PTR(-ENOMEM);
2793 2796
2794 SET_NETDEV_DEV(ndev, device); 2797 SET_NETDEV_DEV(ndev, device);
2795 2798
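
stmmac_dvr_probe() now returns ERR_PTR(-ENOMEM) instead of NULL on allocation failure, and its kerneldoc gains a matching Return: section. Callers should therefore test with IS_ERR() and propagate PTR_ERR(); a sketch of the caller side, wrapped in a hypothetical probe helper:

#include <linux/err.h>

static int demo_probe(struct device *device,
		      struct plat_stmmacenet_data *plat_dat,
		      void __iomem *addr)
{
	struct stmmac_priv *priv = stmmac_dvr_probe(device, plat_dat, addr);

	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* e.g. -ENOMEM from alloc_etherdev() */
	return 0;
}
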
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index d2835bf7b4fb..3699b98d5b2c 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1119,6 +1119,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
1119 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; 1119 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
1120 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; 1120 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
1121 } 1121 }
1122 nskb->queue_mapping = skb->queue_mapping;
1122 dev_kfree_skb(skb); 1123 dev_kfree_skb(skb);
1123 skb = nskb; 1124 skb = nskb;
1124 } 1125 }
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e068d48b0f21..a39131f494ec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1683 if (vid == priv->data.default_vlan) 1683 if (vid == priv->data.default_vlan)
1684 return 0; 1684 return 0;
1685 1685
1686 if (priv->data.dual_emac) {
1687 /* In dual EMAC, reserved VLAN id should not be used for
1688 * creating VLAN interfaces as this can break the dual
1689 * EMAC port separation
1690 */
1691 int i;
1692
1693 for (i = 0; i < priv->data.slaves; i++) {
1694 if (vid == priv->slaves[i].port_vlan)
1695 return -EINVAL;
1696 }
1697 }
1698
1686 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 1699 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1687 return cpsw_add_vlan_ale_entry(priv, vid); 1700 return cpsw_add_vlan_ale_entry(priv, vid);
1688} 1701}
@@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1696 if (vid == priv->data.default_vlan) 1709 if (vid == priv->data.default_vlan)
1697 return 0; 1710 return 0;
1698 1711
1712 if (priv->data.dual_emac) {
1713 int i;
1714
1715 for (i = 0; i < priv->data.slaves; i++) {
1716 if (vid == priv->slaves[i].port_vlan)
1717 return -EINVAL;
1718 }
1719 }
1720
1699 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); 1721 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1700 ret = cpsw_ale_del_vlan(priv->ale, vid, 0); 1722 ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
1701 if (ret != 0) 1723 if (ret != 0)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9f49c0129a78..7cd4eb38abfa 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device,
716 u64 req_id; 716 u64 req_id;
717 unsigned int section_index = NETVSC_INVALID_INDEX; 717 unsigned int section_index = NETVSC_INVALID_INDEX;
718 u32 msg_size = 0; 718 u32 msg_size = 0;
719 struct sk_buff *skb; 719 struct sk_buff *skb = NULL;
720 u16 q_idx = packet->q_idx; 720 u16 q_idx = packet->q_idx;
721 721
722 722
@@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device,
743 packet); 743 packet);
744 skb = (struct sk_buff *) 744 skb = (struct sk_buff *)
745 (unsigned long)packet->send_completion_tid; 745 (unsigned long)packet->send_completion_tid;
746 if (skb)
747 dev_kfree_skb_any(skb);
748 packet->page_buf_cnt = 0; 746 packet->page_buf_cnt = 0;
749 } 747 }
750 } 748 }
@@ -810,6 +808,13 @@ int netvsc_send(struct hv_device *device,
810 packet, ret); 808 packet, ret);
811 } 809 }
812 810
811 if (ret != 0) {
812 if (section_index != NETVSC_INVALID_INDEX)
813 netvsc_free_send_slot(net_device, section_index);
814 } else if (skb) {
815 dev_kfree_skb_any(skb);
816 }
817
813 return ret; 818 return ret;
814} 819}
815 820
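
The netvsc change moves the skb free out of the send-buffer copy path: the skb is freed only after the VMBus send has actually succeeded, and on failure a reserved send-buffer slot is returned with netvsc_free_send_slot(), closing a use-after-free/leak pair. The resulting error/success split at the tail of netvsc_send() (the send call itself is elided in the hunk above):

	if (ret != 0) {
		/* undo the slot reservation; no completion will fire for it */
		if (section_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, section_index);
	} else if (skb) {
		/* data was copied into the send buffer; skb no longer needed */
		dev_kfree_skb_any(skb);
	}
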
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index a14d87783245..2e195289ddf4 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
377 }; 377 };
378 378
379 dst = ip6_route_output(dev_net(dev), NULL, &fl6); 379 dst = ip6_route_output(dev_net(dev), NULL, &fl6);
380 if (IS_ERR(dst)) 380 if (dst->error) {
381 ret = dst->error;
382 dst_release(dst);
381 goto err; 383 goto err;
382 384 }
383 skb_dst_drop(skb); 385 skb_dst_drop(skb);
384 skb_dst_set(skb, dst); 386 skb_dst_set(skb, dst);
385 err = ip6_local_out(skb); 387 err = ip6_local_out(skb);
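
The ipvlan fix reflects an IPv6 routing API detail: unlike some IPv4 lookup paths, ip6_route_output() never returns an ERR_PTR, it always hands back a valid dst with any failure latched in dst->error, so the old IS_ERR() test could never fire and an error dst was neither detected nor released. Correct usage, mirroring the hunk:

	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);	/* drop the reference before bailing */
		goto err;
	}
	skb_dst_drop(skb);
	skb_dst_set(skb, dst);		/* consumes the dst reference */
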
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 7df221788cd4..919f4fccc322 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -17,7 +17,6 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/uio.h> 18#include <linux/uio.h>
19 19
20#include <net/ipv6.h>
21#include <net/net_namespace.h> 20#include <net/net_namespace.h>
22#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
23#include <net/sock.h> 22#include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
81static const struct proto_ops macvtap_socket_ops; 80static const struct proto_ops macvtap_socket_ops;
82 81
83#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 82#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
84 NETIF_F_TSO6) 83 NETIF_F_TSO6 | NETIF_F_UFO)
85#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 84#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
86#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 85#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
87 86
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
586 gso_type = SKB_GSO_TCPV6; 585 gso_type = SKB_GSO_TCPV6;
587 break; 586 break;
588 case VIRTIO_NET_HDR_GSO_UDP: 587 case VIRTIO_NET_HDR_GSO_UDP:
589 pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
590 current->comm);
591 gso_type = SKB_GSO_UDP; 588 gso_type = SKB_GSO_UDP;
592 if (skb->protocol == htons(ETH_P_IPV6))
593 ipv6_proxy_select_ident(skb);
594 break; 589 break;
595 default: 590 default:
596 return -EINVAL; 591 return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
636 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 631 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
637 else if (sinfo->gso_type & SKB_GSO_TCPV6) 632 else if (sinfo->gso_type & SKB_GSO_TCPV6)
638 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 633 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
634 else if (sinfo->gso_type & SKB_GSO_UDP)
635 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
639 else 636 else
640 BUG(); 637 BUG();
641 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 638 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
965 if (arg & TUN_F_TSO6) 962 if (arg & TUN_F_TSO6)
966 feature_mask |= NETIF_F_TSO6; 963 feature_mask |= NETIF_F_TSO6;
967 } 964 }
965
966 if (arg & TUN_F_UFO)
967 feature_mask |= NETIF_F_UFO;
968 } 968 }
969 969
970 /* tun/tap driver inverts the usage for TSO offloads, where 970 /* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
975 * When user space turns off TSO, we turn off GSO/LRO so that 975 * When user space turns off TSO, we turn off GSO/LRO so that
976 * user-space will not receive TSO frames. 976 * user-space will not receive TSO frames.
977 */ 977 */
978 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) 978 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
979 features |= RX_OFFLOADS; 979 features |= RX_OFFLOADS;
980 else 980 else
981 features &= ~RX_OFFLOADS; 981 features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1090 case TUNSETOFFLOAD: 1090 case TUNSETOFFLOAD:
1091 /* let the user check for future flags */ 1091 /* let the user check for future flags */
1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
1093 TUN_F_TSO_ECN)) 1093 TUN_F_TSO_ECN | TUN_F_UFO))
1094 return -EINVAL; 1094 return -EINVAL;
1095 1095
1096 rtnl_lock(); 1096 rtnl_lock();
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 602c625d95d5..b5edc7f96a39 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
246 /* 246 /*
247 * See if we managed to reduce the size of the packet. 247 * See if we managed to reduce the size of the packet.
248 */ 248 */
249 if (olen < isize) { 249 if (olen < isize && olen <= osize) {
250 state->stats.comp_bytes += olen; 250 state->stats.comp_bytes += olen;
251 state->stats.comp_packets++; 251 state->stats.comp_packets++;
252 } else { 252 } else {
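
The ppp_deflate guard covers incompressible data: zlib can report an output length olen larger than the space that was actually available, so comparing only against the input size could select a "compressed" frame that had overrun obuf. Requiring olen <= osize as well makes the driver fall back to transmitting the packet uncompressed in that case:

	if (olen < isize && olen <= osize) {
		/* compressed copy is smaller AND fit the output buffer */
		state->stats.comp_bytes += olen;
		state->stats.comp_packets++;
	} else {
		/* send the original frame; account it as incompressible */
	}
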
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8c8dc16839a7..10f9e4021b5a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -65,7 +65,6 @@
65#include <linux/nsproxy.h> 65#include <linux/nsproxy.h>
66#include <linux/virtio_net.h> 66#include <linux/virtio_net.h>
67#include <linux/rcupdate.h> 67#include <linux/rcupdate.h>
68#include <net/ipv6.h>
69#include <net/net_namespace.h> 68#include <net/net_namespace.h>
70#include <net/netns/generic.h> 69#include <net/netns/generic.h>
71#include <net/rtnetlink.h> 70#include <net/rtnetlink.h>
@@ -187,7 +186,7 @@ struct tun_struct {
187 struct net_device *dev; 186 struct net_device *dev;
188 netdev_features_t set_features; 187 netdev_features_t set_features;
189#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 188#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
190 NETIF_F_TSO6) 189 NETIF_F_TSO6|NETIF_F_UFO)
191 190
192 int vnet_hdr_sz; 191 int vnet_hdr_sz;
193 int sndbuf; 192 int sndbuf;
@@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1167 break; 1166 break;
1168 } 1167 }
1169 1168
1170 skb_reset_network_header(skb);
1171
1172 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 1169 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1173 pr_debug("GSO!\n"); 1170 pr_debug("GSO!\n");
1174 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 1171 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1179,20 +1176,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1179 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1176 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1180 break; 1177 break;
1181 case VIRTIO_NET_HDR_GSO_UDP: 1178 case VIRTIO_NET_HDR_GSO_UDP:
1182 {
1183 static bool warned;
1184
1185 if (!warned) {
1186 warned = true;
1187 netdev_warn(tun->dev,
1188 "%s: using disabled UFO feature; please fix this program\n",
1189 current->comm);
1190 }
1191 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1179 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1192 if (skb->protocol == htons(ETH_P_IPV6))
1193 ipv6_proxy_select_ident(skb);
1194 break; 1180 break;
1195 }
1196 default: 1181 default:
1197 tun->dev->stats.rx_frame_errors++; 1182 tun->dev->stats.rx_frame_errors++;
1198 kfree_skb(skb); 1183 kfree_skb(skb);
@@ -1221,6 +1206,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1221 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1206 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1222 } 1207 }
1223 1208
1209 skb_reset_network_header(skb);
1224 skb_probe_transport_header(skb, 0); 1210 skb_probe_transport_header(skb, 0);
1225 1211
1226 rxhash = skb_get_hash(skb); 1212 rxhash = skb_get_hash(skb);
@@ -1298,6 +1284,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1298 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1284 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1299 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1285 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1300 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1286 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1287 else if (sinfo->gso_type & SKB_GSO_UDP)
1288 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1301 else { 1289 else {
1302 pr_err("unexpected GSO type: " 1290 pr_err("unexpected GSO type: "
1303 "0x%x, gso_size %d, hdr_len %d\n", 1291 "0x%x, gso_size %d, hdr_len %d\n",
@@ -1746,6 +1734,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
1746 features |= NETIF_F_TSO6; 1734 features |= NETIF_F_TSO6;
1747 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 1735 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1748 } 1736 }
1737
1738 if (arg & TUN_F_UFO) {
1739 features |= NETIF_F_UFO;
1740 arg &= ~TUN_F_UFO;
1741 }
1749 } 1742 }
1750 1743
1751 /* This gives the user a way to test for new features in future by 1744 /* This gives the user a way to test for new features in future by
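
The macvtap and tun hunks above are two halves of the same restoration: UFO (NETIF_F_UFO / TUN_F_UFO / VIRTIO_NET_HDR_GSO_UDP) is plumbed back through the user-visible offload interfaces, presumably so that guests and migrations that had already negotiated UDP fragmentation offload via virtio-net keep working. The host-to-guest half of the mapping, as a sketch:

#include <linux/skbuff.h>
#include <linux/virtio_net.h>

static u8 demo_gso_to_vnet_hdr(unsigned int gso_type)
{
	if (gso_type & SKB_GSO_TCPV4)
		return VIRTIO_NET_HDR_GSO_TCPV4;
	if (gso_type & SKB_GSO_TCPV6)
		return VIRTIO_NET_HDR_GSO_TCPV6;
	if (gso_type & SKB_GSO_UDP)		/* the restored case */
		return VIRTIO_NET_HDR_GSO_UDP;
	return VIRTIO_NET_HDR_GSO_NONE;
}
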
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 99b69af14274..4a1e9c489f1f 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
77 int ret; 77 int ret;
78 78
79 udelay(1); 79 udelay(1);
80 ret = sr_read_reg(dev, EPCR, &tmp); 80 ret = sr_read_reg(dev, SR_EPCR, &tmp);
81 if (ret < 0) 81 if (ret < 0)
82 return ret; 82 return ret;
83 83
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
98 98
99 mutex_lock(&dev->phy_mutex); 99 mutex_lock(&dev->phy_mutex);
100 100
101 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 101 sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
102 sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); 102 sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
103 103
104 ret = wait_phy_eeprom_ready(dev, phy); 104 ret = wait_phy_eeprom_ready(dev, phy);
105 if (ret < 0) 105 if (ret < 0)
106 goto out_unlock; 106 goto out_unlock;
107 107
108 sr_write_reg(dev, EPCR, 0x0); 108 sr_write_reg(dev, SR_EPCR, 0x0);
109 ret = sr_read(dev, EPDR, 2, value); 109 ret = sr_read(dev, SR_EPDR, 2, value);
110 110
111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", 111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
112 phy, reg, *value, ret); 112 phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
123 123
124 mutex_lock(&dev->phy_mutex); 124 mutex_lock(&dev->phy_mutex);
125 125
126 ret = sr_write(dev, EPDR, 2, &value); 126 ret = sr_write(dev, SR_EPDR, 2, &value);
127 if (ret < 0) 127 if (ret < 0)
128 goto out_unlock; 128 goto out_unlock;
129 129
130 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 130 sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
131 sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : 131 sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
132 (EPCR_WEP | EPCR_ERPRW)); 132 (EPCR_WEP | EPCR_ERPRW));
133 133
134 ret = wait_phy_eeprom_ready(dev, phy); 134 ret = wait_phy_eeprom_ready(dev, phy);
135 if (ret < 0) 135 if (ret < 0)
136 goto out_unlock; 136 goto out_unlock;
137 137
138 sr_write_reg(dev, EPCR, 0x0); 138 sr_write_reg(dev, SR_EPCR, 0x0);
139 139
140out_unlock: 140out_unlock:
141 mutex_unlock(&dev->phy_mutex); 141 mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
188 if (loc == MII_BMSR) { 188 if (loc == MII_BMSR) {
189 u8 value; 189 u8 value;
190 190
191 sr_read_reg(dev, NSR, &value); 191 sr_read_reg(dev, SR_NSR, &value);
192 if (value & NSR_LINKST) 192 if (value & NSR_LINKST)
193 rc = 1; 193 rc = 1;
194 } 194 }
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
228 int rc = 0; 228 int rc = 0;
229 229
230 /* Get the Link Status directly */ 230 /* Get the Link Status directly */
231 sr_read_reg(dev, NSR, &value); 231 sr_read_reg(dev, SR_NSR, &value);
232 if (value & NSR_LINKST) 232 if (value & NSR_LINKST)
233 rc = 1; 233 rc = 1;
234 234
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
281 } 281 }
282 } 282 }
283 283
284 sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); 284 sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
285 sr_write_reg_async(dev, RCR, rx_ctl); 285 sr_write_reg_async(dev, SR_RCR, rx_ctl);
286} 286}
287 287
288static int sr9700_set_mac_address(struct net_device *netdev, void *p) 288static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
297 } 297 }
298 298
299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
300 sr_write_async(dev, PAR, 6, netdev->dev_addr); 300 sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
301 301
302 return 0; 302 return 0;
303} 303}
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
340 mii->phy_id_mask = 0x1f; 340 mii->phy_id_mask = 0x1f;
341 mii->reg_num_mask = 0x1f; 341 mii->reg_num_mask = 0x1f;
342 342
343 sr_write_reg(dev, NCR, NCR_RST); 343 sr_write_reg(dev, SR_NCR, NCR_RST);
344 udelay(20); 344 udelay(20);
345 345
346 /* read MAC 346 /* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
348 * EEPROM automatically to PAR. In case there is no EEPROM externally, 348 * EEPROM automatically to PAR. In case there is no EEPROM externally,
349 * a default MAC address is stored in PAR for making chip work properly. 349 * a default MAC address is stored in PAR for making chip work properly.
350 */ 350 */
351 if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { 351 if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
352 netdev_err(netdev, "Error reading MAC address\n"); 352 netdev_err(netdev, "Error reading MAC address\n");
353 ret = -ENODEV; 353 ret = -ENODEV;
354 goto out; 354 goto out;
355 } 355 }
356 356
357 /* power up and reset phy */ 357 /* power up and reset phy */
358 sr_write_reg(dev, PRR, PRR_PHY_RST); 358 sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
359 /* at least 10ms, here 20ms for safe */ 359 /* at least 10ms, here 20ms for safe */
360 mdelay(20); 360 mdelay(20);
361 sr_write_reg(dev, PRR, 0); 361 sr_write_reg(dev, SR_PRR, 0);
362 /* at least 1ms, here 2ms for reading right register */ 362 /* at least 1ms, here 2ms for reading right register */
363 udelay(2 * 1000); 363 udelay(2 * 1000);
364 364
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
index fd687c575e74..258b030277e7 100644
--- a/drivers/net/usb/sr9700.h
+++ b/drivers/net/usb/sr9700.h
@@ -14,13 +14,13 @@
14/* sr9700 spec. register table on Linux platform */ 14/* sr9700 spec. register table on Linux platform */
15 15
16/* Network Control Reg */ 16/* Network Control Reg */
17#define NCR 0x00 17#define SR_NCR 0x00
18#define NCR_RST (1 << 0) 18#define NCR_RST (1 << 0)
19#define NCR_LBK (3 << 1) 19#define NCR_LBK (3 << 1)
20#define NCR_FDX (1 << 3) 20#define NCR_FDX (1 << 3)
21#define NCR_WAKEEN (1 << 6) 21#define NCR_WAKEEN (1 << 6)
22/* Network Status Reg */ 22/* Network Status Reg */
23#define NSR 0x01 23#define SR_NSR 0x01
24#define NSR_RXRDY (1 << 0) 24#define NSR_RXRDY (1 << 0)
25#define NSR_RXOV (1 << 1) 25#define NSR_RXOV (1 << 1)
26#define NSR_TX1END (1 << 2) 26#define NSR_TX1END (1 << 2)
@@ -30,7 +30,7 @@
30#define NSR_LINKST (1 << 6) 30#define NSR_LINKST (1 << 6)
31#define NSR_SPEED (1 << 7) 31#define NSR_SPEED (1 << 7)
32/* Tx Control Reg */ 32/* Tx Control Reg */
33#define TCR 0x02 33#define SR_TCR 0x02
34#define TCR_CRC_DIS (1 << 1) 34#define TCR_CRC_DIS (1 << 1)
35#define TCR_PAD_DIS (1 << 2) 35#define TCR_PAD_DIS (1 << 2)
36#define TCR_LC_CARE (1 << 3) 36#define TCR_LC_CARE (1 << 3)
@@ -38,7 +38,7 @@
38#define TCR_EXCECM (1 << 5) 38#define TCR_EXCECM (1 << 5)
39#define TCR_LF_EN (1 << 6) 39#define TCR_LF_EN (1 << 6)
40/* Tx Status Reg for Packet Index 1 */ 40/* Tx Status Reg for Packet Index 1 */
41#define TSR1 0x03 41#define SR_TSR1 0x03
42#define TSR1_EC (1 << 2) 42#define TSR1_EC (1 << 2)
43#define TSR1_COL (1 << 3) 43#define TSR1_COL (1 << 3)
44#define TSR1_LC (1 << 4) 44#define TSR1_LC (1 << 4)
@@ -46,7 +46,7 @@
46#define TSR1_LOC (1 << 6) 46#define TSR1_LOC (1 << 6)
47#define TSR1_TLF (1 << 7) 47#define TSR1_TLF (1 << 7)
48/* Tx Status Reg for Packet Index 2 */ 48/* Tx Status Reg for Packet Index 2 */
49#define TSR2 0x04 49#define SR_TSR2 0x04
50#define TSR2_EC (1 << 2) 50#define TSR2_EC (1 << 2)
51#define TSR2_COL (1 << 3) 51#define TSR2_COL (1 << 3)
52#define TSR2_LC (1 << 4) 52#define TSR2_LC (1 << 4)
@@ -54,7 +54,7 @@
54#define TSR2_LOC (1 << 6) 54#define TSR2_LOC (1 << 6)
55#define TSR2_TLF (1 << 7) 55#define TSR2_TLF (1 << 7)
56/* Rx Control Reg*/ 56/* Rx Control Reg*/
57#define RCR 0x05 57#define SR_RCR 0x05
58#define RCR_RXEN (1 << 0) 58#define RCR_RXEN (1 << 0)
59#define RCR_PRMSC (1 << 1) 59#define RCR_PRMSC (1 << 1)
60#define RCR_RUNT (1 << 2) 60#define RCR_RUNT (1 << 2)
@@ -62,87 +62,87 @@
62#define RCR_DIS_CRC (1 << 4) 62#define RCR_DIS_CRC (1 << 4)
63#define RCR_DIS_LONG (1 << 5) 63#define RCR_DIS_LONG (1 << 5)
64/* Rx Status Reg */ 64/* Rx Status Reg */
65#define RSR 0x06 65#define SR_RSR 0x06
66#define RSR_AE (1 << 2) 66#define RSR_AE (1 << 2)
67#define RSR_MF (1 << 6) 67#define RSR_MF (1 << 6)
68#define RSR_RF (1 << 7) 68#define RSR_RF (1 << 7)
69/* Rx Overflow Counter Reg */ 69/* Rx Overflow Counter Reg */
70#define ROCR 0x07 70#define SR_ROCR 0x07
71#define ROCR_ROC (0x7F << 0) 71#define ROCR_ROC (0x7F << 0)
72#define ROCR_RXFU (1 << 7) 72#define ROCR_RXFU (1 << 7)
73/* Back Pressure Threshold Reg */ 73/* Back Pressure Threshold Reg */
74#define BPTR 0x08 74#define SR_BPTR 0x08
75#define BPTR_JPT (0x0F << 0) 75#define BPTR_JPT (0x0F << 0)
76#define BPTR_BPHW (0x0F << 4) 76#define BPTR_BPHW (0x0F << 4)
77/* Flow Control Threshold Reg */ 77/* Flow Control Threshold Reg */
78#define FCTR 0x09 78#define SR_FCTR 0x09
79#define FCTR_LWOT (0x0F << 0) 79#define FCTR_LWOT (0x0F << 0)
80#define FCTR_HWOT (0x0F << 4) 80#define FCTR_HWOT (0x0F << 4)
81/* rx/tx Flow Control Reg */ 81/* rx/tx Flow Control Reg */
82#define FCR 0x0A 82#define SR_FCR 0x0A
83#define FCR_FLCE (1 << 0) 83#define FCR_FLCE (1 << 0)
84#define FCR_BKPA (1 << 4) 84#define FCR_BKPA (1 << 4)
85#define FCR_TXPEN (1 << 5) 85#define FCR_TXPEN (1 << 5)
86#define FCR_TXPF (1 << 6) 86#define FCR_TXPF (1 << 6)
87#define FCR_TXP0 (1 << 7) 87#define FCR_TXP0 (1 << 7)
88/* Eeprom & Phy Control Reg */ 88/* Eeprom & Phy Control Reg */
89#define EPCR 0x0B 89#define SR_EPCR 0x0B
90#define EPCR_ERRE (1 << 0) 90#define EPCR_ERRE (1 << 0)
91#define EPCR_ERPRW (1 << 1) 91#define EPCR_ERPRW (1 << 1)
92#define EPCR_ERPRR (1 << 2) 92#define EPCR_ERPRR (1 << 2)
93#define EPCR_EPOS (1 << 3) 93#define EPCR_EPOS (1 << 3)
94#define EPCR_WEP (1 << 4) 94#define EPCR_WEP (1 << 4)
95/* Eeprom & Phy Address Reg */ 95/* Eeprom & Phy Address Reg */
96#define EPAR 0x0C 96#define SR_EPAR 0x0C
97#define EPAR_EROA (0x3F << 0) 97#define EPAR_EROA (0x3F << 0)
98#define EPAR_PHY_ADR_MASK (0x03 << 6) 98#define EPAR_PHY_ADR_MASK (0x03 << 6)
99#define EPAR_PHY_ADR (0x01 << 6) 99#define EPAR_PHY_ADR (0x01 << 6)
100/* Eeprom & Phy Data Reg */ 100/* Eeprom & Phy Data Reg */
101#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ 101#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
102/* Wakeup Control Reg */ 102/* Wakeup Control Reg */
103#define WCR 0x0F 103#define SR_WCR 0x0F
104#define WCR_MAGICST (1 << 0) 104#define WCR_MAGICST (1 << 0)
105#define WCR_LINKST (1 << 2) 105#define WCR_LINKST (1 << 2)
106#define WCR_MAGICEN (1 << 3) 106#define WCR_MAGICEN (1 << 3)
107#define WCR_LINKEN (1 << 5) 107#define WCR_LINKEN (1 << 5)
108/* Physical Address Reg */ 108/* Physical Address Reg */
109#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ 109#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
110/* Multicast Address Reg */ 110/* Multicast Address Reg */
111#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ 111#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
112/* 0x1e unused */ 112/* 0x1e unused */
113/* Phy Reset Reg */ 113/* Phy Reset Reg */
114#define PRR 0x1F 114#define SR_PRR 0x1F
115#define PRR_PHY_RST (1 << 0) 115#define PRR_PHY_RST (1 << 0)
116/* Tx sdram Write Pointer Address Low */ 116/* Tx sdram Write Pointer Address Low */
117#define TWPAL 0x20 117#define SR_TWPAL 0x20
118/* Tx sdram Write Pointer Address High */ 118/* Tx sdram Write Pointer Address High */
119#define TWPAH 0x21 119#define SR_TWPAH 0x21
120/* Tx sdram Read Pointer Address Low */ 120/* Tx sdram Read Pointer Address Low */
121#define TRPAL 0x22 121#define SR_TRPAL 0x22
122/* Tx sdram Read Pointer Address High */ 122/* Tx sdram Read Pointer Address High */
123#define TRPAH 0x23 123#define SR_TRPAH 0x23
124/* Rx sdram Write Pointer Address Low */ 124/* Rx sdram Write Pointer Address Low */
125#define RWPAL 0x24 125#define SR_RWPAL 0x24
126/* Rx sdram Write Pointer Address High */ 126/* Rx sdram Write Pointer Address High */
127#define RWPAH 0x25 127#define SR_RWPAH 0x25
128/* Rx sdram Read Pointer Address Low */ 128/* Rx sdram Read Pointer Address Low */
129#define RRPAL 0x26 129#define SR_RRPAL 0x26
130/* Rx sdram Read Pointer Address High */ 130/* Rx sdram Read Pointer Address High */
131#define RRPAH 0x27 131#define SR_RRPAH 0x27
132/* Vendor ID register */ 132/* Vendor ID register */
133#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ 133#define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
134/* Product ID register */ 134/* Product ID register */
135#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ 135#define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
136/* CHIP Revision register */ 136/* CHIP Revision register */
137#define CHIPR 0x2C 137#define SR_CHIPR 0x2C
138/* 0x2D --> 0xEF unused */ 138/* 0x2D --> 0xEF unused */
139/* USB Device Address */ 139/* USB Device Address */
140#define USBDA 0xF0 140#define SR_USBDA 0xF0
141#define USBDA_USBFA (0x7F << 0) 141#define USBDA_USBFA (0x7F << 0)
142/* RX packet Counter Reg */ 142/* RX packet Counter Reg */
143#define RXC 0xF1 143#define SR_RXC 0xF1
144/* Tx packet Counter & USB Status Reg */ 144/* Tx packet Counter & USB Status Reg */
145#define TXC_USBS 0xF2 145#define SR_TXC_USBS 0xF2
146#define TXC_USBS_TXC0 (1 << 0) 146#define TXC_USBS_TXC0 (1 << 0)
147#define TXC_USBS_TXC1 (1 << 1) 147#define TXC_USBS_TXC1 (1 << 1)
148#define TXC_USBS_TXC2 (1 << 2) 148#define TXC_USBS_TXC2 (1 << 2)
@@ -150,7 +150,7 @@
150#define TXC_USBS_SUSFLAG (1 << 6) 150#define TXC_USBS_SUSFLAG (1 << 6)
151#define TXC_USBS_RXFAULT (1 << 7) 151#define TXC_USBS_RXFAULT (1 << 7)
152/* USB Control register */ 152/* USB Control register */
153#define USBC 0xF4 153#define SR_USBC 0xF4
154#define USBC_EP3NAK (1 << 4) 154#define USBC_EP3NAK (1 << 4)
155#define USBC_EP3ACK (1 << 5) 155#define USBC_EP3ACK (1 << 5)
156 156
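The sr9700.h rename above is purely mechanical: every register offset gains an SR_ prefix so the driver no longer claims generic names like NCR, PAR or RCR in the global preprocessor namespace. A sketch of the failure mode the prefix avoids; the colliding platform header and its value are hypothetical:

    /* <mach/hypothetical-regs.h>: an unrelated header that happens to
     * be in the include chain on some platform */
    #define PAR     0x4000

    /* An unprefixed "#define PAR 0x10" in sr9700.h would now trigger
     *     warning: "PAR" redefined
     * and whichever definition came last would silently win.  The
     * prefixed name cannot clash: */
    #define SR_PAR  0x10    /* sr9700 Physical Address Reg, 0x10..0x15 */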
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5ca97713bfb3..059fdf1bf5ee 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
491 break; 491 break;
492 case VIRTIO_NET_HDR_GSO_UDP: 492 case VIRTIO_NET_HDR_GSO_UDP:
493 {
494 static bool warned;
495
496 if (!warned) {
497 warned = true;
498 netdev_warn(dev,
499 "host using disabled UFO feature; please fix it\n");
500 }
501 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 493 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
502 break; 494 break;
503 }
504 case VIRTIO_NET_HDR_GSO_TCPV6: 495 case VIRTIO_NET_HDR_GSO_TCPV6:
505 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 496 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
506 break; 497 break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
888 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 879 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
889 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 880 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 881 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
882 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
883 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
891 else 884 else
892 BUG(); 885 BUG();
893 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 886 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1748,7 +1741,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1748 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1741 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1749 1742
1750 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1743 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1751 dev->hw_features |= NETIF_F_TSO 1744 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
1752 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 1745 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
1753 } 1746 }
1754 /* Individual feature bits: what can host handle? */ 1747 /* Individual feature bits: what can host handle? */
@@ -1758,9 +1751,11 @@ static int virtnet_probe(struct virtio_device *vdev)
1758 dev->hw_features |= NETIF_F_TSO6; 1751 dev->hw_features |= NETIF_F_TSO6;
1759 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 1752 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1760 dev->hw_features |= NETIF_F_TSO_ECN; 1753 dev->hw_features |= NETIF_F_TSO_ECN;
1754 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1755 dev->hw_features |= NETIF_F_UFO;
1761 1756
1762 if (gso) 1757 if (gso)
1763 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 1758 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1764 /* (!csum && gso) case will be fixed by register_netdev() */ 1759 /* (!csum && gso) case will be fixed by register_netdev() */
1765 } 1760 }
1766 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 1761 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1798,7 +1793,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1798 /* If we can receive ANY GSO packets, we must allocate large ones. */ 1793 /* If we can receive ANY GSO packets, we must allocate large ones. */
1799 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1794 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1800 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 1795 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1801 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) 1796 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
1797 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1802 vi->big_packets = true; 1798 vi->big_packets = true;
1803 1799
1804 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1800 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1994,9 +1990,9 @@ static struct virtio_device_id id_table[] = {
1994static unsigned int features[] = { 1990static unsigned int features[] = {
1995 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 1991 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1996 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 1992 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1997 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, 1993 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
1998 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 1994 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1999 VIRTIO_NET_F_GUEST_ECN, 1995 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
2000 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1996 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
2001 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1997 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
2002 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 1998 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
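virtio_net gets the mirror-image restore: the GUEST/HOST_UFO feature bits are advertised again and SKB_GSO_UDP is translated to and from VIRTIO_NET_HDR_GSO_UDP. The transmit-side mapping condenses to something like the following sketch, which restates the patched xmit_skb() logic rather than adding a new helper to the driver:

    #include <linux/skbuff.h>
    #include <linux/virtio_net.h>

    static int gso_type_to_virtio(const struct sk_buff *skb, u8 *out)
    {
        const struct skb_shared_info *sinfo = skb_shinfo(skb);
        u8 type;

        if (sinfo->gso_type & SKB_GSO_TCPV4)
            type = VIRTIO_NET_HDR_GSO_TCPV4;
        else if (sinfo->gso_type & SKB_GSO_TCPV6)
            type = VIRTIO_NET_HDR_GSO_TCPV6;
        else if (sinfo->gso_type & SKB_GSO_UDP)     /* restored by this series */
            type = VIRTIO_NET_HDR_GSO_UDP;
        else
            return -EINVAL;

        if (sinfo->gso_type & SKB_GSO_TCP_ECN)
            type |= VIRTIO_NET_HDR_GSO_ECN;
        *out = type;
        return 0;
    }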
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7fbd89fbe107..a8c755dcab14 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2432,10 +2432,10 @@ static void vxlan_sock_work(struct work_struct *work)
2432 dev_put(vxlan->dev); 2432 dev_put(vxlan->dev);
2433} 2433}
2434 2434
2435static int vxlan_newlink(struct net *net, struct net_device *dev, 2435static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2436 struct nlattr *tb[], struct nlattr *data[]) 2436 struct nlattr *tb[], struct nlattr *data[])
2437{ 2437{
2438 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2438 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2439 struct vxlan_dev *vxlan = netdev_priv(dev); 2439 struct vxlan_dev *vxlan = netdev_priv(dev);
2440 struct vxlan_rdst *dst = &vxlan->default_dst; 2440 struct vxlan_rdst *dst = &vxlan->default_dst;
2441 __u32 vni; 2441 __u32 vni;
@@ -2445,7 +2445,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2445 if (!data[IFLA_VXLAN_ID]) 2445 if (!data[IFLA_VXLAN_ID])
2446 return -EINVAL; 2446 return -EINVAL;
2447 2447
2448 vxlan->net = dev_net(dev); 2448 vxlan->net = src_net;
2449 2449
2450 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2450 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2451 dst->remote_vni = vni; 2451 dst->remote_vni = vni;
@@ -2481,7 +2481,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2481 if (data[IFLA_VXLAN_LINK] && 2481 if (data[IFLA_VXLAN_LINK] &&
2482 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { 2482 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
2483 struct net_device *lowerdev 2483 struct net_device *lowerdev
2484 = __dev_get_by_index(net, dst->remote_ifindex); 2484 = __dev_get_by_index(src_net, dst->remote_ifindex);
2485 2485
2486 if (!lowerdev) { 2486 if (!lowerdev) {
2487 pr_info("ifindex %d does not exist\n", dst->remote_ifindex); 2487 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2557,7 +2557,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2557 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 2557 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2558 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 2558 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2559 2559
2560 if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, 2560 if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2561 vxlan->dst_port)) { 2561 vxlan->dst_port)) {
2562 pr_info("duplicate VNI %u\n", vni); 2562 pr_info("duplicate VNI %u\n", vni);
2563 return -EEXIST; 2563 return -EEXIST;
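The vxlan change is about namespace correctness in ->newlink(): rtnetlink passes the namespace the request was issued in (src_net) separately from the namespace the new device will live in, and IFLA_VXLAN_LINK must be resolved in the former. A generic sketch of the rule; the handler name is illustrative:

    static int example_newlink(struct net *src_net, struct net_device *dev,
                               struct nlattr *tb[], struct nlattr *data[])
    {
        struct net_device *lowerdev;

        if (!data[IFLA_VXLAN_LINK])
            return -EINVAL;

        /* Look the lower device up where the request came from, not in
         * dev_net(dev), which may already be the target namespace. */
        lowerdev = __dev_get_by_index(src_net,
                                      nla_get_u32(data[IFLA_VXLAN_LINK]));
        if (!lowerdev)
            return -ENODEV;
        /* ... */
        return 0;
    }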
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 94e234975c61..a2fdd15f285a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
25# There is no way to detect a comtrol sv11 - force it modular for now. 25# There is no way to detect a comtrol sv11 - force it modular for now.
26config HOSTESS_SV11 26config HOSTESS_SV11
27 tristate "Comtrol Hostess SV-11 support" 27 tristate "Comtrol Hostess SV-11 support"
28 depends on ISA && m && ISA_DMA_API && INET && HDLC 28 depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
29 help 29 help
30 Driver for Comtrol Hostess SV-11 network card which 30 Driver for Comtrol Hostess SV-11 network card which
31 operates on low speed synchronous serial links at up to 31 operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
37# The COSA/SRP driver has not been tested as non-modular yet. 37# The COSA/SRP driver has not been tested as non-modular yet.
38config COSA 38config COSA
39 tristate "COSA/SRP sync serial boards support" 39 tristate "COSA/SRP sync serial boards support"
40 depends on ISA && m && ISA_DMA_API && HDLC 40 depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
41 ---help--- 41 ---help---
42 Driver for COSA and SRP synchronous serial boards. 42 Driver for COSA and SRP synchronous serial boards.
43 43
@@ -87,7 +87,7 @@ config LANMEDIA
87# There is no way to detect a Sealevel board. Force it modular 87# There is no way to detect a Sealevel board. Force it modular
88config SEALEVEL_4021 88config SEALEVEL_4021
89 tristate "Sealevel Systems 4021 support" 89 tristate "Sealevel Systems 4021 support"
90 depends on ISA && m && ISA_DMA_API && INET && HDLC 90 depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
91 help 91 help
92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
93 93
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9a72640237cb..62b0bf4fdf6b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
285 285
286 __ath_cancel_work(sc); 286 __ath_cancel_work(sc);
287 287
288 disable_irq(sc->irq);
288 tasklet_disable(&sc->intr_tq); 289 tasklet_disable(&sc->intr_tq);
289 tasklet_disable(&sc->bcon_tasklet); 290 tasklet_disable(&sc->bcon_tasklet);
290 spin_lock_bh(&sc->sc_pcu_lock); 291 spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
331 r = -EIO; 332 r = -EIO;
332 333
333out: 334out:
335 enable_irq(sc->irq);
334 spin_unlock_bh(&sc->sc_pcu_lock); 336 spin_unlock_bh(&sc->sc_pcu_lock);
335 tasklet_enable(&sc->bcon_tasklet); 337 tasklet_enable(&sc->bcon_tasklet);
336 tasklet_enable(&sc->intr_tq); 338 tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
512 if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) 514 if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
513 return IRQ_NONE; 515 return IRQ_NONE;
514 516
515 if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
516 return IRQ_NONE;
517
518 /* shared irq, not for us */ 517 /* shared irq, not for us */
519 if (!ath9k_hw_intrpend(ah)) 518 if (!ath9k_hw_intrpend(ah))
520 return IRQ_NONE; 519 return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
529 ath9k_debug_sync_cause(sc, sync_cause); 528 ath9k_debug_sync_cause(sc, sync_cause);
530 status &= ah->imask; /* discard unasked-for bits */ 529 status &= ah->imask; /* discard unasked-for bits */
531 530
532 if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) 531 if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
533 return IRQ_HANDLED; 532 return IRQ_HANDLED;
534 533
535 /* 534 /*
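The ath9k hunk replaces flag tests in the ISR with masking of the interrupt line for the duration of the reset; disable_irq() also waits for a handler already in flight, so once it returns the reset path has the hardware to itself. The general shape of the pattern, with a hypothetical device structure and reset helper:

    #include <linux/interrupt.h>

    static void quiesced_reset(struct my_softc *sc)  /* my_softc is made up */
    {
        disable_irq(sc->irq);          /* sleeps until a running ISR exits */
        tasklet_disable(&sc->intr_tq);

        do_chip_reset(sc);             /* hypothetical reset helper */

        tasklet_enable(&sc->intr_tq);
        enable_irq(sc->irq);
    }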
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 1bbe4fc47b97..660ddb1b7d8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {
246 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, 246 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
247 * regardless of the band or the number of the probes. FW will calculate 247 * regardless of the band or the number of the probes. FW will calculate
248 * the actual dwell time. 248 * the actual dwell time.
249 * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
249 */ 250 */
250enum iwl_ucode_tlv_api { 251enum iwl_ucode_tlv_api {
251 IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), 252 IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api {
257 IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), 258 IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
258 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), 259 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
259 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), 260 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
261 IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
260}; 262};
261 263
262/** 264/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 201846de94e7..cfc0e65b34a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
653}; 653};
654 654
655/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S 655/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
656 * @flags: enum iwl_scan_channel_flgs 656 * @flags: enum iwl_scan_channel_flags
657 * @non_ebs_ratio: how many regular scan iteration before EBS 657 * @non_ebs_ratio: defines the ratio of scan iterations in which EBS is
658 * used:
659 * 1 - EBS is disabled.
660 * 2 - every second scan will be a full scan (and so on).
658 */ 661 */
659struct iwl_scan_channel_opt { 662struct iwl_scan_channel_opt {
660 __le16 flags; 663 __le16 flags;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e880f9d4717b..20915587c820 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3343 msk |= mvmsta->tfd_queue_msk; 3343 msk |= mvmsta->tfd_queue_msk;
3344 } 3344 }
3345 3345
3346 if (drop) { 3346 msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
3347 if (iwl_mvm_flush_tx_path(mvm, msk, true))
3348 IWL_ERR(mvm, "flush request fail\n");
3349 mutex_unlock(&mvm->mutex);
3350 } else {
3351 mutex_unlock(&mvm->mutex);
3352 3347
3353 /* this can take a while, and we may need/want other operations 3348 if (iwl_mvm_flush_tx_path(mvm, msk, true))
3354 * to succeed while doing this, so do it without the mutex held 3349 IWL_ERR(mvm, "flush request fail\n");
3355 */ 3350 mutex_unlock(&mvm->mutex);
3356 iwl_trans_wait_tx_queue_empty(mvm->trans, msk); 3351
3357 } 3352 /* this can take a while, and we may need/want other operations
3353 * to succeed while doing this, so do it without the mutex held
3354 */
3355 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3358} 3356}
3359 3357
3360const struct ieee80211_ops iwl_mvm_hw_ops = { 3358const struct ieee80211_ops iwl_mvm_hw_ops = {
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index ec9a8e7bae1d..844bf7c4c8de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,6 +72,8 @@
72 72
73#define IWL_PLCP_QUIET_THRESH 1 73#define IWL_PLCP_QUIET_THRESH 1
74#define IWL_ACTIVE_QUIET_TIME 10 74#define IWL_ACTIVE_QUIET_TIME 10
75#define IWL_DENSE_EBS_SCAN_RATIO 5
76#define IWL_SPARSE_EBS_SCAN_RATIO 1
75 77
76struct iwl_mvm_scan_params { 78struct iwl_mvm_scan_params {
77 u32 max_out_time; 79 u32 max_out_time;
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1105 return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, 1107 return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
1106 notify); 1108 notify);
1107 1109
1110 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
1111 return 0;
1112
1113 if (iwl_mvm_is_radio_killed(mvm))
1114 goto out;
1115
1108 if (mvm->scan_status != IWL_MVM_SCAN_SCHED && 1116 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
1109 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || 1117 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
1110 mvm->scan_status != IWL_MVM_SCAN_OS)) { 1118 mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1141 if (mvm->scan_status == IWL_MVM_SCAN_OS) 1149 if (mvm->scan_status == IWL_MVM_SCAN_OS)
1142 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 1150 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1143 1151
1152out:
1144 mvm->scan_status = IWL_MVM_SCAN_NONE; 1153 mvm->scan_status = IWL_MVM_SCAN_NONE;
1145 1154
1146 if (notify) { 1155 if (notify) {
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
1297 cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); 1306 cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
1298 cmd->iter_num = cpu_to_le32(1); 1307 cmd->iter_num = cpu_to_le32(1);
1299 1308
1300 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
1301 mvm->last_ebs_successful) {
1302 cmd->channel_opt[0].flags =
1303 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1304 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1305 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1306 cmd->channel_opt[1].flags =
1307 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1308 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1309 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1310 }
1311
1312 if (iwl_mvm_rrm_scan_needed(mvm)) 1309 if (iwl_mvm_rrm_scan_needed(mvm))
1313 cmd->scan_flags |= 1310 cmd->scan_flags |=
1314 cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); 1311 cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
1383 cmd->schedule[1].iterations = 0; 1380 cmd->schedule[1].iterations = 0;
1384 cmd->schedule[1].full_scan_mul = 0; 1381 cmd->schedule[1].full_scan_mul = 0;
1385 1382
1383 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
1384 mvm->last_ebs_successful) {
1385 cmd->channel_opt[0].flags =
1386 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1387 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1388 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1389 cmd->channel_opt[0].non_ebs_ratio =
1390 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
1391 cmd->channel_opt[1].flags =
1392 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1393 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1394 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1395 cmd->channel_opt[1].non_ebs_ratio =
1396 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
1397 }
1398
1386 for (i = 1; i <= req->req.n_ssids; i++) 1399 for (i = 1; i <= req->req.n_ssids; i++)
1387 ssid_bitmap |= BIT(i); 1400 ssid_bitmap |= BIT(i);
1388 1401
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
1483 cmd->schedule[1].iterations = 0xff; 1496 cmd->schedule[1].iterations = 0xff;
1484 cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; 1497 cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
1485 1498
1499 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
1500 mvm->last_ebs_successful) {
1501 cmd->channel_opt[0].flags =
1502 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1503 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1504 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1505 cmd->channel_opt[0].non_ebs_ratio =
1506 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
1507 cmd->channel_opt[1].flags =
1508 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
1509 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1510 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
1511 cmd->channel_opt[1].non_ebs_ratio =
1512 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
1513 }
1514
1486 iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, 1515 iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
1487 ssid_bitmap, cmd); 1516 ssid_bitmap, cmd);
1488 1517
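Both LMAC scan commands now fill channel_opt[] with the EBS flags plus a cadence, using the two new constants (5 for channel_opt[0], 1 for channel_opt[1]). Reading the updated header comment, a ratio of 1 disables EBS entirely and a ratio of N makes every Nth iteration a regular full scan. A small sketch of that reading; the exact firmware behaviour may differ in detail:

    #include <linux/types.h>

    static bool iteration_is_full_scan(u16 non_ebs_ratio, unsigned int iter)
    {
        if (non_ebs_ratio <= 1)
            return true;                   /* EBS disabled: all full scans */
        return (iter % non_ebs_ratio) == 0; /* every Nth pass is full */
    }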
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4333306ccdee..c59d07567d90 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
90 90
91 if (ieee80211_is_probe_resp(fc)) 91 if (ieee80211_is_probe_resp(fc))
92 tx_flags |= TX_CMD_FLG_TSF; 92 tx_flags |= TX_CMD_FLG_TSF;
93 else if (ieee80211_is_back_req(fc))
94 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
95 93
96 if (ieee80211_has_morefrags(fc)) 94 if (ieee80211_has_morefrags(fc))
97 tx_flags |= TX_CMD_FLG_MORE_FRAG; 95 tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
100 u8 *qc = ieee80211_get_qos_ctl(hdr); 98 u8 *qc = ieee80211_get_qos_ctl(hdr);
101 tx_cmd->tid_tspec = qc[0] & 0xf; 99 tx_cmd->tid_tspec = qc[0] & 0xf;
102 tx_flags &= ~TX_CMD_FLG_SEQ_CTL; 100 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
101 } else if (ieee80211_is_back_req(fc)) {
102 struct ieee80211_bar *bar = (void *)skb->data;
103 u16 control = le16_to_cpu(bar->control);
104
105 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
106 tx_cmd->tid_tspec = (control &
107 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
108 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
109 WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
103 } else { 110 } else {
104 tx_cmd->tid_tspec = IWL_TID_NON_QOS; 111 tx_cmd->tid_tspec = IWL_TID_NON_QOS;
105 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) 112 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
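For BlockAckReq frames the TX command now carries the real TID, pulled from the BAR control field with the mac80211 mask/shift definitions instead of defaulting to the non-QoS TID. Extracted into a standalone helper, the lookup is just:

    #include <linux/ieee80211.h>
    #include <linux/skbuff.h>

    static u8 bar_frame_tid(const struct sk_buff *skb)
    {
        const struct ieee80211_bar *bar = (const void *)skb->data;
        u16 control = le16_to_cpu(bar->control);

        return (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
               IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
    }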
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9259a732e8a4..037f74f0fcf6 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
578 goto err_rx_unbind; 578 goto err_rx_unbind;
579 } 579 }
580 queue->task = task; 580 queue->task = task;
581 get_task_struct(task);
581 582
582 task = kthread_create(xenvif_dealloc_kthread, 583 task = kthread_create(xenvif_dealloc_kthread,
583 (void *)queue, "%s-dealloc", queue->name); 584 (void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif)
634 635
635 if (queue->task) { 636 if (queue->task) {
636 kthread_stop(queue->task); 637 kthread_stop(queue->task);
638 put_task_struct(queue->task);
637 queue->task = NULL; 639 queue->task = NULL;
638 } 640 }
639 641
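The xen-netback interface change pins the kthread's task_struct so a later kthread_stop() cannot race with the thread exiting and freeing its task. A sketch of the pattern; everything except the kthread/task API is an illustrative stand-in:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    struct my_queue {                   /* illustrative stand-in */
        struct task_struct *task;
        char name[16];
    };

    static int my_thread_fn(void *data)
    {
        while (!kthread_should_stop()) {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();                 /* placeholder work loop */
        }
        return 0;
    }

    static int start_worker(struct my_queue *q)
    {
        struct task_struct *task;

        task = kthread_create(my_thread_fn, q, "%s-worker", q->name);
        if (IS_ERR(task))
            return PTR_ERR(task);

        q->task = task;
        get_task_struct(task);          /* pin it: see stop_worker() */
        wake_up_process(task);
        return 0;
    }

    static void stop_worker(struct my_queue *q)
    {
        if (q->task) {
            /* the ref guarantees q->task is still valid here even if
             * the thread has already exited */
            kthread_stop(q->task);
            put_task_struct(q->task);
            q->task = NULL;
        }
    }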
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 908e65e9b821..c8ce701a7efb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -2109,8 +2109,7 @@ int xenvif_kthread_guest_rx(void *data)
2109 */ 2109 */
2110 if (unlikely(vif->disabled && queue->id == 0)) { 2110 if (unlikely(vif->disabled && queue->id == 0)) {
2111 xenvif_carrier_off(vif); 2111 xenvif_carrier_off(vif);
2112 xenvif_rx_queue_purge(queue); 2112 break;
2113 continue;
2114 } 2113 }
2115 2114
2116 if (!skb_queue_empty(&queue->rx_queue)) 2115 if (!skb_queue_empty(&queue->rx_queue))
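With that task reference in place, the guest-RX kthread can simply exit once the interface has been disabled, instead of purging its queue and looping; teardown now happens once, in kthread_stop() context. Schematically, with hypothetical helpers:

    #include <linux/kthread.h>

    struct my_rxq { bool disabled; };            /* illustrative stand-in */

    static void wait_for_rx_work(struct my_rxq *q) { }   /* hypothetical */
    static void carrier_off(struct my_rxq *q) { }        /* hypothetical */
    static void process_rx_queue(struct my_rxq *q) { }   /* hypothetical */

    static int my_rx_kthread(void *data)
    {
        struct my_rxq *q = data;

        while (!kthread_should_stop()) {
            wait_for_rx_work(q);

            if (unlikely(q->disabled)) {
                carrier_off(q);
                break;      /* previously: purge the queue and continue */
            }
            process_rx_queue(q);
        }
        return 0;
    }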
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index df781cdf13c1..17ca98657a28 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
283 struct msi_msg msg; 283 struct msi_msg msg;
284 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); 284 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
285 285
286 if (desc->msi_attrib.is_msix)
287 return -EINVAL;
288
286 irq = assign_irq(1, desc, &pos); 289 irq = assign_irq(1, desc, &pos);
287 if (irq < 0) 290 if (irq < 0)
288 return irq; 291 return irq;
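The DesignWare MSI bridge implements plain MSI only, so its setup hook now rejects MSI-X descriptors outright; pci_enable_msix() then fails cleanly instead of getting a vector the bridge cannot deliver. The pattern, reduced to a stub around the msi_controller API of this kernel generation:

    #include <linux/msi.h>
    #include <linux/pci.h>

    static int my_msi_setup_irq(struct msi_controller *chip,
                                struct pci_dev *pdev, struct msi_desc *desc)
    {
        if (desc->msi_attrib.is_msix)   /* not supported by this bridge */
            return -EINVAL;

        /* ... allocate an hwirq, write the MSI address/data pair ... */
        return 0;
    }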
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e52356aa09b8..903d5078b5ed 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
324DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); 324DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
325DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); 325DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
326 326
327static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
328 const char *name)
329{
330 u32 region;
331 struct pci_bus_region bus_region;
332 struct resource *res = dev->resource + pos;
333
334 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
335
336 if (!region)
337 return;
338
339 res->name = pci_name(dev);
340 res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
341 res->flags |=
342 (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
343 region &= ~(size - 1);
344
345 /* Convert from PCI bus to resource space */
346 bus_region.start = region;
347 bus_region.end = region + size - 1;
348 pcibios_bus_to_resource(dev->bus, res, &bus_region);
349
350 dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
351 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
352}
353
327/* 354/*
328 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS 355 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
329 * ver. 1.33 20070103) don't set the correct ISA PCI region header info. 356 * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
330 * BAR0 should be 8 bytes; instead, it may be set to something like 8k 357 * BAR0 should be 8 bytes; instead, it may be set to something like 8k
331 * (which conflicts w/ BAR1's memory range). 358 * (which conflicts w/ BAR1's memory range).
359 *
360 * CS553x's ISA PCI BARs may also be read-only (ref:
361 * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
332 */ 362 */
333static void quirk_cs5536_vsa(struct pci_dev *dev) 363static void quirk_cs5536_vsa(struct pci_dev *dev)
334{ 364{
365 static char *name = "CS5536 ISA bridge";
366
335 if (pci_resource_len(dev, 0) != 8) { 367 if (pci_resource_len(dev, 0) != 8) {
336 struct resource *res = &dev->resource[0]; 368 quirk_io(dev, 0, 8, name); /* SMB */
337 res->end = res->start + 8 - 1; 369 quirk_io(dev, 1, 256, name); /* GPIO */
338 dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n"); 370 quirk_io(dev, 2, 64, name); /* MFGPT */
371 dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
372 name);
339 } 373 }
340} 374}
341DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); 375DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index dfd021e8268f..f4cd0b9b2438 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -177,7 +177,7 @@ struct at91_pinctrl {
177 struct device *dev; 177 struct device *dev;
178 struct pinctrl_dev *pctl; 178 struct pinctrl_dev *pctl;
179 179
180 int nbanks; 180 int nactive_banks;
181 181
182 uint32_t *mux_mask; 182 uint32_t *mux_mask;
183 int nmux; 183 int nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
653 int mux; 653 int mux;
654 654
655 /* check if it's a valid config */ 655 /* check if it's a valid config */
656 if (pin->bank >= info->nbanks) { 656 if (pin->bank >= gpio_banks) {
657 dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", 657 dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
658 name, index, pin->bank, info->nbanks); 658 name, index, pin->bank, gpio_banks);
659 return -EINVAL; 659 return -EINVAL;
660 } 660 }
661 661
662 if (!gpio_chips[pin->bank]) {
663 dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
664 name, index, pin->bank);
665 return -ENXIO;
666 }
667
662 if (pin->pin >= MAX_NB_GPIO_PER_BANK) { 668 if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
663 dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n", 669 dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
664 name, index, pin->pin, MAX_NB_GPIO_PER_BANK); 670 name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
981 987
982 for_each_child_of_node(np, child) { 988 for_each_child_of_node(np, child) {
983 if (of_device_is_compatible(child, gpio_compat)) { 989 if (of_device_is_compatible(child, gpio_compat)) {
984 info->nbanks++; 990 if (of_device_is_available(child))
991 info->nactive_banks++;
985 } else { 992 } else {
986 info->nfunctions++; 993 info->nfunctions++;
987 info->ngroups += of_get_child_count(child); 994 info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
1003 } 1010 }
1004 1011
1005 size /= sizeof(*list); 1012 size /= sizeof(*list);
1006 if (!size || size % info->nbanks) { 1013 if (!size || size % gpio_banks) {
1007 dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks); 1014 dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
1008 return -EINVAL; 1015 return -EINVAL;
1009 } 1016 }
1010 info->nmux = size / info->nbanks; 1017 info->nmux = size / gpio_banks;
1011 1018
1012 info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); 1019 info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
1013 if (!info->mux_mask) { 1020 if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
1131 of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; 1138 of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
1132 at91_pinctrl_child_count(info, np); 1139 at91_pinctrl_child_count(info, np);
1133 1140
1134 if (info->nbanks < 1) { 1141 if (gpio_banks < 1) {
1135 dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); 1142 dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
1136 return -EINVAL; 1143 return -EINVAL;
1137 } 1144 }
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
1144 1151
1145 dev_dbg(&pdev->dev, "mux-mask\n"); 1152 dev_dbg(&pdev->dev, "mux-mask\n");
1146 tmp = info->mux_mask; 1153 tmp = info->mux_mask;
1147 for (i = 0; i < info->nbanks; i++) { 1154 for (i = 0; i < gpio_banks; i++) {
1148 for (j = 0; j < info->nmux; j++, tmp++) { 1155 for (j = 0; j < info->nmux; j++, tmp++) {
1149 dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); 1156 dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
1150 } 1157 }
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
1162 if (!info->groups) 1169 if (!info->groups)
1163 return -ENOMEM; 1170 return -ENOMEM;
1164 1171
1165 dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks); 1172 dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
1166 dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); 1173 dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
1167 dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); 1174 dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
1168 1175
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
1185{ 1192{
1186 struct at91_pinctrl *info; 1193 struct at91_pinctrl *info;
1187 struct pinctrl_pin_desc *pdesc; 1194 struct pinctrl_pin_desc *pdesc;
1188 int ret, i, j, k; 1195 int ret, i, j, k, ngpio_chips_enabled = 0;
1189 1196
1190 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 1197 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
1191 if (!info) 1198 if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
1200 * to obtain references to the struct gpio_chip * for them, and we 1207 * to obtain references to the struct gpio_chip * for them, and we
1201 * need this to proceed. 1208 * need this to proceed.
1202 */ 1209 */
1203 for (i = 0; i < info->nbanks; i++) { 1210 for (i = 0; i < gpio_banks; i++)
1204 if (!gpio_chips[i]) { 1211 if (gpio_chips[i])
1205 dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); 1212 ngpio_chips_enabled++;
1206 devm_kfree(&pdev->dev, info); 1213
1207 return -EPROBE_DEFER; 1214 if (ngpio_chips_enabled < info->nactive_banks) {
1208 } 1215 dev_warn(&pdev->dev,
1216 "All GPIO chips are not registered yet (%d/%d)\n",
1217 ngpio_chips_enabled, info->nactive_banks);
1218 devm_kfree(&pdev->dev, info);
1219 return -EPROBE_DEFER;
1209 } 1220 }
1210 1221
1211 at91_pinctrl_desc.name = dev_name(&pdev->dev); 1222 at91_pinctrl_desc.name = dev_name(&pdev->dev);
1212 at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK; 1223 at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
1213 at91_pinctrl_desc.pins = pdesc = 1224 at91_pinctrl_desc.pins = pdesc =
1214 devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); 1225 devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
1215 1226
1216 if (!at91_pinctrl_desc.pins) 1227 if (!at91_pinctrl_desc.pins)
1217 return -ENOMEM; 1228 return -ENOMEM;
1218 1229
1219 for (i = 0 , k = 0; i < info->nbanks; i++) { 1230 for (i = 0, k = 0; i < gpio_banks; i++) {
1220 for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { 1231 for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
1221 pdesc->number = k; 1232 pdesc->number = k;
1222 pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); 1233 pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
1234 } 1245 }
1235 1246
1236 /* We will handle a range of GPIO pins */ 1247 /* We will handle a range of GPIO pins */
1237 for (i = 0; i < info->nbanks; i++) 1248 for (i = 0; i < gpio_banks; i++)
1238 pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); 1249 if (gpio_chips[i])
1250 pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
1239 1251
1240 dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); 1252 dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
1241 1253
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
1613static int at91_gpio_of_irq_setup(struct platform_device *pdev, 1625static int at91_gpio_of_irq_setup(struct platform_device *pdev,
1614 struct at91_gpio_chip *at91_gpio) 1626 struct at91_gpio_chip *at91_gpio)
1615{ 1627{
1628 struct gpio_chip *gpiochip_prev = NULL;
1616 struct at91_gpio_chip *prev = NULL; 1629 struct at91_gpio_chip *prev = NULL;
1617 struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); 1630 struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
1618 int ret; 1631 int ret, i;
1619 1632
1620 at91_gpio->pioc_hwirq = irqd_to_hwirq(d); 1633 at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
1621 1634
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
1641 return ret; 1654 return ret;
1642 } 1655 }
1643 1656
1644 /* Setup chained handler */
1645 if (at91_gpio->pioc_idx)
1646 prev = gpio_chips[at91_gpio->pioc_idx - 1];
1647
1648 /* The top level handler handles one bank of GPIOs, except 1657 /* The top level handler handles one bank of GPIOs, except
1649 * on some SoC it can handle up to three... 1658 * on some SoC it can handle up to three...
1650 * We only set up the handler for the first of the list. 1659 * We only set up the handler for the first of the list.
1651 */ 1660 */
1652 if (prev && prev->next == at91_gpio) 1661 gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
1662 if (!gpiochip_prev) {
1663 /* Then register the chain on the parent IRQ */
1664 gpiochip_set_chained_irqchip(&at91_gpio->chip,
1665 &gpio_irqchip,
1666 at91_gpio->pioc_virq,
1667 gpio_irq_handler);
1653 return 0; 1668 return 0;
1669 }
1654 1670
1655 /* Then register the chain on the parent IRQ */ 1671 prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
1656 gpiochip_set_chained_irqchip(&at91_gpio->chip,
1657 &gpio_irqchip,
1658 at91_gpio->pioc_virq,
1659 gpio_irq_handler);
1660 1672
1661 return 0; 1673 /* at most two banks can be chained before this one */
1674 for (i = 0; i < 2; i++) {
1675 if (prev->next) {
1676 prev = prev->next;
1677 } else {
1678 prev->next = at91_gpio;
1679 return 0;
1680 }
1681 }
1682
1683 return -EINVAL;
1662} 1684}
1663 1685
1664/* This structure is replicated for each GPIO block allocated at probe time */ 1686/* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
1675 .ngpio = MAX_NB_GPIO_PER_BANK, 1697 .ngpio = MAX_NB_GPIO_PER_BANK,
1676}; 1698};
1677 1699
1678static void at91_gpio_probe_fixup(void)
1679{
1680 unsigned i;
1681 struct at91_gpio_chip *at91_gpio, *last = NULL;
1682
1683 for (i = 0; i < gpio_banks; i++) {
1684 at91_gpio = gpio_chips[i];
1685
1686 /*
1687 * GPIO controller are grouped on some SoC:
1688 * PIOC, PIOD and PIOE can share the same IRQ line
1689 */
1690 if (last && last->pioc_virq == at91_gpio->pioc_virq)
1691 last->next = at91_gpio;
1692 last = at91_gpio;
1693 }
1694}
1695
1696static struct of_device_id at91_gpio_of_match[] = { 1700static struct of_device_id at91_gpio_of_match[] = {
1697 { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, 1701 { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
1698 { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, 1702 { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
1805 gpio_chips[alias_idx] = at91_chip; 1809 gpio_chips[alias_idx] = at91_chip;
1806 gpio_banks = max(gpio_banks, alias_idx + 1); 1810 gpio_banks = max(gpio_banks, alias_idx + 1);
1807 1811
1808 at91_gpio_probe_fixup();
1809
1810 ret = at91_gpio_of_irq_setup(pdev, at91_chip); 1812 ret = at91_gpio_of_irq_setup(pdev, at91_chip);
1811 if (ret) 1813 if (ret)
1812 goto irq_setup_err; 1814 goto irq_setup_err;
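The at91 rework drops the probe-order fixup: banks that share one PIO interrupt now find each other through the handler data that gpiochip_set_chained_irqchip() installs on the parent line, so only the first bank registers the chained handler and later ones append themselves behind it. The discovery step in isolation; the bank type, irqchip and link helper are illustrative stand-ins:

    #include <linux/gpio/driver.h>
    #include <linux/irq.h>
    #include <linux/kernel.h>

    struct my_bank {                    /* illustrative stand-in */
        struct gpio_chip chip;
        struct my_bank *next;
    };

    static struct irq_chip my_irqchip;  /* contents elided */

    static void my_parent_handler(unsigned int irq, struct irq_desc *desc)
    {
        /* demultiplex the bank(s) chained on this line */
    }

    static int link_behind_chip(struct gpio_chip *first, struct my_bank *bank)
    {
        struct my_bank *owner = container_of(first, struct my_bank, chip);

        while (owner->next)
            owner = owner->next;
        owner->next = bank;
        return 0;
    }

    static int setup_bank_irq(struct my_bank *bank, unsigned int parent_irq)
    {
        struct gpio_chip *first = irq_get_handler_data(parent_irq);

        if (!first) {
            /* first bank on this line owns the chained handler */
            gpiochip_set_chained_irqchip(&bank->chip, &my_irqchip,
                                         parent_irq, my_parent_handler);
            return 0;
        }

        /* later bank: queue behind the owner so its handler walks us */
        return link_behind_chip(first, bank);
    }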
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c3a60b57a865..a6f116aa5235 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -414,6 +414,14 @@ config REGULATOR_MAX77802
414 Exynos5420/Exynos5800 SoCs to control various voltages. 414 Exynos5420/Exynos5800 SoCs to control various voltages.
415 It includes support for control of voltage and ramp speed. 415 It includes support for control of voltage and ramp speed.
416 416
417config REGULATOR_MAX77843
418 tristate "Maxim 77843 regulator"
419 depends on MFD_MAX77843
420 help
421 This driver controls a Maxim 77843 regulator.
422 The regulator includes two 'SAFEOUT' regulators for USB (Universal Serial Bus).
423 This is suitable for Exynos5433 SoC chips.
424
417config REGULATOR_MC13XXX_CORE 425config REGULATOR_MC13XXX_CORE
418 tristate 426 tristate
419 427
@@ -433,6 +441,15 @@ config REGULATOR_MC13892
433 Say y here to support the regulators found on the Freescale MC13892 441 Say y here to support the regulators found on the Freescale MC13892
434 PMIC. 442 PMIC.
435 443
444config REGULATOR_MT6397
445 tristate "MediaTek MT6397 PMIC"
446 depends on MFD_MT6397
447 help
448 Say y here to enable the power regulators of the MediaTek MT6397
449 PMIC.
450 This driver controls the PMIC's power rails through the regulator
451 interface.
452
436config REGULATOR_PALMAS 453config REGULATOR_PALMAS
437 tristate "TI Palmas PMIC Regulators" 454 tristate "TI Palmas PMIC Regulators"
438 depends on MFD_PALMAS 455 depends on MFD_PALMAS
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 1f28ebfc6f3a..2c4da15e1545 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -55,9 +55,11 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
55obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o 55obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
56obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o 56obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
57obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o 57obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
58obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
58obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o 59obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
59obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o 60obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
60obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o 61obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
62obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
61obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o 63obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
62obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o 64obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
63obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o 65obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f23d7e1f2ee7..e4331f5e5d7d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -32,11 +32,13 @@
32 32
33#define AXP20X_FREQ_DCDC_MASK 0x0f 33#define AXP20X_FREQ_DCDC_MASK 0x0f
34 34
35#define AXP20X_DESC_IO(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ 35#define AXP20X_DESC_IO(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
36 _emask, _enable_val, _disable_val) \ 36 _ereg, _emask, _enable_val, _disable_val) \
37 [AXP20X_##_id] = { \ 37 [AXP20X_##_id] = { \
38 .name = #_id, \ 38 .name = #_id, \
39 .supply_name = (_supply), \ 39 .supply_name = (_supply), \
40 .of_match = of_match_ptr(_match), \
41 .regulators_node = of_match_ptr("regulators"), \
40 .type = REGULATOR_VOLTAGE, \ 42 .type = REGULATOR_VOLTAGE, \
41 .id = AXP20X_##_id, \ 43 .id = AXP20X_##_id, \
42 .n_voltages = (((_max) - (_min)) / (_step) + 1), \ 44 .n_voltages = (((_max) - (_min)) / (_step) + 1), \
@@ -52,11 +54,13 @@
52 .ops = &axp20x_ops, \ 54 .ops = &axp20x_ops, \
53 } 55 }
54 56
55#define AXP20X_DESC(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ 57#define AXP20X_DESC(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
56 _emask) \ 58 _ereg, _emask) \
57 [AXP20X_##_id] = { \ 59 [AXP20X_##_id] = { \
58 .name = #_id, \ 60 .name = #_id, \
59 .supply_name = (_supply), \ 61 .supply_name = (_supply), \
62 .of_match = of_match_ptr(_match), \
63 .regulators_node = of_match_ptr("regulators"), \
60 .type = REGULATOR_VOLTAGE, \ 64 .type = REGULATOR_VOLTAGE, \
61 .id = AXP20X_##_id, \ 65 .id = AXP20X_##_id, \
62 .n_voltages = (((_max) - (_min)) / (_step) + 1), \ 66 .n_voltages = (((_max) - (_min)) / (_step) + 1), \
@@ -70,10 +74,12 @@
70 .ops = &axp20x_ops, \ 74 .ops = &axp20x_ops, \
71 } 75 }
72 76
73#define AXP20X_DESC_FIXED(_id, _supply, _volt) \ 77#define AXP20X_DESC_FIXED(_id, _match, _supply, _volt) \
74 [AXP20X_##_id] = { \ 78 [AXP20X_##_id] = { \
75 .name = #_id, \ 79 .name = #_id, \
76 .supply_name = (_supply), \ 80 .supply_name = (_supply), \
81 .of_match = of_match_ptr(_match), \
82 .regulators_node = of_match_ptr("regulators"), \
77 .type = REGULATOR_VOLTAGE, \ 83 .type = REGULATOR_VOLTAGE, \
78 .id = AXP20X_##_id, \ 84 .id = AXP20X_##_id, \
79 .n_voltages = 1, \ 85 .n_voltages = 1, \
@@ -82,10 +88,13 @@
82 .ops = &axp20x_ops_fixed \ 88 .ops = &axp20x_ops_fixed \
83 } 89 }
84 90
85#define AXP20X_DESC_TABLE(_id, _supply, _table, _vreg, _vmask, _ereg, _emask) \ 91#define AXP20X_DESC_TABLE(_id, _match, _supply, _table, _vreg, _vmask, _ereg, \
92 _emask) \
86 [AXP20X_##_id] = { \ 93 [AXP20X_##_id] = { \
87 .name = #_id, \ 94 .name = #_id, \
88 .supply_name = (_supply), \ 95 .supply_name = (_supply), \
96 .of_match = of_match_ptr(_match), \
97 .regulators_node = of_match_ptr("regulators"), \
89 .type = REGULATOR_VOLTAGE, \ 98 .type = REGULATOR_VOLTAGE, \
90 .id = AXP20X_##_id, \ 99 .id = AXP20X_##_id, \
91 .n_voltages = ARRAY_SIZE(_table), \ 100 .n_voltages = ARRAY_SIZE(_table), \
@@ -127,36 +136,20 @@ static struct regulator_ops axp20x_ops = {
127}; 136};
128 137
129static const struct regulator_desc axp20x_regulators[] = { 138static const struct regulator_desc axp20x_regulators[] = {
130 AXP20X_DESC(DCDC2, "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, 0x3f, 139 AXP20X_DESC(DCDC2, "dcdc2", "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT,
131 AXP20X_PWR_OUT_CTRL, 0x10), 140 0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
132 AXP20X_DESC(DCDC3, "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, 0x7f, 141 AXP20X_DESC(DCDC3, "dcdc3", "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT,
133 AXP20X_PWR_OUT_CTRL, 0x02), 142 0x7f, AXP20X_PWR_OUT_CTRL, 0x02),
134 AXP20X_DESC_FIXED(LDO1, "acin", 1300), 143 AXP20X_DESC_FIXED(LDO1, "ldo1", "acin", 1300),
135 AXP20X_DESC(LDO2, "ldo24in", 1800, 3300, 100, AXP20X_LDO24_V_OUT, 0xf0, 144 AXP20X_DESC(LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
136 AXP20X_PWR_OUT_CTRL, 0x04), 145 AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
137 AXP20X_DESC(LDO3, "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, 0x7f, 146 AXP20X_DESC(LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT,
138 AXP20X_PWR_OUT_CTRL, 0x40), 147 0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
139 AXP20X_DESC_TABLE(LDO4, "ldo24in", axp20x_ldo4_data, AXP20X_LDO24_V_OUT, 0x0f, 148 AXP20X_DESC_TABLE(LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
140 AXP20X_PWR_OUT_CTRL, 0x08), 149 AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
141 AXP20X_DESC_IO(LDO5, "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, 0xf0, 150 AXP20X_DESC_IO(LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
142 AXP20X_GPIO0_CTRL, 0x07, AXP20X_IO_ENABLED, 151 AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
143 AXP20X_IO_DISABLED), 152 AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
144};
145
146#define AXP_MATCH(_name, _id) \
147 [AXP20X_##_id] = { \
148 .name = #_name, \
149 .driver_data = (void *) &axp20x_regulators[AXP20X_##_id], \
150 }
151
152static struct of_regulator_match axp20x_matches[] = {
153 AXP_MATCH(dcdc2, DCDC2),
154 AXP_MATCH(dcdc3, DCDC3),
155 AXP_MATCH(ldo1, LDO1),
156 AXP_MATCH(ldo2, LDO2),
157 AXP_MATCH(ldo3, LDO3),
158 AXP_MATCH(ldo4, LDO4),
159 AXP_MATCH(ldo5, LDO5),
160}; 153};
161 154
162static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) 155static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
@@ -193,13 +186,6 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
193 if (!regulators) { 186 if (!regulators) {
194 dev_warn(&pdev->dev, "regulators node not found\n"); 187 dev_warn(&pdev->dev, "regulators node not found\n");
195 } else { 188 } else {
196 ret = of_regulator_match(&pdev->dev, regulators, axp20x_matches,
197 ARRAY_SIZE(axp20x_matches));
198 if (ret < 0) {
199 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
200 return ret;
201 }
202
203 dcdcfreq = 1500; 189 dcdcfreq = 1500;
204 of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq); 190 of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq);
205 ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); 191 ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
@@ -233,23 +219,17 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
233{ 219{
234 struct regulator_dev *rdev; 220 struct regulator_dev *rdev;
235 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); 221 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
236 struct regulator_config config = { }; 222 struct regulator_config config = {
237 struct regulator_init_data *init_data; 223 .dev = pdev->dev.parent,
224 .regmap = axp20x->regmap,
225 };
238 int ret, i; 226 int ret, i;
239 u32 workmode; 227 u32 workmode;
240 228
241 ret = axp20x_regulator_parse_dt(pdev); 229 /* This only sets the dcdc freq. Ignore any errors */
242 if (ret) 230 axp20x_regulator_parse_dt(pdev);
243 return ret;
244 231
245 for (i = 0; i < AXP20X_REG_ID_MAX; i++) { 232 for (i = 0; i < AXP20X_REG_ID_MAX; i++) {
246 init_data = axp20x_matches[i].init_data;
247
248 config.dev = pdev->dev.parent;
249 config.init_data = init_data;
250 config.regmap = axp20x->regmap;
251 config.of_node = axp20x_matches[i].of_node;
252
253 rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i], 233 rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i],
254 &config); 234 &config);
255 if (IS_ERR(rdev)) { 235 if (IS_ERR(rdev)) {
@@ -259,7 +239,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
259 return PTR_ERR(rdev); 239 return PTR_ERR(rdev);
260 } 240 }
261 241
262 ret = of_property_read_u32(axp20x_matches[i].of_node, "x-powers,dcdc-workmode", 242 ret = of_property_read_u32(rdev->dev.of_node,
243 "x-powers,dcdc-workmode",
263 &workmode); 244 &workmode);
264 if (!ret) { 245 if (!ret) {
265 if (axp20x_set_dcdc_workmode(rdev, i, workmode)) 246 if (axp20x_set_dcdc_workmode(rdev, i, workmode))
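This axp20x conversion drops the driver-local of_regulator_match table: once each regulator_desc carries .of_match and .regulators_node, regulator_register() locates the matching DT child and its init data itself, so probe() no longer has to juggle per-match init_data/of_node pairs. A minimal sketch of the resulting descriptor shape follows; it is a hypothetical fixed-voltage rail, with names and values chosen for illustration rather than taken from this patch.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops example_fixed_ops = {
	/* A fixed rail needs no voltage ops beyond the desc fields. */
};

static const struct regulator_desc example_desc = {
	.name		= "EXAMPLE_LDO",
	.of_match	= of_match_ptr("example-ldo"),	/* DT child node name */
	.regulators_node = of_match_ptr("regulators"),	/* parent container node */
	.type		= REGULATOR_VOLTAGE,
	.id		= 0,
	.owner		= THIS_MODULE,
	.n_voltages	= 1,
	.fixed_uV	= 1300000,			/* 1.3 V, illustrative */
	.ops		= &example_fixed_ops,
};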
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 9c48fb32f660..b899947d839d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -632,49 +632,34 @@ static ssize_t regulator_bypass_show(struct device *dev,
632static DEVICE_ATTR(bypass, 0444, 632static DEVICE_ATTR(bypass, 0444,
633 regulator_bypass_show, NULL); 633 regulator_bypass_show, NULL);
634 634
635/*
636 * These are the only attributes that are present for all regulators.
637 * Other attributes are a function of regulator functionality.
638 */
639static struct attribute *regulator_dev_attrs[] = {
640 &dev_attr_name.attr,
641 &dev_attr_num_users.attr,
642 &dev_attr_type.attr,
643 NULL,
644};
645ATTRIBUTE_GROUPS(regulator_dev);
646
647static void regulator_dev_release(struct device *dev)
648{
649 struct regulator_dev *rdev = dev_get_drvdata(dev);
650 kfree(rdev);
651}
652
653static struct class regulator_class = {
654 .name = "regulator",
655 .dev_release = regulator_dev_release,
656 .dev_groups = regulator_dev_groups,
657};
658
659/* Calculate the new optimum regulator operating mode based on the new total 635/* Calculate the new optimum regulator operating mode based on the new total
660 * consumer load. All locks held by caller */ 636 * consumer load. All locks held by caller */
661static void drms_uA_update(struct regulator_dev *rdev) 637static int drms_uA_update(struct regulator_dev *rdev)
662{ 638{
663 struct regulator *sibling; 639 struct regulator *sibling;
664 int current_uA = 0, output_uV, input_uV, err; 640 int current_uA = 0, output_uV, input_uV, err;
665 unsigned int mode; 641 unsigned int mode;
666 642
643 /*
644 * first check to see if we can set modes at all, otherwise just
645 * tell the consumer everything is OK.
646 */
667 err = regulator_check_drms(rdev); 647 err = regulator_check_drms(rdev);
668 if (err < 0 || !rdev->desc->ops->get_optimum_mode || 648 if (err < 0)
669 (!rdev->desc->ops->get_voltage && 649 return 0;
670 !rdev->desc->ops->get_voltage_sel) || 650
671 !rdev->desc->ops->set_mode) 651 if (!rdev->desc->ops->get_optimum_mode)
672 return; 652 return 0;
653
654 if (!rdev->desc->ops->set_mode)
655 return -EINVAL;
673 656
674 /* get output voltage */ 657 /* get output voltage */
675 output_uV = _regulator_get_voltage(rdev); 658 output_uV = _regulator_get_voltage(rdev);
676 if (output_uV <= 0) 659 if (output_uV <= 0) {
677 return; 660 rdev_err(rdev, "invalid output voltage found\n");
661 return -EINVAL;
662 }
678 663
679 /* get input voltage */ 664 /* get input voltage */
680 input_uV = 0; 665 input_uV = 0;
@@ -682,8 +667,10 @@ static void drms_uA_update(struct regulator_dev *rdev)
682 input_uV = regulator_get_voltage(rdev->supply); 667 input_uV = regulator_get_voltage(rdev->supply);
683 if (input_uV <= 0) 668 if (input_uV <= 0)
684 input_uV = rdev->constraints->input_uV; 669 input_uV = rdev->constraints->input_uV;
685 if (input_uV <= 0) 670 if (input_uV <= 0) {
686 return; 671 rdev_err(rdev, "invalid input voltage found\n");
672 return -EINVAL;
673 }
687 674
688 /* calc total requested load */ 675 /* calc total requested load */
689 list_for_each_entry(sibling, &rdev->consumer_list, list) 676 list_for_each_entry(sibling, &rdev->consumer_list, list)
@@ -695,8 +682,17 @@ static void drms_uA_update(struct regulator_dev *rdev)
695 682
696 /* check the new mode is allowed */ 683 /* check the new mode is allowed */
697 err = regulator_mode_constrain(rdev, &mode); 684 err = regulator_mode_constrain(rdev, &mode);
698 if (err == 0) 685 if (err < 0) {
699 rdev->desc->ops->set_mode(rdev, mode); 686 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
687 current_uA, input_uV, output_uV);
688 return err;
689 }
690
691 err = rdev->desc->ops->set_mode(rdev, mode);
692 if (err < 0)
693 rdev_err(rdev, "failed to set optimum mode %x\n", mode);
694
695 return err;
700} 696}
701 697
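The drms_uA_update() rework above turns a silent void helper into one that reports why a mode change could not happen: a missing set_mode op becomes -EINVAL, unreadable voltages are logged and rejected, and only the can-we-do-this-at-all checks still fall through as success. A hedged sketch of the calling pattern this enables; the wrapper name is illustrative.

/* Illustrative only: surface a DRMS failure instead of dropping it. */
static int example_update_consumer_load(struct regulator_dev *rdev)
{
	int ret = drms_uA_update(rdev);	/* was void before this change */

	if (ret < 0)
		rdev_err(rdev, "DRMS update failed: %d\n", ret);

	return ret;
}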
702static int suspend_set_state(struct regulator_dev *rdev, 698static int suspend_set_state(struct regulator_dev *rdev,
@@ -3026,75 +3022,13 @@ EXPORT_SYMBOL_GPL(regulator_get_mode);
3026int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) 3022int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
3027{ 3023{
3028 struct regulator_dev *rdev = regulator->rdev; 3024 struct regulator_dev *rdev = regulator->rdev;
3029 struct regulator *consumer; 3025 int ret;
3030 int ret, output_uV, input_uV = 0, total_uA_load = 0;
3031 unsigned int mode;
3032
3033 if (rdev->supply)
3034 input_uV = regulator_get_voltage(rdev->supply);
3035 3026
3036 mutex_lock(&rdev->mutex); 3027 mutex_lock(&rdev->mutex);
3037
3038 /*
3039 * first check to see if we can set modes at all, otherwise just
3040 * tell the consumer everything is OK.
3041 */
3042 regulator->uA_load = uA_load; 3028 regulator->uA_load = uA_load;
3043 ret = regulator_check_drms(rdev); 3029 ret = drms_uA_update(rdev);
3044 if (ret < 0) {
3045 ret = 0;
3046 goto out;
3047 }
3048
3049 if (!rdev->desc->ops->get_optimum_mode)
3050 goto out;
3051
3052 /*
3053 * we can actually do this so any errors are indicators of
3054 * potential real failure.
3055 */
3056 ret = -EINVAL;
3057
3058 if (!rdev->desc->ops->set_mode)
3059 goto out;
3060
3061 /* get output voltage */
3062 output_uV = _regulator_get_voltage(rdev);
3063 if (output_uV <= 0) {
3064 rdev_err(rdev, "invalid output voltage found\n");
3065 goto out;
3066 }
3067
3068 /* No supply? Use constraint voltage */
3069 if (input_uV <= 0)
3070 input_uV = rdev->constraints->input_uV;
3071 if (input_uV <= 0) {
3072 rdev_err(rdev, "invalid input voltage found\n");
3073 goto out;
3074 }
3075
3076 /* calc total requested load for this regulator */
3077 list_for_each_entry(consumer, &rdev->consumer_list, list)
3078 total_uA_load += consumer->uA_load;
3079
3080 mode = rdev->desc->ops->get_optimum_mode(rdev,
3081 input_uV, output_uV,
3082 total_uA_load);
3083 ret = regulator_mode_constrain(rdev, &mode);
3084 if (ret < 0) {
3085 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
3086 total_uA_load, input_uV, output_uV);
3087 goto out;
3088 }
3089
3090 ret = rdev->desc->ops->set_mode(rdev, mode);
3091 if (ret < 0) {
3092 rdev_err(rdev, "failed to set optimum mode %x\n", mode);
3093 goto out;
3094 }
3095 ret = mode;
3096out:
3097 mutex_unlock(&rdev->mutex); 3030 mutex_unlock(&rdev->mutex);
3031
3098 return ret; 3032 return ret;
3099} 3033}
3100EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); 3034EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
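With the duplicated logic gone, regulator_set_optimum_mode() reduces to taking the rdev mutex, recording the consumer's load, and delegating to drms_uA_update(). One behavioural detail visible in this hunk: the old code returned the selected mode on success, while the new code returns whatever drms_uA_update() returns (0 or a negative error), so any caller that inspected the returned mode value would need auditing. Typical consumer usage is unaffected; a purely illustrative caller, with a hypothetical handle and load figure:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int example_prepare_transfer(struct device *dev, struct regulator *vdd)
{
	int ret = regulator_set_optimum_mode(vdd, 150000);	/* ~150 mA load */

	if (ret < 0)
		dev_warn(dev, "optimum mode update failed: %d\n", ret);

	return ret;
}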
@@ -3436,126 +3370,136 @@ int regulator_mode_to_status(unsigned int mode)
3436} 3370}
3437EXPORT_SYMBOL_GPL(regulator_mode_to_status); 3371EXPORT_SYMBOL_GPL(regulator_mode_to_status);
3438 3372
3373static struct attribute *regulator_dev_attrs[] = {
3374 &dev_attr_name.attr,
3375 &dev_attr_num_users.attr,
3376 &dev_attr_type.attr,
3377 &dev_attr_microvolts.attr,
3378 &dev_attr_microamps.attr,
3379 &dev_attr_opmode.attr,
3380 &dev_attr_state.attr,
3381 &dev_attr_status.attr,
3382 &dev_attr_bypass.attr,
3383 &dev_attr_requested_microamps.attr,
3384 &dev_attr_min_microvolts.attr,
3385 &dev_attr_max_microvolts.attr,
3386 &dev_attr_min_microamps.attr,
3387 &dev_attr_max_microamps.attr,
3388 &dev_attr_suspend_standby_state.attr,
3389 &dev_attr_suspend_mem_state.attr,
3390 &dev_attr_suspend_disk_state.attr,
3391 &dev_attr_suspend_standby_microvolts.attr,
3392 &dev_attr_suspend_mem_microvolts.attr,
3393 &dev_attr_suspend_disk_microvolts.attr,
3394 &dev_attr_suspend_standby_mode.attr,
3395 &dev_attr_suspend_mem_mode.attr,
3396 &dev_attr_suspend_disk_mode.attr,
3397 NULL
3398};
3399
3439/* 3400/*
3440 * To avoid cluttering sysfs (and memory) with useless state, only 3401 * To avoid cluttering sysfs (and memory) with useless state, only
3441 * create attributes that can be meaningfully displayed. 3402 * create attributes that can be meaningfully displayed.
3442 */ 3403 */
3443static int add_regulator_attributes(struct regulator_dev *rdev) 3404static umode_t regulator_attr_is_visible(struct kobject *kobj,
3405 struct attribute *attr, int idx)
3444{ 3406{
3445 struct device *dev = &rdev->dev; 3407 struct device *dev = kobj_to_dev(kobj);
3408 struct regulator_dev *rdev = container_of(dev, struct regulator_dev, dev);
3446 const struct regulator_ops *ops = rdev->desc->ops; 3409 const struct regulator_ops *ops = rdev->desc->ops;
3447 int status = 0; 3410 umode_t mode = attr->mode;
3411
3412 /* these three are always present */
3413 if (attr == &dev_attr_name.attr ||
3414 attr == &dev_attr_num_users.attr ||
3415 attr == &dev_attr_type.attr)
3416 return mode;
3448 3417
3449 /* some attributes need specific methods to be displayed */ 3418 /* some attributes need specific methods to be displayed */
3450 if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || 3419 if (attr == &dev_attr_microvolts.attr) {
3451 (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || 3420 if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
3452 (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || 3421 (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
3453 (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) { 3422 (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
3454 status = device_create_file(dev, &dev_attr_microvolts); 3423 (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1))
3455 if (status < 0) 3424 return mode;
3456 return status; 3425 return 0;
3457 }
3458 if (ops->get_current_limit) {
3459 status = device_create_file(dev, &dev_attr_microamps);
3460 if (status < 0)
3461 return status;
3462 }
3463 if (ops->get_mode) {
3464 status = device_create_file(dev, &dev_attr_opmode);
3465 if (status < 0)
3466 return status;
3467 }
3468 if (rdev->ena_pin || ops->is_enabled) {
3469 status = device_create_file(dev, &dev_attr_state);
3470 if (status < 0)
3471 return status;
3472 }
3473 if (ops->get_status) {
3474 status = device_create_file(dev, &dev_attr_status);
3475 if (status < 0)
3476 return status;
3477 }
3478 if (ops->get_bypass) {
3479 status = device_create_file(dev, &dev_attr_bypass);
3480 if (status < 0)
3481 return status;
3482 } 3426 }
3483 3427
3428 if (attr == &dev_attr_microamps.attr)
3429 return ops->get_current_limit ? mode : 0;
3430
3431 if (attr == &dev_attr_opmode.attr)
3432 return ops->get_mode ? mode : 0;
3433
3434 if (attr == &dev_attr_state.attr)
3435 return (rdev->ena_pin || ops->is_enabled) ? mode : 0;
3436
3437 if (attr == &dev_attr_status.attr)
3438 return ops->get_status ? mode : 0;
3439
3440 if (attr == &dev_attr_bypass.attr)
3441 return ops->get_bypass ? mode : 0;
3442
3484 /* some attributes are type-specific */ 3443 /* some attributes are type-specific */
3485 if (rdev->desc->type == REGULATOR_CURRENT) { 3444 if (attr == &dev_attr_requested_microamps.attr)
3486 status = device_create_file(dev, &dev_attr_requested_microamps); 3445 return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
3487 if (status < 0)
3488 return status;
3489 }
3490 3446
3491 /* all the other attributes exist to support constraints; 3447 /* all the other attributes exist to support constraints;
3492 * don't show them if there are no constraints, or if the 3448 * don't show them if there are no constraints, or if the
3493 * relevant supporting methods are missing. 3449 * relevant supporting methods are missing.
3494 */ 3450 */
3495 if (!rdev->constraints) 3451 if (!rdev->constraints)
3496 return status; 3452 return 0;
3497 3453
3498 /* constraints need specific supporting methods */ 3454 /* constraints need specific supporting methods */
3499 if (ops->set_voltage || ops->set_voltage_sel) { 3455 if (attr == &dev_attr_min_microvolts.attr ||
3500 status = device_create_file(dev, &dev_attr_min_microvolts); 3456 attr == &dev_attr_max_microvolts.attr)
3501 if (status < 0) 3457 return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0;
3502 return status; 3458
3503 status = device_create_file(dev, &dev_attr_max_microvolts); 3459 if (attr == &dev_attr_min_microamps.attr ||
3504 if (status < 0) 3460 attr == &dev_attr_max_microamps.attr)
3505 return status; 3461 return ops->set_current_limit ? mode : 0;
3506 }
3507 if (ops->set_current_limit) {
3508 status = device_create_file(dev, &dev_attr_min_microamps);
3509 if (status < 0)
3510 return status;
3511 status = device_create_file(dev, &dev_attr_max_microamps);
3512 if (status < 0)
3513 return status;
3514 }
3515
3516 status = device_create_file(dev, &dev_attr_suspend_standby_state);
3517 if (status < 0)
3518 return status;
3519 status = device_create_file(dev, &dev_attr_suspend_mem_state);
3520 if (status < 0)
3521 return status;
3522 status = device_create_file(dev, &dev_attr_suspend_disk_state);
3523 if (status < 0)
3524 return status;
3525 3462
3526 if (ops->set_suspend_voltage) { 3463 if (attr == &dev_attr_suspend_standby_state.attr ||
3527 status = device_create_file(dev, 3464 attr == &dev_attr_suspend_mem_state.attr ||
3528 &dev_attr_suspend_standby_microvolts); 3465 attr == &dev_attr_suspend_disk_state.attr)
3529 if (status < 0) 3466 return mode;
3530 return status; 3467
3531 status = device_create_file(dev, 3468 if (attr == &dev_attr_suspend_standby_microvolts.attr ||
3532 &dev_attr_suspend_mem_microvolts); 3469 attr == &dev_attr_suspend_mem_microvolts.attr ||
3533 if (status < 0) 3470 attr == &dev_attr_suspend_disk_microvolts.attr)
3534 return status; 3471 return ops->set_suspend_voltage ? mode : 0;
3535 status = device_create_file(dev, 3472
3536 &dev_attr_suspend_disk_microvolts); 3473 if (attr == &dev_attr_suspend_standby_mode.attr ||
3537 if (status < 0) 3474 attr == &dev_attr_suspend_mem_mode.attr ||
3538 return status; 3475 attr == &dev_attr_suspend_disk_mode.attr)
3539 } 3476 return ops->set_suspend_mode ? mode : 0;
3540 3477
3541 if (ops->set_suspend_mode) { 3478 return mode;
3542 status = device_create_file(dev,
3543 &dev_attr_suspend_standby_mode);
3544 if (status < 0)
3545 return status;
3546 status = device_create_file(dev,
3547 &dev_attr_suspend_mem_mode);
3548 if (status < 0)
3549 return status;
3550 status = device_create_file(dev,
3551 &dev_attr_suspend_disk_mode);
3552 if (status < 0)
3553 return status;
3554 }
3555
3556 return status;
3557} 3479}
3558 3480
3481static const struct attribute_group regulator_dev_group = {
3482 .attrs = regulator_dev_attrs,
3483 .is_visible = regulator_attr_is_visible,
3484};
3485
3486static const struct attribute_group *regulator_dev_groups[] = {
3487 &regulator_dev_group,
3488 NULL
3489};
3490
3491static void regulator_dev_release(struct device *dev)
3492{
3493 struct regulator_dev *rdev = dev_get_drvdata(dev);
3494 kfree(rdev);
3495}
3496
3497static struct class regulator_class = {
3498 .name = "regulator",
3499 .dev_release = regulator_dev_release,
3500 .dev_groups = regulator_dev_groups,
3501};
3502
3559static void rdev_init_debugfs(struct regulator_dev *rdev) 3503static void rdev_init_debugfs(struct regulator_dev *rdev)
3560{ 3504{
3561 rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); 3505 rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
@@ -3575,7 +3519,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
3575/** 3519/**
3576 * regulator_register - register regulator 3520 * regulator_register - register regulator
3577 * @regulator_desc: regulator to register 3521 * @regulator_desc: regulator to register
3578 * @config: runtime configuration for regulator 3522 * @cfg: runtime configuration for regulator
3579 * 3523 *
3580 * Called by regulator drivers to register a regulator. 3524 * Called by regulator drivers to register a regulator.
3581 * Returns a valid pointer to struct regulator_dev on success 3525 * Returns a valid pointer to struct regulator_dev on success
@@ -3583,20 +3527,21 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
3583 */ 3527 */
3584struct regulator_dev * 3528struct regulator_dev *
3585regulator_register(const struct regulator_desc *regulator_desc, 3529regulator_register(const struct regulator_desc *regulator_desc,
3586 const struct regulator_config *config) 3530 const struct regulator_config *cfg)
3587{ 3531{
3588 const struct regulation_constraints *constraints = NULL; 3532 const struct regulation_constraints *constraints = NULL;
3589 const struct regulator_init_data *init_data; 3533 const struct regulator_init_data *init_data;
3590 static atomic_t regulator_no = ATOMIC_INIT(0); 3534 struct regulator_config *config = NULL;
3535 static atomic_t regulator_no = ATOMIC_INIT(-1);
3591 struct regulator_dev *rdev; 3536 struct regulator_dev *rdev;
3592 struct device *dev; 3537 struct device *dev;
3593 int ret, i; 3538 int ret, i;
3594 const char *supply = NULL; 3539 const char *supply = NULL;
3595 3540
3596 if (regulator_desc == NULL || config == NULL) 3541 if (regulator_desc == NULL || cfg == NULL)
3597 return ERR_PTR(-EINVAL); 3542 return ERR_PTR(-EINVAL);
3598 3543
3599 dev = config->dev; 3544 dev = cfg->dev;
3600 WARN_ON(!dev); 3545 WARN_ON(!dev);
3601 3546
3602 if (regulator_desc->name == NULL || regulator_desc->ops == NULL) 3547 if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
@@ -3626,7 +3571,17 @@ regulator_register(const struct regulator_desc *regulator_desc,
3626 if (rdev == NULL) 3571 if (rdev == NULL)
3627 return ERR_PTR(-ENOMEM); 3572 return ERR_PTR(-ENOMEM);
3628 3573
3629 init_data = regulator_of_get_init_data(dev, regulator_desc, 3574 /*
3575 * Duplicate the config so the driver could override it after
3576 * parsing init data.
3577 */
3578 config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
3579 if (config == NULL) {
3580 kfree(rdev);
3581 return ERR_PTR(-ENOMEM);
3582 }
3583
3584 init_data = regulator_of_get_init_data(dev, regulator_desc, config,
3630 &rdev->dev.of_node); 3585 &rdev->dev.of_node);
3631 if (!init_data) { 3586 if (!init_data) {
3632 init_data = config->init_data; 3587 init_data = config->init_data;
@@ -3660,8 +3615,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
3660 /* register with sysfs */ 3615 /* register with sysfs */
3661 rdev->dev.class = &regulator_class; 3616 rdev->dev.class = &regulator_class;
3662 rdev->dev.parent = dev; 3617 rdev->dev.parent = dev;
3663 dev_set_name(&rdev->dev, "regulator.%d", 3618 dev_set_name(&rdev->dev, "regulator.%lu",
3664 atomic_inc_return(&regulator_no) - 1); 3619 (unsigned long) atomic_inc_return(&regulator_no));
3665 ret = device_register(&rdev->dev); 3620 ret = device_register(&rdev->dev);
3666 if (ret != 0) { 3621 if (ret != 0) {
3667 put_device(&rdev->dev); 3622 put_device(&rdev->dev);
@@ -3694,11 +3649,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
3694 if (ret < 0) 3649 if (ret < 0)
3695 goto scrub; 3650 goto scrub;
3696 3651
3697 /* add attributes supported by this regulator */
3698 ret = add_regulator_attributes(rdev);
3699 if (ret < 0)
3700 goto scrub;
3701
3702 if (init_data && init_data->supply_regulator) 3652 if (init_data && init_data->supply_regulator)
3703 supply = init_data->supply_regulator; 3653 supply = init_data->supply_regulator;
3704 else if (regulator_desc->supply_name) 3654 else if (regulator_desc->supply_name)
@@ -3754,6 +3704,7 @@ add_dev:
3754 rdev_init_debugfs(rdev); 3704 rdev_init_debugfs(rdev);
3755out: 3705out:
3756 mutex_unlock(&regulator_list_mutex); 3706 mutex_unlock(&regulator_list_mutex);
3707 kfree(config);
3757 return rdev; 3708 return rdev;
3758 3709
3759unset_supplies: 3710unset_supplies:
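Two further core.c changes in this patch are worth spelling out. First, the imperative add_regulator_attributes() path and its long chain of device_create_file() calls become one static attribute group whose .is_visible callback decides, per regulator, which entries sysfs should expose; the attributes now appear atomically with the device instead of being bolted on after registration. Second, regulator_register() now kmemdup()s the caller's config so that of_parse_cb hooks may override fields while init data is parsed, freeing the copy on every exit path. A generic sketch of the .is_visible idiom, detached from this driver; the attribute name and the capability test are hypothetical.

#include <linux/device.h>
#include <linux/sysfs.h>

static bool example_dev_supports(struct device *dev);	/* hypothetical helper */

/* Assume dev_attr_example was declared earlier with DEVICE_ATTR_RO(). */
static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);

	/* Return 0 to hide an attribute, or its own mode to show it. */
	if (attr == &dev_attr_example.attr && !example_dev_supports(dev))
		return 0;

	return attr->mode;
}

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static const struct attribute_group example_group = {
	.attrs		= example_attrs,
	.is_visible	= example_attr_is_visible,
};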
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index c78d2106d6cb..01343419555e 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -24,6 +24,7 @@
24#include <linux/regmap.h> 24#include <linux/regmap.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/of_gpio.h>
27#include <linux/regulator/of_regulator.h> 28#include <linux/regulator/of_regulator.h>
28#include <linux/regulator/da9211.h> 29#include <linux/regulator/da9211.h>
29#include "da9211-regulator.h" 30#include "da9211-regulator.h"
@@ -276,7 +277,10 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
276 continue; 277 continue;
277 278
278 pdata->init_data[n] = da9211_matches[i].init_data; 279 pdata->init_data[n] = da9211_matches[i].init_data;
279 280 pdata->reg_node[n] = da9211_matches[i].of_node;
281 pdata->gpio_ren[n] =
282 of_get_named_gpio(da9211_matches[i].of_node,
283 "enable-gpios", 0);
280 n++; 284 n++;
281 } 285 }
282 286
@@ -364,7 +368,15 @@ static int da9211_regulator_init(struct da9211 *chip)
364 config.dev = chip->dev; 368 config.dev = chip->dev;
365 config.driver_data = chip; 369 config.driver_data = chip;
366 config.regmap = chip->regmap; 370 config.regmap = chip->regmap;
367 config.of_node = chip->dev->of_node; 371 config.of_node = chip->pdata->reg_node[i];
372
373 if (gpio_is_valid(chip->pdata->gpio_ren[i])) {
374 config.ena_gpio = chip->pdata->gpio_ren[i];
375 config.ena_gpio_initialized = true;
376 } else {
377 config.ena_gpio = -EINVAL;
378 config.ena_gpio_initialized = false;
379 }
368 380
369 chip->rdev[i] = devm_regulator_register(chip->dev, 381 chip->rdev[i] = devm_regulator_register(chip->dev,
370 &da9211_regulators[i], &config); 382 &da9211_regulators[i], &config);
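The da9211 change records each regulator's own DT node (rather than the chip node) and an optional enable GPIO taken from an "enable-gpios" property, then passes both through regulator_config so the core can drive the enable pin directly. The parse-and-configure step in isolation looks roughly like this; the function name is illustrative, while the property name and field handling match the patch.

#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/driver.h>

/* Wire an optional "enable-gpios" DT property into the regulator config. */
static void example_fill_ena_gpio(struct device_node *np,
				  struct regulator_config *config)
{
	int gpio = of_get_named_gpio(np, "enable-gpios", 0);

	if (gpio_is_valid(gpio)) {
		config->ena_gpio = gpio;
		config->ena_gpio_initialized = true;
	} else {
		config->ena_gpio = -EINVAL;
		config->ena_gpio_initialized = false;
	}
}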
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 6c43ab2d5121..3c25db89a021 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -147,7 +147,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
147 return REGULATOR_MODE_NORMAL; 147 return REGULATOR_MODE_NORMAL;
148} 148}
149 149
150static int slew_rates[] = { 150static const int slew_rates[] = {
151 64000, 151 64000,
152 32000, 152 32000,
153 16000, 153 16000,
@@ -296,7 +296,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
296 return PTR_ERR_OR_ZERO(di->rdev); 296 return PTR_ERR_OR_ZERO(di->rdev);
297} 297}
298 298
299static struct regmap_config fan53555_regmap_config = { 299static const struct regmap_config fan53555_regmap_config = {
300 .reg_bits = 8, 300 .reg_bits = 8,
301 .val_bits = 8, 301 .val_bits = 8,
302}; 302};
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 80ba2a35a04b..c74ac8734023 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -38,11 +38,13 @@ struct regulator {
38#ifdef CONFIG_OF 38#ifdef CONFIG_OF
39struct regulator_init_data *regulator_of_get_init_data(struct device *dev, 39struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
40 const struct regulator_desc *desc, 40 const struct regulator_desc *desc,
41 struct regulator_config *config,
41 struct device_node **node); 42 struct device_node **node);
42#else 43#else
43static inline struct regulator_init_data * 44static inline struct regulator_init_data *
44regulator_of_get_init_data(struct device *dev, 45regulator_of_get_init_data(struct device *dev,
45 const struct regulator_desc *desc, 46 const struct regulator_desc *desc,
47 struct regulator_config *config,
46 struct device_node **node) 48 struct device_node **node)
47{ 49{
48 return NULL; 50 return NULL;
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 92fefd98da58..6e3a15fe00f1 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -177,8 +177,10 @@ static int isl9305_i2c_probe(struct i2c_client *i2c,
177 177
178#ifdef CONFIG_OF 178#ifdef CONFIG_OF
179static const struct of_device_id isl9305_dt_ids[] = { 179static const struct of_device_id isl9305_dt_ids[] = {
180 { .compatible = "isl,isl9305" }, 180 { .compatible = "isl,isl9305" }, /* for backward compat., don't use */
181 { .compatible = "isl,isl9305h" }, 181 { .compatible = "isil,isl9305" },
182 { .compatible = "isl,isl9305h" }, /* for backward compat., don't use */
183 { .compatible = "isil,isl9305h" },
182 {}, 184 {},
183}; 185};
184#endif 186#endif
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 021d64d856bb..3de328ab41f3 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -106,7 +106,6 @@ struct lp872x {
106 struct device *dev; 106 struct device *dev;
107 enum lp872x_id chipid; 107 enum lp872x_id chipid;
108 struct lp872x_platform_data *pdata; 108 struct lp872x_platform_data *pdata;
109 struct regulator_dev **regulators;
110 int num_regulators; 109 int num_regulators;
111 enum lp872x_dvs_state dvs_pin; 110 enum lp872x_dvs_state dvs_pin;
112 int dvs_gpio; 111 int dvs_gpio;
@@ -801,8 +800,6 @@ static int lp872x_regulator_register(struct lp872x *lp)
801 dev_err(lp->dev, "regulator register err"); 800 dev_err(lp->dev, "regulator register err");
802 return PTR_ERR(rdev); 801 return PTR_ERR(rdev);
803 } 802 }
804
805 *(lp->regulators + i) = rdev;
806 } 803 }
807 804
808 return 0; 805 return 0;
@@ -906,7 +903,7 @@ static struct lp872x_platform_data
906static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) 903static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
907{ 904{
908 struct lp872x *lp; 905 struct lp872x *lp;
909 int ret, size, num_regulators; 906 int ret;
910 const int lp872x_num_regulators[] = { 907 const int lp872x_num_regulators[] = {
911 [LP8720] = LP8720_NUM_REGULATORS, 908 [LP8720] = LP8720_NUM_REGULATORS,
912 [LP8725] = LP8725_NUM_REGULATORS, 909 [LP8725] = LP8725_NUM_REGULATORS,
@@ -918,38 +915,27 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
918 915
919 lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); 916 lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
920 if (!lp) 917 if (!lp)
921 goto err_mem; 918 return -ENOMEM;
922
923 num_regulators = lp872x_num_regulators[id->driver_data];
924 size = sizeof(struct regulator_dev *) * num_regulators;
925 919
926 lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL); 920 lp->num_regulators = lp872x_num_regulators[id->driver_data];
927 if (!lp->regulators)
928 goto err_mem;
929 921
930 lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config); 922 lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
931 if (IS_ERR(lp->regmap)) { 923 if (IS_ERR(lp->regmap)) {
932 ret = PTR_ERR(lp->regmap); 924 ret = PTR_ERR(lp->regmap);
933 dev_err(&cl->dev, "regmap init i2c err: %d\n", ret); 925 dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
934 goto err_dev; 926 return ret;
935 } 927 }
936 928
937 lp->dev = &cl->dev; 929 lp->dev = &cl->dev;
938 lp->pdata = dev_get_platdata(&cl->dev); 930 lp->pdata = dev_get_platdata(&cl->dev);
939 lp->chipid = id->driver_data; 931 lp->chipid = id->driver_data;
940 lp->num_regulators = num_regulators;
941 i2c_set_clientdata(cl, lp); 932 i2c_set_clientdata(cl, lp);
942 933
943 ret = lp872x_config(lp); 934 ret = lp872x_config(lp);
944 if (ret) 935 if (ret)
945 goto err_dev; 936 return ret;
946 937
947 return lp872x_regulator_register(lp); 938 return lp872x_regulator_register(lp);
948
949err_mem:
950 return -ENOMEM;
951err_dev:
952 return ret;
953} 939}
954 940
955static const struct of_device_id lp872x_dt_ids[] = { 941static const struct of_device_id lp872x_dt_ids[] = {
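The lp872x diet above removes a regulator_dev array that was filled but never read, and with it the err_mem/err_dev labels: every allocation in the probe path is devm-managed, so failures can return directly with nothing to unwind. Stripped to its shape, the pattern is the common devm probe idiom; chip and type names here are illustrative.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct example_chip {
	struct regmap *regmap;
};

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int example_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
	struct example_chip *chip;

	chip = devm_kzalloc(&cl->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;			/* devm: nothing to unwind */

	chip->regmap = devm_regmap_init_i2c(cl, &example_regmap_config);
	if (IS_ERR(chip->regmap))
		return PTR_ERR(chip->regmap);	/* ditto */

	i2c_set_clientdata(cl, chip);
	return 0;
}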
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index bf9a44c5fdd2..b3678d289619 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -103,6 +103,8 @@ static struct regulator_ops max14577_charger_ops = {
103static const struct regulator_desc max14577_supported_regulators[] = { 103static const struct regulator_desc max14577_supported_regulators[] = {
104 [MAX14577_SAFEOUT] = { 104 [MAX14577_SAFEOUT] = {
105 .name = "SAFEOUT", 105 .name = "SAFEOUT",
106 .of_match = of_match_ptr("SAFEOUT"),
107 .regulators_node = of_match_ptr("regulators"),
106 .id = MAX14577_SAFEOUT, 108 .id = MAX14577_SAFEOUT,
107 .ops = &max14577_safeout_ops, 109 .ops = &max14577_safeout_ops,
108 .type = REGULATOR_VOLTAGE, 110 .type = REGULATOR_VOLTAGE,
@@ -114,6 +116,8 @@ static const struct regulator_desc max14577_supported_regulators[] = {
114 }, 116 },
115 [MAX14577_CHARGER] = { 117 [MAX14577_CHARGER] = {
116 .name = "CHARGER", 118 .name = "CHARGER",
119 .of_match = of_match_ptr("CHARGER"),
120 .regulators_node = of_match_ptr("regulators"),
117 .id = MAX14577_CHARGER, 121 .id = MAX14577_CHARGER,
118 .ops = &max14577_charger_ops, 122 .ops = &max14577_charger_ops,
119 .type = REGULATOR_CURRENT, 123 .type = REGULATOR_CURRENT,
@@ -137,6 +141,8 @@ static struct regulator_ops max77836_ldo_ops = {
137static const struct regulator_desc max77836_supported_regulators[] = { 141static const struct regulator_desc max77836_supported_regulators[] = {
138 [MAX14577_SAFEOUT] = { 142 [MAX14577_SAFEOUT] = {
139 .name = "SAFEOUT", 143 .name = "SAFEOUT",
144 .of_match = of_match_ptr("SAFEOUT"),
145 .regulators_node = of_match_ptr("regulators"),
140 .id = MAX14577_SAFEOUT, 146 .id = MAX14577_SAFEOUT,
141 .ops = &max14577_safeout_ops, 147 .ops = &max14577_safeout_ops,
142 .type = REGULATOR_VOLTAGE, 148 .type = REGULATOR_VOLTAGE,
@@ -148,6 +154,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
148 }, 154 },
149 [MAX14577_CHARGER] = { 155 [MAX14577_CHARGER] = {
150 .name = "CHARGER", 156 .name = "CHARGER",
157 .of_match = of_match_ptr("CHARGER"),
158 .regulators_node = of_match_ptr("regulators"),
151 .id = MAX14577_CHARGER, 159 .id = MAX14577_CHARGER,
152 .ops = &max14577_charger_ops, 160 .ops = &max14577_charger_ops,
153 .type = REGULATOR_CURRENT, 161 .type = REGULATOR_CURRENT,
@@ -157,6 +165,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
157 }, 165 },
158 [MAX77836_LDO1] = { 166 [MAX77836_LDO1] = {
159 .name = "LDO1", 167 .name = "LDO1",
168 .of_match = of_match_ptr("LDO1"),
169 .regulators_node = of_match_ptr("regulators"),
160 .id = MAX77836_LDO1, 170 .id = MAX77836_LDO1,
161 .ops = &max77836_ldo_ops, 171 .ops = &max77836_ldo_ops,
162 .type = REGULATOR_VOLTAGE, 172 .type = REGULATOR_VOLTAGE,
@@ -171,6 +181,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
171 }, 181 },
172 [MAX77836_LDO2] = { 182 [MAX77836_LDO2] = {
173 .name = "LDO2", 183 .name = "LDO2",
184 .of_match = of_match_ptr("LDO2"),
185 .regulators_node = of_match_ptr("regulators"),
174 .id = MAX77836_LDO2, 186 .id = MAX77836_LDO2,
175 .ops = &max77836_ldo_ops, 187 .ops = &max77836_ldo_ops,
176 .type = REGULATOR_VOLTAGE, 188 .type = REGULATOR_VOLTAGE,
@@ -198,43 +210,6 @@ static struct of_regulator_match max77836_regulator_matches[] = {
198 { .name = "LDO2", }, 210 { .name = "LDO2", },
199}; 211};
200 212
201static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
202 enum maxim_device_type dev_type)
203{
204 int ret;
205 struct device_node *np;
206 struct of_regulator_match *regulator_matches;
207 unsigned int regulator_matches_size;
208
209 np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
210 if (!np) {
211 dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
212 return -EINVAL;
213 }
214
215 switch (dev_type) {
216 case MAXIM_DEVICE_TYPE_MAX77836:
217 regulator_matches = max77836_regulator_matches;
218 regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches);
219 break;
220 case MAXIM_DEVICE_TYPE_MAX14577:
221 default:
222 regulator_matches = max14577_regulator_matches;
223 regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches);
224 }
225
226 ret = of_regulator_match(&pdev->dev, np, regulator_matches,
227 regulator_matches_size);
228 if (ret < 0)
229 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
230 else
231 ret = 0;
232
233 of_node_put(np);
234
235 return ret;
236}
237
238static inline struct regulator_init_data *match_init_data(int index, 213static inline struct regulator_init_data *match_init_data(int index,
239 enum maxim_device_type dev_type) 214 enum maxim_device_type dev_type)
240{ 215{
@@ -261,11 +236,6 @@ static inline struct device_node *match_of_node(int index,
261 } 236 }
262} 237}
263#else /* CONFIG_OF */ 238#else /* CONFIG_OF */
264static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
265 enum maxim_device_type dev_type)
266{
267 return 0;
268}
269static inline struct regulator_init_data *match_init_data(int index, 239static inline struct regulator_init_data *match_init_data(int index,
270 enum maxim_device_type dev_type) 240 enum maxim_device_type dev_type)
271{ 241{
@@ -308,16 +278,12 @@ static int max14577_regulator_probe(struct platform_device *pdev)
308{ 278{
309 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); 279 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
310 struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); 280 struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
311 int i, ret; 281 int i, ret = 0;
312 struct regulator_config config = {}; 282 struct regulator_config config = {};
313 const struct regulator_desc *supported_regulators; 283 const struct regulator_desc *supported_regulators;
314 unsigned int supported_regulators_size; 284 unsigned int supported_regulators_size;
315 enum maxim_device_type dev_type = max14577->dev_type; 285 enum maxim_device_type dev_type = max14577->dev_type;
316 286
317 ret = max14577_regulator_dt_parse_pdata(pdev, dev_type);
318 if (ret)
319 return ret;
320
321 switch (dev_type) { 287 switch (dev_type) {
322 case MAXIM_DEVICE_TYPE_MAX77836: 288 case MAXIM_DEVICE_TYPE_MAX77836:
323 supported_regulators = max77836_supported_regulators; 289 supported_regulators = max77836_supported_regulators;
@@ -329,7 +295,7 @@ static int max14577_regulator_probe(struct platform_device *pdev)
329 supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators); 295 supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators);
330 } 296 }
331 297
332 config.dev = &pdev->dev; 298 config.dev = max14577->dev;
333 config.driver_data = max14577; 299 config.driver_data = max14577;
334 300
335 for (i = 0; i < supported_regulators_size; i++) { 301 for (i = 0; i < supported_regulators_size; i++) {
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index 10d206266ac2..15fb1416bfbd 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -26,6 +26,7 @@
26#include <linux/bug.h> 26#include <linux/bug.h>
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_gpio.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/regulator/driver.h> 32#include <linux/regulator/driver.h>
@@ -46,6 +47,11 @@
46#define MAX77686_DVS_UVSTEP 12500 47#define MAX77686_DVS_UVSTEP 12500
47 48
48/* 49/*
50 * Value for configuring buck[89] and LDO{20,21,22} as GPIO control.
51 * It is the same as 'off' for other regulators.
52 */
53#define MAX77686_GPIO_CONTROL 0x0
54/*
49 * Values used for configuring LDOs and bucks. 55 * Values used for configuring LDOs and bucks.
50 * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26 56 * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26
51 */ 57 */
@@ -82,6 +88,8 @@ enum max77686_ramp_rate {
82}; 88};
83 89
84struct max77686_data { 90struct max77686_data {
91 u64 gpio_enabled:MAX77686_REGULATORS;
92
85 /* Array indexed by regulator id */ 93 /* Array indexed by regulator id */
86 unsigned int opmode[MAX77686_REGULATORS]; 94 unsigned int opmode[MAX77686_REGULATORS];
87}; 95};
@@ -100,6 +108,26 @@ static unsigned int max77686_get_opmode_shift(int id)
100 } 108 }
101} 109}
102 110
111/*
112 * When a regulator is configured for GPIO control, that control
113 * replaces "normal" mode: any change from low power mode to normal
114 * should actually switch to GPIO control instead.
115 * Map normal mode to the proper value for such regulators.
116 */
117static unsigned int max77686_map_normal_mode(struct max77686_data *max77686,
118 int id)
119{
120 switch (id) {
121 case MAX77686_BUCK8:
122 case MAX77686_BUCK9:
123 case MAX77686_LDO20 ... MAX77686_LDO22:
124 if (max77686->gpio_enabled & (1 << id))
125 return MAX77686_GPIO_CONTROL;
126 }
127
128 return MAX77686_NORMAL;
129}
130
103/* Some BUCKs and LDOs support Normal[ON/OFF] mode during suspend */ 131
104static int max77686_set_suspend_disable(struct regulator_dev *rdev) 132static int max77686_set_suspend_disable(struct regulator_dev *rdev)
105{ 133{
@@ -136,7 +164,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
136 val = MAX77686_LDO_LOWPOWER_PWRREQ; 164 val = MAX77686_LDO_LOWPOWER_PWRREQ;
137 break; 165 break;
138 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 166 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
139 val = MAX77686_NORMAL; 167 val = max77686_map_normal_mode(max77686, id);
140 break; 168 break;
141 default: 169 default:
142 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", 170 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -160,7 +188,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
160{ 188{
161 unsigned int val; 189 unsigned int val;
162 struct max77686_data *max77686 = rdev_get_drvdata(rdev); 190 struct max77686_data *max77686 = rdev_get_drvdata(rdev);
163 int ret; 191 int ret, id = rdev_get_id(rdev);
164 192
165 switch (mode) { 193 switch (mode) {
166 case REGULATOR_MODE_STANDBY: /* switch off */ 194 case REGULATOR_MODE_STANDBY: /* switch off */
@@ -170,7 +198,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
170 val = MAX77686_LDO_LOWPOWER_PWRREQ; 198 val = MAX77686_LDO_LOWPOWER_PWRREQ;
171 break; 199 break;
172 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 200 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
173 val = MAX77686_NORMAL; 201 val = max77686_map_normal_mode(max77686, id);
174 break; 202 break;
175 default: 203 default:
176 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", 204 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -184,7 +212,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
184 if (ret) 212 if (ret)
185 return ret; 213 return ret;
186 214
187 max77686->opmode[rdev_get_id(rdev)] = val; 215 max77686->opmode[id] = val;
188 return 0; 216 return 0;
189} 217}
190 218
@@ -197,7 +225,7 @@ static int max77686_enable(struct regulator_dev *rdev)
197 shift = max77686_get_opmode_shift(id); 225 shift = max77686_get_opmode_shift(id);
198 226
199 if (max77686->opmode[id] == MAX77686_OFF_PWRREQ) 227 if (max77686->opmode[id] == MAX77686_OFF_PWRREQ)
200 max77686->opmode[id] = MAX77686_NORMAL; 228 max77686->opmode[id] = max77686_map_normal_mode(max77686, id);
201 229
202 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 230 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
203 rdev->desc->enable_mask, 231 rdev->desc->enable_mask,
@@ -229,6 +257,36 @@ static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
229 MAX77686_RAMP_RATE_MASK, ramp_value << 6); 257 MAX77686_RAMP_RATE_MASK, ramp_value << 6);
230} 258}
231 259
260static int max77686_of_parse_cb(struct device_node *np,
261 const struct regulator_desc *desc,
262 struct regulator_config *config)
263{
264 struct max77686_data *max77686 = config->driver_data;
265
266 switch (desc->id) {
267 case MAX77686_BUCK8:
268 case MAX77686_BUCK9:
269 case MAX77686_LDO20 ... MAX77686_LDO22:
270 config->ena_gpio = of_get_named_gpio(np,
271 "maxim,ena-gpios", 0);
272 config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
273 config->ena_gpio_initialized = true;
274 break;
275 default:
276 return 0;
277 }
278
279 if (gpio_is_valid(config->ena_gpio)) {
280 max77686->gpio_enabled |= (1 << desc->id);
281
282 return regmap_update_bits(config->regmap, desc->enable_reg,
283 desc->enable_mask,
284 MAX77686_GPIO_CONTROL);
285 }
286
287 return 0;
288}
289
232static struct regulator_ops max77686_ops = { 290static struct regulator_ops max77686_ops = {
233 .list_voltage = regulator_list_voltage_linear, 291 .list_voltage = regulator_list_voltage_linear,
234 .map_voltage = regulator_map_voltage_linear, 292 .map_voltage = regulator_map_voltage_linear,
@@ -283,6 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
283 .name = "LDO"#num, \ 341 .name = "LDO"#num, \
284 .of_match = of_match_ptr("LDO"#num), \ 342 .of_match = of_match_ptr("LDO"#num), \
285 .regulators_node = of_match_ptr("voltage-regulators"), \ 343 .regulators_node = of_match_ptr("voltage-regulators"), \
344 .of_parse_cb = max77686_of_parse_cb, \
286 .id = MAX77686_LDO##num, \ 345 .id = MAX77686_LDO##num, \
287 .ops = &max77686_ops, \ 346 .ops = &max77686_ops, \
288 .type = REGULATOR_VOLTAGE, \ 347 .type = REGULATOR_VOLTAGE, \
@@ -355,6 +414,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
355 .name = "BUCK"#num, \ 414 .name = "BUCK"#num, \
356 .of_match = of_match_ptr("BUCK"#num), \ 415 .of_match = of_match_ptr("BUCK"#num), \
357 .regulators_node = of_match_ptr("voltage-regulators"), \ 416 .regulators_node = of_match_ptr("voltage-regulators"), \
417 .of_parse_cb = max77686_of_parse_cb, \
358 .id = MAX77686_BUCK##num, \ 418 .id = MAX77686_BUCK##num, \
359 .ops = &max77686_ops, \ 419 .ops = &max77686_ops, \
360 .type = REGULATOR_VOLTAGE, \ 420 .type = REGULATOR_VOLTAGE, \
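For max77686, BUCK8/9 and LDO20-22 may be handed over to an external enable GPIO via the new of_parse_cb hook, in which case the GPIO-control opmode value stands in for "normal"; the driver remembers which regulators took that path in the gpio_enabled bitfield and consults it whenever a normal-mode value is about to be written. One caution that follows from the declarations above: gpio_enabled is a u64 bitfield, but the patch tests it with a plain int shift (1 << id), which is undefined behaviour once a regulator id reaches 31 or more. A 64-bit-safe form would look like the sketch below; the helper name is illustrative.

#include <linux/bitops.h>

/* 64-bit-safe variant of the gpio_enabled test. */
static bool example_is_gpio_controlled(const struct max77686_data *max77686,
				       int id)
{
	return max77686->gpio_enabled & BIT_ULL(id);
}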
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
new file mode 100644
index 000000000000..c132ef527cdd
--- /dev/null
+++ b/drivers/regulator/max77843.c
@@ -0,0 +1,227 @@
1/*
2 * max77843.c - Regulator driver for the Maxim MAX77843
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
6 * Author: Beomho Seo <beomho.seo@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/regulator/driver.h>
17#include <linux/regulator/machine.h>
18#include <linux/mfd/max77843-private.h>
19#include <linux/regulator/of_regulator.h>
20
21enum max77843_regulator_type {
22 MAX77843_SAFEOUT1 = 0,
23 MAX77843_SAFEOUT2,
24 MAX77843_CHARGER,
25
26 MAX77843_NUM,
27};
28
29static const unsigned int max77843_safeout_voltage_table[] = {
30 4850000,
31 4900000,
32 4950000,
33 3300000,
34};
35
36static int max77843_reg_is_enabled(struct regulator_dev *rdev)
37{
38 struct regmap *regmap = rdev->regmap;
39 int ret;
40 unsigned int reg;
41
42 ret = regmap_read(regmap, rdev->desc->enable_reg, &reg);
43 if (ret) {
44 dev_err(&rdev->dev, "Fialed to read charger register\n");
45 return ret;
46 }
47
48 return (reg & rdev->desc->enable_mask) == rdev->desc->enable_mask;
49}
50
51static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
52{
53 struct regmap *regmap = rdev->regmap;
54 unsigned int chg_min_uA = rdev->constraints->min_uA;
55 unsigned int chg_max_uA = rdev->constraints->max_uA;
56 unsigned int val;
57 int ret;
58 unsigned int reg, sel;
59
60 ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
61 if (ret) {
62 dev_err(&rdev->dev, "Failed to read charger register\n");
63 return ret;
64 }
65
66 sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
67
68 if (sel < 0x03)
69 sel = 0;
70 else
71 sel -= 2;
72
73 val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
74 if (val > chg_max_uA)
75 return -EINVAL;
76
77 return val;
78}
79
80static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
81 int min_uA, int max_uA)
82{
83 struct regmap *regmap = rdev->regmap;
84 unsigned int chg_min_uA = rdev->constraints->min_uA;
85 int sel = 0;
86
87 while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
88 sel++;
89
90 if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
91 return -EINVAL;
92
93 sel += 2;
94
95 return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
96}
97
98static struct regulator_ops max77843_charger_ops = {
99 .is_enabled = max77843_reg_is_enabled,
100 .enable = regulator_enable_regmap,
101 .disable = regulator_disable_regmap,
102 .get_current_limit = max77843_reg_get_current_limit,
103 .set_current_limit = max77843_reg_set_current_limit,
104};
105
106static struct regulator_ops max77843_regulator_ops = {
107 .is_enabled = regulator_is_enabled_regmap,
108 .enable = regulator_enable_regmap,
109 .disable = regulator_disable_regmap,
110 .list_voltage = regulator_list_voltage_table,
111 .get_voltage_sel = regulator_get_voltage_sel_regmap,
112 .set_voltage_sel = regulator_set_voltage_sel_regmap,
113};
114
115static const struct regulator_desc max77843_supported_regulators[] = {
116 [MAX77843_SAFEOUT1] = {
117 .name = "SAFEOUT1",
118 .id = MAX77843_SAFEOUT1,
119 .ops = &max77843_regulator_ops,
120 .of_match = of_match_ptr("SAFEOUT1"),
121 .regulators_node = of_match_ptr("regulators"),
122 .type = REGULATOR_VOLTAGE,
123 .owner = THIS_MODULE,
124 .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table),
125 .volt_table = max77843_safeout_voltage_table,
126 .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
127 .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1,
128 .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
129 .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK,
130 },
131 [MAX77843_SAFEOUT2] = {
132 .name = "SAFEOUT2",
133 .id = MAX77843_SAFEOUT2,
134 .ops = &max77843_regulator_ops,
135 .of_match = of_match_ptr("SAFEOUT2"),
136 .regulators_node = of_match_ptr("regulators"),
137 .type = REGULATOR_VOLTAGE,
138 .owner = THIS_MODULE,
139 .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table),
140 .volt_table = max77843_safeout_voltage_table,
141 .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
142 .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2,
143 .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
144 .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK,
145 },
146 [MAX77843_CHARGER] = {
147 .name = "CHARGER",
148 .id = MAX77843_CHARGER,
149 .ops = &max77843_charger_ops,
150 .of_match = of_match_ptr("CHARGER"),
151 .regulators_node = of_match_ptr("regulators"),
152 .type = REGULATOR_CURRENT,
153 .owner = THIS_MODULE,
154 .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00,
155 .enable_mask = MAX77843_CHG_MASK,
156 },
157};
158
159static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
160{
161 switch (reg_id) {
162 case MAX77843_SAFEOUT1:
163 case MAX77843_SAFEOUT2:
164 return max77843->regmap;
165 case MAX77843_CHARGER:
166 return max77843->regmap_chg;
167 default:
168 return max77843->regmap;
169 }
170}
171
172static int max77843_regulator_probe(struct platform_device *pdev)
173{
174 struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
175 struct regulator_config config = {};
176 int i;
177
178 config.dev = max77843->dev;
179 config.driver_data = max77843;
180
181 for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
182 struct regulator_dev *regulator;
183
184 config.regmap = max77843_get_regmap(max77843,
185 max77843_supported_regulators[i].id);
186
187 regulator = devm_regulator_register(&pdev->dev,
188 &max77843_supported_regulators[i], &config);
189 if (IS_ERR(regulator)) {
190 dev_err(&pdev->dev,
191 "Failed to regiser regulator-%d\n", i);
192 return PTR_ERR(regulator);
193 }
194 }
195
196 return 0;
197}
198
199static const struct platform_device_id max77843_regulator_id[] = {
200 { "max77843-regulator", },
201 { /* sentinel */ },
202};
203
204static struct platform_driver max77843_regulator_driver = {
205 .driver = {
206 .name = "max77843-regulator",
207 },
208 .probe = max77843_regulator_probe,
209 .id_table = max77843_regulator_id,
210};
211
212static int __init max77843_regulator_init(void)
213{
214 return platform_driver_register(&max77843_regulator_driver);
215}
216subsys_initcall(max77843_regulator_init);
217
218static void __exit max77843_regulator_exit(void)
219{
220 platform_driver_unregister(&max77843_regulator_driver);
221}
222module_exit(max77843_regulator_exit);
223
224MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
225MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
226MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
227MODULE_LICENSE("GPL");
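In the new max77843 driver, the charger's fast-charge current field decodes to microamps through a small selector transform: raw values below 0x03 clamp to the platform minimum, and everything above is offset by 2 before scaling by the chip's current step. As a purely illustrative example, assuming a 100 mA minimum and a 25 mA step (neither taken from the datasheet), a raw field of 0x05 decodes as sel = 5 - 2 = 3, giving 100000 + 3 * 25000 = 175000 uA. The transform in isolation:

/* Decode sketch; min/step are parameters here, not the real chip constants. */
static unsigned int example_decode_fast_chg(unsigned int raw,
					    unsigned int min_uA,
					    unsigned int step_uA)
{
	unsigned int sel = (raw < 0x03) ? 0 : raw - 2;

	return min_uA + step_uA * sel;
}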
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index c8bddcc8f911..81229579ece9 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -115,7 +115,7 @@ static unsigned int max8649_get_mode(struct regulator_dev *rdev)
115 return REGULATOR_MODE_NORMAL; 115 return REGULATOR_MODE_NORMAL;
116} 116}
117 117
118static struct regulator_ops max8649_dcdc_ops = { 118static const struct regulator_ops max8649_dcdc_ops = {
119 .set_voltage_sel = regulator_set_voltage_sel_regmap, 119 .set_voltage_sel = regulator_set_voltage_sel_regmap,
120 .get_voltage_sel = regulator_get_voltage_sel_regmap, 120 .get_voltage_sel = regulator_get_voltage_sel_regmap,
121 .list_voltage = regulator_list_voltage_linear, 121 .list_voltage = regulator_list_voltage_linear,
@@ -143,7 +143,7 @@ static struct regulator_desc dcdc_desc = {
143 .enable_is_inverted = true, 143 .enable_is_inverted = true,
144}; 144};
145 145
146static struct regmap_config max8649_regmap_config = { 146static const struct regmap_config max8649_regmap_config = {
147 .reg_bits = 8, 147 .reg_bits = 8,
148 .val_bits = 8, 148 .val_bits = 8,
149}; 149};
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
new file mode 100644
index 000000000000..a5b2f4762677
--- /dev/null
+++ b/drivers/regulator/mt6397-regulator.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu <flora.fu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/platform_device.h>
18#include <linux/regmap.h>
19#include <linux/mfd/mt6397/core.h>
20#include <linux/mfd/mt6397/registers.h>
21#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h>
23#include <linux/regulator/mt6397-regulator.h>
24#include <linux/regulator/of_regulator.h>
25
26/*
27 * MT6397 regulators' information
28 *
29 * @desc: standard fields of regulator description.
30 * @qi: Mask for querying the enable signal status of regulators
31 * @vselon_reg: Register for buck voltage select under hardware control mode
32 * @vselctrl_reg: Register for controlling the buck voltage control mode.
33 * @vselctrl_mask: Mask for querying the buck's voltage control mode.
34 */
35struct mt6397_regulator_info {
36 struct regulator_desc desc;
37 u32 qi;
38 u32 vselon_reg;
39 u32 vselctrl_reg;
40 u32 vselctrl_mask;
41};
42
43#define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
44 vosel, vosel_mask, voselon, vosel_ctrl) \
45[MT6397_ID_##vreg] = { \
46 .desc = { \
47 .name = #vreg, \
48 .of_match = of_match_ptr(match), \
49 .ops = &mt6397_volt_range_ops, \
50 .type = REGULATOR_VOLTAGE, \
51 .id = MT6397_ID_##vreg, \
52 .owner = THIS_MODULE, \
53 .n_voltages = (max - min)/step + 1, \
54 .linear_ranges = volt_ranges, \
55 .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
56 .vsel_reg = vosel, \
57 .vsel_mask = vosel_mask, \
58 .enable_reg = enreg, \
59 .enable_mask = BIT(0), \
60 }, \
61 .qi = BIT(13), \
62 .vselon_reg = voselon, \
63 .vselctrl_reg = vosel_ctrl, \
64 .vselctrl_mask = BIT(1), \
65}
66
67#define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
68 vosel_mask) \
69[MT6397_ID_##vreg] = { \
70 .desc = { \
71 .name = #vreg, \
72 .of_match = of_match_ptr(match), \
73 .ops = &mt6397_volt_table_ops, \
74 .type = REGULATOR_VOLTAGE, \
75 .id = MT6397_ID_##vreg, \
76 .owner = THIS_MODULE, \
77 .n_voltages = ARRAY_SIZE(ldo_volt_table), \
78 .volt_table = ldo_volt_table, \
79 .vsel_reg = vosel, \
80 .vsel_mask = vosel_mask, \
81 .enable_reg = enreg, \
82 .enable_mask = BIT(enbit), \
83 }, \
84 .qi = BIT(15), \
85}
86
87#define MT6397_REG_FIXED(match, vreg, enreg, enbit, volt) \
88[MT6397_ID_##vreg] = { \
89 .desc = { \
90 .name = #vreg, \
91 .of_match = of_match_ptr(match), \
92 .ops = &mt6397_volt_fixed_ops, \
93 .type = REGULATOR_VOLTAGE, \
94 .id = MT6397_ID_##vreg, \
95 .owner = THIS_MODULE, \
96 .n_voltages = 1, \
97 .enable_reg = enreg, \
98 .enable_mask = BIT(enbit), \
99 .min_uV = volt, \
100 }, \
101 .qi = BIT(15), \
102}
103
104static const struct regulator_linear_range buck_volt_range1[] = {
105 REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
106};
107
108static const struct regulator_linear_range buck_volt_range2[] = {
109 REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250),
110};
111
112static const struct regulator_linear_range buck_volt_range3[] = {
113 REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
114};
115
116static const u32 ldo_volt_table1[] = {
117 1500000, 1800000, 2500000, 2800000,
118};
119
120static const u32 ldo_volt_table2[] = {
121 1800000, 3300000,
122};
123
124static const u32 ldo_volt_table3[] = {
125 3000000, 3300000,
126};
127
128static const u32 ldo_volt_table4[] = {
129 1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
130};
131
132static const u32 ldo_volt_table5[] = {
133 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
134};
135
136static const u32 ldo_volt_table5_v2[] = {
137 1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
138};
139
140static const u32 ldo_volt_table6[] = {
141 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
142};
143
144static const u32 ldo_volt_table7[] = {
145 1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
146};
147
148static int mt6397_get_status(struct regulator_dev *rdev)
149{
150 int ret;
151 u32 regval;
152 struct mt6397_regulator_info *info = rdev_get_drvdata(rdev);
153
154 ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
155 if (ret != 0) {
156 dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
157 return ret;
158 }
159
160 return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
161}
162
163static struct regulator_ops mt6397_volt_range_ops = {
164 .list_voltage = regulator_list_voltage_linear_range,
165 .map_voltage = regulator_map_voltage_linear_range,
166 .set_voltage_sel = regulator_set_voltage_sel_regmap,
167 .get_voltage_sel = regulator_get_voltage_sel_regmap,
168 .set_voltage_time_sel = regulator_set_voltage_time_sel,
169 .enable = regulator_enable_regmap,
170 .disable = regulator_disable_regmap,
171 .is_enabled = regulator_is_enabled_regmap,
172 .get_status = mt6397_get_status,
173};
174
175static struct regulator_ops mt6397_volt_table_ops = {
176 .list_voltage = regulator_list_voltage_table,
177 .map_voltage = regulator_map_voltage_iterate,
178 .set_voltage_sel = regulator_set_voltage_sel_regmap,
179 .get_voltage_sel = regulator_get_voltage_sel_regmap,
180 .set_voltage_time_sel = regulator_set_voltage_time_sel,
181 .enable = regulator_enable_regmap,
182 .disable = regulator_disable_regmap,
183 .is_enabled = regulator_is_enabled_regmap,
184 .get_status = mt6397_get_status,
185};
186
187static struct regulator_ops mt6397_volt_fixed_ops = {
188 .list_voltage = regulator_list_voltage_linear,
189 .enable = regulator_enable_regmap,
190 .disable = regulator_disable_regmap,
191 .is_enabled = regulator_is_enabled_regmap,
192 .get_status = mt6397_get_status,
193};
194
195/* The array is indexed by id (MT6397_ID_XXX). */
196static struct mt6397_regulator_info mt6397_regulators[] = {
197 MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250,
198 buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f,
199 MT6397_VCA15_CON10, MT6397_VCA15_CON5),
200 MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250,
201 buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f,
202 MT6397_VPCA7_CON10, MT6397_VPCA7_CON5),
203 MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250,
204 buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9,
205 0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5),
206 MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250,
207 buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9,
208 0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5),
209 MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250,
210 buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f,
211 MT6397_VCORE_CON10, MT6397_VCORE_CON5),
212 MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1,
213 MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f,
214 MT6397_VGPU_CON10, MT6397_VGPU_CON5),
215 MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2,
216 MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f,
217 MT6397_VDRM_CON10, MT6397_VDRM_CON5),
218 MT6397_BUCK("buck_vio18", VIO18, 1500000, 2120000, 20000,
219 buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f,
220 MT6397_VIO18_CON10, MT6397_VIO18_CON5),
221 MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000),
222 MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000),
223 MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1,
224 MT6397_ANALDO_CON2, 15, MT6397_ANALDO_CON6, 0xC0),
225 MT6397_REG_FIXED("ldo_vio28", VIO28, MT6397_DIGLDO_CON0, 14, 2800000),
226 MT6397_REG_FIXED("ldo_vusb", VUSB, MT6397_DIGLDO_CON1, 14, 3300000),
227 MT6397_LDO("ldo_vmc", VMC, ldo_volt_table2,
228 MT6397_DIGLDO_CON2, 12, MT6397_DIGLDO_CON29, 0x10),
229 MT6397_LDO("ldo_vmch", VMCH, ldo_volt_table3,
230 MT6397_DIGLDO_CON3, 14, MT6397_DIGLDO_CON17, 0x80),
231 MT6397_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table3,
232 MT6397_DIGLDO_CON4, 14, MT6397_DIGLDO_CON18, 0x10),
233 MT6397_LDO("ldo_vgp1", VGP1, ldo_volt_table4,
234 MT6397_DIGLDO_CON5, 15, MT6397_DIGLDO_CON19, 0xE0),
235 MT6397_LDO("ldo_vgp2", VGP2, ldo_volt_table5,
236 MT6397_DIGLDO_CON6, 15, MT6397_DIGLDO_CON20, 0xE0),
237 MT6397_LDO("ldo_vgp3", VGP3, ldo_volt_table5,
238 MT6397_DIGLDO_CON7, 15, MT6397_DIGLDO_CON21, 0xE0),
239 MT6397_LDO("ldo_vgp4", VGP4, ldo_volt_table5,
240 MT6397_DIGLDO_CON8, 15, MT6397_DIGLDO_CON22, 0xE0),
241 MT6397_LDO("ldo_vgp5", VGP5, ldo_volt_table6,
242 MT6397_DIGLDO_CON9, 15, MT6397_DIGLDO_CON23, 0xE0),
243 MT6397_LDO("ldo_vgp6", VGP6, ldo_volt_table5,
244 MT6397_DIGLDO_CON10, 15, MT6397_DIGLDO_CON33, 0xE0),
245 MT6397_LDO("ldo_vibr", VIBR, ldo_volt_table7,
246 MT6397_DIGLDO_CON24, 15, MT6397_DIGLDO_CON25, 0xE00),
247};
248
249static int mt6397_set_buck_vosel_reg(struct platform_device *pdev)
250{
251 struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
252 int i;
253 u32 regval;
254
255 for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
256 if (mt6397_regulators[i].vselctrl_reg) {
257 if (regmap_read(mt6397->regmap,
258 mt6397_regulators[i].vselctrl_reg,
259 &regval) < 0) {
260 dev_err(&pdev->dev,
261 "Failed to read buck ctrl\n");
262 return -EIO;
263 }
264
265 if (regval & mt6397_regulators[i].vselctrl_mask) {
266 mt6397_regulators[i].desc.vsel_reg =
267 mt6397_regulators[i].vselon_reg;
268 }
269 }
270 }
271
272 return 0;
273}
274
275static int mt6397_regulator_probe(struct platform_device *pdev)
276{
277 struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
278 struct regulator_config config = {};
279 struct regulator_dev *rdev;
280 int i;
281 u32 reg_value, version;
282
283 /* Query buck controller to select activated voltage register part */
284 if (mt6397_set_buck_vosel_reg(pdev))
285 return -EIO;
286
287 /* Read PMIC chip revision to update constraints and voltage table */
288 if (regmap_read(mt6397->regmap, MT6397_CID, &reg_value) < 0) {
289 dev_err(&pdev->dev, "Failed to read Chip ID\n");
290 return -EIO;
291 }
292 dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
293
294 version = (reg_value & 0xFF);
295 switch (version) {
296 case MT6397_REGULATOR_ID91:
297 mt6397_regulators[MT6397_ID_VGP2].desc.volt_table =
298 ldo_volt_table5_v2;
299 break;
300 default:
301 break;
302 }
303
304 for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
305 config.dev = &pdev->dev;
306 config.driver_data = &mt6397_regulators[i];
307 config.regmap = mt6397->regmap;
308 rdev = devm_regulator_register(&pdev->dev,
309 &mt6397_regulators[i].desc, &config);
310 if (IS_ERR(rdev)) {
311 dev_err(&pdev->dev, "failed to register %s\n",
312 mt6397_regulators[i].desc.name);
313 return PTR_ERR(rdev);
314 }
315 }
316
317 return 0;
318}
319
320static struct platform_driver mt6397_regulator_driver = {
321 .driver = {
322 .name = "mt6397-regulator",
323 },
324 .probe = mt6397_regulator_probe,
325};
326
327module_platform_driver(mt6397_regulator_driver);
328
329MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
330MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
331MODULE_LICENSE("GPL");
332MODULE_ALIAS("platform:mt6397-regulator");
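
The table-building macros above expand mechanically. As a reference sketch, the
MT6397_BUCK("buck_vcore", VCORE, ...) entry from mt6397_regulators[] expands to
roughly the following (derived by hand from the macro; the "/* 128 */"
arithmetic note is ours):

    [MT6397_ID_VCORE] = {
            .desc = {
                    .name = "VCORE",
                    .of_match = of_match_ptr("buck_vcore"),
                    .ops = &mt6397_volt_range_ops,
                    .type = REGULATOR_VOLTAGE,
                    .id = MT6397_ID_VCORE,
                    .owner = THIS_MODULE,
                    .n_voltages = (1493750 - 700000) / 6250 + 1, /* 128 */
                    .linear_ranges = buck_volt_range1,
                    .n_linear_ranges = ARRAY_SIZE(buck_volt_range1),
                    .vsel_reg = MT6397_VCORE_CON9,
                    .vsel_mask = 0x7f,
                    .enable_reg = MT6397_VCORE_CON7,
                    .enable_mask = BIT(0),
            },
            .qi = BIT(13),
            .vselon_reg = MT6397_VCORE_CON10,
            .vselctrl_reg = MT6397_VCORE_CON5,
            .vselctrl_mask = BIT(1),
    },
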
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 91eaaf010524..24e812c48d93 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -270,6 +270,7 @@ EXPORT_SYMBOL_GPL(of_regulator_match);
270 270
271struct regulator_init_data *regulator_of_get_init_data(struct device *dev, 271struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
272 const struct regulator_desc *desc, 272 const struct regulator_desc *desc,
273 struct regulator_config *config,
273 struct device_node **node) 274 struct device_node **node)
274{ 275{
275 struct device_node *search, *child; 276 struct device_node *search, *child;
@@ -307,6 +308,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
307 break; 308 break;
308 } 309 }
309 310
311 if (desc->of_parse_cb) {
312 if (desc->of_parse_cb(child, desc, config)) {
313 dev_err(dev,
314 "driver callback failed to parse DT for regulator %s\n",
315 child->name);
316 init_data = NULL;
317 break;
318 }
319 }
320
310 of_node_get(child); 321 of_node_get(child);
311 *node = child; 322 *node = child;
312 break; 323 break;
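
The new of_parse_cb hook lets a driver inspect each regulator's device-tree
node while the core parses init data; returning non-zero aborts parsing for
that node. A minimal hypothetical callback might look like the sketch below
(foo_regulator, foo_of_parse_cb and the "vendor,soft-start" property are
invented for illustration; of_property_read_bool() is the standard OF helper):

    struct foo_regulator {
            bool soft_start;
    };

    static int foo_of_parse_cb(struct device_node *np,
                               const struct regulator_desc *desc,
                               struct regulator_config *config)
    {
            struct foo_regulator *priv = config->driver_data;

            /* Pick up a custom property before the regulator registers. */
            priv->soft_start = of_property_read_bool(np, "vendor,soft-start");
            return 0;       /* non-zero aborts init-data parsing for np */
    }
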
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index c879dff597ee..8cc8d1877c44 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -56,7 +56,7 @@
56#define PFUZE100_VGEN5VOL 0x70 56#define PFUZE100_VGEN5VOL 0x70
57#define PFUZE100_VGEN6VOL 0x71 57#define PFUZE100_VGEN6VOL 0x71
58 58
59enum chips { PFUZE100, PFUZE200 }; 59enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 };
60 60
61struct pfuze_regulator { 61struct pfuze_regulator {
62 struct regulator_desc desc; 62 struct regulator_desc desc;
@@ -80,9 +80,18 @@ static const int pfuze100_vsnvs[] = {
80 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000, 80 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
81}; 81};
82 82
83static const int pfuze3000_sw2lo[] = {
84 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000,
85};
86
87static const int pfuze3000_sw2hi[] = {
88 2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000,
89};
90
83static const struct i2c_device_id pfuze_device_id[] = { 91static const struct i2c_device_id pfuze_device_id[] = {
84 {.name = "pfuze100", .driver_data = PFUZE100}, 92 {.name = "pfuze100", .driver_data = PFUZE100},
85 {.name = "pfuze200", .driver_data = PFUZE200}, 93 {.name = "pfuze200", .driver_data = PFUZE200},
94 {.name = "pfuze3000", .driver_data = PFUZE3000},
86 { } 95 { }
87}; 96};
88MODULE_DEVICE_TABLE(i2c, pfuze_device_id); 97MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
@@ -90,6 +99,7 @@ MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
90static const struct of_device_id pfuze_dt_ids[] = { 99static const struct of_device_id pfuze_dt_ids[] = {
91 { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100}, 100 { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
92 { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200}, 101 { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
102 { .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
93 { } 103 { }
94}; 104};
95MODULE_DEVICE_TABLE(of, pfuze_dt_ids); 105MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
@@ -219,6 +229,60 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
219 .stby_mask = 0x20, \ 229 .stby_mask = 0x20, \
220 } 230 }
221 231
232#define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step) { \
233 .desc = { \
234 .name = #_name, \
235 .n_voltages = ((max) - (min)) / (step) + 1, \
236 .ops = &pfuze100_ldo_regulator_ops, \
237 .type = REGULATOR_VOLTAGE, \
238 .id = _chip ## _ ## _name, \
239 .owner = THIS_MODULE, \
240 .min_uV = (min), \
241 .uV_step = (step), \
242 .vsel_reg = (base), \
243 .vsel_mask = 0x3, \
244 .enable_reg = (base), \
245 .enable_mask = 0x10, \
246 }, \
247 .stby_reg = (base), \
248 .stby_mask = 0x20, \
249}
250
251
252#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
253 .desc = { \
254 .name = #_name,\
255 .n_voltages = ((max) - (min)) / (step) + 1, \
256 .ops = &pfuze100_sw_regulator_ops, \
257 .type = REGULATOR_VOLTAGE, \
258 .id = _chip ## _ ## _name, \
259 .owner = THIS_MODULE, \
260 .min_uV = (min), \
261 .uV_step = (step), \
262 .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
263 .vsel_mask = 0x7, \
264 }, \
265 .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
266 .stby_mask = 0x7, \
267}
268
269#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
270 .desc = { \
271 .name = #_name,\
272 .n_voltages = ((max) - (min)) / (step) + 1, \
273 .ops = &pfuze100_sw_regulator_ops, \
274 .type = REGULATOR_VOLTAGE, \
275 .id = _chip ## _ ## _name, \
276 .owner = THIS_MODULE, \
277 .min_uV = (min), \
278 .uV_step = (step), \
279 .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
280 .vsel_mask = 0xf, \
281 }, \
282 .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
283 .stby_mask = 0xf, \
284}
285
222/* PFUZE100 */ 286/* PFUZE100 */
223static struct pfuze_regulator pfuze100_regulators[] = { 287static struct pfuze_regulator pfuze100_regulators[] = {
224 PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), 288 PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
@@ -254,6 +318,22 @@ static struct pfuze_regulator pfuze200_regulators[] = {
254 PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), 318 PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
255}; 319};
256 320
321static struct pfuze_regulator pfuze3000_regulators[] = {
322 PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000),
323 PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
324 PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
325 PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
326 PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
327 PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
328 PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000),
329 PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
330 PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
331 PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
332 PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
333 PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
334 PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
335};
336
257static struct pfuze_regulator *pfuze_regulators; 337static struct pfuze_regulator *pfuze_regulators;
258 338
259#ifdef CONFIG_OF 339#ifdef CONFIG_OF
@@ -294,6 +374,24 @@ static struct of_regulator_match pfuze200_matches[] = {
294 { .name = "vgen6", }, 374 { .name = "vgen6", },
295}; 375};
296 376
377/* PFUZE3000 */
378static struct of_regulator_match pfuze3000_matches[] = {
379
380 { .name = "sw1a", },
381 { .name = "sw1b", },
382 { .name = "sw2", },
383 { .name = "sw3", },
384 { .name = "swbst", },
385 { .name = "vsnvs", },
386 { .name = "vrefddr", },
387 { .name = "vldo1", },
388 { .name = "vldo2", },
389 { .name = "vccsd", },
390 { .name = "v33", },
391 { .name = "vldo3", },
392 { .name = "vldo4", },
393};
394
297static struct of_regulator_match *pfuze_matches; 395static struct of_regulator_match *pfuze_matches;
298 396
299static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) 397static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -313,6 +411,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
313 } 411 }
314 412
315 switch (chip->chip_id) { 413 switch (chip->chip_id) {
414 case PFUZE3000:
415 pfuze_matches = pfuze3000_matches;
416 ret = of_regulator_match(dev, parent, pfuze3000_matches,
417 ARRAY_SIZE(pfuze3000_matches));
418 break;
316 case PFUZE200: 419 case PFUZE200:
317 pfuze_matches = pfuze200_matches; 420 pfuze_matches = pfuze200_matches;
318 ret = of_regulator_match(dev, parent, pfuze200_matches, 421 ret = of_regulator_match(dev, parent, pfuze200_matches,
@@ -378,7 +481,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
378 * as ID=8 in PFUZE100 481 * as ID=8 in PFUZE100
379 */ 482 */
380 dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); 483 dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
381 } else if ((value & 0x0f) != pfuze_chip->chip_id) { 484 } else if ((value & 0x0f) != pfuze_chip->chip_id &&
485 (value & 0xf0) >> 4 != pfuze_chip->chip_id) {
382 /* device id NOT match with your setting */ 486 /* device id NOT match with your setting */
383 dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); 487 dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
384 return -ENODEV; 488 return -ENODEV;
@@ -417,7 +521,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
417 int i, ret; 521 int i, ret;
418 const struct of_device_id *match; 522 const struct of_device_id *match;
419 u32 regulator_num; 523 u32 regulator_num;
420 u32 sw_check_start, sw_check_end; 524 u32 sw_check_start, sw_check_end, sw_hi = 0x40;
421 525
422 pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip), 526 pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
423 GFP_KERNEL); 527 GFP_KERNEL);
@@ -458,13 +562,19 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
458 562
459 /* use the right regulators after identify the right device */ 563 /* use the right regulators after identify the right device */
460 switch (pfuze_chip->chip_id) { 564 switch (pfuze_chip->chip_id) {
565 case PFUZE3000:
566 pfuze_regulators = pfuze3000_regulators;
567 regulator_num = ARRAY_SIZE(pfuze3000_regulators);
568 sw_check_start = PFUZE3000_SW2;
569 sw_check_end = PFUZE3000_SW2;
570 sw_hi = 1 << 3;
571 break;
461 case PFUZE200: 572 case PFUZE200:
462 pfuze_regulators = pfuze200_regulators; 573 pfuze_regulators = pfuze200_regulators;
463 regulator_num = ARRAY_SIZE(pfuze200_regulators); 574 regulator_num = ARRAY_SIZE(pfuze200_regulators);
464 sw_check_start = PFUZE200_SW2; 575 sw_check_start = PFUZE200_SW2;
465 sw_check_end = PFUZE200_SW3B; 576 sw_check_end = PFUZE200_SW3B;
466 break; 577 break;
467
468 case PFUZE100: 578 case PFUZE100:
469 default: 579 default:
470 pfuze_regulators = pfuze100_regulators; 580 pfuze_regulators = pfuze100_regulators;
@@ -474,7 +584,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
474 break; 584 break;
475 } 585 }
476 dev_info(&client->dev, "pfuze%s found.\n", 586 dev_info(&client->dev, "pfuze%s found.\n",
477 (pfuze_chip->chip_id == PFUZE100) ? "100" : "200"); 587 (pfuze_chip->chip_id == PFUZE100) ? "100" :
588 ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
478 589
479 memcpy(pfuze_chip->regulator_descs, pfuze_regulators, 590 memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
480 sizeof(pfuze_chip->regulator_descs)); 591 sizeof(pfuze_chip->regulator_descs));
@@ -498,10 +609,15 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
498 /* SW2~SW4 high bit check and modify the voltage value table */ 609 /* SW2~SW4 high bit check and modify the voltage value table */
499 if (i >= sw_check_start && i <= sw_check_end) { 610 if (i >= sw_check_start && i <= sw_check_end) {
500 regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); 611 regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
501 if (val & 0x40) { 612 if (val & sw_hi) {
502 desc->min_uV = 800000; 613 if (pfuze_chip->chip_id == PFUZE3000) {
503 desc->uV_step = 50000; 614 desc->volt_table = pfuze3000_sw2hi;
504 desc->n_voltages = 51; 615 desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
616 } else {
617 desc->min_uV = 800000;
618 desc->uV_step = 50000;
619 desc->n_voltages = 51;
620 }
505 } 621 }
506 } 622 }
507 623
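
Restating the widened identification check above: PFUZE100/200 keep their ID
in the low nibble of the device-ID register, while the PFUZE3000 apparently
reports its family (3) in the high nibble, so either nibble is now accepted.
A sketch of the predicate, with a hypothetical helper name:

    static bool pfuze_id_matches(unsigned int value, int chip_id)
    {
            return (value & 0x0f) == chip_id ||
                   ((value & 0xf0) >> 4) == chip_id;
    }
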
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 8364ff331a81..e8647f7cf25e 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -227,9 +227,11 @@ static int rpm_reg_set_mV_sel(struct regulator_dev *rdev,
227 return uV; 227 return uV;
228 228
229 mutex_lock(&vreg->lock); 229 mutex_lock(&vreg->lock);
230 vreg->uV = uV;
231 if (vreg->is_enabled) 230 if (vreg->is_enabled)
232 ret = rpm_reg_write(vreg, req, vreg->uV / 1000); 231 ret = rpm_reg_write(vreg, req, uV / 1000);
232
233 if (!ret)
234 vreg->uV = uV;
233 mutex_unlock(&vreg->lock); 235 mutex_unlock(&vreg->lock);
234 236
235 return ret; 237 return ret;
@@ -252,9 +254,11 @@ static int rpm_reg_set_uV_sel(struct regulator_dev *rdev,
252 return uV; 254 return uV;
253 255
254 mutex_lock(&vreg->lock); 256 mutex_lock(&vreg->lock);
255 vreg->uV = uV;
256 if (vreg->is_enabled) 257 if (vreg->is_enabled)
257 ret = rpm_reg_write(vreg, req, vreg->uV); 258 ret = rpm_reg_write(vreg, req, uV);
259
260 if (!ret)
261 vreg->uV = uV;
258 mutex_unlock(&vreg->lock); 262 mutex_unlock(&vreg->lock);
259 263
260 return ret; 264 return ret;
@@ -674,6 +678,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
674 vreg->desc.owner = THIS_MODULE; 678 vreg->desc.owner = THIS_MODULE;
675 vreg->desc.type = REGULATOR_VOLTAGE; 679 vreg->desc.type = REGULATOR_VOLTAGE;
676 vreg->desc.name = pdev->dev.of_node->name; 680 vreg->desc.name = pdev->dev.of_node->name;
681 vreg->desc.supply_name = "vin";
677 682
678 vreg->rpm = dev_get_drvdata(pdev->dev.parent); 683 vreg->rpm = dev_get_drvdata(pdev->dev.parent);
679 if (!vreg->rpm) { 684 if (!vreg->rpm) {
@@ -768,7 +773,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
768 break; 773 break;
769 } 774 }
770 775
771 if (force_mode < 0) { 776 if (force_mode == -1) {
772 dev_err(&pdev->dev, "invalid force mode\n"); 777 dev_err(&pdev->dev, "invalid force mode\n");
773 return -EINVAL; 778 return -EINVAL;
774 } 779 }
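
The qcom_rpm hunks above follow a write-then-commit pattern: the cached
voltage is updated only once the RPM write is known to have succeeded, so a
failed request leaves vreg->uV describing the real hardware state. Generic
shape of the fix, with invented names (struct vreg, write_hw):

    static int set_and_cache(struct vreg *v, int uV)
    {
            int ret = 0;

            mutex_lock(&v->lock);
            if (v->is_enabled)
                    ret = write_hw(v, uV);  /* hypothetical write helper */
            if (!ret)
                    v->uV = uV;             /* commit cache on success only */
            mutex_unlock(&v->lock);
            return ret;
    }
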
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index c94a3e0f3b91..1f93b752a81c 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -97,7 +97,7 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
97 RK808_RAMP_RATE_MASK, ramp_value); 97 RK808_RAMP_RATE_MASK, ramp_value);
98} 98}
99 99
100int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) 100static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
101{ 101{
102 unsigned int reg; 102 unsigned int reg;
103 int sel = regulator_map_voltage_linear_range(rdev, uv, uv); 103 int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
@@ -112,7 +112,7 @@ int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
112 sel); 112 sel);
113} 113}
114 114
115int rk808_set_suspend_enable(struct regulator_dev *rdev) 115static int rk808_set_suspend_enable(struct regulator_dev *rdev)
116{ 116{
117 unsigned int reg; 117 unsigned int reg;
118 118
@@ -123,7 +123,7 @@ int rk808_set_suspend_enable(struct regulator_dev *rdev)
123 0); 123 0);
124} 124}
125 125
126int rk808_set_suspend_disable(struct regulator_dev *rdev) 126static int rk808_set_suspend_disable(struct regulator_dev *rdev)
127{ 127{
128 unsigned int reg; 128 unsigned int reg;
129 129
diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c
index 870cc49438db..96d2c18e051a 100644
--- a/drivers/regulator/rt5033-regulator.c
+++ b/drivers/regulator/rt5033-regulator.c
@@ -36,6 +36,8 @@ static struct regulator_ops rt5033_buck_ops = {
36static const struct regulator_desc rt5033_supported_regulators[] = { 36static const struct regulator_desc rt5033_supported_regulators[] = {
37 [RT5033_BUCK] = { 37 [RT5033_BUCK] = {
38 .name = "BUCK", 38 .name = "BUCK",
39 .of_match = of_match_ptr("BUCK"),
40 .regulators_node = of_match_ptr("regulators"),
39 .id = RT5033_BUCK, 41 .id = RT5033_BUCK,
40 .ops = &rt5033_buck_ops, 42 .ops = &rt5033_buck_ops,
41 .type = REGULATOR_VOLTAGE, 43 .type = REGULATOR_VOLTAGE,
@@ -50,6 +52,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
50 }, 52 },
51 [RT5033_LDO] = { 53 [RT5033_LDO] = {
52 .name = "LDO", 54 .name = "LDO",
55 .of_match = of_match_ptr("LDO"),
56 .regulators_node = of_match_ptr("regulators"),
53 .id = RT5033_LDO, 57 .id = RT5033_LDO,
54 .ops = &rt5033_buck_ops, 58 .ops = &rt5033_buck_ops,
55 .type = REGULATOR_VOLTAGE, 59 .type = REGULATOR_VOLTAGE,
@@ -64,6 +68,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
64 }, 68 },
65 [RT5033_SAFE_LDO] = { 69 [RT5033_SAFE_LDO] = {
66 .name = "SAFE_LDO", 70 .name = "SAFE_LDO",
71 .of_match = of_match_ptr("SAFE_LDO"),
72 .regulators_node = of_match_ptr("regulators"),
67 .id = RT5033_SAFE_LDO, 73 .id = RT5033_SAFE_LDO,
68 .ops = &rt5033_safe_ldo_ops, 74 .ops = &rt5033_safe_ldo_ops,
69 .type = REGULATOR_VOLTAGE, 75 .type = REGULATOR_VOLTAGE,
@@ -81,7 +87,7 @@ static int rt5033_regulator_probe(struct platform_device *pdev)
81 int ret, i; 87 int ret, i;
82 struct regulator_config config = {}; 88 struct regulator_config config = {};
83 89
84 config.dev = &pdev->dev; 90 config.dev = rt5033->dev;
85 config.driver_data = rt5033; 91 config.driver_data = rt5033;
86 92
87 for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) { 93 for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) {
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 7380af8bd50d..b941e564b3f3 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -173,7 +173,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
173} 173}
174 174
175/* Operations permitted on VDCDCx */ 175/* Operations permitted on VDCDCx */
176static struct regulator_ops tps65023_dcdc_ops = { 176static const struct regulator_ops tps65023_dcdc_ops = {
177 .is_enabled = regulator_is_enabled_regmap, 177 .is_enabled = regulator_is_enabled_regmap,
178 .enable = regulator_enable_regmap, 178 .enable = regulator_enable_regmap,
179 .disable = regulator_disable_regmap, 179 .disable = regulator_disable_regmap,
@@ -184,7 +184,7 @@ static struct regulator_ops tps65023_dcdc_ops = {
184}; 184};
185 185
186/* Operations permitted on LDOx */ 186/* Operations permitted on LDOx */
187static struct regulator_ops tps65023_ldo_ops = { 187static const struct regulator_ops tps65023_ldo_ops = {
188 .is_enabled = regulator_is_enabled_regmap, 188 .is_enabled = regulator_is_enabled_regmap,
189 .enable = regulator_enable_regmap, 189 .enable = regulator_enable_regmap,
190 .disable = regulator_disable_regmap, 190 .disable = regulator_disable_regmap,
@@ -194,7 +194,7 @@ static struct regulator_ops tps65023_ldo_ops = {
194 .map_voltage = regulator_map_voltage_ascend, 194 .map_voltage = regulator_map_voltage_ascend,
195}; 195};
196 196
197static struct regmap_config tps65023_regmap_config = { 197static const struct regmap_config tps65023_regmap_config = {
198 .reg_bits = 8, 198 .reg_bits = 8,
199 .val_bits = 8, 199 .val_bits = 8,
200}; 200};
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f407e3763432..642c77c76b84 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
1784 QETH_DBF_TEXT(SETUP, 2, "idxanswr"); 1784 QETH_DBF_TEXT(SETUP, 2, "idxanswr");
1785 card = CARD_FROM_CDEV(channel->ccwdev); 1785 card = CARD_FROM_CDEV(channel->ccwdev);
1786 iob = qeth_get_buffer(channel); 1786 iob = qeth_get_buffer(channel);
1787 if (!iob)
1788 return -ENOMEM;
1787 iob->callback = idx_reply_cb; 1789 iob->callback = idx_reply_cb;
1788 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); 1790 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1789 channel->ccw.count = QETH_BUFSIZE; 1791 channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
1834 QETH_DBF_TEXT(SETUP, 2, "idxactch"); 1836 QETH_DBF_TEXT(SETUP, 2, "idxactch");
1835 1837
1836 iob = qeth_get_buffer(channel); 1838 iob = qeth_get_buffer(channel);
1839 if (!iob)
1840 return -ENOMEM;
1837 iob->callback = idx_reply_cb; 1841 iob->callback = idx_reply_cb;
1838 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); 1842 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1839 channel->ccw.count = IDX_ACTIVATE_SIZE; 1843 channel->ccw.count = IDX_ACTIVATE_SIZE;
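
The qeth hunks in this file (and in the l2/l3 code below) repeat one
hardening pattern: the command-buffer getters can now return NULL, and every
caller checks the buffer before touching it, e.g.:

    iob = qeth_get_buffer(channel);
    if (!iob)
            return -ENOMEM; /* propagate allocation failure, don't oops */
    iob->callback = idx_reply_cb;
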
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
2021} 2025}
2022EXPORT_SYMBOL_GPL(qeth_prepare_control_data); 2026EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
2023 2027
2028/**
2029 * qeth_send_control_data() - send control command to the card
2030 * @card: qeth_card structure pointer
2031 * @len: size of the command buffer
2032 * @iob: qeth_cmd_buffer pointer
2033 * @reply_cb: callback function pointer
2034 * @cb_card: pointer to the qeth_card structure
2035 * @cb_reply: pointer to the qeth_reply structure
2036 * @cb_cmd: pointer to the original iob for non-IPA
2037 * commands, or to the qeth_ipa_cmd structure
2038 * for IPA commands.
2039 * @reply_param: private pointer passed to the callback
2040 *
2041 * Returns the value of the `return_code' field of the response
2042 * block returned by the hardware, or another error indication.
2043 * A value of zero indicates that the command completed successfully.
2044 *
2045 * The callback function is called one or more times, with cb_cmd
2046 * pointing to the response returned by the hardware. It must return
2047 * non-zero while more reply blocks are expected, and zero once the
2048 * last or only reply block has been received. The callback can
2049 * retrieve the reply_param pointer from the 'param' field of
2050 * struct qeth_reply.
2051 */
2052
2024int qeth_send_control_data(struct qeth_card *card, int len, 2053int qeth_send_control_data(struct qeth_card *card, int len,
2025 struct qeth_cmd_buffer *iob, 2054 struct qeth_cmd_buffer *iob,
2026 int (*reply_cb)(struct qeth_card *, struct qeth_reply *, 2055 int (*reply_cb)(struct qeth_card *cb_card,
2027 unsigned long), 2056 struct qeth_reply *cb_reply,
2057 unsigned long cb_cmd),
2028 void *reply_param) 2058 void *reply_param)
2029{ 2059{
2030 int rc; 2060 int rc;
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2914 struct qeth_cmd_buffer *iob; 2944 struct qeth_cmd_buffer *iob;
2915 struct qeth_ipa_cmd *cmd; 2945 struct qeth_ipa_cmd *cmd;
2916 2946
2917 iob = qeth_wait_for_buffer(&card->write); 2947 iob = qeth_get_buffer(&card->write);
2918 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 2948 if (iob) {
2919 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); 2949 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2950 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
2951 } else {
2952 dev_warn(&card->gdev->dev,
2953 "The qeth driver ran out of channel command buffers\n");
2954 QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
2955 dev_name(&card->gdev->dev));
2956 }
2920 2957
2921 return iob; 2958 return iob;
2922} 2959}
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2932} 2969}
2933EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 2970EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2934 2971
2972/**
2973 * qeth_send_ipa_cmd() - send an IPA command
2974 *
2975 * See qeth_send_control_data() for an explanation of the arguments.
2976 */
2977
2935int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2978int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2936 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 2979 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2937 unsigned long), 2980 unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
2968 QETH_DBF_TEXT(SETUP, 2, "strtlan"); 3011 QETH_DBF_TEXT(SETUP, 2, "strtlan");
2969 3012
2970 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); 3013 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
3014 if (!iob)
3015 return -ENOMEM;
2971 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 3016 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2972 return rc; 3017 return rc;
2973} 3018}
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3013 3058
3014 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, 3059 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
3015 QETH_PROT_IPV4); 3060 QETH_PROT_IPV4);
3016 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 3061 if (iob) {
3017 cmd->data.setadapterparms.hdr.cmdlength = cmdlen; 3062 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3018 cmd->data.setadapterparms.hdr.command_code = command; 3063 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
3019 cmd->data.setadapterparms.hdr.used_total = 1; 3064 cmd->data.setadapterparms.hdr.command_code = command;
3020 cmd->data.setadapterparms.hdr.seq_no = 1; 3065 cmd->data.setadapterparms.hdr.used_total = 1;
3066 cmd->data.setadapterparms.hdr.seq_no = 1;
3067 }
3021 3068
3022 return iob; 3069 return iob;
3023} 3070}
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
3030 QETH_CARD_TEXT(card, 3, "queryadp"); 3077 QETH_CARD_TEXT(card, 3, "queryadp");
3031 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 3078 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3032 sizeof(struct qeth_ipacmd_setadpparms)); 3079 sizeof(struct qeth_ipacmd_setadpparms));
3080 if (!iob)
3081 return -ENOMEM;
3033 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 3082 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3034 return rc; 3083 return rc;
3035} 3084}
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
3080 3129
3081 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); 3130 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
3082 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); 3131 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
3132 if (!iob)
3133 return -ENOMEM;
3083 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3134 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3084 return rc; 3135 return rc;
3085} 3136}
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
3119 return -ENOMEDIUM; 3170 return -ENOMEDIUM;
3120 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 3171 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
3121 sizeof(struct qeth_ipacmd_setadpparms_hdr)); 3172 sizeof(struct qeth_ipacmd_setadpparms_hdr));
3173 if (!iob)
3174 return -ENOMEM;
3122 return qeth_send_ipa_cmd(card, iob, 3175 return qeth_send_ipa_cmd(card, iob,
3123 qeth_query_switch_attributes_cb, sw_info); 3176 qeth_query_switch_attributes_cb, sw_info);
3124} 3177}
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
3146 3199
3147 QETH_DBF_TEXT(SETUP, 2, "qdiagass"); 3200 QETH_DBF_TEXT(SETUP, 2, "qdiagass");
3148 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 3201 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3202 if (!iob)
3203 return -ENOMEM;
3149 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 3204 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3150 cmd->data.diagass.subcmd_len = 16; 3205 cmd->data.diagass.subcmd_len = 16;
3151 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; 3206 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3197 3252
3198 QETH_DBF_TEXT(SETUP, 2, "diagtrap"); 3253 QETH_DBF_TEXT(SETUP, 2, "diagtrap");
3199 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 3254 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3255 if (!iob)
3256 return -ENOMEM;
3200 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 3257 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3201 cmd->data.diagass.subcmd_len = 80; 3258 cmd->data.diagass.subcmd_len = 80;
3202 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; 3259 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
4162 4219
4163 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4220 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4164 sizeof(struct qeth_ipacmd_setadpparms)); 4221 sizeof(struct qeth_ipacmd_setadpparms));
4222 if (!iob)
4223 return;
4165 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); 4224 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
4166 cmd->data.setadapterparms.data.mode = mode; 4225 cmd->data.setadapterparms.data.mode = mode;
4167 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4226 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4232 4291
4233 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4292 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4234 sizeof(struct qeth_ipacmd_setadpparms)); 4293 sizeof(struct qeth_ipacmd_setadpparms));
4294 if (!iob)
4295 return -ENOMEM;
4235 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 4296 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4236 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4297 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4237 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; 4298 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4345 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4406 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4346 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 4407 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4347 sizeof(struct qeth_set_access_ctrl)); 4408 sizeof(struct qeth_set_access_ctrl));
4409 if (!iob)
4410 return -ENOMEM;
4348 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 4411 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4349 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4412 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4350 access_ctrl_req->subcmd_code = isolation; 4413 access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4588 4651
4589 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, 4652 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4590 QETH_SNMP_SETADP_CMDLENGTH + req_len); 4653 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4654 if (!iob) {
4655 rc = -ENOMEM;
4656 goto out;
4657 }
4591 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 4658 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4592 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); 4659 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4593 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, 4660 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4599 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4666 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4600 rc = -EFAULT; 4667 rc = -EFAULT;
4601 } 4668 }
4602 4669out:
4603 kfree(ureq); 4670 kfree(ureq);
4604 kfree(qinfo.udata); 4671 kfree(qinfo.udata);
4605 return rc; 4672 return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4670 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4737 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4671 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 4738 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4672 sizeof(struct qeth_query_oat)); 4739 sizeof(struct qeth_query_oat));
4740 if (!iob) {
4741 rc = -ENOMEM;
4742 goto out_free;
4743 }
4673 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 4744 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4674 oat_req = &cmd->data.setadapterparms.data.query_oat; 4745 oat_req = &cmd->data.setadapterparms.data.query_oat;
4675 oat_req->subcmd_code = oat_data.command; 4746 oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
4735 return -EOPNOTSUPP; 4806 return -EOPNOTSUPP;
4736 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 4807 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
4737 sizeof(struct qeth_ipacmd_setadpparms_hdr)); 4808 sizeof(struct qeth_ipacmd_setadpparms_hdr));
4809 if (!iob)
4810 return -ENOMEM;
4738 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, 4811 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4739 (void *)carrier_info); 4812 (void *)carrier_info);
4740} 4813}
@@ -5060,11 +5133,23 @@ retriable:
5060 card->options.adp.supported_funcs = 0; 5133 card->options.adp.supported_funcs = 0;
5061 card->options.sbp.supported_funcs = 0; 5134 card->options.sbp.supported_funcs = 0;
5062 card->info.diagass_support = 0; 5135 card->info.diagass_support = 0;
5063 qeth_query_ipassists(card, QETH_PROT_IPV4); 5136 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5064 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) 5137 if (rc == -ENOMEM)
5065 qeth_query_setadapterparms(card); 5138 goto out;
5066 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) 5139 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5067 qeth_query_setdiagass(card); 5140 rc = qeth_query_setadapterparms(card);
5141 if (rc < 0) {
5142 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
5143 goto out;
5144 }
5145 }
5146 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5147 rc = qeth_query_setdiagass(card);
5148 if (rc < 0) {
5149 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
5150 goto out;
5151 }
5152 }
5068 return 0; 5153 return 0;
5069out: 5154out:
5070 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5155 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d02cd1a67943..ce87ae72edbd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
27static int qeth_l2_stop(struct net_device *); 27static int qeth_l2_stop(struct net_device *);
28static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); 28static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
29static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, 29static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
30 enum qeth_ipa_cmds, 30 enum qeth_ipa_cmds);
31 int (*reply_cb) (struct qeth_card *,
32 struct qeth_reply*,
33 unsigned long));
34static void qeth_l2_set_multicast_list(struct net_device *); 31static void qeth_l2_set_multicast_list(struct net_device *);
35static int qeth_l2_recover(void *); 32static int qeth_l2_recover(void *);
36static void qeth_bridgeport_query_support(struct qeth_card *card); 33static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
130 return ndev; 127 return ndev;
131} 128}
132 129
133static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, 130static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
134 struct qeth_reply *reply,
135 unsigned long data)
136{ 131{
137 struct qeth_ipa_cmd *cmd; 132 int rc;
138 __u8 *mac;
139 133
140 QETH_CARD_TEXT(card, 2, "L2Sgmacb"); 134 if (retcode)
141 cmd = (struct qeth_ipa_cmd *) data; 135 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
142 mac = &cmd->data.setdelmac.mac[0]; 136 switch (retcode) {
143 /* MAC already registered, needed in couple/uncouple case */ 137 case IPA_RC_SUCCESS:
144 if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) { 138 rc = 0;
145 QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n", 139 break;
146 mac, QETH_CARD_IFNAME(card)); 140 case IPA_RC_L2_UNSUPPORTED_CMD:
147 cmd->hdr.return_code = 0; 141 rc = -ENOSYS;
142 break;
143 case IPA_RC_L2_ADDR_TABLE_FULL:
144 rc = -ENOSPC;
145 break;
146 case IPA_RC_L2_DUP_MAC:
147 case IPA_RC_L2_DUP_LAYER3_MAC:
148 rc = -EEXIST;
149 break;
150 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
151 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
152 rc = -EPERM;
153 break;
154 case IPA_RC_L2_MAC_NOT_FOUND:
155 rc = -ENOENT;
156 break;
157 case -ENOMEM:
158 rc = -ENOMEM;
159 break;
160 default:
161 rc = -EIO;
162 break;
148 } 163 }
149 if (cmd->hdr.return_code) 164 return rc;
150 QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
151 mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
152 return 0;
153} 165}
154 166
155static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) 167static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
156{ 168{
157 QETH_CARD_TEXT(card, 2, "L2Sgmac"); 169 int rc;
158 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
159 qeth_l2_send_setgroupmac_cb);
160}
161
162static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
163 struct qeth_reply *reply,
164 unsigned long data)
165{
166 struct qeth_ipa_cmd *cmd;
167 __u8 *mac;
168 170
169 QETH_CARD_TEXT(card, 2, "L2Dgmacb"); 171 QETH_CARD_TEXT(card, 2, "L2Sgmac");
170 cmd = (struct qeth_ipa_cmd *) data; 172 rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
171 mac = &cmd->data.setdelmac.mac[0]; 173 IPA_CMD_SETGMAC));
172 if (cmd->hdr.return_code) 174 if (rc == -EEXIST)
173 QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n", 175 QETH_DBF_MESSAGE(2, "Group MAC %pM already exists on %s\n",
174 mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); 176 mac, QETH_CARD_IFNAME(card));
175 return 0; 177 else if (rc)
178 QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
179 mac, QETH_CARD_IFNAME(card), rc);
180 return rc;
176} 181}
177 182
178static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) 183static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
179{ 184{
185 int rc;
186
180 QETH_CARD_TEXT(card, 2, "L2Dgmac"); 187 QETH_CARD_TEXT(card, 2, "L2Dgmac");
181 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, 188 rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
182 qeth_l2_send_delgroupmac_cb); 189 IPA_CMD_DELGMAC));
190 if (rc)
191 QETH_DBF_MESSAGE(2,
192 "Could not delete group MAC %pM on %s: %d\n",
193 mac, QETH_CARD_IFNAME(card), rc);
194 return rc;
183} 195}
184 196
185static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) 197static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
197 mc->is_vmac = vmac; 209 mc->is_vmac = vmac;
198 210
199 if (vmac) { 211 if (vmac) {
200 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, 212 rc = qeth_setdel_makerc(card,
201 NULL); 213 qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
202 } else { 214 } else {
203 rc = qeth_l2_send_setgroupmac(card, mac); 215 rc = qeth_setdel_makerc(card,
216 qeth_l2_send_setgroupmac(card, mac));
204 } 217 }
205 218
206 if (!rc) 219 if (!rc)
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
218 if (del) { 231 if (del) {
219 if (mc->is_vmac) 232 if (mc->is_vmac)
220 qeth_l2_send_setdelmac(card, mc->mc_addr, 233 qeth_l2_send_setdelmac(card, mc->mc_addr,
221 IPA_CMD_DELVMAC, NULL); 234 IPA_CMD_DELVMAC);
222 else 235 else
223 qeth_l2_send_delgroupmac(card, mc->mc_addr); 236 qeth_l2_send_delgroupmac(card, mc->mc_addr);
224 } 237 }
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
291 304
292 QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); 305 QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
293 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); 306 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
307 if (!iob)
308 return -ENOMEM;
294 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 309 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
295 cmd->data.setdelvlan.vlan_id = i; 310 cmd->data.setdelvlan.vlan_id = i;
296 return qeth_send_ipa_cmd(card, iob, 311 return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
313{ 328{
314 struct qeth_card *card = dev->ml_priv; 329 struct qeth_card *card = dev->ml_priv;
315 struct qeth_vlan_vid *id; 330 struct qeth_vlan_vid *id;
331 int rc;
316 332
317 QETH_CARD_TEXT_(card, 4, "aid:%d", vid); 333 QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
318 if (!vid) 334 if (!vid)
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
328 id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); 344 id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
329 if (id) { 345 if (id) {
330 id->vid = vid; 346 id->vid = vid;
331 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); 347 rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
348 if (rc) {
349 kfree(id);
350 return rc;
351 }
332 spin_lock_bh(&card->vlanlock); 352 spin_lock_bh(&card->vlanlock);
333 list_add_tail(&id->list, &card->vid_list); 353 list_add_tail(&id->list, &card->vid_list);
334 spin_unlock_bh(&card->vlanlock); 354 spin_unlock_bh(&card->vlanlock);
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
343{ 363{
344 struct qeth_vlan_vid *id, *tmpid = NULL; 364 struct qeth_vlan_vid *id, *tmpid = NULL;
345 struct qeth_card *card = dev->ml_priv; 365 struct qeth_card *card = dev->ml_priv;
366 int rc = 0;
346 367
347 QETH_CARD_TEXT_(card, 4, "kid:%d", vid); 368 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
348 if (card->info.type == QETH_CARD_TYPE_OSM) { 369 if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
363 } 384 }
364 spin_unlock_bh(&card->vlanlock); 385 spin_unlock_bh(&card->vlanlock);
365 if (tmpid) { 386 if (tmpid) {
366 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); 387 rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
367 kfree(tmpid); 388 kfree(tmpid);
368 } 389 }
369 qeth_l2_set_multicast_list(card->dev); 390 qeth_l2_set_multicast_list(card->dev);
370 return 0; 391 return rc;
371} 392}
372 393
373static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) 394static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +560,62 @@ out:
539} 560}
540 561
541static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, 562static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
542 enum qeth_ipa_cmds ipacmd, 563 enum qeth_ipa_cmds ipacmd)
543 int (*reply_cb) (struct qeth_card *,
544 struct qeth_reply*,
545 unsigned long))
546{ 564{
547 struct qeth_ipa_cmd *cmd; 565 struct qeth_ipa_cmd *cmd;
548 struct qeth_cmd_buffer *iob; 566 struct qeth_cmd_buffer *iob;
549 567
550 QETH_CARD_TEXT(card, 2, "L2sdmac"); 568 QETH_CARD_TEXT(card, 2, "L2sdmac");
551 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); 569 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
570 if (!iob)
571 return -ENOMEM;
552 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 572 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
553 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; 573 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
554 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); 574 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
555 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); 575 return qeth_send_ipa_cmd(card, iob, NULL, NULL);
556} 576}
557 577
558static int qeth_l2_send_setmac_cb(struct qeth_card *card, 578static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
559 struct qeth_reply *reply,
560 unsigned long data)
561{ 579{
562 struct qeth_ipa_cmd *cmd; 580 int rc;
563 581
564 QETH_CARD_TEXT(card, 2, "L2Smaccb"); 582 QETH_CARD_TEXT(card, 2, "L2Setmac");
565 cmd = (struct qeth_ipa_cmd *) data; 583 rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
566 if (cmd->hdr.return_code) { 584 IPA_CMD_SETVMAC));
567 QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); 585 if (rc == 0) {
586 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
587 memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
588 dev_info(&card->gdev->dev,
589 "MAC address %pM successfully registered on device %s\n",
590 card->dev->dev_addr, card->dev->name);
591 } else {
568 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; 592 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
569 switch (cmd->hdr.return_code) { 593 switch (rc) {
570 case IPA_RC_L2_DUP_MAC: 594 case -EEXIST:
571 case IPA_RC_L2_DUP_LAYER3_MAC:
572 dev_warn(&card->gdev->dev, 595 dev_warn(&card->gdev->dev,
573 "MAC address %pM already exists\n", 596 "MAC address %pM already exists\n", mac);
574 cmd->data.setdelmac.mac);
575 break; 597 break;
576 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: 598 case -EPERM:
577 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
578 dev_warn(&card->gdev->dev, 599 dev_warn(&card->gdev->dev,
579 "MAC address %pM is not authorized\n", 600 "MAC address %pM is not authorized\n", mac);
580 cmd->data.setdelmac.mac);
581 break;
582 default:
583 break; 601 break;
584 } 602 }
585 } else {
586 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
587 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
588 OSA_ADDR_LEN);
589 dev_info(&card->gdev->dev,
590 "MAC address %pM successfully registered on device %s\n",
591 card->dev->dev_addr, card->dev->name);
592 }
593 return 0;
594}
595
596static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
597{
598 QETH_CARD_TEXT(card, 2, "L2Setmac");
599 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
600 qeth_l2_send_setmac_cb);
601}
602
603static int qeth_l2_send_delmac_cb(struct qeth_card *card,
604 struct qeth_reply *reply,
605 unsigned long data)
606{
607 struct qeth_ipa_cmd *cmd;
608
609 QETH_CARD_TEXT(card, 2, "L2Dmaccb");
610 cmd = (struct qeth_ipa_cmd *) data;
611 if (cmd->hdr.return_code) {
612 QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
613 return 0;
614 } 603 }
615 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; 604 return rc;
616
617 return 0;
618} 605}
619 606
620static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) 607static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
621{ 608{
609 int rc;
610
622 QETH_CARD_TEXT(card, 2, "L2Delmac"); 611 QETH_CARD_TEXT(card, 2, "L2Delmac");
623 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) 612 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
624 return 0; 613 return 0;
625 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, 614 rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
626 qeth_l2_send_delmac_cb); 615 IPA_CMD_DELVMAC));
616 if (rc == 0)
617 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
618 return rc;
627} 619}
628 620
629static int qeth_l2_request_initial_mac(struct qeth_card *card) 621static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
651 if (rc) { 643 if (rc) {
652 QETH_DBF_MESSAGE(2, "couldn't get MAC address on " 644 QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
653 "device %s: x%x\n", CARD_BUS_ID(card), rc); 645 "device %s: x%x\n", CARD_BUS_ID(card), rc);
654 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 646 QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
655 return rc; 647 return rc;
656 } 648 }
657 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); 649 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
687 return -ERESTARTSYS; 679 return -ERESTARTSYS;
688 } 680 }
689 rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); 681 rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
690 if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) 682 if (!rc || (rc == -ENOENT))
691 rc = qeth_l2_send_setmac(card, addr->sa_data); 683 rc = qeth_l2_send_setmac(card, addr->sa_data);
692 return rc ? -EINVAL : 0; 684 return rc ? -EINVAL : 0;
693} 685}
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
996 recover_flag = card->state; 988 recover_flag = card->state;
997 rc = qeth_core_hardsetup_card(card); 989 rc = qeth_core_hardsetup_card(card);
998 if (rc) { 990 if (rc) {
999 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 991 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
1000 rc = -ENODEV; 992 rc = -ENODEV;
1001 goto out_remove; 993 goto out_remove;
1002 } 994 }
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
1730 1722
1731 QETH_CARD_TEXT(card, 2, "brqsuppo"); 1723 QETH_CARD_TEXT(card, 2, "brqsuppo");
1732 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); 1724 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
1725 if (!iob)
1726 return;
1733 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1727 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1734 cmd->data.sbp.hdr.cmdlength = 1728 cmd->data.sbp.hdr.cmdlength =
1735 sizeof(struct qeth_ipacmd_sbp_hdr) + 1729 sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
1805 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) 1799 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
1806 return -EOPNOTSUPP; 1800 return -EOPNOTSUPP;
1807 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); 1801 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
1802 if (!iob)
1803 return -ENOMEM;
1808 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1804 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1809 cmd->data.sbp.hdr.cmdlength = 1805 cmd->data.sbp.hdr.cmdlength =
1810 sizeof(struct qeth_ipacmd_sbp_hdr); 1806 sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
1817 if (rc) 1813 if (rc)
1818 return rc; 1814 return rc;
1819 rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); 1815 rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
1820 if (rc) 1816 return rc;
1821 return rc;
1822 return 0;
1823} 1817}
1824EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); 1818EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
1825 1819
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
1873 if (!(card->options.sbp.supported_funcs & setcmd)) 1867 if (!(card->options.sbp.supported_funcs & setcmd))
1874 return -EOPNOTSUPP; 1868 return -EOPNOTSUPP;
1875 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); 1869 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
1870 if (!iob)
1871 return -ENOMEM;
1876 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1872 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1877 cmd->data.sbp.hdr.cmdlength = cmdlength; 1873 cmd->data.sbp.hdr.cmdlength = cmdlength;
1878 cmd->data.sbp.hdr.command_code = setcmd; 1874 cmd->data.sbp.hdr.command_code = setcmd;
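The qeth_l2 hunks above stop leaking raw IPA return codes (IPA_RC_L2_DUP_MAC, IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, ...) to callers and report standard errnos (-EEXIST, -EPERM, -ENOENT) instead, translated by the qeth_setdel_makerc() helper this series introduces elsewhere. A minimal sketch of such a translation, assuming only the IPA codes visible in the deleted callback (the real helper may map more codes):

	static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
	{
		if (retcode)
			QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
		switch (retcode) {
		case IPA_RC_SUCCESS:
			return 0;
		case IPA_RC_L2_DUP_MAC:
		case IPA_RC_L2_DUP_LAYER3_MAC:
			return -EEXIST;		/* address already registered */
		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
			return -EPERM;		/* hypervisor/adapter refused it */
		case IPA_RC_L2_MAC_NOT_FOUND:
			return -ENOENT;		/* nothing to delete */
		default:
			return -EIO;
		}
	}

With that in place, callers can test for -ENOENT instead of IPA_RC_L2_MAC_NOT_FOUND, as the qeth_l2_set_mac_address() hunk above does.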
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 625227ad16ee..e2a0ee845399 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
549 QETH_CARD_TEXT(card, 4, "setdelmc"); 549 QETH_CARD_TEXT(card, 4, "setdelmc");
550 550
551 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 551 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
552 if (!iob)
553 return -ENOMEM;
552 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 554 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
553 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); 555 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
554 if (addr->proto == QETH_PROT_IPV6) 556 if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
588 QETH_CARD_TEXT_(card, 4, "flags%02X", flags); 590 QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
589 591
590 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 592 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
593 if (!iob)
594 return -ENOMEM;
591 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 595 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
592 if (addr->proto == QETH_PROT_IPV6) { 596 if (addr->proto == QETH_PROT_IPV6) {
593 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, 597 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
616 620
617 QETH_CARD_TEXT(card, 4, "setroutg"); 621 QETH_CARD_TEXT(card, 4, "setroutg");
618 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); 622 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
623 if (!iob)
624 return -ENOMEM;
619 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 625 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
620 cmd->data.setrtg.type = (type); 626 cmd->data.setrtg.type = (type);
621 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 627 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
1049 QETH_CARD_TEXT(card, 4, "getasscm"); 1055 QETH_CARD_TEXT(card, 4, "getasscm");
1050 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); 1056 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
1051 1057
1052 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1058 if (iob) {
1053 cmd->data.setassparms.hdr.assist_no = ipa_func; 1059 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1054 cmd->data.setassparms.hdr.length = 8 + len; 1060 cmd->data.setassparms.hdr.assist_no = ipa_func;
1055 cmd->data.setassparms.hdr.command_code = cmd_code; 1061 cmd->data.setassparms.hdr.length = 8 + len;
1056 cmd->data.setassparms.hdr.return_code = 0; 1062 cmd->data.setassparms.hdr.command_code = cmd_code;
1057 cmd->data.setassparms.hdr.seq_no = 0; 1063 cmd->data.setassparms.hdr.return_code = 0;
1064 cmd->data.setassparms.hdr.seq_no = 0;
1065 }
1058 1066
1059 return iob; 1067 return iob;
1060} 1068}
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
1090 QETH_CARD_TEXT(card, 4, "simassp6"); 1098 QETH_CARD_TEXT(card, 4, "simassp6");
1091 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 1099 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1092 0, QETH_PROT_IPV6); 1100 0, QETH_PROT_IPV6);
1101 if (!iob)
1102 return -ENOMEM;
1093 rc = qeth_l3_send_setassparms(card, iob, 0, 0, 1103 rc = qeth_l3_send_setassparms(card, iob, 0, 0,
1094 qeth_l3_default_setassparms_cb, NULL); 1104 qeth_l3_default_setassparms_cb, NULL);
1095 return rc; 1105 return rc;
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
1108 length = sizeof(__u32); 1118 length = sizeof(__u32);
1109 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 1119 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1110 length, QETH_PROT_IPV4); 1120 length, QETH_PROT_IPV4);
1121 if (!iob)
1122 return -ENOMEM;
1111 rc = qeth_l3_send_setassparms(card, iob, length, data, 1123 rc = qeth_l3_send_setassparms(card, iob, length, data,
1112 qeth_l3_default_setassparms_cb, NULL); 1124 qeth_l3_default_setassparms_cb, NULL);
1113 return rc; 1125 return rc;
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
1494 1506
1495 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1507 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1496 QETH_PROT_IPV6); 1508 QETH_PROT_IPV6);
1509 if (!iob)
1510 return -ENOMEM;
1497 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1511 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1498 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1512 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1499 card->info.unique_id; 1513 card->info.unique_id;
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
1537 1551
1538 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1552 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1539 QETH_PROT_IPV6); 1553 QETH_PROT_IPV6);
1554 if (!iob)
1555 return -ENOMEM;
1540 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1556 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1541 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1557 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1542 card->info.unique_id; 1558 card->info.unique_id;
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
1611 QETH_DBF_TEXT(SETUP, 2, "diagtrac"); 1627 QETH_DBF_TEXT(SETUP, 2, "diagtrac");
1612 1628
1613 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 1629 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
1630 if (!iob)
1631 return -ENOMEM;
1614 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1632 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1615 cmd->data.diagass.subcmd_len = 16; 1633 cmd->data.diagass.subcmd_len = 16;
1616 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; 1634 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
2442 IPA_CMD_ASS_ARP_QUERY_INFO, 2460 IPA_CMD_ASS_ARP_QUERY_INFO,
2443 sizeof(struct qeth_arp_query_data) - sizeof(char), 2461 sizeof(struct qeth_arp_query_data) - sizeof(char),
2444 prot); 2462 prot);
2463 if (!iob)
2464 return -ENOMEM;
2445 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 2465 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2446 cmd->data.setassparms.data.query_arp.request_bits = 0x000F; 2466 cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
2447 cmd->data.setassparms.data.query_arp.reply_bits = 0; 2467 cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
2535 IPA_CMD_ASS_ARP_ADD_ENTRY, 2555 IPA_CMD_ASS_ARP_ADD_ENTRY,
2536 sizeof(struct qeth_arp_cache_entry), 2556 sizeof(struct qeth_arp_cache_entry),
2537 QETH_PROT_IPV4); 2557 QETH_PROT_IPV4);
2558 if (!iob)
2559 return -ENOMEM;
2538 rc = qeth_l3_send_setassparms(card, iob, 2560 rc = qeth_l3_send_setassparms(card, iob,
2539 sizeof(struct qeth_arp_cache_entry), 2561 sizeof(struct qeth_arp_cache_entry),
2540 (unsigned long) entry, 2562 (unsigned long) entry,
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
2574 IPA_CMD_ASS_ARP_REMOVE_ENTRY, 2596 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
2575 12, 2597 12,
2576 QETH_PROT_IPV4); 2598 QETH_PROT_IPV4);
2599 if (!iob)
2600 return -ENOMEM;
2577 rc = qeth_l3_send_setassparms(card, iob, 2601 rc = qeth_l3_send_setassparms(card, iob,
2578 12, (unsigned long)buf, 2602 12, (unsigned long)buf,
2579 qeth_l3_default_setassparms_cb, NULL); 2603 qeth_l3_default_setassparms_cb, NULL);
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
3262 3286
3263static int qeth_l3_setup_netdev(struct qeth_card *card) 3287static int qeth_l3_setup_netdev(struct qeth_card *card)
3264{ 3288{
3289 int rc;
3290
3265 if (card->info.type == QETH_CARD_TYPE_OSD || 3291 if (card->info.type == QETH_CARD_TYPE_OSD ||
3266 card->info.type == QETH_CARD_TYPE_OSX) { 3292 card->info.type == QETH_CARD_TYPE_OSX) {
3267 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 3293 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3293 return -ENODEV; 3319 return -ENODEV;
3294 card->dev->flags |= IFF_NOARP; 3320 card->dev->flags |= IFF_NOARP;
3295 card->dev->netdev_ops = &qeth_l3_netdev_ops; 3321 card->dev->netdev_ops = &qeth_l3_netdev_ops;
3296 qeth_l3_iqd_read_initial_mac(card); 3322 rc = qeth_l3_iqd_read_initial_mac(card);
3323 if (rc)
3324 return rc;
3297 if (card->options.hsuid[0]) 3325 if (card->options.hsuid[0])
3298 memcpy(card->dev->perm_addr, card->options.hsuid, 9); 3326 memcpy(card->dev->perm_addr, card->options.hsuid, 9);
3299 } else 3327 } else
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3360 recover_flag = card->state; 3388 recover_flag = card->state;
3361 rc = qeth_core_hardsetup_card(card); 3389 rc = qeth_core_hardsetup_card(card);
3362 if (rc) { 3390 if (rc) {
3363 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3391 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
3364 rc = -ENODEV; 3392 rc = -ENODEV;
3365 goto out_remove; 3393 goto out_remove;
3366 } 3394 }
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3401contin: 3429contin:
3402 rc = qeth_l3_setadapter_parms(card); 3430 rc = qeth_l3_setadapter_parms(card);
3403 if (rc) 3431 if (rc)
3404 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3432 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
3405 if (!card->options.sniffer) { 3433 if (!card->options.sniffer) {
3406 rc = qeth_l3_start_ipassists(card); 3434 rc = qeth_l3_start_ipassists(card);
3407 if (rc) { 3435 if (rc) {
@@ -3410,10 +3438,10 @@ contin:
3410 } 3438 }
3411 rc = qeth_l3_setrouting_v4(card); 3439 rc = qeth_l3_setrouting_v4(card);
3412 if (rc) 3440 if (rc)
3413 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 3441 QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
3414 rc = qeth_l3_setrouting_v6(card); 3442 rc = qeth_l3_setrouting_v6(card);
3415 if (rc) 3443 if (rc)
3416 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 3444 QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
3417 } 3445 }
3418 netif_tx_disable(card->dev); 3446 netif_tx_disable(card->dev);
3419 3447
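Every qeth_l3 hunk above applies the same hardening: qeth_get_ipacmd_buffer() (and the qeth_l3_get_setassparms_cmd() wrapper around it) can return NULL when no command buffer is available, and each caller now fails with -ENOMEM instead of dereferencing the NULL iob. The recurring guard, shown once for reference:

	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
	if (!iob)
		return -ENOMEM;		/* command buffer pool exhausted */
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);

The qeth_l3_setup_netdev() hunk completes the picture: qeth_l3_iqd_read_initial_mac() can now fail, so its return value is propagated rather than ignored.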
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 1dba62c5cf6a..1efebc9eedfb 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref)
136 struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; 136 struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
137 struct scsi_device *sdev = scsi_dh_data->sdev; 137 struct scsi_device *sdev = scsi_dh_data->sdev;
138 138
139 scsi_dh->detach(sdev);
140
139 spin_lock_irq(sdev->request_queue->queue_lock); 141 spin_lock_irq(sdev->request_queue->queue_lock);
140 sdev->scsi_dh_data = NULL; 142 sdev->scsi_dh_data = NULL;
141 spin_unlock_irq(sdev->request_queue->queue_lock); 143 spin_unlock_irq(sdev->request_queue->queue_lock);
142 144
143 scsi_dh->detach(sdev);
144 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); 145 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
145 module_put(scsi_dh->module); 146 module_put(scsi_dh->module);
146} 147}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index e02885451425..9b3829931f40 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)
986 return -ENXIO; 986 return -ENXIO;
987 if (!get_device(&sdev->sdev_gendev)) 987 if (!get_device(&sdev->sdev_gendev))
988 return -ENXIO; 988 return -ENXIO;
989 /* We can fail this if we're doing SCSI operations 989 /* We can fail try_module_get if we're doing SCSI operations
990 * from module exit (like cache flush) */ 990 * from module exit (like cache flush) */
991 try_module_get(sdev->host->hostt->module); 991 __module_get(sdev->host->hostt->module);
992 992
993 return 0; 993 return 0;
994} 994}
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);
1004 */ 1004 */
1005void scsi_device_put(struct scsi_device *sdev) 1005void scsi_device_put(struct scsi_device *sdev)
1006{ 1006{
1007#ifdef CONFIG_MODULE_UNLOAD 1007 module_put(sdev->host->hostt->module);
1008 struct module *module = sdev->host->hostt->module;
1009
1010 /* The module refcount will be zero if scsi_device_get()
1011 * was called from a module removal routine */
1012 if (module && module_refcount(module) != 0)
1013 module_put(module);
1014#endif
1015 put_device(&sdev->sdev_gendev); 1008 put_device(&sdev->sdev_gendev);
1016} 1009}
1017EXPORT_SYMBOL(scsi_device_put); 1010EXPORT_SYMBOL(scsi_device_put);
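The scsi.c change replaces the conditional release (module_put() only when module_refcount() != 0) with a strictly balanced pair: scsi_device_get() now takes the host-template module reference unconditionally via __module_get(), and scsi_device_put() drops it unconditionally. Unlike try_module_get(), __module_get() cannot fail; the updated comment records the one case (SCSI operations issued from module exit, such as a cache flush) where try_module_get() would have refused the reference. A typical caller pattern, for illustration:

	/* Every successful scsi_device_get() now pins the LLD module
	 * until the matching scsi_device_put(). */
	if (scsi_device_get(sdev))
		return -ENXIO;
	/* ... issue commands against sdev ... */
	scsi_device_put(sdev);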
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 399516925d80..05ea0d49a3a3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
2800 */ 2800 */
2801 sd_set_flush_flag(sdkp); 2801 sd_set_flush_flag(sdkp);
2802 2802
2803 max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), 2803 max_xfer = sdkp->max_xfer_blocks;
2804 sdkp->max_xfer_blocks);
2805 max_xfer <<= ilog2(sdp->sector_size) - 9; 2804 max_xfer <<= ilog2(sdp->sector_size) - 9;
2805
2806 max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
2807 max_xfer);
2806 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); 2808 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
2807 set_capacity(disk, sdkp->capacity); 2809 set_capacity(disk, sdkp->capacity);
2808 sd_config_write_same(sdkp); 2810 sd_config_write_same(sdkp);
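The sd.c hunk fixes an ordering bug in the transfer-size clamp: sdkp->max_xfer_blocks is in device logical blocks, while queue_max_hw_sectors() is in 512-byte sectors, so the shift by ilog2(sector_size) - 9 must happen before the min_not_zero() comparison, not after. A worked example with assumed values (4 KiB logical blocks, device limit 0x10000 blocks, queue limit 2048 sectors):

	/* shift = ilog2(4096) - 9 = 3 */
	/* old: min(2048, 0x10000) = 2048, then 2048 << 3 = 16384 sectors -- 8x too large */
	/* new: 0x10000 << 3 = 0x80000, then min(2048, 0x80000) = 2048 sectors -- correct */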
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 99829985c1a1..95ccedabba4f 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -185,6 +185,16 @@ config SPI_DAVINCI
185 help 185 help
186 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. 186 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
187 187
188config SPI_DLN2
189 tristate "Diolan DLN-2 USB SPI adapter"
190 depends on MFD_DLN2
191 help
192 If you say yes to this option, support will be included for Diolan
193 DLN2, a USB to SPI interface.
194
195 This driver can also be built as a module. If so, the module
196 will be called spi-dln2.
197
188config SPI_EFM32 198config SPI_EFM32
189 tristate "EFM32 SPI controller" 199 tristate "EFM32 SPI controller"
190 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) 200 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
@@ -279,7 +289,7 @@ config SPI_FSL_CPM
279 depends on FSL_SOC 289 depends on FSL_SOC
280 290
281config SPI_FSL_SPI 291config SPI_FSL_SPI
282 bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller" 292 tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
283 depends on OF 293 depends on OF
284 select SPI_FSL_LIB 294 select SPI_FSL_LIB
285 select SPI_FSL_CPM if FSL_SOC 295 select SPI_FSL_CPM if FSL_SOC
@@ -292,7 +302,6 @@ config SPI_FSL_SPI
292 302
293config SPI_FSL_DSPI 303config SPI_FSL_DSPI
294 tristate "Freescale DSPI controller" 304 tristate "Freescale DSPI controller"
295 select SPI_BITBANG
296 select REGMAP_MMIO 305 select REGMAP_MMIO
297 depends on SOC_VF610 || COMPILE_TEST 306 depends on SOC_VF610 || COMPILE_TEST
298 help 307 help
@@ -300,7 +309,7 @@ config SPI_FSL_DSPI
300 mode. VF610 platform uses the controller. 309 mode. VF610 platform uses the controller.
301 310
302config SPI_FSL_ESPI 311config SPI_FSL_ESPI
303 bool "Freescale eSPI controller" 312 tristate "Freescale eSPI controller"
304 depends on FSL_SOC 313 depends on FSL_SOC
305 select SPI_FSL_LIB 314 select SPI_FSL_LIB
306 help 315 help
@@ -460,7 +469,6 @@ config SPI_S3C24XX_FIQ
460config SPI_S3C64XX 469config SPI_S3C64XX
461 tristate "Samsung S3C64XX series type SPI" 470 tristate "Samsung S3C64XX series type SPI"
462 depends on (PLAT_SAMSUNG || ARCH_EXYNOS) 471 depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
463 select S3C64XX_PL080 if ARCH_S3C64XX
464 help 472 help
465 SPI driver for Samsung S3C64XX and newer SoCs. 473 SPI driver for Samsung S3C64XX and newer SoCs.
466 474
@@ -503,6 +511,13 @@ config SPI_SIRF
503 help 511 help
504 SPI driver for CSR SiRFprimaII SoCs 512 SPI driver for CSR SiRFprimaII SoCs
505 513
514config SPI_ST_SSC4
515 tristate "STMicroelectronics SPI SSC-based driver"
516 depends on ARCH_STI
517 help
518 STMicroelectronics SoCs support for SPI. If you say yes to
519 this option, support will be included for the SSC driven SPI.
520
506config SPI_SUN4I 521config SPI_SUN4I
507 tristate "Allwinner A10 SoCs SPI controller" 522 tristate "Allwinner A10 SoCs SPI controller"
508 depends on ARCH_SUNXI || COMPILE_TEST 523 depends on ARCH_SUNXI || COMPILE_TEST
@@ -595,7 +610,6 @@ config SPI_XTENSA_XTFPGA
595 16 bit words in SPI mode 0, automatically asserting CS on transfer 610 16 bit words in SPI mode 0, automatically asserting CS on transfer
596 start and deasserting on end. 611 start and deasserting on end.
597 612
598
599config SPI_NUC900 613config SPI_NUC900
600 tristate "Nuvoton NUC900 series SPI" 614 tristate "Nuvoton NUC900 series SPI"
601 depends on ARCH_W90X900 615 depends on ARCH_W90X900
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6b9d2ac629cc..d8cbf654976b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
27obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o 27obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
28obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o 28obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
29obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o 29obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
30obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
30obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o 31obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
31obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o 32obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
32obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o 33obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
@@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
76obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o 77obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
77obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o 78obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
78obj-$(CONFIG_SPI_SIRF) += spi-sirf.o 79obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
80obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
79obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o 81obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
80obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o 82obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
81obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o 83obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 23d8f5f56579..9af7841f2e8c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1046,6 +1046,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
1046 struct atmel_spi_device *asd; 1046 struct atmel_spi_device *asd;
1047 int timeout; 1047 int timeout;
1048 int ret; 1048 int ret;
1049 unsigned long dma_timeout;
1049 1050
1050 as = spi_master_get_devdata(master); 1051 as = spi_master_get_devdata(master);
1051 1052
@@ -1103,15 +1104,12 @@ static int atmel_spi_one_transfer(struct spi_master *master,
1103 1104
1104 /* interrupts are disabled, so free the lock for schedule */ 1105 /* interrupts are disabled, so free the lock for schedule */
1105 atmel_spi_unlock(as); 1106 atmel_spi_unlock(as);
1106 ret = wait_for_completion_timeout(&as->xfer_completion, 1107 dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
1107 SPI_DMA_TIMEOUT); 1108 SPI_DMA_TIMEOUT);
1108 atmel_spi_lock(as); 1109 atmel_spi_lock(as);
1109 if (WARN_ON(ret == 0)) { 1110 if (WARN_ON(dma_timeout == 0)) {
1110 dev_err(&spi->dev, 1111 dev_err(&spi->dev, "spi transfer timeout\n");
1111 "spi trasfer timeout, err %d\n", ret);
1112 as->done_status = -EIO; 1112 as->done_status = -EIO;
1113 } else {
1114 ret = 0;
1115 } 1113 }
1116 1114
1117 if (as->done_status) 1115 if (as->done_status)
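The spi-atmel.c fix is about types as much as the message typo: wait_for_completion_timeout() returns an unsigned long (0 on timeout, otherwise the jiffies remaining), never a negative errno, so funneling it through the signed int ret both corrupted ret's later use as a status code and made the old "err %d" print meaningless. The canonical pattern, for reference (done being some initialized struct completion):

	/* wait_for_completion_timeout() never returns an errno:
	 * 0 means timed out, nonzero means jiffies left. */
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(500));
	if (!time_left)
		return -ETIMEDOUT;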
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 326f47973684..f45e085c01a6 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -15,10 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 18 */
23 19
24#include <linux/init.h> 20#include <linux/init.h>
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 98aab457b24d..419a782ab6d5 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -17,10 +17,6 @@
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */ 20 */
25 21
26#include <linux/clk.h> 22#include <linux/clk.h>
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index c20530982e26..e73e2b052c9c 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
20 */ 16 */
21 17
22#include <linux/kernel.h> 18#include <linux/kernel.h>
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index dc7d2c2d643e..5ef6638d5e8a 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index ee4f91ccd8fd..9a95862986c8 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20#include <linux/kernel.h> 16#include <linux/kernel.h>
21#include <linux/init.h> 17#include <linux/init.h>
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 41b5dc4445f6..688956ff5095 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -12,11 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
19 *
20*/ 15*/
21 16
22#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index b3707badb1e5..5e991065f5b0 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 14 */
19 15
20#include <linux/interrupt.h> 16#include <linux/interrupt.h>
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
new file mode 100644
index 000000000000..3b7d91d94fea
--- /dev/null
+++ b/drivers/spi/spi-dln2.c
@@ -0,0 +1,881 @@
1/*
2 * Driver for the Diolan DLN-2 USB-SPI adapter
3 *
4 * Copyright (c) 2014 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/mfd/dln2.h>
15#include <linux/spi/spi.h>
16#include <linux/pm_runtime.h>
17#include <asm/unaligned.h>
18
19#define DLN2_SPI_MODULE_ID 0x02
20#define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
21
22/* SPI commands */
23#define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00)
24#define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11)
25#define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12)
26#define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13)
27#define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14)
28#define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15)
29#define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16)
30#define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17)
31#define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18)
32#define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19)
33#define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A)
34#define DLN2_SPI_READ DLN2_SPI_CMD(0x1B)
35#define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C)
36#define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20)
37#define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21)
38#define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22)
39#define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23)
40#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24)
41#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25)
42#define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26)
43#define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27)
44#define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28)
45#define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B)
46#define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C)
47#define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D)
48#define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E)
49#define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F)
50#define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30)
51#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31)
52#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32)
53#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33)
54#define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34)
55#define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35)
56#define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36)
57#define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37)
58#define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38)
59#define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39)
60#define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A)
61#define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40)
62#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41)
63#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42)
64#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43)
65#define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44)
66#define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45)
67#define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46)
68#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47)
69#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48)
70#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49)
71#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A)
72#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B)
73#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C)
74
75#define DLN2_SPI_MAX_XFER_SIZE 256
76#define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16)
77#define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0)
78#define DLN2_TRANSFERS_WAIT_COMPLETE 1
79#define DLN2_TRANSFERS_CANCEL 0
80#define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000
81
82struct dln2_spi {
83 struct platform_device *pdev;
84 struct spi_master *master;
85 u8 port;
86
87 /*
88 * This buffer will be used mainly for read/write operations. Since
89 * they're quite large, we cannot use the stack. Protection is not
90 * needed because all SPI communication is serialized by the SPI core.
91 */
92 void *buf;
93
94 u8 bpw;
95 u32 speed;
96 u16 mode;
97 u8 cs;
98};
99
100/*
101 * Enable/Disable SPI module. The disable command will wait for transfers to
102 * complete first.
103 */
104static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
105{
106 u16 cmd;
107 struct {
108 u8 port;
109 u8 wait_for_completion;
110 } tx;
111 unsigned len = sizeof(tx);
112
113 tx.port = dln2->port;
114
115 if (enable) {
116 cmd = DLN2_SPI_ENABLE;
117 len -= sizeof(tx.wait_for_completion);
118 } else {
119 tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
120 cmd = DLN2_SPI_DISABLE;
121 }
122
123 return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
124}
125
126/*
127 * Select/unselect multiple CS lines. The selected lines will be automatically
128 * toggled LOW/HIGH by the board firmware during transfers, provided they're
129 * enabled first.
130 *
131 * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
132 * will toggle the lines LOW/HIGH automatically.
133 */
134static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
135{
136 struct {
137 u8 port;
138 u8 cs;
139 } tx;
140
141 tx.port = dln2->port;
142
143 /*
144 * According to Diolan docs, "a slave device can be selected by changing
145 * the corresponding bit value to 0". The rest must be set to 1. Hence
146 * the bitwise NOT in front.
147 */
148 tx.cs = ~cs_mask;
149
150 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
151}
152
153/*
154 * Select one CS line. The other lines will be un-selected.
155 */
156static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
157{
158 return dln2_spi_cs_set(dln2, BIT(cs));
159}
160
161/*
162 * Enable/disable CS lines for usage. The module has to be disabled first.
163 */
164static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
165{
166 struct {
167 u8 port;
168 u8 cs;
169 } tx;
170 u16 cmd;
171
172 tx.port = dln2->port;
173 tx.cs = cs_mask;
174 cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
175
176 return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
177}
178
179static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
180{
181 u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
182
183 return dln2_spi_cs_enable(dln2, cs_mask, enable);
184}
185
186static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
187{
188 int ret;
189 struct {
190 u8 port;
191 } tx;
192 struct {
193 __le16 cs_count;
194 } rx;
195 unsigned rx_len = sizeof(rx);
196
197 tx.port = dln2->port;
198 ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
199 &rx, &rx_len);
200 if (ret < 0)
201 return ret;
202 if (rx_len < sizeof(rx))
203 return -EPROTO;
204
205 *cs_num = le16_to_cpu(rx.cs_count);
206
207 dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
208
209 return 0;
210}
211
212static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
213{
214 int ret;
215 struct {
216 u8 port;
217 } tx;
218 struct {
219 __le32 speed;
220 } rx;
221 unsigned rx_len = sizeof(rx);
222
223 tx.port = dln2->port;
224
225 ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
226 if (ret < 0)
227 return ret;
228 if (rx_len < sizeof(rx))
229 return -EPROTO;
230
231 *freq = le32_to_cpu(rx.speed);
232
233 return 0;
234}
235
236/*
237 * Get bus min/max frequencies.
238 */
239static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
240{
241 int ret;
242
243 ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
244 if (ret < 0)
245 return ret;
246
247 ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
248 if (ret < 0)
249 return ret;
250
251 dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
252 *fmin, *fmax);
253
254 return 0;
255}
256
257/*
258 * Set the bus speed. The module will automatically round down to the closest
 259 * available frequency and return it. The module has to be disabled first.
260 */
261static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
262{
263 int ret;
264 struct {
265 u8 port;
266 __le32 speed;
267 } __packed tx;
268 struct {
269 __le32 speed;
270 } rx;
271 int rx_len = sizeof(rx);
272
273 tx.port = dln2->port;
274 tx.speed = cpu_to_le32(speed);
275
276 ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
277 &rx, &rx_len);
278 if (ret < 0)
279 return ret;
280 if (rx_len < sizeof(rx))
281 return -EPROTO;
282
283 return 0;
284}
285
286/*
287 * Change CPOL & CPHA. The module has to be disabled first.
288 */
289static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
290{
291 struct {
292 u8 port;
293 u8 mode;
294 } tx;
295
296 tx.port = dln2->port;
297 tx.mode = mode;
298
299 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
300}
301
302/*
303 * Change frame size. The module has to be disabled first.
304 */
305static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
306{
307 struct {
308 u8 port;
309 u8 bpw;
310 } tx;
311
312 tx.port = dln2->port;
313 tx.bpw = bpw;
314
315 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
316 &tx, sizeof(tx));
317}
318
319static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
320 u32 *bpw_mask)
321{
322 int ret;
323 struct {
324 u8 port;
325 } tx;
326 struct {
327 u8 count;
328 u8 frame_sizes[36];
329 } *rx = dln2->buf;
330 unsigned rx_len = sizeof(*rx);
331 int i;
332
333 tx.port = dln2->port;
334
335 ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
336 &tx, sizeof(tx), rx, &rx_len);
337 if (ret < 0)
338 return ret;
339 if (rx_len < sizeof(*rx))
340 return -EPROTO;
341 if (rx->count > ARRAY_SIZE(rx->frame_sizes))
342 return -EPROTO;
343
344 *bpw_mask = 0;
345 for (i = 0; i < rx->count; i++)
346 *bpw_mask |= BIT(rx->frame_sizes[i] - 1);
347
348 dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
349
350 return 0;
351}
352
353/*
 354 * Copy the data to the DLN2 buffer and change the byte order to LE, as
 355 * required by the DLN2 module. The SPI core makes sure the data length is a multiple of word
356 * size.
357 */
358static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
359{
360#ifdef __LITTLE_ENDIAN
361 memcpy(dln2_buf, src, len);
362#else
363 if (bpw <= 8) {
364 memcpy(dln2_buf, src, len);
365 } else if (bpw <= 16) {
366 __le16 *d = (__le16 *)dln2_buf;
367 u16 *s = (u16 *)src;
368
369 len = len / 2;
370 while (len--)
371 *d++ = cpu_to_le16p(s++);
372 } else {
373 __le32 *d = (__le32 *)dln2_buf;
374 u32 *s = (u32 *)src;
375
376 len = len / 4;
377 while (len--)
378 *d++ = cpu_to_le32p(s++);
379 }
380#endif
381
382 return 0;
383}
384
385/*
386 * Copy the data from DLN2 buffer and convert to CPU byte order since the DLN2
387 * buffer is LE ordered. SPI core makes sure that the data length is a multiple
388 * of word size. The RX dln2_buf is 2 byte aligned so, for BE, we have to make
389 * sure we avoid unaligned accesses for 32 bit case.
390 */
391static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
392{
393#ifdef __LITTLE_ENDIAN
394 memcpy(dest, dln2_buf, len);
395#else
396 if (bpw <= 8) {
397 memcpy(dest, dln2_buf, len);
398 } else if (bpw <= 16) {
399 u16 *d = (u16 *)dest;
400 __le16 *s = (__le16 *)dln2_buf;
401
402 len = len / 2;
403 while (len--)
404 *d++ = le16_to_cpup(s++);
405 } else {
406 u32 *d = (u32 *)dest;
407 __le32 *s = (__le32 *)dln2_buf;
408
409 len = len / 4;
410 while (len--)
411 *d++ = get_unaligned_le32(s++);
412 }
413#endif
414
415 return 0;
416}
417
418/*
419 * Perform one write operation.
420 */
421static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
422 u16 data_len, u8 attr)
423{
424 struct {
425 u8 port;
426 __le16 size;
427 u8 attr;
428 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
429 } __packed *tx = dln2->buf;
430 unsigned tx_len;
431
432 BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
433
434 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
435 return -EINVAL;
436
437 tx->port = dln2->port;
438 tx->size = cpu_to_le16(data_len);
439 tx->attr = attr;
440
441 dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
442
443 tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
444 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
445}
446
447/*
448 * Perform one read operation.
449 */
450static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
451 u16 data_len, u8 attr)
452{
453 int ret;
454 struct {
455 u8 port;
456 __le16 size;
457 u8 attr;
458 } __packed tx;
459 struct {
460 __le16 size;
461 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
462 } __packed *rx = dln2->buf;
463 unsigned rx_len = sizeof(*rx);
464
465 BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
466
467 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
468 return -EINVAL;
469
470 tx.port = dln2->port;
471 tx.size = cpu_to_le16(data_len);
472 tx.attr = attr;
473
474 ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
475 rx, &rx_len);
476 if (ret < 0)
477 return ret;
478 if (rx_len < sizeof(rx->size) + data_len)
479 return -EPROTO;
480 if (le16_to_cpu(rx->size) != data_len)
481 return -EPROTO;
482
483 dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
484
485 return 0;
486}
487
488/*
489 * Perform one write & read operation.
490 */
491static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
492 u8 *rx_data, u16 data_len, u8 attr)
493{
494 int ret;
495 struct {
496 u8 port;
497 __le16 size;
498 u8 attr;
499 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
500 } __packed *tx;
501 struct {
502 __le16 size;
503 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
504 } __packed *rx;
505 unsigned tx_len, rx_len;
506
507 BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
508 sizeof(*rx) > DLN2_SPI_BUF_SIZE);
509
510 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
511 return -EINVAL;
512
513 /*
514 * Since this is a pseudo full-duplex communication, we're perfectly
515 * safe to use the same buffer for both tx and rx. When DLN2 sends the
516 * response back, with the rx data, we don't need the tx buffer anymore.
517 */
518 tx = dln2->buf;
519 rx = dln2->buf;
520
521 tx->port = dln2->port;
522 tx->size = cpu_to_le16(data_len);
523 tx->attr = attr;
524
525 dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
526
527 tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
528 rx_len = sizeof(*rx);
529
530 ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
531 rx, &rx_len);
532 if (ret < 0)
533 return ret;
534 if (rx_len < sizeof(rx->size) + data_len)
535 return -EPROTO;
536 if (le16_to_cpu(rx->size) != data_len)
537 return -EPROTO;
538
539 dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
540
541 return 0;
542}
543
544/*
545 * Read/Write wrapper. It will automatically split an operation into multiple
546 * single ones due to device buffer constraints.
547 */
548static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
549 u8 *rx_data, u16 data_len, u8 attr) {
550 int ret;
551 u16 len;
552 u8 temp_attr;
553 u16 remaining = data_len;
554 u16 offset;
555
556 do {
557 if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
558 len = DLN2_SPI_MAX_XFER_SIZE;
559 temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
560 } else {
561 len = remaining;
562 temp_attr = attr;
563 }
564
565 offset = data_len - remaining;
566
567 if (tx_data && rx_data) {
568 ret = dln2_spi_read_write_one(dln2,
569 tx_data + offset,
570 rx_data + offset,
571 len, temp_attr);
572 } else if (tx_data) {
573 ret = dln2_spi_write_one(dln2,
574 tx_data + offset,
575 len, temp_attr);
576 } else if (rx_data) {
577 ret = dln2_spi_read_one(dln2,
578 rx_data + offset,
579 len, temp_attr);
580 } else {
581 return -EINVAL;
582 }
583
584 if (ret < 0)
585 return ret;
586
587 remaining -= len;
588 } while (remaining);
589
590 return 0;
591}
592
593static int dln2_spi_prepare_message(struct spi_master *master,
594 struct spi_message *message)
595{
596 int ret;
597 struct dln2_spi *dln2 = spi_master_get_devdata(master);
598 struct spi_device *spi = message->spi;
599
600 if (dln2->cs != spi->chip_select) {
601 ret = dln2_spi_cs_set_one(dln2, spi->chip_select);
602 if (ret < 0)
603 return ret;
604
605 dln2->cs = spi->chip_select;
606 }
607
608 return 0;
609}
610
611static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
612 u8 bpw, u8 mode)
613{
614 int ret;
615 bool bus_setup_change;
616
617 bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
618 dln2->bpw != bpw;
619
620 if (!bus_setup_change)
621 return 0;
622
623 ret = dln2_spi_enable(dln2, false);
624 if (ret < 0)
625 return ret;
626
627 if (dln2->speed != speed) {
628 ret = dln2_spi_set_speed(dln2, speed);
629 if (ret < 0)
630 return ret;
631
632 dln2->speed = speed;
633 }
634
635 if (dln2->mode != mode) {
636 ret = dln2_spi_set_mode(dln2, mode & 0x3);
637 if (ret < 0)
638 return ret;
639
640 dln2->mode = mode;
641 }
642
643 if (dln2->bpw != bpw) {
644 ret = dln2_spi_set_bpw(dln2, bpw);
645 if (ret < 0)
646 return ret;
647
648 dln2->bpw = bpw;
649 }
650
651 return dln2_spi_enable(dln2, true);
652}
653
654static int dln2_spi_transfer_one(struct spi_master *master,
655 struct spi_device *spi,
656 struct spi_transfer *xfer)
657{
658 struct dln2_spi *dln2 = spi_master_get_devdata(master);
659 int status;
660 u8 attr = 0;
661
662 status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
663 xfer->bits_per_word,
664 spi->mode);
665 if (status < 0) {
666 dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
667 return status;
668 }
669
670 if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
671 attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
672
673 status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
674 xfer->len, attr);
675 if (status < 0)
676 dev_err(&dln2->pdev->dev, "write/read failed!\n");
677
678 return status;
679}
680
681static int dln2_spi_probe(struct platform_device *pdev)
682{
683 struct spi_master *master;
684 struct dln2_spi *dln2;
685 struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
686 int ret;
687
688 master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
689 if (!master)
690 return -ENOMEM;
691
692 platform_set_drvdata(pdev, master);
693
694 dln2 = spi_master_get_devdata(master);
695
696 dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
697 if (!dln2->buf) {
698 ret = -ENOMEM;
699 goto exit_free_master;
700 }
701
702 dln2->master = master;
703 dln2->pdev = pdev;
704 dln2->port = pdata->port;
705 /* cs/mode can never be 0xff, so the first transfer will set them */
706 dln2->cs = 0xff;
707 dln2->mode = 0xff;
708
709 /* disable SPI module before continuing with the setup */
710 ret = dln2_spi_enable(dln2, false);
711 if (ret < 0) {
712 dev_err(&pdev->dev, "Failed to disable SPI module\n");
713 goto exit_free_master;
714 }
715
716 ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
717 if (ret < 0) {
718 dev_err(&pdev->dev, "Failed to get number of CS pins\n");
719 goto exit_free_master;
720 }
721
722 ret = dln2_spi_get_speed_range(dln2,
723 &master->min_speed_hz,
724 &master->max_speed_hz);
725 if (ret < 0) {
726 dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
727 goto exit_free_master;
728 }
729
730 ret = dln2_spi_get_supported_frame_sizes(dln2,
731 &master->bits_per_word_mask);
732 if (ret < 0) {
733 dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
734 goto exit_free_master;
735 }
736
737 ret = dln2_spi_cs_enable_all(dln2, true);
738 if (ret < 0) {
739 dev_err(&pdev->dev, "Failed to enable CS pins\n");
740 goto exit_free_master;
741 }
742
743 master->bus_num = -1;
744 master->mode_bits = SPI_CPOL | SPI_CPHA;
745 master->prepare_message = dln2_spi_prepare_message;
746 master->transfer_one = dln2_spi_transfer_one;
747 master->auto_runtime_pm = true;
748
749 /* enable SPI module, we're good to go */
750 ret = dln2_spi_enable(dln2, true);
751 if (ret < 0) {
752 dev_err(&pdev->dev, "Failed to enable SPI module\n");
753 goto exit_free_master;
754 }
755
756 pm_runtime_set_autosuspend_delay(&pdev->dev,
757 DLN2_RPM_AUTOSUSPEND_TIMEOUT);
758 pm_runtime_use_autosuspend(&pdev->dev);
759 pm_runtime_set_active(&pdev->dev);
760 pm_runtime_enable(&pdev->dev);
761
762 ret = devm_spi_register_master(&pdev->dev, master);
763 if (ret < 0) {
764 dev_err(&pdev->dev, "Failed to register master\n");
765 goto exit_register;
766 }
767
768 return ret;
769
770exit_register:
771 pm_runtime_disable(&pdev->dev);
772 pm_runtime_set_suspended(&pdev->dev);
773
774 if (dln2_spi_enable(dln2, false) < 0)
775 dev_err(&pdev->dev, "Failed to disable SPI module\n");
776exit_free_master:
777 spi_master_put(master);
778
779 return ret;
780}
781
782static int dln2_spi_remove(struct platform_device *pdev)
783{
784 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
785 struct dln2_spi *dln2 = spi_master_get_devdata(master);
786
787 pm_runtime_disable(&pdev->dev);
788
789 if (dln2_spi_enable(dln2, false) < 0)
790 dev_err(&pdev->dev, "Failed to disable SPI module\n");
791
792 return 0;
793}
794
795#ifdef CONFIG_PM_SLEEP
796static int dln2_spi_suspend(struct device *dev)
797{
798 int ret;
799 struct spi_master *master = dev_get_drvdata(dev);
800 struct dln2_spi *dln2 = spi_master_get_devdata(master);
801
802 ret = spi_master_suspend(master);
803 if (ret < 0)
804 return ret;
805
806 if (!pm_runtime_suspended(dev)) {
807 ret = dln2_spi_enable(dln2, false);
808 if (ret < 0)
809 return ret;
810 }
811
812 /*
813 * USB power may be cut off during sleep. Resetting the following
814 * parameters will force the board to be set up before first transfer.
815 */
816 dln2->cs = 0xff;
817 dln2->speed = 0;
818 dln2->bpw = 0;
819 dln2->mode = 0xff;
820
821 return 0;
822}
823
824static int dln2_spi_resume(struct device *dev)
825{
826 int ret;
827 struct spi_master *master = dev_get_drvdata(dev);
828 struct dln2_spi *dln2 = spi_master_get_devdata(master);
829
830 if (!pm_runtime_suspended(dev)) {
831 ret = dln2_spi_cs_enable_all(dln2, true);
832 if (ret < 0)
833 return ret;
834
835 ret = dln2_spi_enable(dln2, true);
836 if (ret < 0)
837 return ret;
838 }
839
840 return spi_master_resume(master);
841}
842#endif /* CONFIG_PM_SLEEP */
843
844#ifdef CONFIG_PM
845static int dln2_spi_runtime_suspend(struct device *dev)
846{
847 struct spi_master *master = dev_get_drvdata(dev);
848 struct dln2_spi *dln2 = spi_master_get_devdata(master);
849
850 return dln2_spi_enable(dln2, false);
851}
852
853static int dln2_spi_runtime_resume(struct device *dev)
854{
855 struct spi_master *master = dev_get_drvdata(dev);
856 struct dln2_spi *dln2 = spi_master_get_devdata(master);
857
858 return dln2_spi_enable(dln2, true);
859}
860#endif /* CONFIG_PM */
861
862static const struct dev_pm_ops dln2_spi_pm = {
863 SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
864 SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
865 dln2_spi_runtime_resume, NULL)
866};
867
868static struct platform_driver spi_dln2_driver = {
869 .driver = {
870 .name = "dln2-spi",
871 .pm = &dln2_spi_pm,
872 },
873 .probe = dln2_spi_probe,
874 .remove = dln2_spi_remove,
875};
876module_platform_driver(spi_dln2_driver);
877
878MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
879MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
880MODULE_LICENSE("GPL v2");
881MODULE_ALIAS("platform:dln2-spi");
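Worth noting in the new driver: dln2_spi_rdwr() transparently splits transfers larger than DLN2_SPI_MAX_XFER_SIZE (256 bytes) and forces DLN2_SPI_ATTR_LEAVE_SS_LOW on every chunk except the last, so chip-select stays asserted across the split. For example, a 600-byte write issued with attr == 0 goes out as three DLN2_SPI_WRITE commands:

	/* chunk 1: offset   0, len 256, attr = DLN2_SPI_ATTR_LEAVE_SS_LOW */
	/* chunk 2: offset 256, len 256, attr = DLN2_SPI_ATTR_LEAVE_SS_LOW */
	/* chunk 3: offset 512, len  88, attr = 0 (CS released after this one) */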
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index a67d37c7e3c0..a0197fd4e95c 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -247,9 +247,9 @@ static struct dw_spi_dma_ops mid_dma_ops = {
247 247
248/* Some specific info for SPI0 controller on Intel MID */ 248/* Some specific info for SPI0 controller on Intel MID */
249 249
250/* HW info for MRST CLk Control Unit, one 32b reg */ 250/* HW info for MRST Clk Control Unit, 32b reg per controller */
251#define MRST_SPI_CLK_BASE 100000000 /* 100m */ 251#define MRST_SPI_CLK_BASE 100000000 /* 100m */
252#define MRST_CLK_SPI0_REG 0xff11d86c 252#define MRST_CLK_SPI_REG 0xff11d86c
253#define CLK_SPI_BDIV_OFFSET 0 253#define CLK_SPI_BDIV_OFFSET 0
254#define CLK_SPI_BDIV_MASK 0x00000007 254#define CLK_SPI_BDIV_MASK 0x00000007
255#define CLK_SPI_CDIV_OFFSET 9 255#define CLK_SPI_CDIV_OFFSET 9
@@ -261,16 +261,17 @@ int dw_spi_mid_init(struct dw_spi *dws)
261 void __iomem *clk_reg; 261 void __iomem *clk_reg;
262 u32 clk_cdiv; 262 u32 clk_cdiv;
263 263
264 clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); 264 clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
265 if (!clk_reg) 265 if (!clk_reg)
266 return -ENOMEM; 266 return -ENOMEM;
267 267
268 /* get SPI controller operating freq info */ 268 /* Get SPI controller operating freq info */
269 clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; 269 clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
270 clk_cdiv &= CLK_SPI_CDIV_MASK;
271 clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
270 dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); 272 dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
271 iounmap(clk_reg);
272 273
273 dws->num_cs = 16; 274 iounmap(clk_reg);
274 275
275#ifdef CONFIG_SPI_DW_MID_DMA 276#ifdef CONFIG_SPI_DW_MID_DMA
276 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); 277 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
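Two fixes land in the spi-dw-mid.c hunk: the clock-control register is now indexed per controller (clk_reg + dws->bus_num * sizeof(u32), matching the renamed MRST_CLK_SPI_REG), and the hard-coded dws->num_cs assignment moves into the per-device PCI descriptor (see the spi-dw-pci.c change below). The max_freq derivation itself is unchanged; with an assumed CDIV field value of 3:

	/* max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1)
	 *          = 100000000 / (3 + 1) = 25 MHz */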
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index ba68da12cdf0..5ba331047cbe 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -30,10 +30,20 @@ struct dw_spi_pci {
30 30
31struct spi_pci_desc { 31struct spi_pci_desc {
32 int (*setup)(struct dw_spi *); 32 int (*setup)(struct dw_spi *);
33 u16 num_cs;
34 u16 bus_num;
33}; 35};
34 36
35static struct spi_pci_desc spi_pci_mid_desc = { 37static struct spi_pci_desc spi_pci_mid_desc_1 = {
36 .setup = dw_spi_mid_init, 38 .setup = dw_spi_mid_init,
39 .num_cs = 32,
40 .bus_num = 0,
41};
42
43static struct spi_pci_desc spi_pci_mid_desc_2 = {
44 .setup = dw_spi_mid_init,
45 .num_cs = 4,
46 .bus_num = 1,
37}; 47};
38 48
39static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 49static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -65,18 +75,23 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
65 75
66 dws->regs = pcim_iomap_table(pdev)[pci_bar]; 76 dws->regs = pcim_iomap_table(pdev)[pci_bar];
67 77
68 dws->bus_num = 0;
69 dws->num_cs = 4;
70 dws->irq = pdev->irq; 78 dws->irq = pdev->irq;
71 79
72 /* 80 /*
 73 * Specific handling for platforms, like dma setup, 81 * Specific handling for platforms, like dma setup,
74 * clock rate, FIFO depth. 82 * clock rate, FIFO depth.
75 */ 83 */
76 if (desc && desc->setup) { 84 if (desc) {
77 ret = desc->setup(dws); 85 dws->num_cs = desc->num_cs;
78 if (ret) 86 dws->bus_num = desc->bus_num;
79 return ret; 87
88 if (desc->setup) {
89 ret = desc->setup(dws);
90 if (ret)
91 return ret;
92 }
93 } else {
94 return -ENODEV;
80 } 95 }
81 96
82 ret = dw_spi_add_host(&pdev->dev, dws); 97 ret = dw_spi_add_host(&pdev->dev, dws);
@@ -121,7 +136,14 @@ static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
121 136
122static const struct pci_device_id pci_ids[] = { 137static const struct pci_device_id pci_ids[] = {
123 /* Intel MID platform SPI controller 0 */ 138 /* Intel MID platform SPI controller 0 */
124 { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc}, 139 /*
140 * The access to the device 8086:0801 is disabled by HW, since it's
141 * exclusively used by SCU to communicate with MSIC.
142 */
143 /* Intel MID platform SPI controller 1 */
144 { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
145 /* Intel MID platform SPI controller 2 */
146 { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
125 {}, 147 {},
126}; 148};
127 149
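
For context, the two descriptors above travel through the standard PCI driver_data mechanism: the pci_device_id table stores each struct spi_pci_desc pointer as a kernel_ulong_t, and probe casts it back before copying num_cs and bus_num into the dw_spi core. A simplified sketch of that retrieval, not the full probe path:

static int example_probe(struct pci_dev *pdev,
                         const struct pci_device_id *ent)
{
        struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data;

        if (!desc)
                return -ENODEV;         /* no descriptor, no support */

        /* dws->num_cs = desc->num_cs; dws->bus_num = desc->bus_num; ... */
        return 0;
}
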
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 8edcd1b84562..5a97a62b298a 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -608,7 +608,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
608} 608}
609 609
610/* Restart the controller, disable all interrupts, clean rx fifo */ 610/* Restart the controller, disable all interrupts, clean rx fifo */
611static void spi_hw_init(struct dw_spi *dws) 611static void spi_hw_init(struct device *dev, struct dw_spi *dws)
612{ 612{
613 spi_enable_chip(dws, 0); 613 spi_enable_chip(dws, 0);
614 spi_mask_intr(dws, 0xff); 614 spi_mask_intr(dws, 0xff);
@@ -626,9 +626,10 @@ static void spi_hw_init(struct dw_spi *dws)
626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) 626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
627 break; 627 break;
628 } 628 }
629 dw_writew(dws, DW_SPI_TXFLTR, 0);
629 630
630 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; 631 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
631 dw_writew(dws, DW_SPI_TXFLTR, 0); 632 dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
632 } 633 }
633} 634}
634 635
@@ -668,7 +669,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
668 master->dev.of_node = dev->of_node; 669 master->dev.of_node = dev->of_node;
669 670
670 /* Basic HW init */ 671 /* Basic HW init */
671 spi_hw_init(dws); 672 spi_hw_init(dev, dws);
672 673
673 if (dws->dma_ops && dws->dma_ops->dma_init) { 674 if (dws->dma_ops && dws->dma_ops->dma_init) {
674 ret = dws->dma_ops->dma_init(dws); 675 ret = dws->dma_ops->dma_init(dws);
@@ -731,7 +732,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
731{ 732{
732 int ret; 733 int ret;
733 734
734 spi_hw_init(dws); 735 spi_hw_init(&dws->master->dev, dws);
735 ret = spi_master_resume(dws->master); 736 ret = spi_master_resume(dws->master);
736 if (ret) 737 if (ret)
737 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); 738 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 912b9037e9cf..286b2c81fc6b 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -353,16 +353,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
353 return 0; 353 return 0;
354} 354}
355 355
356static int falcon_sflash_prepare_xfer(struct spi_master *master)
357{
358 return 0;
359}
360
361static int falcon_sflash_unprepare_xfer(struct spi_master *master)
362{
363 return 0;
364}
365
366static int falcon_sflash_xfer_one(struct spi_master *master, 356static int falcon_sflash_xfer_one(struct spi_master *master,
367 struct spi_message *m) 357 struct spi_message *m)
368{ 358{
@@ -420,9 +410,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
420 master->mode_bits = SPI_MODE_3; 410 master->mode_bits = SPI_MODE_3;
421 master->flags = SPI_MASTER_HALF_DUPLEX; 411 master->flags = SPI_MASTER_HALF_DUPLEX;
422 master->setup = falcon_sflash_setup; 412 master->setup = falcon_sflash_setup;
423 master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
424 master->transfer_one_message = falcon_sflash_xfer_one; 413 master->transfer_one_message = falcon_sflash_xfer_one;
425 master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer;
426 master->dev.of_node = pdev->dev.of_node; 414 master->dev.of_node = pdev->dev.of_node;
427 415
428 ret = devm_spi_register_master(&pdev->dev, master); 416 ret = devm_spi_register_master(&pdev->dev, master);
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index e85ab1cb17a2..9c46a3058743 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -20,6 +20,7 @@
20#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
21#include <linux/fsl_devices.h> 21#include <linux/fsl_devices.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h>
23#include <linux/of_address.h> 24#include <linux/of_address.h>
24#include <linux/spi/spi.h> 25#include <linux/spi/spi.h>
25#include <linux/types.h> 26#include <linux/types.h>
@@ -68,6 +69,7 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
68 } 69 }
69 } 70 }
70} 71}
72EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
71 73
72static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) 74static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
73{ 75{
@@ -162,6 +164,7 @@ err_rx_dma:
162 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); 164 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
163 return -ENOMEM; 165 return -ENOMEM;
164} 166}
167EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
165 168
166void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) 169void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
167{ 170{
@@ -174,6 +177,7 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
174 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); 177 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
175 mspi->xfer_in_progress = NULL; 178 mspi->xfer_in_progress = NULL;
176} 179}
180EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
177 181
178void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) 182void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
179{ 183{
@@ -198,6 +202,7 @@ void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
198 else 202 else
199 complete(&mspi->done); 203 complete(&mspi->done);
200} 204}
205EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
201 206
202static void *fsl_spi_alloc_dummy_rx(void) 207static void *fsl_spi_alloc_dummy_rx(void)
203{ 208{
@@ -375,6 +380,7 @@ err_pram:
375 fsl_spi_free_dummy_rx(); 380 fsl_spi_free_dummy_rx();
376 return -ENOMEM; 381 return -ENOMEM;
377} 382}
383EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
378 384
379void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) 385void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
380{ 386{
@@ -389,3 +395,6 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
389 cpm_muram_free(cpm_muram_offset(mspi->pram)); 395 cpm_muram_free(cpm_muram_offset(mspi->pram));
390 fsl_spi_free_dummy_rx(); 396 fsl_spi_free_dummy_rx();
391} 397}
398EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
399
400MODULE_LICENSE("GPL");
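
The exports and the MODULE_LICENSE tag above follow the standard pattern for turning a shared helper file into something other modules (here the spi-fsl-spi and spi-fsl-espi drivers) can link against when built as =m: every cross-module entry point needs an EXPORT_SYMBOL_GPL(), and the object needs a license tag or GPL-only imports fail at load time. A minimal sketch with an invented helper name:

#include <linux/module.h>

int example_helper(int x)               /* invented cross-module helper */
{
        return x + 1;
}
EXPORT_SYMBOL_GPL(example_helper);

MODULE_LICENSE("GPL");
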
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4cda994d3f40..d1a39249704a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -106,7 +106,7 @@ struct chip_data {
106}; 106};
107 107
108struct fsl_dspi { 108struct fsl_dspi {
109 struct spi_bitbang bitbang; 109 struct spi_master *master;
110 struct platform_device *pdev; 110 struct platform_device *pdev;
111 111
112 struct regmap *regmap; 112 struct regmap *regmap;
@@ -114,6 +114,7 @@ struct fsl_dspi {
114 struct clk *clk; 114 struct clk *clk;
115 115
116 struct spi_transfer *cur_transfer; 116 struct spi_transfer *cur_transfer;
117 struct spi_message *cur_msg;
117 struct chip_data *cur_chip; 118 struct chip_data *cur_chip;
118 size_t len; 119 size_t len;
119 void *tx; 120 void *tx;
@@ -123,6 +124,7 @@ struct fsl_dspi {
123 char dataflags; 124 char dataflags;
124 u8 cs; 125 u8 cs;
125 u16 void_write_data; 126 u16 void_write_data;
127 u32 cs_change;
126 128
127 wait_queue_head_t waitq; 129 wait_queue_head_t waitq;
128 u32 waitflags; 130 u32 waitflags;
@@ -225,6 +227,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
225 if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) { 227 if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
226 /* last transfer in the transfer */ 228 /* last transfer in the transfer */
227 dspi_pushr |= SPI_PUSHR_EOQ; 229 dspi_pushr |= SPI_PUSHR_EOQ;
230 if ((dspi->cs_change) && (!dspi->len))
231 dspi_pushr &= ~SPI_PUSHR_CONT;
228 } else if (tx_word && (dspi->len == 1)) 232 } else if (tx_word && (dspi->len == 1))
229 dspi_pushr |= SPI_PUSHR_EOQ; 233 dspi_pushr |= SPI_PUSHR_EOQ;
230 234
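
The cs_change handling above hinges on the PUSHR continuation bit: CONT keeps chip select asserted between FIFO words, so dropping it on the last word of a transfer that has cs_change set makes the controller release CS between transfers. An illustrative flag computation; the bit positions are stand-ins, not taken from the driver header:

#include <stdio.h>

#define SPI_PUSHR_CONT (1u << 31)       /* keep CS asserted after word */
#define SPI_PUSHR_EOQ  (1u << 27)       /* last entry of the queue */

static unsigned int pushr_flags(int last_word, int cs_change)
{
        unsigned int pushr = SPI_PUSHR_CONT;

        if (last_word) {
                pushr |= SPI_PUSHR_EOQ;
                if (cs_change)
                        pushr &= ~SPI_PUSHR_CONT;  /* release CS after */
        }
        return pushr;
}

int main(void)
{
        printf("0x%08x\n", pushr_flags(1, 1));     /* EOQ only */
        return 0;
}
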
@@ -246,6 +250,7 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
246 int rx_count = 0; 250 int rx_count = 0;
247 int rx_word = is_double_byte_mode(dspi); 251 int rx_word = is_double_byte_mode(dspi);
248 u16 d; 252 u16 d;
253
249 while ((dspi->rx < dspi->rx_end) 254 while ((dspi->rx < dspi->rx_end)
250 && (rx_count < DSPI_FIFO_SIZE)) { 255 && (rx_count < DSPI_FIFO_SIZE)) {
251 if (rx_word) { 256 if (rx_word) {
@@ -276,86 +281,89 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
276 return rx_count; 281 return rx_count;
277} 282}
278 283
279static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t) 284static int dspi_transfer_one_message(struct spi_master *master,
285 struct spi_message *message)
280{ 286{
281 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); 287 struct fsl_dspi *dspi = spi_master_get_devdata(master);
282 dspi->cur_transfer = t; 288 struct spi_device *spi = message->spi;
283 dspi->cur_chip = spi_get_ctldata(spi); 289 struct spi_transfer *transfer;
284 dspi->cs = spi->chip_select; 290 int status = 0;
285 dspi->void_write_data = dspi->cur_chip->void_write_data; 291 message->actual_length = 0;
286 292
287 dspi->dataflags = 0; 293 list_for_each_entry(transfer, &message->transfers, transfer_list) {
288 dspi->tx = (void *)t->tx_buf; 294 dspi->cur_transfer = transfer;
289 dspi->tx_end = dspi->tx + t->len; 295 dspi->cur_msg = message;
290 dspi->rx = t->rx_buf; 296 dspi->cur_chip = spi_get_ctldata(spi);
291 dspi->rx_end = dspi->rx + t->len; 297 dspi->cs = spi->chip_select;
292 dspi->len = t->len; 298 if (dspi->cur_transfer->transfer_list.next
293 299 == &dspi->cur_msg->transfers)
294 if (!dspi->rx) 300 transfer->cs_change = 1;
295 dspi->dataflags |= TRAN_STATE_RX_VOID; 301 dspi->cs_change = transfer->cs_change;
296 302 dspi->void_write_data = dspi->cur_chip->void_write_data;
297 if (!dspi->tx) 303
298 dspi->dataflags |= TRAN_STATE_TX_VOID; 304 dspi->dataflags = 0;
299 305 dspi->tx = (void *)transfer->tx_buf;
300 regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val); 306 dspi->tx_end = dspi->tx + transfer->len;
301 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val); 307 dspi->rx = transfer->rx_buf;
302 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); 308 dspi->rx_end = dspi->rx + transfer->len;
303 309 dspi->len = transfer->len;
304 if (t->speed_hz) 310
311 if (!dspi->rx)
312 dspi->dataflags |= TRAN_STATE_RX_VOID;
313
314 if (!dspi->tx)
315 dspi->dataflags |= TRAN_STATE_TX_VOID;
316
317 regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
318 regmap_update_bits(dspi->regmap, SPI_MCR,
319 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
320 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
305 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), 321 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
306 dspi->cur_chip->ctar_val); 322 dspi->cur_chip->ctar_val);
323 if (transfer->speed_hz)
324 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
325 dspi->cur_chip->ctar_val);
307 326
308 dspi_transfer_write(dspi); 327 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
328 message->actual_length += dspi_transfer_write(dspi);
309 329
310 if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) 330 if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
311 dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); 331 dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
312 dspi->waitflags = 0; 332 dspi->waitflags = 0;
313
314 return t->len - dspi->len;
315}
316 333
317static void dspi_chipselect(struct spi_device *spi, int value) 334 if (transfer->delay_usecs)
318{ 335 udelay(transfer->delay_usecs);
319 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
320 unsigned int pushr;
321
322 regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
323
324 switch (value) {
325 case BITBANG_CS_ACTIVE:
326 pushr |= SPI_PUSHR_CONT;
327 break;
328 case BITBANG_CS_INACTIVE:
329 pushr &= ~SPI_PUSHR_CONT;
330 break;
331 } 336 }
332 337
333 regmap_write(dspi->regmap, SPI_PUSHR, pushr); 338 message->status = status;
339 spi_finalize_current_message(master);
340
341 return status;
334} 342}
335 343
336static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 344static int dspi_setup(struct spi_device *spi)
337{ 345{
338 struct chip_data *chip; 346 struct chip_data *chip;
339 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); 347 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
340 unsigned char br = 0, pbr = 0, fmsz = 0; 348 unsigned char br = 0, pbr = 0, fmsz = 0;
341 349
350 if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
351 fmsz = spi->bits_per_word - 1;
352 } else {
353 pr_err("Invalid wordsize\n");
354 return -ENODEV;
355 }
356
342 /* Only alloc on first setup */ 357 /* Only alloc on first setup */
343 chip = spi_get_ctldata(spi); 358 chip = spi_get_ctldata(spi);
344 if (chip == NULL) { 359 if (chip == NULL) {
345 chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data), 360 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
346 GFP_KERNEL);
347 if (!chip) 361 if (!chip)
348 return -ENOMEM; 362 return -ENOMEM;
349 } 363 }
350 364
351 chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | 365 chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
352 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; 366 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
353 if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
354 fmsz = spi->bits_per_word - 1;
355 } else {
356 pr_err("Invalid wordsize\n");
357 return -ENODEV;
358 }
359 367
360 chip->void_write_data = 0; 368 chip->void_write_data = 0;
361 369
@@ -374,34 +382,34 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
374 return 0; 382 return 0;
375} 383}
376 384
377static int dspi_setup(struct spi_device *spi) 385static void dspi_cleanup(struct spi_device *spi)
378{ 386{
379 if (!spi->max_speed_hz) 387 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
380 return -EINVAL; 388
389 dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
390 spi->master->bus_num, spi->chip_select);
381 391
382 return dspi_setup_transfer(spi, NULL); 392 kfree(chip);
383} 393}
384 394
385static irqreturn_t dspi_interrupt(int irq, void *dev_id) 395static irqreturn_t dspi_interrupt(int irq, void *dev_id)
386{ 396{
387 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; 397 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
388 398
389 regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF); 399 struct spi_message *msg = dspi->cur_msg;
390 400
401 regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
391 dspi_transfer_read(dspi); 402 dspi_transfer_read(dspi);
392 403
393 if (!dspi->len) { 404 if (!dspi->len) {
394 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) 405 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
395 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 406 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
396 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); 407 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
397 408
398 dspi->waitflags = 1; 409 dspi->waitflags = 1;
399 wake_up_interruptible(&dspi->waitq); 410 wake_up_interruptible(&dspi->waitq);
400 } else { 411 } else
401 dspi_transfer_write(dspi); 412 msg->actual_length += dspi_transfer_write(dspi);
402
403 return IRQ_HANDLED;
404 }
405 413
406 return IRQ_HANDLED; 414 return IRQ_HANDLED;
407} 415}
@@ -460,13 +468,14 @@ static int dspi_probe(struct platform_device *pdev)
460 468
461 dspi = spi_master_get_devdata(master); 469 dspi = spi_master_get_devdata(master);
462 dspi->pdev = pdev; 470 dspi->pdev = pdev;
463 dspi->bitbang.master = master; 471 dspi->master = master;
464 dspi->bitbang.chipselect = dspi_chipselect; 472
465 dspi->bitbang.setup_transfer = dspi_setup_transfer; 473 master->transfer = NULL;
466 dspi->bitbang.txrx_bufs = dspi_txrx_transfer; 474 master->setup = dspi_setup;
467 dspi->bitbang.master->setup = dspi_setup; 475 master->transfer_one_message = dspi_transfer_one_message;
468 dspi->bitbang.master->dev.of_node = pdev->dev.of_node; 476 master->dev.of_node = pdev->dev.of_node;
469 477
478 master->cleanup = dspi_cleanup;
470 master->mode_bits = SPI_CPOL | SPI_CPHA; 479 master->mode_bits = SPI_CPOL | SPI_CPHA;
471 master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | 480 master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
472 SPI_BPW_MASK(16); 481 SPI_BPW_MASK(16);
@@ -525,7 +534,7 @@ static int dspi_probe(struct platform_device *pdev)
525 init_waitqueue_head(&dspi->waitq); 534 init_waitqueue_head(&dspi->waitq);
526 platform_set_drvdata(pdev, master); 535 platform_set_drvdata(pdev, master);
527 536
528 ret = spi_bitbang_start(&dspi->bitbang); 537 ret = spi_register_master(master);
529 if (ret != 0) { 538 if (ret != 0) {
530 dev_err(&pdev->dev, "Problem registering DSPI master\n"); 539 dev_err(&pdev->dev, "Problem registering DSPI master\n");
531 goto out_clk_put; 540 goto out_clk_put;
@@ -547,9 +556,9 @@ static int dspi_remove(struct platform_device *pdev)
547 struct fsl_dspi *dspi = spi_master_get_devdata(master); 556 struct fsl_dspi *dspi = spi_master_get_devdata(master);
548 557
549 /* Disconnect from the SPI framework */ 558 /* Disconnect from the SPI framework */
550 spi_bitbang_stop(&dspi->bitbang);
551 clk_disable_unprepare(dspi->clk); 559 clk_disable_unprepare(dspi->clk);
552 spi_master_put(dspi->bitbang.master); 560 spi_unregister_master(dspi->master);
561 spi_master_put(dspi->master);
553 562
554 return 0; 563 return 0;
555} 564}
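
The dspi diff above is largely the mechanical shape of a transfer_one_message conversion: the SPI core queues messages, calls the hook once per message, and expects the driver to walk msg->transfers, accumulate actual_length, honor delay_usecs, and finalize the message. A simplified skeleton of that contract, with error handling and controller programming elided and illustrative names:

#include <linux/delay.h>
#include <linux/spi/spi.h>

static int example_transfer_one_message(struct spi_master *master,
                                        struct spi_message *msg)
{
        struct spi_transfer *t;

        msg->actual_length = 0;

        list_for_each_entry(t, &msg->transfers, transfer_list) {
                /* program the controller, push t->tx_buf, drain t->rx_buf */
                msg->actual_length += t->len;

                if (t->delay_usecs)
                        udelay(t->delay_usecs);
        }

        msg->status = 0;
        spi_finalize_current_message(master);

        return 0;
}
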
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 446b737e1532..cb35d2f0d0e6 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/module.h>
24#include <linux/of_platform.h> 25#include <linux/of_platform.h>
25#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
26#ifdef CONFIG_FSL_SOC 27#ifdef CONFIG_FSL_SOC
@@ -35,7 +36,8 @@ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
35 type *rx = mpc8xxx_spi->rx; \ 36 type *rx = mpc8xxx_spi->rx; \
36 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ 37 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
37 mpc8xxx_spi->rx = rx; \ 38 mpc8xxx_spi->rx = rx; \
38} 39} \
40EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
39 41
40#define MPC8XXX_SPI_TX_BUF(type) \ 42#define MPC8XXX_SPI_TX_BUF(type) \
41u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ 43u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
@@ -47,7 +49,8 @@ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
47 data = *tx++ << mpc8xxx_spi->tx_shift; \ 49 data = *tx++ << mpc8xxx_spi->tx_shift; \
48 mpc8xxx_spi->tx = tx; \ 50 mpc8xxx_spi->tx = tx; \
49 return data; \ 51 return data; \
50} 52} \
53EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
51 54
52MPC8XXX_SPI_RX_BUF(u8) 55MPC8XXX_SPI_RX_BUF(u8)
53MPC8XXX_SPI_RX_BUF(u16) 56MPC8XXX_SPI_RX_BUF(u16)
@@ -60,6 +63,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
60{ 63{
61 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); 64 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
62} 65}
66EXPORT_SYMBOL_GPL(to_of_pinfo);
63 67
64const char *mpc8xxx_spi_strmode(unsigned int flags) 68const char *mpc8xxx_spi_strmode(unsigned int flags)
65{ 69{
@@ -75,6 +79,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
75 } 79 }
76 return "CPU"; 80 return "CPU";
77} 81}
82EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
78 83
79void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 84void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
80 unsigned int irq) 85 unsigned int irq)
@@ -102,13 +107,12 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
102 mpc8xxx_spi->rx_shift = 0; 107 mpc8xxx_spi->rx_shift = 0;
103 mpc8xxx_spi->tx_shift = 0; 108 mpc8xxx_spi->tx_shift = 0;
104 109
105 init_completion(&mpc8xxx_spi->done);
106
107 master->bus_num = pdata->bus_num; 110 master->bus_num = pdata->bus_num;
108 master->num_chipselect = pdata->max_chipselect; 111 master->num_chipselect = pdata->max_chipselect;
109 112
110 init_completion(&mpc8xxx_spi->done); 113 init_completion(&mpc8xxx_spi->done);
111} 114}
115EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
112 116
113int mpc8xxx_spi_remove(struct device *dev) 117int mpc8xxx_spi_remove(struct device *dev)
114{ 118{
@@ -127,6 +131,7 @@ int mpc8xxx_spi_remove(struct device *dev)
127 131
128 return 0; 132 return 0;
129} 133}
134EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
130 135
131int of_mpc8xxx_spi_probe(struct platform_device *ofdev) 136int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
132{ 137{
@@ -173,3 +178,6 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
173 178
174 return 0; 179 return 0;
175} 180}
181EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
182
183MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index b4ed04e8862f..1326a392adca 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -28,7 +28,7 @@ struct mpc8xxx_spi {
28 /* rx & tx bufs from the spi_transfer */ 28 /* rx & tx bufs from the spi_transfer */
29 const void *tx; 29 const void *tx;
30 void *rx; 30 void *rx;
31#ifdef CONFIG_SPI_FSL_ESPI 31#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
32 int len; 32 int len;
33#endif 33#endif
34 34
@@ -68,7 +68,7 @@ struct mpc8xxx_spi {
68 68
69 unsigned int flags; 69 unsigned int flags;
70 70
71#ifdef CONFIG_SPI_FSL_SPI 71#if IS_ENABLED(CONFIG_SPI_FSL_SPI)
72 int type; 72 int type;
73 int native_chipselects; 73 int native_chipselects;
74 u8 max_bits_per_word; 74 u8 max_bits_per_word;
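
The IS_ENABLED() switch above matters for modular builds: with CONFIG_SPI_FSL_ESPI=m the preprocessor defines CONFIG_SPI_FSL_ESPI_MODULE rather than CONFIG_SPI_FSL_ESPI, so a plain #ifdef silently compiles the field out and the two sides of the interface disagree on the struct layout. A minimal sketch with a placeholder option name:

#include <linux/kconfig.h>

struct example {
#if IS_ENABLED(CONFIG_FOO)              /* true for both =y and =m */
        int len;
#endif
        int flags;
};
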
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index aee4e7589568..1c34c9314c8a 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20#include <linux/kernel.h> 16#include <linux/kernel.h>
21#include <linux/module.h> 17#include <linux/module.h>
@@ -92,7 +88,7 @@ struct spi_gpio {
92 88
93/*----------------------------------------------------------------------*/ 89/*----------------------------------------------------------------------*/
94 90
95static inline struct spi_gpio * __pure 91static inline struct spi_gpio *__pure
96spi_to_spi_gpio(const struct spi_device *spi) 92spi_to_spi_gpio(const struct spi_device *spi)
97{ 93{
98 const struct spi_bitbang *bang; 94 const struct spi_bitbang *bang;
@@ -103,7 +99,7 @@ spi_to_spi_gpio(const struct spi_device *spi)
103 return spi_gpio; 99 return spi_gpio;
104} 100}
105 101
106static inline struct spi_gpio_platform_data * __pure 102static inline struct spi_gpio_platform_data *__pure
107spi_to_pdata(const struct spi_device *spi) 103spi_to_pdata(const struct spi_device *spi)
108{ 104{
109 return &spi_to_spi_gpio(spi)->pdata; 105 return &spi_to_spi_gpio(spi)->pdata;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index aad6683db81b..c01567d53581 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
160 unsigned int count = 0; 160 unsigned int count = 0;
161 u32 status; 161 u32 status;
162 162
163 while (count < max) { 163 while (count < max / 4) {
164 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); 164 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
165 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); 165 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
166 if (status & SPFI_INTERRUPT_SDFUL) 166 if (status & SPFI_INTERRUPT_SDFUL)
167 break; 167 break;
168 spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA); 168 spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
169 count += 4; 169 count++;
170 } 170 }
171 171
172 return count; 172 return count * 4;
173} 173}
174 174
175static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf, 175static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
@@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
196 unsigned int count = 0; 196 unsigned int count = 0;
197 u32 status; 197 u32 status;
198 198
199 while (count < max) { 199 while (count < max / 4) {
200 spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT, 200 spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
201 SPFI_INTERRUPT_CLEAR); 201 SPFI_INTERRUPT_CLEAR);
202 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); 202 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
203 if (!(status & SPFI_INTERRUPT_GDEX32BIT)) 203 if (!(status & SPFI_INTERRUPT_GDEX32BIT))
204 break; 204 break;
205 buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA); 205 buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
206 count += 4; 206 count++;
207 } 207 }
208 208
209 return count; 209 return count * 4;
210} 210}
211 211
212static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf, 212static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
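
The spfi_pio_write32()/read32() fix above changes the unit of count from bytes to 32-bit words: the loop bound becomes max / 4, the buffer is indexed per word, and the return value is scaled back to bytes so callers can keep advancing byte pointers. A userspace illustration of the arithmetic, FIFO-full checks omitted:

#include <stdint.h>
#include <stdio.h>

static unsigned int pio_write32(const uint32_t *buf, unsigned int max_bytes)
{
        unsigned int count = 0;
        uint32_t sink = 0;

        while (count < max_bytes / 4) {
                sink ^= buf[count];     /* stands in for the FIFO writel() */
                count++;
        }
        (void)sink;

        return count * 4;               /* bytes, so callers advance pointers */
}

int main(void)
{
        uint32_t words[3] = { 1, 2, 3 };

        printf("%u bytes written\n", pio_write32(words, sizeof(words)));
        return 0;
}
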
@@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master,
251 time_before(jiffies, timeout)) { 251 time_before(jiffies, timeout)) {
252 unsigned int tx_count, rx_count; 252 unsigned int tx_count, rx_count;
253 253
254 switch (xfer->bits_per_word) { 254 if (tx_bytes >= 4)
255 case 32:
256 tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes); 255 tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
257 rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes); 256 else
258 break;
259 case 8:
260 default:
261 tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes); 257 tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
258
259 if (rx_bytes >= 4)
260 rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
261 else
262 rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes); 262 rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
263 break;
264 }
265 263
266 tx_buf += tx_count; 264 tx_buf += tx_count;
267 rx_buf += rx_count; 265 rx_buf += rx_count;
@@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master,
331 329
332 if (xfer->rx_buf) { 330 if (xfer->rx_buf) {
333 rxconf.direction = DMA_DEV_TO_MEM; 331 rxconf.direction = DMA_DEV_TO_MEM;
334 switch (xfer->bits_per_word) { 332 if (xfer->len % 4 == 0) {
335 case 32:
336 rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA; 333 rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
337 rxconf.src_addr_width = 4; 334 rxconf.src_addr_width = 4;
338 rxconf.src_maxburst = 4; 335 rxconf.src_maxburst = 4;
339 break; 336 } else {
340 case 8:
341 default:
342 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA; 337 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
343 rxconf.src_addr_width = 1; 338 rxconf.src_addr_width = 1;
344 rxconf.src_maxburst = 4; 339 rxconf.src_maxburst = 4;
@@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master,
358 353
359 if (xfer->tx_buf) { 354 if (xfer->tx_buf) {
360 txconf.direction = DMA_MEM_TO_DEV; 355 txconf.direction = DMA_MEM_TO_DEV;
361 switch (xfer->bits_per_word) { 356 if (xfer->len % 4 == 0) {
362 case 32:
363 txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA; 357 txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
364 txconf.dst_addr_width = 4; 358 txconf.dst_addr_width = 4;
365 txconf.dst_maxburst = 4; 359 txconf.dst_maxburst = 4;
366 break; 360 } else {
367 case 8:
368 default:
369 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA; 361 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
370 txconf.dst_addr_width = 1; 362 txconf.dst_addr_width = 1;
371 txconf.dst_maxburst = 4; 363 txconf.dst_maxburst = 4;
372 break;
373 } 364 }
374 dmaengine_slave_config(spfi->tx_ch, &txconf); 365 dmaengine_slave_config(spfi->tx_ch, &txconf);
375 366
@@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable)
508static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, 499static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
509 struct spi_transfer *xfer) 500 struct spi_transfer *xfer)
510{ 501{
511 if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE) 502 if (xfer->len > SPFI_32BIT_FIFO_SIZE)
512 return true;
513 if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
514 return true; 503 return true;
515 return false; 504 return false;
516} 505}
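
Taken together, the img-spfi hunks above replace the per-word-size switches with two simple length-based policies: use the 32-bit FIFO port when the transfer length is word-aligned, and only reach for DMA when the transfer exceeds the FIFO. A sketch of those predicates, with SPFI_32BIT_FIFO_SIZE's value assumed since it is not shown here:

#include <stdbool.h>
#include <stdio.h>

#define SPFI_32BIT_FIFO_SIZE 64         /* assumed; not shown in the hunk */

static bool use_32bit_port(unsigned int len)
{
        return len % 4 == 0;            /* word-aligned: wide FIFO port */
}

static bool worth_dma(unsigned int len)
{
        return len > SPFI_32BIT_FIFO_SIZE; /* FIFO-sized transfers stay PIO */
}

int main(void)
{
        printf("len=6:   32bit=%d dma=%d\n", use_32bit_port(6), worth_dma(6));
        printf("len=128: 32bit=%d dma=%d\n", use_32bit_port(128), worth_dma(128));
        return 0;
}
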
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 961b97d43b43..6fea4af51c41 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -89,7 +89,6 @@ struct spi_imx_data {
89 89
90 struct completion xfer_done; 90 struct completion xfer_done;
91 void __iomem *base; 91 void __iomem *base;
92 int irq;
93 struct clk *clk_per; 92 struct clk *clk_per;
94 struct clk *clk_ipg; 93 struct clk *clk_ipg;
95 unsigned long spi_clk; 94 unsigned long spi_clk;
@@ -823,6 +822,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
823 struct dma_slave_config slave_config = {}; 822 struct dma_slave_config slave_config = {};
824 int ret; 823 int ret;
825 824
825 /* use pio mode for i.mx6dl chip TKT238285 */
826 if (of_machine_is_compatible("fsl,imx6dl"))
827 return 0;
828
826 /* Prepare for TX DMA: */ 829 /* Prepare for TX DMA: */
827 master->dma_tx = dma_request_slave_channel(dev, "tx"); 830 master->dma_tx = dma_request_slave_channel(dev, "tx");
828 if (!master->dma_tx) { 831 if (!master->dma_tx) {
@@ -892,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
892{ 895{
893 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; 896 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
894 int ret; 897 int ret;
898 unsigned long timeout;
895 u32 dma; 899 u32 dma;
896 int left; 900 int left;
897 struct spi_master *master = spi_imx->bitbang.master; 901 struct spi_master *master = spi_imx->bitbang.master;
@@ -939,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
939 dma_async_issue_pending(master->dma_tx); 943 dma_async_issue_pending(master->dma_tx);
940 dma_async_issue_pending(master->dma_rx); 944 dma_async_issue_pending(master->dma_rx);
941 /* Wait SDMA to finish the data transfer.*/ 945 /* Wait SDMA to finish the data transfer.*/
942 ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion, 946 timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
943 IMX_DMA_TIMEOUT); 947 IMX_DMA_TIMEOUT);
944 if (!ret) { 948 if (!timeout) {
945 pr_warn("%s %s: I/O Error in DMA TX\n", 949 pr_warn("%s %s: I/O Error in DMA TX\n",
946 dev_driver_string(&master->dev), 950 dev_driver_string(&master->dev),
947 dev_name(&master->dev)); 951 dev_name(&master->dev));
948 dmaengine_terminate_all(master->dma_tx); 952 dmaengine_terminate_all(master->dma_tx);
949 } else { 953 } else {
950 ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion, 954 timeout = wait_for_completion_timeout(
951 IMX_DMA_TIMEOUT); 955 &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
952 if (!ret) { 956 if (!timeout) {
953 pr_warn("%s %s: I/O Error in DMA RX\n", 957 pr_warn("%s %s: I/O Error in DMA RX\n",
954 dev_driver_string(&master->dev), 958 dev_driver_string(&master->dev),
955 dev_name(&master->dev)); 959 dev_name(&master->dev));
@@ -964,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
964 spi_imx->dma_finished = 1; 968 spi_imx->dma_finished = 1;
965 spi_imx->devtype_data->trigger(spi_imx); 969 spi_imx->devtype_data->trigger(spi_imx);
966 970
967 if (!ret) 971 if (!timeout)
968 ret = -ETIMEDOUT; 972 ret = -ETIMEDOUT;
969 else if (ret > 0) 973 else
970 ret = transfer->len; 974 ret = transfer->len;
971 975
972 return ret; 976 return ret;
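
The spi-imx timeout rework reflects the actual return type of wait_for_completion_timeout(): an unsigned long holding the remaining jiffies, or 0 on expiry. Keeping that result in the signed int ret conflated the timeout test with the later -ETIMEDOUT/transfer-length assignment. A sketch of the corrected pattern:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait(struct completion *done, int transfer_len)
{
        unsigned long timeout;          /* jiffies left, 0 on expiry */

        timeout = wait_for_completion_timeout(done,
                                              msecs_to_jiffies(3000));
        if (!timeout)
                return -ETIMEDOUT;      /* the wait expired */

        return transfer_len;            /* completed: report bytes moved */
}
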
@@ -1076,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev)
1076 struct spi_master *master; 1080 struct spi_master *master;
1077 struct spi_imx_data *spi_imx; 1081 struct spi_imx_data *spi_imx;
1078 struct resource *res; 1082 struct resource *res;
1079 int i, ret, num_cs; 1083 int i, ret, num_cs, irq;
1080 1084
1081 if (!np && !mxc_platform_info) { 1085 if (!np && !mxc_platform_info) {
1082 dev_err(&pdev->dev, "can't get the platform data\n"); 1086 dev_err(&pdev->dev, "can't get the platform data\n");
@@ -1143,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev)
1143 goto out_master_put; 1147 goto out_master_put;
1144 } 1148 }
1145 1149
1146 spi_imx->irq = platform_get_irq(pdev, 0); 1150 irq = platform_get_irq(pdev, 0);
1147 if (spi_imx->irq < 0) { 1151 if (irq < 0) {
1148 ret = spi_imx->irq; 1152 ret = irq;
1149 goto out_master_put; 1153 goto out_master_put;
1150 } 1154 }
1151 1155
1152 ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0, 1156 ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
1153 dev_name(&pdev->dev), spi_imx); 1157 dev_name(&pdev->dev), spi_imx);
1154 if (ret) { 1158 if (ret) {
1155 dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); 1159 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
1156 goto out_master_put; 1160 goto out_master_put;
1157 } 1161 }
1158 1162
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 41c5765be746..ba72347cb99d 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21#include <linux/init.h> 17#include <linux/init.h>
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 1bbac0378bf7..5468fc70dbf8 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -85,7 +85,7 @@ struct meson_spifc {
85 struct device *dev; 85 struct device *dev;
86}; 86};
87 87
88static struct regmap_config spifc_regmap_config = { 88static const struct regmap_config spifc_regmap_config = {
89 .reg_bits = 32, 89 .reg_bits = 32,
90 .val_bits = 32, 90 .val_bits = 32,
91 .reg_stride = 4, 91 .reg_stride = 4,
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 4045a1e580e1..5b0e9a3e83f6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
282 dmaengine_submit(desc); 282 dmaengine_submit(desc);
283 dma_async_issue_pending(ssp->dmach); 283 dma_async_issue_pending(ssp->dmach);
284 284
285 ret = wait_for_completion_timeout(&spi->c, 285 if (!wait_for_completion_timeout(&spi->c,
286 msecs_to_jiffies(SSP_TIMEOUT)); 286 msecs_to_jiffies(SSP_TIMEOUT))) {
287 if (!ret) {
288 dev_err(ssp->dev, "DMA transfer timeout\n"); 287 dev_err(ssp->dev, "DMA transfer timeout\n");
289 ret = -ETIMEDOUT; 288 ret = -ETIMEDOUT;
290 dmaengine_terminate_all(ssp->dmach); 289 dmaengine_terminate_all(ssp->dmach);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 79399ae9c84c..d890d309dff9 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -16,11 +16,6 @@
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */ 19 */
25#include <linux/kernel.h> 20#include <linux/kernel.h>
26#include <linux/init.h> 21#include <linux/init.h>
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index daf1ada5cd11..3c0844457c07 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -28,10 +28,6 @@
28 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * You should have received a copy of the GNU General Public License along
33 * with this program; if not, write to the Free Software Foundation, Inc.,
34 * 675 Mass Ave, Cambridge, MA 02139, USA.
35 */ 31 */
36#include <linux/kernel.h> 32#include <linux/kernel.h>
37#include <linux/init.h> 33#include <linux/init.h>
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3bc3cbabbbc0..4df8942058de 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -14,11 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */ 17 */
23 18
24#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 3dec9e0b99b8..861664776672 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -28,7 +28,12 @@
28/* Runtime PM autosuspend timeout: PM is fairly light on this driver */ 28/* Runtime PM autosuspend timeout: PM is fairly light on this driver */
29#define SPI_AUTOSUSPEND_TIMEOUT 200 29#define SPI_AUTOSUSPEND_TIMEOUT 200
30 30
31#define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/ 31/* Some SoCs using this driver support up to 8 chip selects.
32 * It is up to the implementer to only use the chip selects
33 * that are available.
34 */
35#define ORION_NUM_CHIPSELECTS 8
36
32#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */ 37#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
33 38
34#define ORION_SPI_IF_CTRL_REG 0x00 39#define ORION_SPI_IF_CTRL_REG 0x00
@@ -44,6 +49,10 @@
44#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF 49#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF
45#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \ 50#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
46 ORION_SPI_MODE_CPHA) 51 ORION_SPI_MODE_CPHA)
52#define ORION_SPI_CS_MASK 0x1C
53#define ORION_SPI_CS_SHIFT 2
54#define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \
55 ORION_SPI_CS_MASK)
47 56
48enum orion_spi_type { 57enum orion_spi_type {
49 ORION_SPI, 58 ORION_SPI,
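
The new chip-select encoding above packs CS numbers 0-7 into bits [4:2] of the interface control register, which is why the mask is 0x1C and the shift is 2. A worked example:

#include <stdio.h>

#define ORION_SPI_CS_MASK  0x1C
#define ORION_SPI_CS_SHIFT 2
#define ORION_SPI_CS(cs)   (((cs) << ORION_SPI_CS_SHIFT) & ORION_SPI_CS_MASK)

int main(void)
{
        int cs;

        for (cs = 0; cs < 8; cs++)      /* 0x00, 0x04, ... 0x1c */
                printf("cs%d -> 0x%02x\n", cs, ORION_SPI_CS(cs));
        return 0;
}
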
@@ -215,9 +224,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
215 return 0; 224 return 0;
216} 225}
217 226
218static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable) 227static void orion_spi_set_cs(struct spi_device *spi, bool enable)
219{ 228{
220 if (enable) 229 struct orion_spi *orion_spi;
230
231 orion_spi = spi_master_get_devdata(spi->master);
232
233 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
234 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
235 ORION_SPI_CS(spi->chip_select));
236
237 /* Chip select logic is inverted from spi_set_cs */
238 if (!enable)
221 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); 239 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
222 else 240 else
223 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); 241 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
@@ -332,64 +350,31 @@ out:
332 return xfer->len - count; 350 return xfer->len - count;
333} 351}
334 352
335static int orion_spi_transfer_one_message(struct spi_master *master, 353static int orion_spi_transfer_one(struct spi_master *master,
336 struct spi_message *m) 354 struct spi_device *spi,
355 struct spi_transfer *t)
337{ 356{
338 struct orion_spi *orion_spi = spi_master_get_devdata(master);
339 struct spi_device *spi = m->spi;
340 struct spi_transfer *t = NULL;
341 int par_override = 0;
342 int status = 0; 357 int status = 0;
343 int cs_active = 0;
344
345 /* Load defaults */
346 status = orion_spi_setup_transfer(spi, NULL);
347 358
359 status = orion_spi_setup_transfer(spi, t);
348 if (status < 0) 360 if (status < 0)
349 goto msg_done; 361 return status;
350
351 list_for_each_entry(t, &m->transfers, transfer_list) {
352 if (par_override || t->speed_hz || t->bits_per_word) {
353 par_override = 1;
354 status = orion_spi_setup_transfer(spi, t);
355 if (status < 0)
356 break;
357 if (!t->speed_hz && !t->bits_per_word)
358 par_override = 0;
359 }
360
361 if (!cs_active) {
362 orion_spi_set_cs(orion_spi, 1);
363 cs_active = 1;
364 }
365 362
366 if (t->len) 363 if (t->len)
367 m->actual_length += orion_spi_write_read(spi, t); 364 orion_spi_write_read(spi, t);
368 365
369 if (t->delay_usecs) 366 return status;
370 udelay(t->delay_usecs); 367}
371
372 if (t->cs_change) {
373 orion_spi_set_cs(orion_spi, 0);
374 cs_active = 0;
375 }
376 }
377
378msg_done:
379 if (cs_active)
380 orion_spi_set_cs(orion_spi, 0);
381
382 m->status = status;
383 spi_finalize_current_message(master);
384 368
385 return 0; 369static int orion_spi_setup(struct spi_device *spi)
370{
371 return orion_spi_setup_transfer(spi, NULL);
386} 372}
387 373
388static int orion_spi_reset(struct orion_spi *orion_spi) 374static int orion_spi_reset(struct orion_spi *orion_spi)
389{ 375{
390 /* Verify that the CS is deasserted */ 376 /* Verify that the CS is deasserted */
391 orion_spi_set_cs(orion_spi, 0); 377 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
392
393 return 0; 378 return 0;
394} 379}
395 380
@@ -442,9 +427,10 @@ static int orion_spi_probe(struct platform_device *pdev)
442 427
443 /* we support only mode 0, and no options */ 428 /* we support only mode 0, and no options */
444 master->mode_bits = SPI_CPHA | SPI_CPOL; 429 master->mode_bits = SPI_CPHA | SPI_CPOL;
445 430 master->set_cs = orion_spi_set_cs;
446 master->transfer_one_message = orion_spi_transfer_one_message; 431 master->transfer_one = orion_spi_transfer_one;
447 master->num_chipselect = ORION_NUM_CHIPSELECTS; 432 master->num_chipselect = ORION_NUM_CHIPSELECTS;
433 master->setup = orion_spi_setup;
448 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); 434 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
449 master->auto_runtime_pm = true; 435 master->auto_runtime_pm = true;
450 436
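
The orion conversion hands chip-select sequencing to the SPI core: set_cs() receives the logic level of the line (hence the "inverted" comment, since an active-low CS asserts when enable is false), and transfer_one() no longer toggles CS or handles inter-transfer delays itself. A self-contained illustration of the set_cs shape, with invented register bits:

#include <stdbool.h>
#include <stdio.h>

#define CS_ENABLE    0x01               /* invented IF_CTRL enable bit */
#define CS_FIELD(cs) (((cs) << 2) & 0x1C)

static unsigned int if_ctrl;            /* fake interface control register */

static void example_set_cs(unsigned int chip_select, bool enable)
{
        if_ctrl = CS_FIELD(chip_select);

        /* 'enable' is the line level: active-low CS asserts on false */
        if (!enable)
                if_ctrl |= CS_ENABLE;
}

int main(void)
{
        example_set_cs(3, false);       /* assert CS3 */
        printf("IF_CTRL = 0x%02x\n", if_ctrl);
        return 0;
}
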
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 62a9297e96ac..66a173939be8 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -111,23 +111,24 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
111 * by using ->dma_running. 111 * by using ->dma_running.
112 */ 112 */
113 if (atomic_dec_and_test(&drv_data->dma_running)) { 113 if (atomic_dec_and_test(&drv_data->dma_running)) {
114 void __iomem *reg = drv_data->ioaddr;
115
116 /* 114 /*
117 * If the other CPU is still handling the ROR interrupt we 115 * If the other CPU is still handling the ROR interrupt we
118 * might not know about the error yet. So we re-check the 116 * might not know about the error yet. So we re-check the
119 * ROR bit here before we clear the status register. 117 * ROR bit here before we clear the status register.
120 */ 118 */
121 if (!error) { 119 if (!error) {
122 u32 status = read_SSSR(reg) & drv_data->mask_sr; 120 u32 status = pxa2xx_spi_read(drv_data, SSSR)
121 & drv_data->mask_sr;
123 error = status & SSSR_ROR; 122 error = status & SSSR_ROR;
124 } 123 }
125 124
126 /* Clear status & disable interrupts */ 125 /* Clear status & disable interrupts */
127 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 126 pxa2xx_spi_write(drv_data, SSCR1,
127 pxa2xx_spi_read(drv_data, SSCR1)
128 & ~drv_data->dma_cr1);
128 write_SSSR_CS(drv_data, drv_data->clear_sr); 129 write_SSSR_CS(drv_data, drv_data->clear_sr);
129 if (!pxa25x_ssp_comp(drv_data)) 130 if (!pxa25x_ssp_comp(drv_data))
130 write_SSTO(0, reg); 131 pxa2xx_spi_write(drv_data, SSTO, 0);
131 132
132 if (!error) { 133 if (!error) {
133 pxa2xx_spi_unmap_dma_buffers(drv_data); 134 pxa2xx_spi_unmap_dma_buffers(drv_data);
@@ -139,7 +140,9 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
139 msg->state = pxa2xx_spi_next_transfer(drv_data); 140 msg->state = pxa2xx_spi_next_transfer(drv_data);
140 } else { 141 } else {
141 /* In case we got an error we disable the SSP now */ 142 /* In case we got an error we disable the SSP now */
142 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 143 pxa2xx_spi_write(drv_data, SSCR0,
144 pxa2xx_spi_read(drv_data, SSCR0)
145 & ~SSCR0_SSE);
143 146
144 msg->state = ERROR_STATE; 147 msg->state = ERROR_STATE;
145 } 148 }
@@ -247,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
247{ 250{
248 u32 status; 251 u32 status;
249 252
250 status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr; 253 status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
251 if (status & SSSR_ROR) { 254 if (status & SSSR_ROR) {
252 dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); 255 dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
253 256
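
The pxa2xx hunks above, continued in the files that follow, swap the per-register read_SSSR()/write_SSCR1() helpers, which took a raw ioaddr, for a generic accessor pair keyed by register offset. Their definitions live outside this diff; a plausible shape, assuming the private state still carries an ioaddr field:

#include <linux/io.h>
#include <linux/types.h>

/* stand-in for the driver's private state; only ioaddr matters here */
struct example_drv_data {
        void __iomem *ioaddr;
};

static inline u32 example_spi_read(const struct example_drv_data *d, u32 reg)
{
        return __raw_readl(d->ioaddr + reg);
}

static inline void example_spi_write(const struct example_drv_data *d,
                                     u32 reg, u32 val)
{
        __raw_writel(val, d->ioaddr + reg);
}
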
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
index e8a26f25d5c0..2e0796a0003f 100644
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21#include <linux/delay.h> 17#include <linux/delay.h>
@@ -25,6 +21,7 @@
25#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
26#include <linux/spi/pxa2xx_spi.h> 22#include <linux/spi/pxa2xx_spi.h>
27 23
24#include <mach/dma.h>
28#include "spi-pxa2xx.h" 25#include "spi-pxa2xx.h"
29 26
30#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) 27#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
@@ -118,11 +115,11 @@ static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
118 drv_data->dma_mapped = 0; 115 drv_data->dma_mapped = 0;
119} 116}
120 117
121static int wait_ssp_rx_stall(void const __iomem *ioaddr) 118static int wait_ssp_rx_stall(struct driver_data *drv_data)
122{ 119{
123 unsigned long limit = loops_per_jiffy << 1; 120 unsigned long limit = loops_per_jiffy << 1;
124 121
125 while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) 122 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
126 cpu_relax(); 123 cpu_relax();
127 124
128 return limit; 125 return limit;
@@ -141,17 +138,18 @@ static int wait_dma_channel_stop(int channel)
141static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, 138static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
142 const char *msg) 139 const char *msg)
143{ 140{
144 void __iomem *reg = drv_data->ioaddr;
145
146 /* Stop and reset */ 141 /* Stop and reset */
147 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 142 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
148 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 143 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
149 write_SSSR_CS(drv_data, drv_data->clear_sr); 144 write_SSSR_CS(drv_data, drv_data->clear_sr);
150 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 145 pxa2xx_spi_write(drv_data, SSCR1,
146 pxa2xx_spi_read(drv_data, SSCR1)
147 & ~drv_data->dma_cr1);
151 if (!pxa25x_ssp_comp(drv_data)) 148 if (!pxa25x_ssp_comp(drv_data))
152 write_SSTO(0, reg); 149 pxa2xx_spi_write(drv_data, SSTO, 0);
153 pxa2xx_spi_flush(drv_data); 150 pxa2xx_spi_flush(drv_data);
154 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 151 pxa2xx_spi_write(drv_data, SSCR0,
152 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
155 153
156 pxa2xx_spi_unmap_dma_buffers(drv_data); 154 pxa2xx_spi_unmap_dma_buffers(drv_data);
157 155
@@ -163,11 +161,12 @@ static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
163 161
164static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) 162static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
165{ 163{
166 void __iomem *reg = drv_data->ioaddr;
167 struct spi_message *msg = drv_data->cur_msg; 164 struct spi_message *msg = drv_data->cur_msg;
168 165
169 /* Clear and disable interrupts on SSP and DMA channels*/ 166 /* Clear and disable interrupts on SSP and DMA channels*/
170 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 167 pxa2xx_spi_write(drv_data, SSCR1,
168 pxa2xx_spi_read(drv_data, SSCR1)
169 & ~drv_data->dma_cr1);
171 write_SSSR_CS(drv_data, drv_data->clear_sr); 170 write_SSSR_CS(drv_data, drv_data->clear_sr);
172 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 171 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
173 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 172 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
@@ -228,7 +227,7 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
228 && (drv_data->ssp_type == PXA25x_SSP)) { 227 && (drv_data->ssp_type == PXA25x_SSP)) {
229 228
230 /* Wait for rx to stall */ 229 /* Wait for rx to stall */
231 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 230 if (wait_ssp_rx_stall(drv_data) == 0)
232 dev_err(&drv_data->pdev->dev, 231 dev_err(&drv_data->pdev->dev,
233 "dma_handler: ssp rx stall failed\n"); 232 "dma_handler: ssp rx stall failed\n");
234 233
@@ -240,9 +239,8 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
240irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) 239irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
241{ 240{
242 u32 irq_status; 241 u32 irq_status;
243 void __iomem *reg = drv_data->ioaddr;
244 242
245 irq_status = read_SSSR(reg) & drv_data->mask_sr; 243 irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
246 if (irq_status & SSSR_ROR) { 244 if (irq_status & SSSR_ROR) {
247 pxa2xx_spi_dma_error_stop(drv_data, 245 pxa2xx_spi_dma_error_stop(drv_data,
248 "dma_transfer: fifo overrun"); 246 "dma_transfer: fifo overrun");
@@ -252,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
252 /* Check for false positive timeout */ 250 /* Check for false positive timeout */
253 if ((irq_status & SSSR_TINT) 251 if ((irq_status & SSSR_TINT)
254 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { 252 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
255 write_SSSR(SSSR_TINT, reg); 253 pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
256 return IRQ_HANDLED; 254 return IRQ_HANDLED;
257 } 255 }
258 256
@@ -261,7 +259,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
261 /* Clear and disable timeout interrupt, do the rest in 259 /* Clear and disable timeout interrupt, do the rest in
262 * dma_transfer_complete */ 260 * dma_transfer_complete */
263 if (!pxa25x_ssp_comp(drv_data)) 261 if (!pxa25x_ssp_comp(drv_data))
264 write_SSTO(0, reg); 262 pxa2xx_spi_write(drv_data, SSTO, 0);
265 263
266 /* finish this transfer, start the next */ 264 /* finish this transfer, start the next */
267 pxa2xx_spi_dma_transfer_complete(drv_data); 265 pxa2xx_spi_dma_transfer_complete(drv_data);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 23822e7df6c1..6f72ad01e041 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 14 */
19 15
20#include <linux/init.h> 16#include <linux/init.h>
@@ -45,8 +41,6 @@ MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
45MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
46MODULE_ALIAS("platform:pxa2xx-spi"); 42MODULE_ALIAS("platform:pxa2xx-spi");
47 43
48#define MAX_BUSES 3
49
50#define TIMOUT_DFLT 1000 44#define TIMOUT_DFLT 1000
51 45
52/* 46/*
@@ -162,7 +156,6 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
162 156
163static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data) 157static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
164{ 158{
165 void __iomem *reg = drv_data->ioaddr;
166 u32 mask; 159 u32 mask;
167 160
168 switch (drv_data->ssp_type) { 161 switch (drv_data->ssp_type) {
@@ -174,7 +167,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
174 break; 167 break;
175 } 168 }
176 169
177 return (read_SSSR(reg) & mask) == mask; 170 return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
178} 171}
179 172
180static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data, 173static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@@ -253,9 +246,6 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
253 unsigned offset = 0x400; 246 unsigned offset = 0x400;
254 u32 value, orig; 247 u32 value, orig;
255 248
256 if (!is_lpss_ssp(drv_data))
257 return;
258
259 /* 249 /*
260 * Perform auto-detection of the LPSS SSP private registers. They 250 * Perform auto-detection of the LPSS SSP private registers. They
261 * can be either at 1k or 2k offset from the base address. 251 * can be either at 1k or 2k offset from the base address.
@@ -304,9 +294,6 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
304{ 294{
305 u32 value; 295 u32 value;
306 296
307 if (!is_lpss_ssp(drv_data))
308 return;
309
310 value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); 297 value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
311 if (enable) 298 if (enable)
312 value &= ~SPI_CS_CONTROL_CS_HIGH; 299 value &= ~SPI_CS_CONTROL_CS_HIGH;
@@ -320,7 +307,7 @@ static void cs_assert(struct driver_data *drv_data)
320 struct chip_data *chip = drv_data->cur_chip; 307 struct chip_data *chip = drv_data->cur_chip;
321 308
322 if (drv_data->ssp_type == CE4100_SSP) { 309 if (drv_data->ssp_type == CE4100_SSP) {
323 write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); 310 pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
324 return; 311 return;
325 } 312 }
326 313
@@ -334,7 +321,8 @@ static void cs_assert(struct driver_data *drv_data)
334 return; 321 return;
335 } 322 }
336 323
337 lpss_ssp_cs_control(drv_data, true); 324 if (is_lpss_ssp(drv_data))
325 lpss_ssp_cs_control(drv_data, true);
338} 326}
339 327
340static void cs_deassert(struct driver_data *drv_data) 328static void cs_deassert(struct driver_data *drv_data)
@@ -354,20 +342,18 @@ static void cs_deassert(struct driver_data *drv_data)
354 return; 342 return;
355 } 343 }
356 344
357 lpss_ssp_cs_control(drv_data, false); 345 if (is_lpss_ssp(drv_data))
346 lpss_ssp_cs_control(drv_data, false);
358} 347}
359 348
360int pxa2xx_spi_flush(struct driver_data *drv_data) 349int pxa2xx_spi_flush(struct driver_data *drv_data)
361{ 350{
362 unsigned long limit = loops_per_jiffy << 1; 351 unsigned long limit = loops_per_jiffy << 1;
363 352
364 void __iomem *reg = drv_data->ioaddr;
365
366 do { 353 do {
367 while (read_SSSR(reg) & SSSR_RNE) { 354 while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
368 read_SSDR(reg); 355 pxa2xx_spi_read(drv_data, SSDR);
369 } 356 } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
370 } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
371 write_SSSR_CS(drv_data, SSSR_ROR); 357 write_SSSR_CS(drv_data, SSSR_ROR);
372 358
373 return limit; 359 return limit;
@@ -375,14 +361,13 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
375 361
376static int null_writer(struct driver_data *drv_data) 362static int null_writer(struct driver_data *drv_data)
377{ 363{
378 void __iomem *reg = drv_data->ioaddr;
379 u8 n_bytes = drv_data->n_bytes; 364 u8 n_bytes = drv_data->n_bytes;
380 365
381 if (pxa2xx_spi_txfifo_full(drv_data) 366 if (pxa2xx_spi_txfifo_full(drv_data)
382 || (drv_data->tx == drv_data->tx_end)) 367 || (drv_data->tx == drv_data->tx_end))
383 return 0; 368 return 0;
384 369
385 write_SSDR(0, reg); 370 pxa2xx_spi_write(drv_data, SSDR, 0);
386 drv_data->tx += n_bytes; 371 drv_data->tx += n_bytes;
387 372
388 return 1; 373 return 1;
@@ -390,12 +375,11 @@ static int null_writer(struct driver_data *drv_data)
390 375
391static int null_reader(struct driver_data *drv_data) 376static int null_reader(struct driver_data *drv_data)
392{ 377{
393 void __iomem *reg = drv_data->ioaddr;
394 u8 n_bytes = drv_data->n_bytes; 378 u8 n_bytes = drv_data->n_bytes;
395 379
396 while ((read_SSSR(reg) & SSSR_RNE) 380 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
397 && (drv_data->rx < drv_data->rx_end)) { 381 && (drv_data->rx < drv_data->rx_end)) {
398 read_SSDR(reg); 382 pxa2xx_spi_read(drv_data, SSDR);
399 drv_data->rx += n_bytes; 383 drv_data->rx += n_bytes;
400 } 384 }
401 385
@@ -404,13 +388,11 @@ static int null_reader(struct driver_data *drv_data)
404 388
405static int u8_writer(struct driver_data *drv_data) 389static int u8_writer(struct driver_data *drv_data)
406{ 390{
407 void __iomem *reg = drv_data->ioaddr;
408
409 if (pxa2xx_spi_txfifo_full(drv_data) 391 if (pxa2xx_spi_txfifo_full(drv_data)
410 || (drv_data->tx == drv_data->tx_end)) 392 || (drv_data->tx == drv_data->tx_end))
411 return 0; 393 return 0;
412 394
413 write_SSDR(*(u8 *)(drv_data->tx), reg); 395 pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
414 ++drv_data->tx; 396 ++drv_data->tx;
415 397
416 return 1; 398 return 1;
@@ -418,11 +400,9 @@ static int u8_writer(struct driver_data *drv_data)
418 400
419static int u8_reader(struct driver_data *drv_data) 401static int u8_reader(struct driver_data *drv_data)
420{ 402{
421 void __iomem *reg = drv_data->ioaddr; 403 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
422 404 && (drv_data->rx < drv_data->rx_end)) {
423 while ((read_SSSR(reg) & SSSR_RNE) 405 *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
424 && (drv_data->rx < drv_data->rx_end)) {
425 *(u8 *)(drv_data->rx) = read_SSDR(reg);
426 ++drv_data->rx; 406 ++drv_data->rx;
427 } 407 }
428 408
@@ -431,13 +411,11 @@ static int u8_reader(struct driver_data *drv_data)
431 411
432static int u16_writer(struct driver_data *drv_data) 412static int u16_writer(struct driver_data *drv_data)
433{ 413{
434 void __iomem *reg = drv_data->ioaddr;
435
436 if (pxa2xx_spi_txfifo_full(drv_data) 414 if (pxa2xx_spi_txfifo_full(drv_data)
437 || (drv_data->tx == drv_data->tx_end)) 415 || (drv_data->tx == drv_data->tx_end))
438 return 0; 416 return 0;
439 417
440 write_SSDR(*(u16 *)(drv_data->tx), reg); 418 pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
441 drv_data->tx += 2; 419 drv_data->tx += 2;
442 420
443 return 1; 421 return 1;
@@ -445,11 +423,9 @@ static int u16_writer(struct driver_data *drv_data)
445 423
446static int u16_reader(struct driver_data *drv_data) 424static int u16_reader(struct driver_data *drv_data)
447{ 425{
448 void __iomem *reg = drv_data->ioaddr; 426 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
449 427 && (drv_data->rx < drv_data->rx_end)) {
450 while ((read_SSSR(reg) & SSSR_RNE) 428 *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
451 && (drv_data->rx < drv_data->rx_end)) {
452 *(u16 *)(drv_data->rx) = read_SSDR(reg);
453 drv_data->rx += 2; 429 drv_data->rx += 2;
454 } 430 }
455 431
@@ -458,13 +434,11 @@ static int u16_reader(struct driver_data *drv_data)
458 434
459static int u32_writer(struct driver_data *drv_data) 435static int u32_writer(struct driver_data *drv_data)
460{ 436{
461 void __iomem *reg = drv_data->ioaddr;
462
463 if (pxa2xx_spi_txfifo_full(drv_data) 437 if (pxa2xx_spi_txfifo_full(drv_data)
464 || (drv_data->tx == drv_data->tx_end)) 438 || (drv_data->tx == drv_data->tx_end))
465 return 0; 439 return 0;
466 440
467 write_SSDR(*(u32 *)(drv_data->tx), reg); 441 pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
468 drv_data->tx += 4; 442 drv_data->tx += 4;
469 443
470 return 1; 444 return 1;
@@ -472,11 +446,9 @@ static int u32_writer(struct driver_data *drv_data)
472 446
473static int u32_reader(struct driver_data *drv_data) 447static int u32_reader(struct driver_data *drv_data)
474{ 448{
475 void __iomem *reg = drv_data->ioaddr; 449 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
476 450 && (drv_data->rx < drv_data->rx_end)) {
477 while ((read_SSSR(reg) & SSSR_RNE) 451 *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
478 && (drv_data->rx < drv_data->rx_end)) {
479 *(u32 *)(drv_data->rx) = read_SSDR(reg);
480 drv_data->rx += 4; 452 drv_data->rx += 4;
481 } 453 }
482 454
@@ -552,27 +524,25 @@ static void giveback(struct driver_data *drv_data)
552 524
553static void reset_sccr1(struct driver_data *drv_data) 525static void reset_sccr1(struct driver_data *drv_data)
554{ 526{
555 void __iomem *reg = drv_data->ioaddr;
556 struct chip_data *chip = drv_data->cur_chip; 527 struct chip_data *chip = drv_data->cur_chip;
557 u32 sccr1_reg; 528 u32 sccr1_reg;
558 529
559 sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; 530 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
560 sccr1_reg &= ~SSCR1_RFT; 531 sccr1_reg &= ~SSCR1_RFT;
561 sccr1_reg |= chip->threshold; 532 sccr1_reg |= chip->threshold;
562 write_SSCR1(sccr1_reg, reg); 533 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
563} 534}
564 535
565static void int_error_stop(struct driver_data *drv_data, const char* msg) 536static void int_error_stop(struct driver_data *drv_data, const char* msg)
566{ 537{
567 void __iomem *reg = drv_data->ioaddr;
568
569 /* Stop and reset SSP */ 538 /* Stop and reset SSP */
570 write_SSSR_CS(drv_data, drv_data->clear_sr); 539 write_SSSR_CS(drv_data, drv_data->clear_sr);
571 reset_sccr1(drv_data); 540 reset_sccr1(drv_data);
572 if (!pxa25x_ssp_comp(drv_data)) 541 if (!pxa25x_ssp_comp(drv_data))
573 write_SSTO(0, reg); 542 pxa2xx_spi_write(drv_data, SSTO, 0);
574 pxa2xx_spi_flush(drv_data); 543 pxa2xx_spi_flush(drv_data);
575 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 544 pxa2xx_spi_write(drv_data, SSCR0,
545 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
576 546
577 dev_err(&drv_data->pdev->dev, "%s\n", msg); 547 dev_err(&drv_data->pdev->dev, "%s\n", msg);
578 548
@@ -582,13 +552,11 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
582 552
583static void int_transfer_complete(struct driver_data *drv_data) 553static void int_transfer_complete(struct driver_data *drv_data)
584{ 554{
585 void __iomem *reg = drv_data->ioaddr;
586
587 /* Stop SSP */ 555 /* Stop SSP */
588 write_SSSR_CS(drv_data, drv_data->clear_sr); 556 write_SSSR_CS(drv_data, drv_data->clear_sr);
589 reset_sccr1(drv_data); 557 reset_sccr1(drv_data);
590 if (!pxa25x_ssp_comp(drv_data)) 558 if (!pxa25x_ssp_comp(drv_data))
591 write_SSTO(0, reg); 559 pxa2xx_spi_write(drv_data, SSTO, 0);
592 560
593 /* Update total byte transferred return count actual bytes read */ 561 /* Update total byte transferred return count actual bytes read */
594 drv_data->cur_msg->actual_length += drv_data->len - 562 drv_data->cur_msg->actual_length += drv_data->len -
@@ -607,12 +575,10 @@ static void int_transfer_complete(struct driver_data *drv_data)
607 575
608static irqreturn_t interrupt_transfer(struct driver_data *drv_data) 576static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
609{ 577{
610 void __iomem *reg = drv_data->ioaddr; 578 u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
579 drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
611 580
612 u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? 581 u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
613 drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
614
615 u32 irq_status = read_SSSR(reg) & irq_mask;
616 582
617 if (irq_status & SSSR_ROR) { 583 if (irq_status & SSSR_ROR) {
618 int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); 584 int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
@@ -620,7 +586,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
620 } 586 }
621 587
622 if (irq_status & SSSR_TINT) { 588 if (irq_status & SSSR_TINT) {
623 write_SSSR(SSSR_TINT, reg); 589 pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
624 if (drv_data->read(drv_data)) { 590 if (drv_data->read(drv_data)) {
625 int_transfer_complete(drv_data); 591 int_transfer_complete(drv_data);
626 return IRQ_HANDLED; 592 return IRQ_HANDLED;
@@ -644,7 +610,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
644 u32 bytes_left; 610 u32 bytes_left;
645 u32 sccr1_reg; 611 u32 sccr1_reg;
646 612
647 sccr1_reg = read_SSCR1(reg); 613 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
648 sccr1_reg &= ~SSCR1_TIE; 614 sccr1_reg &= ~SSCR1_TIE;
649 615
650 /* 616 /*
@@ -670,7 +636,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
670 636
671 pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre); 637 pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
672 } 638 }
673 write_SSCR1(sccr1_reg, reg); 639 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
674 } 640 }
675 641
676 /* We did something */ 642 /* We did something */
@@ -680,7 +646,6 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
680static irqreturn_t ssp_int(int irq, void *dev_id) 646static irqreturn_t ssp_int(int irq, void *dev_id)
681{ 647{
682 struct driver_data *drv_data = dev_id; 648 struct driver_data *drv_data = dev_id;
683 void __iomem *reg = drv_data->ioaddr;
684 u32 sccr1_reg; 649 u32 sccr1_reg;
685 u32 mask = drv_data->mask_sr; 650 u32 mask = drv_data->mask_sr;
686 u32 status; 651 u32 status;
@@ -700,11 +665,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
700 * are all set to one. That means that the device is already 665 * are all set to one. That means that the device is already
701 * powered off. 666 * powered off.
702 */ 667 */
703 status = read_SSSR(reg); 668 status = pxa2xx_spi_read(drv_data, SSSR);
704 if (status == ~0) 669 if (status == ~0)
705 return IRQ_NONE; 670 return IRQ_NONE;
706 671
707 sccr1_reg = read_SSCR1(reg); 672 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
708 673
709 /* Ignore possible writes if we don't need to write */ 674 /* Ignore possible writes if we don't need to write */
710 if (!(sccr1_reg & SSCR1_TIE)) 675 if (!(sccr1_reg & SSCR1_TIE))
@@ -715,10 +680,14 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
715 680
716 if (!drv_data->cur_msg) { 681 if (!drv_data->cur_msg) {
717 682
718 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 683 pxa2xx_spi_write(drv_data, SSCR0,
719 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 684 pxa2xx_spi_read(drv_data, SSCR0)
685 & ~SSCR0_SSE);
686 pxa2xx_spi_write(drv_data, SSCR1,
687 pxa2xx_spi_read(drv_data, SSCR1)
688 & ~drv_data->int_cr1);
720 if (!pxa25x_ssp_comp(drv_data)) 689 if (!pxa25x_ssp_comp(drv_data))
721 write_SSTO(0, reg); 690 pxa2xx_spi_write(drv_data, SSTO, 0);
722 write_SSSR_CS(drv_data, drv_data->clear_sr); 691 write_SSSR_CS(drv_data, drv_data->clear_sr);
723 692
724 dev_err(&drv_data->pdev->dev, 693 dev_err(&drv_data->pdev->dev,
@@ -787,7 +756,6 @@ static void pump_transfers(unsigned long data)
787 struct spi_transfer *transfer = NULL; 756 struct spi_transfer *transfer = NULL;
788 struct spi_transfer *previous = NULL; 757 struct spi_transfer *previous = NULL;
789 struct chip_data *chip = NULL; 758 struct chip_data *chip = NULL;
790 void __iomem *reg = drv_data->ioaddr;
791 u32 clk_div = 0; 759 u32 clk_div = 0;
792 u8 bits = 0; 760 u8 bits = 0;
793 u32 speed = 0; 761 u32 speed = 0;
@@ -931,7 +899,7 @@ static void pump_transfers(unsigned long data)
931 899
932 /* Clear status and start DMA engine */ 900 /* Clear status and start DMA engine */
933 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; 901 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
934 write_SSSR(drv_data->clear_sr, reg); 902 pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
935 903
936 pxa2xx_spi_dma_start(drv_data); 904 pxa2xx_spi_dma_start(drv_data);
937 } else { 905 } else {
@@ -944,39 +912,43 @@ static void pump_transfers(unsigned long data)
944 } 912 }
945 913
946 if (is_lpss_ssp(drv_data)) { 914 if (is_lpss_ssp(drv_data)) {
947 if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold) 915 if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
948 write_SSIRF(chip->lpss_rx_threshold, reg); 916 != chip->lpss_rx_threshold)
949 if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold) 917 pxa2xx_spi_write(drv_data, SSIRF,
950 write_SSITF(chip->lpss_tx_threshold, reg); 918 chip->lpss_rx_threshold);
919 if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
920 != chip->lpss_tx_threshold)
921 pxa2xx_spi_write(drv_data, SSITF,
922 chip->lpss_tx_threshold);
951 } 923 }
952 924
953 if (is_quark_x1000_ssp(drv_data) && 925 if (is_quark_x1000_ssp(drv_data) &&
954 (read_DDS_RATE(reg) != chip->dds_rate)) 926 (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
955 write_DDS_RATE(chip->dds_rate, reg); 927 pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
956 928
957 /* see if we need to reload the config registers */ 929 /* see if we need to reload the config registers */
958 if ((read_SSCR0(reg) != cr0) || 930 if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
959 (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) { 931 || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
960 932 != (cr1 & change_mask)) {
961 /* stop the SSP, and update the other bits */ 933 /* stop the SSP, and update the other bits */
962 write_SSCR0(cr0 & ~SSCR0_SSE, reg); 934 pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
963 if (!pxa25x_ssp_comp(drv_data)) 935 if (!pxa25x_ssp_comp(drv_data))
964 write_SSTO(chip->timeout, reg); 936 pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
965 /* first set CR1 without interrupt and service enables */ 937 /* first set CR1 without interrupt and service enables */
966 write_SSCR1(cr1 & change_mask, reg); 938 pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
967 /* restart the SSP */ 939 /* restart the SSP */
968 write_SSCR0(cr0, reg); 940 pxa2xx_spi_write(drv_data, SSCR0, cr0);
969 941
970 } else { 942 } else {
971 if (!pxa25x_ssp_comp(drv_data)) 943 if (!pxa25x_ssp_comp(drv_data))
972 write_SSTO(chip->timeout, reg); 944 pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
973 } 945 }
974 946
975 cs_assert(drv_data); 947 cs_assert(drv_data);
976 948
977 /* after chip select, release the data by enabling service 949 /* after chip select, release the data by enabling service
978 * requests and interrupts, without changing any mode bits */ 950 * requests and interrupts, without changing any mode bits */
979 write_SSCR1(cr1, reg); 951 pxa2xx_spi_write(drv_data, SSCR1, cr1);
980} 952}
981 953
982static int pxa2xx_spi_transfer_one_message(struct spi_master *master, 954static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
@@ -1005,8 +977,8 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
1005 struct driver_data *drv_data = spi_master_get_devdata(master); 977 struct driver_data *drv_data = spi_master_get_devdata(master);
1006 978
1007 /* Disable the SSP now */ 979 /* Disable the SSP now */
1008 write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, 980 pxa2xx_spi_write(drv_data, SSCR0,
1009 drv_data->ioaddr); 981 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
1010 982
1011 return 0; 983 return 0;
1012} 984}
@@ -1289,6 +1261,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1289 struct driver_data *drv_data; 1261 struct driver_data *drv_data;
1290 struct ssp_device *ssp; 1262 struct ssp_device *ssp;
1291 int status; 1263 int status;
1264 u32 tmp;
1292 1265
1293 platform_info = dev_get_platdata(dev); 1266 platform_info = dev_get_platdata(dev);
1294 if (!platform_info) { 1267 if (!platform_info) {
@@ -1386,38 +1359,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1386 drv_data->max_clk_rate = clk_get_rate(ssp->clk); 1359 drv_data->max_clk_rate = clk_get_rate(ssp->clk);
1387 1360
1388 /* Load default SSP configuration */ 1361 /* Load default SSP configuration */
1389 write_SSCR0(0, drv_data->ioaddr); 1362 pxa2xx_spi_write(drv_data, SSCR0, 0);
1390 switch (drv_data->ssp_type) { 1363 switch (drv_data->ssp_type) {
1391 case QUARK_X1000_SSP: 1364 case QUARK_X1000_SSP:
1392 write_SSCR1(QUARK_X1000_SSCR1_RxTresh( 1365 tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
1393 RX_THRESH_QUARK_X1000_DFLT) | 1366 | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
1394 QUARK_X1000_SSCR1_TxTresh( 1367 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1395 TX_THRESH_QUARK_X1000_DFLT),
1396 drv_data->ioaddr);
1397 1368
1398 /* using the Motorola SPI protocol and use 8 bit frame */ 1369 /* using the Motorola SPI protocol and use 8 bit frame */
1399 write_SSCR0(QUARK_X1000_SSCR0_Motorola 1370 pxa2xx_spi_write(drv_data, SSCR0,
1400 | QUARK_X1000_SSCR0_DataSize(8), 1371 QUARK_X1000_SSCR0_Motorola
1401 drv_data->ioaddr); 1372 | QUARK_X1000_SSCR0_DataSize(8));
1402 break; 1373 break;
1403 default: 1374 default:
1404 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | 1375 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
1405 SSCR1_TxTresh(TX_THRESH_DFLT), 1376 SSCR1_TxTresh(TX_THRESH_DFLT);
1406 drv_data->ioaddr); 1377 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1407 write_SSCR0(SSCR0_SCR(2) 1378 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
1408 | SSCR0_Motorola 1379 pxa2xx_spi_write(drv_data, SSCR0, tmp);
1409 | SSCR0_DataSize(8),
1410 drv_data->ioaddr);
1411 break; 1380 break;
1412 } 1381 }
1413 1382
1414 if (!pxa25x_ssp_comp(drv_data)) 1383 if (!pxa25x_ssp_comp(drv_data))
1415 write_SSTO(0, drv_data->ioaddr); 1384 pxa2xx_spi_write(drv_data, SSTO, 0);
1416 1385
1417 if (!is_quark_x1000_ssp(drv_data)) 1386 if (!is_quark_x1000_ssp(drv_data))
1418 write_SSPSP(0, drv_data->ioaddr); 1387 pxa2xx_spi_write(drv_data, SSPSP, 0);
1419 1388
1420 lpss_ssp_setup(drv_data); 1389 if (is_lpss_ssp(drv_data))
1390 lpss_ssp_setup(drv_data);
1421 1391
1422 tasklet_init(&drv_data->pump_transfers, pump_transfers, 1392 tasklet_init(&drv_data->pump_transfers, pump_transfers,
1423 (unsigned long)drv_data); 1393 (unsigned long)drv_data);
@@ -1460,7 +1430,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1460 pm_runtime_get_sync(&pdev->dev); 1430 pm_runtime_get_sync(&pdev->dev);
1461 1431
1462 /* Disable the SSP at the peripheral and SOC level */ 1432 /* Disable the SSP at the peripheral and SOC level */
1463 write_SSCR0(0, drv_data->ioaddr); 1433 pxa2xx_spi_write(drv_data, SSCR0, 0);
1464 clk_disable_unprepare(ssp->clk); 1434 clk_disable_unprepare(ssp->clk);
1465 1435
1466 /* Release DMA */ 1436 /* Release DMA */
@@ -1497,7 +1467,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
1497 status = spi_master_suspend(drv_data->master); 1467 status = spi_master_suspend(drv_data->master);
1498 if (status != 0) 1468 if (status != 0)
1499 return status; 1469 return status;
1500 write_SSCR0(0, drv_data->ioaddr); 1470 pxa2xx_spi_write(drv_data, SSCR0, 0);
1501 1471
1502 if (!pm_runtime_suspended(dev)) 1472 if (!pm_runtime_suspended(dev))
1503 clk_disable_unprepare(ssp->clk); 1473 clk_disable_unprepare(ssp->clk);
@@ -1518,7 +1488,8 @@ static int pxa2xx_spi_resume(struct device *dev)
1518 clk_prepare_enable(ssp->clk); 1488 clk_prepare_enable(ssp->clk);
1519 1489
1520 /* Restore LPSS private register bits */ 1490 /* Restore LPSS private register bits */
1521 lpss_ssp_setup(drv_data); 1491 if (is_lpss_ssp(drv_data))
1492 lpss_ssp_setup(drv_data);
1522 1493
1523 /* Start the queue running */ 1494 /* Start the queue running */
1524 status = spi_master_resume(drv_data->master); 1495 status = spi_master_resume(drv_data->master);
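
The spi-pxa2xx changes above follow two patterns: register access goes through the new pxa2xx_spi_read()/pxa2xx_spi_write() helpers instead of per-register wrappers, and the is_lpss_ssp() check is hoisted out of lpss_ssp_setup()/lpss_ssp_cs_control() into every caller, so those helpers now assume an LPSS SSP. A minimal standalone sketch of the hoisted-guard shape (the bit position and flag field are invented for illustration; only the pattern is taken from the diff):

    #include <stdint.h>
    #include <stdio.h>

    #define SSCR0_SSE	(1u << 7)	/* invented bit position */

    struct driver_data { uint32_t sscr0; int is_lpss; };

    static int is_lpss_ssp(const struct driver_data *d) { return d->is_lpss; }

    /* Touches LPSS private registers in the real driver; after this patch
     * it must never run on a non-LPSS port, hence the caller-side guard. */
    static void lpss_ssp_cs_control(struct driver_data *d, int enable)
    {
        (void)d;
        printf("cs %s\n", enable ? "asserted" : "deasserted");
    }

    int main(void)
    {
        struct driver_data d = { .sscr0 = SSCR0_SSE, .is_lpss = 0 };

        if (is_lpss_ssp(&d))		/* guard hoisted to the caller */
            lpss_ssp_cs_control(&d, 1);	/* skipped on non-LPSS ports */

        d.sscr0 &= ~SSCR0_SSE;		/* the recurring "disable SSP" RMW */
        return 0;
    }

Moving the check to the call sites makes the LPSS-only assumption visible where the helpers are used, at the cost of repeating the guard.
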
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 6bec59c90cd4..85a58c906869 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -115,23 +115,17 @@ struct chip_data {
115 void (*cs_control)(u32 command); 115 void (*cs_control)(u32 command);
116}; 116};
117 117
118#define DEFINE_SSP_REG(reg, off) \ 118static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
119static inline u32 read_##reg(void const __iomem *p) \ 119 unsigned reg)
120{ return __raw_readl(p + (off)); } \ 120{
121\ 121 return __raw_readl(drv_data->ioaddr + reg);
122static inline void write_##reg(u32 v, void __iomem *p) \ 122}
123{ __raw_writel(v, p + (off)); } 123
124 124static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
125DEFINE_SSP_REG(SSCR0, 0x00) 125 unsigned reg, u32 val)
126DEFINE_SSP_REG(SSCR1, 0x04) 126{
127DEFINE_SSP_REG(SSSR, 0x08) 127 __raw_writel(val, drv_data->ioaddr + reg);
128DEFINE_SSP_REG(SSITR, 0x0c) 128}
129DEFINE_SSP_REG(SSDR, 0x10)
130DEFINE_SSP_REG(DDS_RATE, 0x28) /* DDS Clock Rate */
131DEFINE_SSP_REG(SSTO, 0x28)
132DEFINE_SSP_REG(SSPSP, 0x2c)
133DEFINE_SSP_REG(SSITF, SSITF)
134DEFINE_SSP_REG(SSIRF, SSIRF)
135 129
136#define START_STATE ((void *)0) 130#define START_STATE ((void *)0)
137#define RUNNING_STATE ((void *)1) 131#define RUNNING_STATE ((void *)1)
@@ -155,13 +149,11 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
155 149
156static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) 150static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
157{ 151{
158 void __iomem *reg = drv_data->ioaddr;
159
160 if (drv_data->ssp_type == CE4100_SSP || 152 if (drv_data->ssp_type == CE4100_SSP ||
161 drv_data->ssp_type == QUARK_X1000_SSP) 153 drv_data->ssp_type == QUARK_X1000_SSP)
162 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; 154 val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
163 155
164 write_SSSR(val, reg); 156 pxa2xx_spi_write(drv_data, SSSR, val);
165} 157}
166 158
167extern int pxa2xx_spi_flush(struct driver_data *drv_data); 159extern int pxa2xx_spi_flush(struct driver_data *drv_data);
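
The header rewrite above collapses the DEFINE_SSP_REG() macro family into one read/write pair keyed by register offset. A portable sketch of the same shape, where plain memory stands in for __iomem and __raw_readl()/__raw_writel(), and only a subset of the offsets is shown:

    #include <stdint.h>
    #include <string.h>

    /* Register offsets as in the header above (subset only). */
    enum { SSCR0 = 0x00, SSCR1 = 0x04, SSSR = 0x08, SSDR = 0x10 };

    struct driver_data { uint8_t ioaddr[0x40]; };

    static inline uint32_t pxa2xx_spi_read(const struct driver_data *d,
                                           unsigned int reg)
    {
        uint32_t v;

        memcpy(&v, d->ioaddr + reg, sizeof(v));	/* __raw_readl() in the driver */
        return v;
    }

    static inline void pxa2xx_spi_write(struct driver_data *d,
                                        unsigned int reg, uint32_t val)
    {
        memcpy(d->ioaddr + reg, &val, sizeof(val));	/* __raw_writel() */
    }

One pair of accessors keyed by offset scales to new registers (DDS_RATE, SSIRF, SSITF) without minting a new wrapper per register.
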
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index e7fb5a0d2e8d..ff9cdbdb6672 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -337,7 +337,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
337static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) 337static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
338{ 338{
339 struct spi_qup *controller = spi_master_get_devdata(spi->master); 339 struct spi_qup *controller = spi_master_get_devdata(spi->master);
340 u32 config, iomode, mode; 340 u32 config, iomode, mode, control;
341 int ret, n_words, w_size; 341 int ret, n_words, w_size;
342 342
343 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { 343 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
@@ -392,6 +392,15 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
392 392
393 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); 393 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
394 394
395 control = readl_relaxed(controller->base + SPI_IO_CONTROL);
396
397 if (spi->mode & SPI_CPOL)
398 control |= SPI_IO_C_CLK_IDLE_HIGH;
399 else
400 control &= ~SPI_IO_C_CLK_IDLE_HIGH;
401
402 writel_relaxed(control, controller->base + SPI_IO_CONTROL);
403
395 config = readl_relaxed(controller->base + SPI_CONFIG); 404 config = readl_relaxed(controller->base + SPI_CONFIG);
396 405
397 if (spi->mode & SPI_LOOP) 406 if (spi->mode & SPI_LOOP)
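
The spi-qup hunk above programs clock polarity per transfer with a read-modify-write of the IO control register. A toy sketch of just that update; the SPI_IO_C_CLK_IDLE_HIGH bit position is invented here, and only the RMW pattern is taken from the hunk:

    #include <stdint.h>

    #define SPI_IO_C_CLK_IDLE_HIGH	(1u << 10)	/* invented bit position */
    #define SPI_CPOL		(1u << 1)	/* per the SPI core's mode flags */

    static uint32_t set_clk_polarity(uint32_t control, uint32_t mode)
    {
        if (mode & SPI_CPOL)
            control |= SPI_IO_C_CLK_IDLE_HIGH;	/* clock idles high */
        else
            control &= ~SPI_IO_C_CLK_IDLE_HIGH;	/* clock idles low */
        return control;
    }
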
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index daabbabd26b0..1a777dc261d6 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -437,6 +437,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
437 rs->state &= ~TXBUSY; 437 rs->state &= ~TXBUSY;
438 spin_unlock_irqrestore(&rs->lock, flags); 438 spin_unlock_irqrestore(&rs->lock, flags);
439 439
440 rxdesc = NULL;
440 if (rs->rx) { 441 if (rs->rx) {
441 rxconf.direction = rs->dma_rx.direction; 442 rxconf.direction = rs->dma_rx.direction;
442 rxconf.src_addr = rs->dma_rx.addr; 443 rxconf.src_addr = rs->dma_rx.addr;
@@ -453,6 +454,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
453 rxdesc->callback_param = rs; 454 rxdesc->callback_param = rs;
454 } 455 }
455 456
457 txdesc = NULL;
456 if (rs->tx) { 458 if (rs->tx) {
457 txconf.direction = rs->dma_tx.direction; 459 txconf.direction = rs->dma_tx.direction;
458 txconf.dst_addr = rs->dma_tx.addr; 460 txconf.dst_addr = rs->dma_tx.addr;
@@ -470,7 +472,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
470 } 472 }
471 473
472 /* rx must be started before tx due to spi instinct */ 474 /* rx must be started before tx due to spi instinct */
473 if (rs->rx) { 475 if (rxdesc) {
474 spin_lock_irqsave(&rs->lock, flags); 476 spin_lock_irqsave(&rs->lock, flags);
475 rs->state |= RXBUSY; 477 rs->state |= RXBUSY;
476 spin_unlock_irqrestore(&rs->lock, flags); 478 spin_unlock_irqrestore(&rs->lock, flags);
@@ -478,7 +480,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
478 dma_async_issue_pending(rs->dma_rx.ch); 480 dma_async_issue_pending(rs->dma_rx.ch);
479 } 481 }
480 482
481 if (rs->tx) { 483 if (txdesc) {
482 spin_lock_irqsave(&rs->lock, flags); 484 spin_lock_irqsave(&rs->lock, flags);
483 rs->state |= TXBUSY; 485 rs->state |= TXBUSY;
484 spin_unlock_irqrestore(&rs->lock, flags); 486 spin_unlock_irqrestore(&rs->lock, flags);
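
The rockchip change above initializes rxdesc/txdesc to NULL and gates the issue step on the descriptor rather than on rs->rx/rs->tx, so the issue step only runs for descriptors actually prepared and the compiler can no longer flag a possibly uninitialized use when only one direction is active. A compilable toy model of the control flow, with prep() standing in for dmaengine_prep_slave_sg():

    #include <stddef.h>

    struct desc { int dummy; };

    /* Stands in for dmaengine_prep_slave_sg(), which may return NULL. */
    static struct desc *prep(int ok)
    {
        static struct desc d;
        return ok ? &d : NULL;
    }

    static void issue(struct desc *d) { (void)d; }

    static void prepare_dma(int want_rx, int want_tx,
                            int rx_prep_ok, int tx_prep_ok)
    {
        struct desc *rxdesc = NULL;	/* left uninitialized before the fix */
        struct desc *txdesc = NULL;

        if (want_rx)
            rxdesc = prep(rx_prep_ok);
        if (want_tx)
            txdesc = prep(tx_prep_ok);

        if (rxdesc)		/* was: if (rs->rx) */
            issue(rxdesc);
        if (txdesc)		/* was: if (rs->tx) */
            issue(txdesc);
    }
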
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2071f788c6fb..46ce47076e63 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -15,11 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */ 18 */
24 19
25#include <linux/module.h> 20#include <linux/module.h>
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 37b19836f5cb..9231c34b5a5c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 14 */
19 15
20#include <linux/init.h> 16#include <linux/init.h>
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 237f2e7a7179..5a56acf8a43e 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index fc29233d0650..20e800e70442 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -16,11 +16,6 @@
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 *
24 */ 19 */
25 20
26#include <linux/clk.h> 21#include <linux/clk.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 3ab7a21445fc..e57eec0b2f46 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -82,6 +82,8 @@ struct sh_msiof_spi_priv {
82#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ 82#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
83#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ 83#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
84#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ 84#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
85#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
86#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
85#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */ 87#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
86#define MDR1_FLD_SHIFT 2 88#define MDR1_FLD_SHIFT 2
87#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ 89#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
@@ -241,42 +243,80 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
241 243
242static struct { 244static struct {
243 unsigned short div; 245 unsigned short div;
244 unsigned short scr; 246 unsigned short brdv;
245} const sh_msiof_spi_clk_table[] = { 247} const sh_msiof_spi_div_table[] = {
246 { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 }, 248 { 1, SCR_BRDV_DIV_1 },
247 { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 }, 249 { 2, SCR_BRDV_DIV_2 },
248 { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 }, 250 { 4, SCR_BRDV_DIV_4 },
249 { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 }, 251 { 8, SCR_BRDV_DIV_8 },
250 { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 }, 252 { 16, SCR_BRDV_DIV_16 },
251 { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 }, 253 { 32, SCR_BRDV_DIV_32 },
252 { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 },
253 { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 },
254 { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 },
255 { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 },
256 { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
257}; 254};
258 255
259static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, 256static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
260 unsigned long parent_rate, u32 spi_hz) 257 unsigned long parent_rate, u32 spi_hz)
261{ 258{
262 unsigned long div = 1024; 259 unsigned long div = 1024;
260 u32 brps, scr;
263 size_t k; 261 size_t k;
264 262
265 if (!WARN_ON(!spi_hz || !parent_rate)) 263 if (!WARN_ON(!spi_hz || !parent_rate))
266 div = DIV_ROUND_UP(parent_rate, spi_hz); 264 div = DIV_ROUND_UP(parent_rate, spi_hz);
267 265
268 /* TODO: make more fine grained */ 266 for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
269 267 brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
270 for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { 268 if (brps <= 32) /* max of brdv is 32 */
271 if (sh_msiof_spi_clk_table[k].div >= div)
272 break; 269 break;
273 } 270 }
274 271
275 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); 272 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
276 273
277 sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); 274 scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
275 sh_msiof_write(p, TSCR, scr);
278 if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX)) 276 if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
279 sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); 277 sh_msiof_write(p, RSCR, scr);
278}
279
280static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
281{
282 /*
283 * DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl
284 * b'000 : 0
285 * b'001 : 100
286 * b'010 : 200
287 * b'011 (SYNCDL only) : 300
288 * b'101 : 50
289 * b'110 : 150
290 */
291 if (dtdl_or_syncdl % 100)
292 return dtdl_or_syncdl / 100 + 5;
293 else
294 return dtdl_or_syncdl / 100;
295}
296
297static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
298{
299 u32 val;
300
301 if (!p->info)
302 return 0;
303
304 /* check if DTDL and SYNCDL is allowed value */
305 if (p->info->dtdl > 200 || p->info->syncdl > 300) {
306 dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
307 return 0;
308 }
309
310 /* check if the sum of DTDL and SYNCDL becomes an integer value */
311 if ((p->info->dtdl + p->info->syncdl) % 100) {
312 dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n");
313 return 0;
314 }
315
316 val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
317 val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
318
319 return val;
280} 320}
281 321
282static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, 322static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
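
The new divider search above derives the prescaler from the requested division instead of choosing from a fixed list of eleven div/scr pairs, so any ratio up to 32 * 32 = 1024 can be hit or closely approximated: for each power-of-two BRDV, BRPS = DIV_ROUND_UP(div, brdv), and the first BRDV whose BRPS fits in 1..32 wins. A standalone model of the math, assuming a 48 MHz parent clock and a 100 kHz target purely for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    static const unsigned short brdv_table[] = { 1, 2, 4, 8, 16, 32 };

    int main(void)
    {
        unsigned long parent = 48000000, target = 100000;	/* assumed rates */
        unsigned long div = DIV_ROUND_UP(parent, target);	/* 480 */
        unsigned long brps = 0;
        size_t k;

        for (k = 0; k < sizeof(brdv_table) / sizeof(brdv_table[0]); k++) {
            brps = DIV_ROUND_UP(div, brdv_table[k]);
            if (brps <= 32)		/* max of BRPS is 32 */
                break;
        }
        if (k == sizeof(brdv_table) / sizeof(brdv_table[0]))
            k--;			/* clamp, as the driver does */

        printf("brdv=%d brps=%lu -> %lu Hz\n",
               brdv_table[k], brps, parent / (brdv_table[k] * brps));
        return 0;	/* prints: brdv=16 brps=30 -> 100000 Hz */
    }

Here div = 480 walks BRDV 1, 2, 4, 8 (BRPS 480, 240, 120, 60, all over 32) and stops at BRDV 16 with BRPS 30, giving 48 MHz / (16 * 30) = 100 kHz exactly — a ratio the old table could only round up to 512.
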
@@ -296,6 +336,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
296 tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; 336 tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
297 tmp |= !cs_high << MDR1_SYNCAC_SHIFT; 337 tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
298 tmp |= lsb_first << MDR1_BITLSB_SHIFT; 338 tmp |= lsb_first << MDR1_BITLSB_SHIFT;
339 tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
299 sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON); 340 sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
300 if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) { 341 if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
301 /* These bits are reserved if RX needs TX */ 342 /* These bits are reserved if RX needs TX */
@@ -501,7 +542,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
501 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 542 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
502 543
503 544
504 pm_runtime_put_sync(&p->pdev->dev); 545 pm_runtime_put(&p->pdev->dev);
505 546
506 return 0; 547 return 0;
507} 548}
@@ -595,8 +636,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
595 } 636 }
596 637
597 /* wait for tx fifo to be emptied / rx fifo to be filled */ 638 /* wait for tx fifo to be emptied / rx fifo to be filled */
598 ret = wait_for_completion_timeout(&p->done, HZ); 639 if (!wait_for_completion_timeout(&p->done, HZ)) {
599 if (!ret) {
600 dev_err(&p->pdev->dev, "PIO timeout\n"); 640 dev_err(&p->pdev->dev, "PIO timeout\n");
601 ret = -ETIMEDOUT; 641 ret = -ETIMEDOUT;
602 goto stop_reset; 642 goto stop_reset;
@@ -706,8 +746,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
706 } 746 }
707 747
708 /* wait for tx fifo to be emptied / rx fifo to be filled */ 748 /* wait for tx fifo to be emptied / rx fifo to be filled */
709 ret = wait_for_completion_timeout(&p->done, HZ); 749 if (!wait_for_completion_timeout(&p->done, HZ)) {
710 if (!ret) {
711 dev_err(&p->pdev->dev, "DMA timeout\n"); 750 dev_err(&p->pdev->dev, "DMA timeout\n");
712 ret = -ETIMEDOUT; 751 ret = -ETIMEDOUT;
713 goto stop_reset; 752 goto stop_reset;
@@ -957,6 +996,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
957 &info->tx_fifo_override); 996 &info->tx_fifo_override);
958 of_property_read_u32(np, "renesas,rx-fifo-size", 997 of_property_read_u32(np, "renesas,rx-fifo-size",
959 &info->rx_fifo_override); 998 &info->rx_fifo_override);
999 of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
1000 of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
960 1001
961 info->num_chipselect = num_cs; 1002 info->num_chipselect = num_cs;
962 1003
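
The new renesas,dtdl and renesas,syncdl properties read above are expressed in percent of a clock cycle. Whole cycles encode directly as value/100, while the half-cycle values 50 and 150 land in the 5/6 slots of the field — which is all sh_msiof_get_delay_bit() does. A self-checking sketch of that mapping:

    #include <assert.h>

    static unsigned int get_delay_bit(unsigned int d)
    {
        if (d % 100)
            return d / 100 + 5;	/* 50 -> 5, 150 -> 6 */
        return d / 100;		/* 0 -> 0, 100 -> 1, 200 -> 2, 300 -> 3 */
    }

    int main(void)
    {
        assert(get_delay_bit(50)  == 5);
        assert(get_delay_bit(150) == 6);
        assert(get_delay_bit(200) == 2);
        return 0;
    }

This also explains the two sanity checks in sh_msiof_spi_get_dtdl_and_syncdl(): values above 200 (DTDL) or 300 (SYNCDL) have no encoding, and the sum of the two delays must be a whole number of cycles, i.e. a multiple of 100.
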
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 1cfc906dd174..502501187c9e 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -14,11 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 *
22 */ 17 */
23 18
24#include <linux/module.h> 19#include <linux/module.h>
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index d075191476f0..f5715c9f68b0 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -818,7 +818,6 @@ static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
818 818
819static const struct of_device_id spi_sirfsoc_of_match[] = { 819static const struct of_device_id spi_sirfsoc_of_match[] = {
820 { .compatible = "sirf,prima2-spi", }, 820 { .compatible = "sirf,prima2-spi", },
821 { .compatible = "sirf,marco-spi", },
822 {} 821 {}
823}; 822};
824MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); 823MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
new file mode 100644
index 000000000000..2faeaa7b57a8
--- /dev/null
+++ b/drivers/spi/spi-st-ssc4.c
@@ -0,0 +1,504 @@
1/*
2 * Copyright (c) 2008-2014 STMicroelectronics Limited
3 *
4 * Author: Angus Clark <Angus.Clark@st.com>
5 * Patrice Chotard <patrice.chotard@st.com>
6 * Lee Jones <lee.jones@linaro.org>
7 *
8 * SPI master mode controller driver, used in STMicroelectronics devices.
9 *
10 * May be copied or modified under the terms of the GNU General Public
11 * License Version 2.0 only. See linux/COPYING for more information.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/pinctrl/consumer.h>
20#include <linux/platform_device.h>
21#include <linux/of.h>
22#include <linux/of_gpio.h>
23#include <linux/of_irq.h>
24#include <linux/pm_runtime.h>
25#include <linux/spi/spi.h>
26#include <linux/spi/spi_bitbang.h>
27
28/* SSC registers */
29#define SSC_BRG 0x000
30#define SSC_TBUF 0x004
31#define SSC_RBUF 0x008
32#define SSC_CTL 0x00C
33#define SSC_IEN 0x010
34#define SSC_I2C 0x018
35
36/* SSC Control */
37#define SSC_CTL_DATA_WIDTH_9 0x8
38#define SSC_CTL_DATA_WIDTH_MSK 0xf
39#define SSC_CTL_BM 0xf
40#define SSC_CTL_HB BIT(4)
41#define SSC_CTL_PH BIT(5)
42#define SSC_CTL_PO BIT(6)
43#define SSC_CTL_SR BIT(7)
44#define SSC_CTL_MS BIT(8)
45#define SSC_CTL_EN BIT(9)
46#define SSC_CTL_LPB BIT(10)
47#define SSC_CTL_EN_TX_FIFO BIT(11)
48#define SSC_CTL_EN_RX_FIFO BIT(12)
49#define SSC_CTL_EN_CLST_RX BIT(13)
50
51/* SSC Interrupt Enable */
52#define SSC_IEN_TEEN BIT(2)
53
54#define FIFO_SIZE 8
55
56struct spi_st {
57 /* SSC SPI Controller */
58 void __iomem *base;
59 struct clk *clk;
60 struct device *dev;
61
62 /* SSC SPI current transaction */
63 const u8 *tx_ptr;
64 u8 *rx_ptr;
65 u16 bytes_per_word;
66 unsigned int words_remaining;
67 unsigned int baud;
68 struct completion done;
69};
70
71static int spi_st_clk_enable(struct spi_st *spi_st)
72{
73 /*
74 * Current platforms use one of the core clocks for SPI and I2C.
75 * If we attempt to disable the clock, the system will hang.
76 *
77 * TODO: Remove this when platform supports power domains.
78 */
79 return 0;
80
81 return clk_prepare_enable(spi_st->clk);
82}
83
84static void spi_st_clk_disable(struct spi_st *spi_st)
85{
86 /*
87 * Current platforms use one of the core clocks for SPI and I2C.
88 * If we attempt to disable the clock, the system will hang.
89 *
90 * TODO: Remove this when platform supports power domains.
91 */
92 return;
93
94 clk_disable_unprepare(spi_st->clk);
95}
96
97/* Load the TX FIFO */
98static void ssc_write_tx_fifo(struct spi_st *spi_st)
99{
100 unsigned int count, i;
101 uint32_t word = 0;
102
103 if (spi_st->words_remaining > FIFO_SIZE)
104 count = FIFO_SIZE;
105 else
106 count = spi_st->words_remaining;
107
108 for (i = 0; i < count; i++) {
109 if (spi_st->tx_ptr) {
110 if (spi_st->bytes_per_word == 1) {
111 word = *spi_st->tx_ptr++;
112 } else {
113 word = *spi_st->tx_ptr++;
114 word = *spi_st->tx_ptr++ | (word << 8);
115 }
116 }
117 writel_relaxed(word, spi_st->base + SSC_TBUF);
118 }
119}
120
121/* Read the RX FIFO */
122static void ssc_read_rx_fifo(struct spi_st *spi_st)
123{
124 unsigned int count, i;
125 uint32_t word = 0;
126
127 if (spi_st->words_remaining > FIFO_SIZE)
128 count = FIFO_SIZE;
129 else
130 count = spi_st->words_remaining;
131
132 for (i = 0; i < count; i++) {
133 word = readl_relaxed(spi_st->base + SSC_RBUF);
134
135 if (spi_st->rx_ptr) {
136 if (spi_st->bytes_per_word == 1) {
137 *spi_st->rx_ptr++ = (uint8_t)word;
138 } else {
139 *spi_st->rx_ptr++ = (word >> 8);
140 *spi_st->rx_ptr++ = word & 0xff;
141 }
142 }
143 }
144 spi_st->words_remaining -= count;
145}
146
147static int spi_st_transfer_one(struct spi_master *master,
148 struct spi_device *spi, struct spi_transfer *t)
149{
150 struct spi_st *spi_st = spi_master_get_devdata(master);
151 uint32_t ctl = 0;
152
153 /* Setup transfer */
154 spi_st->tx_ptr = t->tx_buf;
155 spi_st->rx_ptr = t->rx_buf;
156
157 if (spi->bits_per_word > 8) {
158 /*
159 * Anything greater than 8 bits-per-word requires 2
160 * bytes-per-word in the RX/TX buffers
161 */
162 spi_st->bytes_per_word = 2;
163 spi_st->words_remaining = t->len / 2;
164
165 } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
166 /*
167 * If transfer is even-length, and 8 bits-per-word, then
168 * implement as half-length 16 bits-per-word transfer
169 */
170 spi_st->bytes_per_word = 2;
171 spi_st->words_remaining = t->len / 2;
172
173 /* Set SSC_CTL to 16 bits-per-word */
174 ctl = readl_relaxed(spi_st->base + SSC_CTL);
175 writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
176
177 readl_relaxed(spi_st->base + SSC_RBUF);
178
179 } else {
180 spi_st->bytes_per_word = 1;
181 spi_st->words_remaining = t->len;
182 }
183
184 reinit_completion(&spi_st->done);
185
186 /* Start transfer by writing to the TX FIFO */
187 ssc_write_tx_fifo(spi_st);
188 writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
189
190 /* Wait for transfer to complete */
191 wait_for_completion(&spi_st->done);
192
193 /* Restore SSC_CTL if necessary */
194 if (ctl)
195 writel_relaxed(ctl, spi_st->base + SSC_CTL);
196
197 spi_finalize_current_transfer(spi->master);
198
199 return t->len;
200}
201
202static void spi_st_cleanup(struct spi_device *spi)
203{
204 int cs = spi->cs_gpio;
205
206 if (gpio_is_valid(cs))
207 devm_gpio_free(&spi->dev, cs);
208}
209
210/* the spi->mode bits understood by this driver: */
211#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
212static int spi_st_setup(struct spi_device *spi)
213{
214 struct spi_st *spi_st = spi_master_get_devdata(spi->master);
215 u32 spi_st_clk, sscbrg, var;
216 u32 hz = spi->max_speed_hz;
217 int cs = spi->cs_gpio;
218 int ret;
219
220 if (!hz) {
221 dev_err(&spi->dev, "max_speed_hz unspecified\n");
222 return -EINVAL;
223 }
224
225 if (!gpio_is_valid(cs)) {
226 dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
227 return -EINVAL;
228 }
229
230 if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) {
231 dev_err(&spi->dev, "could not request gpio:%d\n", cs);
232 return -EINVAL;
233 }
234
235 ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
236 if (ret)
237 return ret;
238
239 spi_st_clk = clk_get_rate(spi_st->clk);
240
241 /* Set SSC_BRF */
242 sscbrg = spi_st_clk / (2 * hz);
243 if (sscbrg < 0x07 || sscbrg > BIT(16)) {
244 dev_err(&spi->dev,
245 "baudrate %d outside valid range %d\n", sscbrg, hz);
246 return -EINVAL;
247 }
248
249 spi_st->baud = spi_st_clk / (2 * sscbrg);
250 if (sscbrg == BIT(16)) /* 16-bit counter wraps */
251 sscbrg = 0x0;
252
253 writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
254
255 dev_dbg(&spi->dev,
256 "setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n",
257 hz, spi_st->baud, sscbrg);
258
259 /* Set SSC_CTL and enable SSC */
260 var = readl_relaxed(spi_st->base + SSC_CTL);
261 var |= SSC_CTL_MS;
262
263 if (spi->mode & SPI_CPOL)
264 var |= SSC_CTL_PO;
265 else
266 var &= ~SSC_CTL_PO;
267
268 if (spi->mode & SPI_CPHA)
269 var |= SSC_CTL_PH;
270 else
271 var &= ~SSC_CTL_PH;
272
273 if ((spi->mode & SPI_LSB_FIRST) == 0)
274 var |= SSC_CTL_HB;
275 else
276 var &= ~SSC_CTL_HB;
277
278 if (spi->mode & SPI_LOOP)
279 var |= SSC_CTL_LPB;
280 else
281 var &= ~SSC_CTL_LPB;
282
283 var &= ~SSC_CTL_DATA_WIDTH_MSK;
284 var |= (spi->bits_per_word - 1);
285
286 var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
287 var |= SSC_CTL_EN;
288
289 writel_relaxed(var, spi_st->base + SSC_CTL);
290
291 /* Clear the status register */
292 readl_relaxed(spi_st->base + SSC_RBUF);
293
294 return 0;
295}
296
297/* Interrupt fired when TX shift register becomes empty */
298static irqreturn_t spi_st_irq(int irq, void *dev_id)
299{
300 struct spi_st *spi_st = (struct spi_st *)dev_id;
301
302 /* Read RX FIFO */
303 ssc_read_rx_fifo(spi_st);
304
305 /* Fill TX FIFO */
306 if (spi_st->words_remaining) {
307 ssc_write_tx_fifo(spi_st);
308 } else {
309 /* TX/RX complete */
310 writel_relaxed(0x0, spi_st->base + SSC_IEN);
311 /*
312 * read SSC_IEN to ensure that this bit is set
313 * before re-enabling interrupt
314 */
315 readl(spi_st->base + SSC_IEN);
316 complete(&spi_st->done);
317 }
318
319 return IRQ_HANDLED;
320}
321
322static int spi_st_probe(struct platform_device *pdev)
323{
324 struct device_node *np = pdev->dev.of_node;
325 struct spi_master *master;
326 struct resource *res;
327 struct spi_st *spi_st;
328 int irq, ret = 0;
329 u32 var;
330
331 master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
332 if (!master)
333 return -ENOMEM;
334
335 master->dev.of_node = np;
336 master->mode_bits = MODEBITS;
337 master->setup = spi_st_setup;
338 master->cleanup = spi_st_cleanup;
339 master->transfer_one = spi_st_transfer_one;
340 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
341 master->auto_runtime_pm = true;
342 master->bus_num = pdev->id;
343 spi_st = spi_master_get_devdata(master);
344
345 spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
346 if (IS_ERR(spi_st->clk)) {
347 dev_err(&pdev->dev, "Unable to request clock\n");
348 return PTR_ERR(spi_st->clk);
349 }
350
351 ret = spi_st_clk_enable(spi_st);
352 if (ret)
353 return ret;
354
355 init_completion(&spi_st->done);
356
357 /* Get resources */
358 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
359 spi_st->base = devm_ioremap_resource(&pdev->dev, res);
360 if (IS_ERR(spi_st->base)) {
361 ret = PTR_ERR(spi_st->base);
362 goto clk_disable;
363 }
364
365 /* Disable I2C and Reset SSC */
366 writel_relaxed(0x0, spi_st->base + SSC_I2C);
367 var = readw_relaxed(spi_st->base + SSC_CTL);
368 var |= SSC_CTL_SR;
369 writel_relaxed(var, spi_st->base + SSC_CTL);
370
371 udelay(1);
372 var = readl_relaxed(spi_st->base + SSC_CTL);
373 var &= ~SSC_CTL_SR;
374 writel_relaxed(var, spi_st->base + SSC_CTL);
375
376 /* Set SSC into slave mode before reconfiguring PIO pins */
377 var = readl_relaxed(spi_st->base + SSC_CTL);
378 var &= ~SSC_CTL_MS;
379 writel_relaxed(var, spi_st->base + SSC_CTL);
380
381 irq = irq_of_parse_and_map(np, 0);
382 if (!irq) {
383 dev_err(&pdev->dev, "IRQ missing or invalid\n");
384 ret = -EINVAL;
385 goto clk_disable;
386 }
387
388 ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
389 pdev->name, spi_st);
390 if (ret) {
391 dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
392 goto clk_disable;
393 }
394
395 /* by default the device is on */
396 pm_runtime_set_active(&pdev->dev);
397 pm_runtime_enable(&pdev->dev);
398
399 platform_set_drvdata(pdev, master);
400
401 ret = devm_spi_register_master(&pdev->dev, master);
402 if (ret) {
403 dev_err(&pdev->dev, "Failed to register master\n");
404 goto clk_disable;
405 }
406
407 return 0;
408
409clk_disable:
410 spi_st_clk_disable(spi_st);
411
412 return ret;
413}
414
415static int spi_st_remove(struct platform_device *pdev)
416{
417 struct spi_master *master = platform_get_drvdata(pdev);
418 struct spi_st *spi_st = spi_master_get_devdata(master);
419
420 spi_st_clk_disable(spi_st);
421
422 pinctrl_pm_select_sleep_state(&pdev->dev);
423
424 return 0;
425}
426
427#ifdef CONFIG_PM
428static int spi_st_runtime_suspend(struct device *dev)
429{
430 struct spi_master *master = dev_get_drvdata(dev);
431 struct spi_st *spi_st = spi_master_get_devdata(master);
432
433 writel_relaxed(0, spi_st->base + SSC_IEN);
434 pinctrl_pm_select_sleep_state(dev);
435
436 spi_st_clk_disable(spi_st);
437
438 return 0;
439}
440
441static int spi_st_runtime_resume(struct device *dev)
442{
443 struct spi_master *master = dev_get_drvdata(dev);
444 struct spi_st *spi_st = spi_master_get_devdata(master);
445 int ret;
446
447 ret = spi_st_clk_enable(spi_st);
448 pinctrl_pm_select_default_state(dev);
449
450 return ret;
451}
452#endif
453
454#ifdef CONFIG_PM_SLEEP
455static int spi_st_suspend(struct device *dev)
456{
457 struct spi_master *master = dev_get_drvdata(dev);
458 int ret;
459
460 ret = spi_master_suspend(master);
461 if (ret)
462 return ret;
463
464 return pm_runtime_force_suspend(dev);
465}
466
467static int spi_st_resume(struct device *dev)
468{
469 struct spi_master *master = dev_get_drvdata(dev);
470 int ret;
471
472 ret = spi_master_resume(master);
473 if (ret)
474 return ret;
475
476 return pm_runtime_force_resume(dev);
477}
478#endif
479
480static const struct dev_pm_ops spi_st_pm = {
481 SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
482 SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
483};
484
485static struct of_device_id stm_spi_match[] = {
486 { .compatible = "st,comms-ssc4-spi", },
487 {},
488};
489MODULE_DEVICE_TABLE(of, stm_spi_match);
490
491static struct platform_driver spi_st_driver = {
492 .driver = {
493 .name = "spi-st",
494 .pm = &spi_st_pm,
495 .of_match_table = of_match_ptr(stm_spi_match),
496 },
497 .probe = spi_st_probe,
498 .remove = spi_st_remove,
499};
500module_platform_driver(spi_st_driver);
501
502MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
503MODULE_DESCRIPTION("STM SSC SPI driver");
504MODULE_LICENSE("GPL v2");
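
Two numeric details in the new spi-st-ssc4 driver are worth tracing. spi_st_setup() programs SSC_BRG as clk / (2 * hz) and reports the achieved rate as clk / (2 * sscbrg), writing a full 16-bit value as 0 because the counter wraps; and spi_st_transfer_one() runs even-length 8-bit transfers as half as many 16-bit words to halve the FIFO traffic. A worked example of the baud math — the 100 MHz input clock is an assumed figure, not taken from the driver:

    #include <stdio.h>

    int main(void)
    {
        unsigned int clk = 100000000, hz = 1000000;	/* assumed rates */
        unsigned int sscbrg = clk / (2 * hz);		/* 50 */
        unsigned int baud = clk / (2 * sscbrg);		/* achieved rate */

        if (sscbrg == (1u << 16))	/* 16-bit counter wraps */
            sscbrg = 0;

        printf("sscbrg=%u actual=%u Hz\n", sscbrg, baud);
        return 0;	/* prints: sscbrg=50 actual=1000000 Hz */
    }
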
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 6146c4cd6583..884a716e50cb 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -201,7 +201,7 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
201 201
202static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) 202static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
203{ 203{
204 int wlen, count, ret; 204 int wlen, count;
205 unsigned int cmd; 205 unsigned int cmd;
206 const u8 *txbuf; 206 const u8 *txbuf;
207 207
@@ -230,9 +230,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
230 } 230 }
231 231
232 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 232 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
233 ret = wait_for_completion_timeout(&qspi->transfer_complete, 233 if (!wait_for_completion_timeout(&qspi->transfer_complete,
234 QSPI_COMPLETION_TIMEOUT); 234 QSPI_COMPLETION_TIMEOUT)) {
235 if (ret == 0) {
236 dev_err(qspi->dev, "write timed out\n"); 235 dev_err(qspi->dev, "write timed out\n");
237 return -ETIMEDOUT; 236 return -ETIMEDOUT;
238 } 237 }
@@ -245,7 +244,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
245 244
246static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) 245static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
247{ 246{
248 int wlen, count, ret; 247 int wlen, count;
249 unsigned int cmd; 248 unsigned int cmd;
250 u8 *rxbuf; 249 u8 *rxbuf;
251 250
@@ -268,9 +267,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
268 while (count) { 267 while (count) {
269 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); 268 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
270 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 269 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
271 ret = wait_for_completion_timeout(&qspi->transfer_complete, 270 if (!wait_for_completion_timeout(&qspi->transfer_complete,
272 QSPI_COMPLETION_TIMEOUT); 271 QSPI_COMPLETION_TIMEOUT)) {
273 if (ret == 0) {
274 dev_err(qspi->dev, "read timed out\n"); 272 dev_err(qspi->dev, "read timed out\n");
275 return -ETIMEDOUT; 273 return -ETIMEDOUT;
276 } 274 }
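
The ti-qspi hunks above apply a common cleanup: wait_for_completion_timeout() returns the unsigned number of jiffies remaining, or 0 on timeout, so the result can be tested directly in the condition rather than parked in a signed int first. A toy stand-in showing the shape of the test:

    #include <stdio.h>

    /* Returns time remaining (unsigned) or 0 on timeout, mirroring the
     * kernel function's contract; the body is a stub for illustration. */
    static unsigned long wait_stub(int completed, unsigned long timeout)
    {
        return completed ? timeout / 2 : 0;
    }

    int main(void)
    {
        if (!wait_stub(0, 100))		/* was: ret = ...; if (ret == 0) */
            puts("write timed out");
        return 0;
    }

The same conversion appears in the sh-msiof PIO and DMA paths earlier in this series.
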
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index be692ad50442..93dfcee0f987 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
18 */ 14 */
19 15
20#include <linux/delay.h> 16#include <linux/delay.h>
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 79bd84f43430..133f53a9c1d4 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -22,6 +22,8 @@
22#include <linux/spi/xilinx_spi.h> 22#include <linux/spi/xilinx_spi.h>
23#include <linux/io.h> 23#include <linux/io.h>
24 24
25#define XILINX_SPI_MAX_CS 32
26
25#define XILINX_SPI_NAME "xilinx_spi" 27#define XILINX_SPI_NAME "xilinx_spi"
26 28
27/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) 29/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
@@ -34,7 +36,8 @@
34#define XSPI_CR_MASTER_MODE 0x04 36#define XSPI_CR_MASTER_MODE 0x04
35#define XSPI_CR_CPOL 0x08 37#define XSPI_CR_CPOL 0x08
36#define XSPI_CR_CPHA 0x10 38#define XSPI_CR_CPHA 0x10
37#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL) 39#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \
40 XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
38#define XSPI_CR_TXFIFO_RESET 0x20 41#define XSPI_CR_TXFIFO_RESET 0x20
39#define XSPI_CR_RXFIFO_RESET 0x40 42#define XSPI_CR_RXFIFO_RESET 0x40
40#define XSPI_CR_MANUAL_SSELECT 0x80 43#define XSPI_CR_MANUAL_SSELECT 0x80
@@ -85,12 +88,11 @@ struct xilinx_spi {
85 88
86 u8 *rx_ptr; /* pointer in the Tx buffer */ 89 u8 *rx_ptr; /* pointer in the Tx buffer */
87 const u8 *tx_ptr; /* pointer in the Rx buffer */ 90 const u8 *tx_ptr; /* pointer in the Rx buffer */
88 int remaining_bytes; /* the number of bytes left to transfer */ 91 u8 bytes_per_word;
89 u8 bits_per_word; 92 int buffer_size; /* buffer size in words */
93 u32 cs_inactive; /* Level of the CS pins when inactive*/
90 unsigned int (*read_fn)(void __iomem *); 94 unsigned int (*read_fn)(void __iomem *);
91 void (*write_fn)(u32, void __iomem *); 95 void (*write_fn)(u32, void __iomem *);
92 void (*tx_fn)(struct xilinx_spi *);
93 void (*rx_fn)(struct xilinx_spi *);
94}; 96};
95 97
96static void xspi_write32(u32 val, void __iomem *addr) 98static void xspi_write32(u32 val, void __iomem *addr)
@@ -113,49 +115,51 @@ static unsigned int xspi_read32_be(void __iomem *addr)
113 return ioread32be(addr); 115 return ioread32be(addr);
114} 116}
115 117
116static void xspi_tx8(struct xilinx_spi *xspi) 118static void xilinx_spi_tx(struct xilinx_spi *xspi)
117{ 119{
118 xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); 120 u32 data = 0;
119 xspi->tx_ptr++;
120}
121
122static void xspi_tx16(struct xilinx_spi *xspi)
123{
124 xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
125 xspi->tx_ptr += 2;
126}
127 121
128static void xspi_tx32(struct xilinx_spi *xspi) 122 if (!xspi->tx_ptr) {
129{ 123 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
130 xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); 124 return;
131 xspi->tx_ptr += 4;
132}
133
134static void xspi_rx8(struct xilinx_spi *xspi)
135{
136 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
137 if (xspi->rx_ptr) {
138 *xspi->rx_ptr = data & 0xff;
139 xspi->rx_ptr++;
140 } 125 }
141}
142 126
143static void xspi_rx16(struct xilinx_spi *xspi) 127 switch (xspi->bytes_per_word) {
144{ 128 case 1:
145 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); 129 data = *(u8 *)(xspi->tx_ptr);
146 if (xspi->rx_ptr) { 130 break;
147 *(u16 *)(xspi->rx_ptr) = data & 0xffff; 131 case 2:
148 xspi->rx_ptr += 2; 132 data = *(u16 *)(xspi->tx_ptr);
133 break;
134 case 4:
135 data = *(u32 *)(xspi->tx_ptr);
136 break;
149 } 137 }
138
139 xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
140 xspi->tx_ptr += xspi->bytes_per_word;
150} 141}
151 142
152static void xspi_rx32(struct xilinx_spi *xspi) 143static void xilinx_spi_rx(struct xilinx_spi *xspi)
153{ 144{
154 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); 145 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
155 if (xspi->rx_ptr) { 146
147 if (!xspi->rx_ptr)
148 return;
149
150 switch (xspi->bytes_per_word) {
151 case 1:
152 *(u8 *)(xspi->rx_ptr) = data;
153 break;
154 case 2:
155 *(u16 *)(xspi->rx_ptr) = data;
156 break;
157 case 4:
156 *(u32 *)(xspi->rx_ptr) = data; 158 *(u32 *)(xspi->rx_ptr) = data;
157 xspi->rx_ptr += 4; 159 break;
158 } 160 }
161
162 xspi->rx_ptr += xspi->bytes_per_word;
159} 163}
160 164
161static void xspi_init_hw(struct xilinx_spi *xspi) 165static void xspi_init_hw(struct xilinx_spi *xspi)
@@ -165,46 +169,56 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
165 /* Reset the SPI device */ 169 /* Reset the SPI device */
166 xspi->write_fn(XIPIF_V123B_RESET_MASK, 170 xspi->write_fn(XIPIF_V123B_RESET_MASK,
167 regs_base + XIPIF_V123B_RESETR_OFFSET); 171 regs_base + XIPIF_V123B_RESETR_OFFSET);
168 /* Disable all the interrupts just in case */ 172 /* Enable the transmit empty interrupt, which we use to determine
169 xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); 173 * progress on the transmission.
170 /* Enable the global IPIF interrupt */ 174 */
171 xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, 175 xspi->write_fn(XSPI_INTR_TX_EMPTY,
172 regs_base + XIPIF_V123B_DGIER_OFFSET); 176 regs_base + XIPIF_V123B_IIER_OFFSET);
177 /* Disable the global IPIF interrupt */
178 xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
173 /* Deselect the slave on the SPI bus */ 179 /* Deselect the slave on the SPI bus */
174 xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); 180 xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
175 /* Disable the transmitter, enable Manual Slave Select Assertion, 181 /* Disable the transmitter, enable Manual Slave Select Assertion,
176 * put SPI controller into master mode, and enable it */ 182 * put SPI controller into master mode, and enable it */
177 xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | 183 xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
178 XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | 184 XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
179 XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); 185 regs_base + XSPI_CR_OFFSET);
180} 186}
181 187
182static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) 188static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
183{ 189{
184 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 190 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
191 u16 cr;
192 u32 cs;
185 193
186 if (is_on == BITBANG_CS_INACTIVE) { 194 if (is_on == BITBANG_CS_INACTIVE) {
187 /* Deselect the slave on the SPI bus */ 195 /* Deselect the slave on the SPI bus */
188 xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); 196 xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
189 } else if (is_on == BITBANG_CS_ACTIVE) { 197 return;
190 /* Set the SPI clock phase and polarity */
191 u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
192 & ~XSPI_CR_MODE_MASK;
193 if (spi->mode & SPI_CPHA)
194 cr |= XSPI_CR_CPHA;
195 if (spi->mode & SPI_CPOL)
196 cr |= XSPI_CR_CPOL;
197 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
198
199 /* We do not check spi->max_speed_hz here as the SPI clock
200 * frequency is not software programmable (the IP block design
201 * parameter)
202 */
203
204 /* Activate the chip select */
205 xspi->write_fn(~(0x0001 << spi->chip_select),
206 xspi->regs + XSPI_SSR_OFFSET);
207 } 198 }
199
200 /* Set the SPI clock phase and polarity */
201 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
202 if (spi->mode & SPI_CPHA)
203 cr |= XSPI_CR_CPHA;
204 if (spi->mode & SPI_CPOL)
205 cr |= XSPI_CR_CPOL;
206 if (spi->mode & SPI_LSB_FIRST)
207 cr |= XSPI_CR_LSB_FIRST;
208 if (spi->mode & SPI_LOOP)
209 cr |= XSPI_CR_LOOP;
210 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
211
212 /* We do not check spi->max_speed_hz here as the SPI clock
213 * frequency is not software programmable (the IP block design
214 * parameter)
215 */
216
217 cs = xspi->cs_inactive;
218 cs ^= BIT(spi->chip_select);
219
220 /* Activate the chip select */
221 xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
208} 222}
209 223
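
The rewritten chipselect path works because setup_transfer() (a later hunk) records each slave's idle level in the cs_inactive mask, so activation becomes a single bit flip and SPI_CS_HIGH slaves come for free. A standalone illustration of the mask arithmetic, assuming an active-high slave on chip select 2; all names are local to the example:

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
            uint32_t cs_inactive = 0xffffffff;   /* default: every line idle high */

            cs_inactive &= ~BIT(2);              /* CS 2 is SPI_CS_HIGH: idle low */

            uint32_t cs = cs_inactive ^ BIT(2);  /* activate: flip exactly one bit */
            assert(cs & BIT(2));                 /* driven high while selected */

            cs = cs_inactive;                    /* deselect: back to idle levels */
            assert(!(cs & BIT(2)));
            return 0;
    }
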
210/* spi_bitbang requires custom setup_transfer() to be defined if there is a 224/* spi_bitbang requires custom setup_transfer() to be defined if there is a
@@ -213,85 +227,85 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
213static int xilinx_spi_setup_transfer(struct spi_device *spi, 227static int xilinx_spi_setup_transfer(struct spi_device *spi,
214 struct spi_transfer *t) 228 struct spi_transfer *t)
215{ 229{
216 return 0; 230 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
217}
218 231
219static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) 232 if (spi->mode & SPI_CS_HIGH)
220{ 233 xspi->cs_inactive &= ~BIT(spi->chip_select);
221 u8 sr; 234 else
235 xspi->cs_inactive |= BIT(spi->chip_select);
222 236
223 /* Fill the Tx FIFO with as many bytes as possible */ 237 return 0;
224 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
225 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
226 if (xspi->tx_ptr)
227 xspi->tx_fn(xspi);
228 else
229 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
230 xspi->remaining_bytes -= xspi->bits_per_word / 8;
231 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
232 }
233} 238}
234 239
235static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) 240static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
236{ 241{
237 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 242 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
238 u32 ipif_ier; 243 int remaining_words; /* the number of words left to transfer */
244 bool use_irq = false;
245 u16 cr = 0;
239 246
240 /* We get here with transmitter inhibited */ 247 /* We get here with transmitter inhibited */
241 248
242 xspi->tx_ptr = t->tx_buf; 249 xspi->tx_ptr = t->tx_buf;
243 xspi->rx_ptr = t->rx_buf; 250 xspi->rx_ptr = t->rx_buf;
244 xspi->remaining_bytes = t->len; 251 remaining_words = t->len / xspi->bytes_per_word;
245 reinit_completion(&xspi->done); 252 reinit_completion(&xspi->done);
246 253
254 if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
255 use_irq = true;
256 xspi->write_fn(XSPI_INTR_TX_EMPTY,
257 xspi->regs + XIPIF_V123B_IISR_OFFSET);
258 /* Enable the global IPIF interrupt */
259 xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
260 xspi->regs + XIPIF_V123B_DGIER_OFFSET);
 261 /* Inhibit irq to avoid spurious irqs on tx_empty */
262 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
263 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
264 xspi->regs + XSPI_CR_OFFSET);
265 }
247 266
248 /* Enable the transmit empty interrupt, which we use to determine 267 while (remaining_words) {
249 * progress on the transmission. 268 int n_words, tx_words, rx_words;
250 */
251 ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
252 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
253 xspi->regs + XIPIF_V123B_IIER_OFFSET);
254 269
255 for (;;) { 270 n_words = min(remaining_words, xspi->buffer_size);
256 u16 cr;
257 u8 sr;
258 271
259 xilinx_spi_fill_tx_fifo(xspi); 272 tx_words = n_words;
273 while (tx_words--)
274 xilinx_spi_tx(xspi);
260 275
261 /* Start the transfer by not inhibiting the transmitter any 276 /* Start the transfer by not inhibiting the transmitter any
262 * longer 277 * longer
263 */ 278 */
264 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
265 ~XSPI_CR_TRANS_INHIBIT;
266 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
267 279
268 wait_for_completion(&xspi->done); 280 if (use_irq) {
281 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
282 wait_for_completion(&xspi->done);
283 } else
284 while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
285 XSPI_SR_TX_EMPTY_MASK))
286 ;
269 287
270 /* A transmit has just completed. Process received data and 288 /* A transmit has just completed. Process received data and
271 * check for more data to transmit. Always inhibit the 289 * check for more data to transmit. Always inhibit the
272 * transmitter while the Isr refills the transmit register/FIFO, 290 * transmitter while the Isr refills the transmit register/FIFO,
273 * or make sure it is stopped if we're done. 291 * or make sure it is stopped if we're done.
274 */ 292 */
275 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); 293 if (use_irq)
276 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, 294 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
277 xspi->regs + XSPI_CR_OFFSET); 295 xspi->regs + XSPI_CR_OFFSET);
278 296
279 /* Read out all the data from the Rx FIFO */ 297 /* Read out all the data from the Rx FIFO */
280 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); 298 rx_words = n_words;
281 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { 299 while (rx_words--)
282 xspi->rx_fn(xspi); 300 xilinx_spi_rx(xspi);
283 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); 301
284 } 302 remaining_words -= n_words;
285
286 /* See if there is more data to send */
287 if (xspi->remaining_bytes <= 0)
288 break;
289 } 303 }
290 304
291 /* Disable the transmit empty interrupt */ 305 if (use_irq)
292 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); 306 xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
293 307
294 return t->len - xspi->remaining_bytes; 308 return t->len;
295} 309}
296 310
297 311
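
The reworked txrx_bufs() above splits a transfer into FIFO-sized chunks and only arms the interrupt when the transfer will not fit into a single FIFO fill; short transfers busy-wait on TX_EMPTY instead, saving an interrupt round trip per chunk. The chunking itself, reduced to a standalone loop with invented sizes:

    #include <stdio.h>

    int main(void)
    {
            int remaining_words = 70;       /* invented transfer length */
            int buffer_size = 16;           /* detected FIFO depth, in words */

            while (remaining_words) {
                    int n_words = remaining_words < buffer_size ?
                                  remaining_words : buffer_size;
                    /* fill the FIFO, run the transfer, drain n_words back */
                    printf("chunk: %d words\n", n_words);
                    remaining_words -= n_words;
            }
            return 0;
    }
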
@@ -316,6 +330,28 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
316 return IRQ_HANDLED; 330 return IRQ_HANDLED;
317} 331}
318 332
333static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
334{
335 u8 sr;
336 int n_words = 0;
337
338 /*
339 * Before the buffer_size detection we reset the core
340 * to make sure we start with a clean state.
341 */
342 xspi->write_fn(XIPIF_V123B_RESET_MASK,
343 xspi->regs + XIPIF_V123B_RESETR_OFFSET);
344
345 /* Fill the Tx FIFO with as many words as possible */
346 do {
347 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
348 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
349 n_words++;
350 } while (!(sr & XSPI_SR_TX_FULL_MASK));
351
352 return n_words;
353}
354
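
xilinx_spi_find_buffer_size() above discovers the synthesized FIFO depth empirically: after resetting the core it writes dummy words until the status register reports TX full, counting as it goes. The same do/while shape against a mock status bit, with the depth made visible for the example:

    #include <stdio.h>

    #define FIFO_DEPTH 16                   /* stand-in for the hidden HW depth */

    static int words_queued;
    static int tx_full(void) { return words_queued >= FIFO_DEPTH; }

    int main(void)
    {
            int n_words = 0;

            do {
                    words_queued++;         /* write one dummy word to TXD */
                    n_words++;
            } while (!tx_full());           /* stop once SR reports TX full */

            printf("buffer size: %d words\n", n_words);
            return 0;
    }
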
319static const struct of_device_id xilinx_spi_of_match[] = { 355static const struct of_device_id xilinx_spi_of_match[] = {
320 { .compatible = "xlnx,xps-spi-2.00.a", }, 356 { .compatible = "xlnx,xps-spi-2.00.a", },
321 { .compatible = "xlnx,xps-spi-2.00.b", }, 357 { .compatible = "xlnx,xps-spi-2.00.b", },
@@ -348,14 +384,21 @@ static int xilinx_spi_probe(struct platform_device *pdev)
348 return -EINVAL; 384 return -EINVAL;
349 } 385 }
350 386
387 if (num_cs > XILINX_SPI_MAX_CS) {
388 dev_err(&pdev->dev, "Invalid number of spi slaves\n");
389 return -EINVAL;
390 }
391
351 master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi)); 392 master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
352 if (!master) 393 if (!master)
353 return -ENODEV; 394 return -ENODEV;
354 395
355 /* the spi->mode bits understood by this driver: */ 396 /* the spi->mode bits understood by this driver: */
356 master->mode_bits = SPI_CPOL | SPI_CPHA; 397 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
398 SPI_CS_HIGH;
357 399
358 xspi = spi_master_get_devdata(master); 400 xspi = spi_master_get_devdata(master);
401 xspi->cs_inactive = 0xffffffff;
359 xspi->bitbang.master = master; 402 xspi->bitbang.master = master;
360 xspi->bitbang.chipselect = xilinx_spi_chipselect; 403 xspi->bitbang.chipselect = xilinx_spi_chipselect;
361 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; 404 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
@@ -392,35 +435,20 @@ static int xilinx_spi_probe(struct platform_device *pdev)
392 } 435 }
393 436
394 master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word); 437 master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
395 xspi->bits_per_word = bits_per_word; 438 xspi->bytes_per_word = bits_per_word / 8;
396 if (xspi->bits_per_word == 8) { 439 xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
397 xspi->tx_fn = xspi_tx8;
398 xspi->rx_fn = xspi_rx8;
399 } else if (xspi->bits_per_word == 16) {
400 xspi->tx_fn = xspi_tx16;
401 xspi->rx_fn = xspi_rx16;
402 } else if (xspi->bits_per_word == 32) {
403 xspi->tx_fn = xspi_tx32;
404 xspi->rx_fn = xspi_rx32;
405 } else {
406 ret = -EINVAL;
407 goto put_master;
408 }
409
410 /* SPI controller initializations */
411 xspi_init_hw(xspi);
412 440
413 xspi->irq = platform_get_irq(pdev, 0); 441 xspi->irq = platform_get_irq(pdev, 0);
414 if (xspi->irq < 0) { 442 if (xspi->irq >= 0) {
415 ret = xspi->irq; 443 /* Register for SPI Interrupt */
416 goto put_master; 444 ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
445 dev_name(&pdev->dev), xspi);
446 if (ret)
447 goto put_master;
417 } 448 }
418 449
419 /* Register for SPI Interrupt */ 450 /* SPI controller initializations */
420 ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0, 451 xspi_init_hw(xspi);
421 dev_name(&pdev->dev), xspi);
422 if (ret)
423 goto put_master;
424 452
425 ret = spi_bitbang_start(&xspi->bitbang); 453 ret = spi_bitbang_start(&xspi->bitbang);
426 if (ret) { 454 if (ret) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 66a70e9bc743..c64a3e59fce3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 16 */
21 17
22#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -788,7 +784,7 @@ static int spi_transfer_one_message(struct spi_master *master,
788 struct spi_transfer *xfer; 784 struct spi_transfer *xfer;
789 bool keep_cs = false; 785 bool keep_cs = false;
790 int ret = 0; 786 int ret = 0;
791 int ms = 1; 787 unsigned long ms = 1;
792 788
793 spi_set_cs(msg->spi, true); 789 spi_set_cs(msg->spi, true);
794 790
@@ -875,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master)
875EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 871EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
876 872
877/** 873/**
878 * spi_pump_messages - kthread work function which processes spi message queue 874 * __spi_pump_messages - function which processes spi message queue
879 * @work: pointer to kthread work struct contained in the master struct 875 * @master: master to process queue for
876 * @in_kthread: true if we are in the context of the message pump thread
880 * 877 *
881 * This function checks if there is any spi message in the queue that 878 * This function checks if there is any spi message in the queue that
882 * needs processing and if so call out to the driver to initialize hardware 879 * needs processing and if so call out to the driver to initialize hardware
883 * and transfer each message. 880 * and transfer each message.
884 * 881 *
882 * Note that it is called both from the kthread itself and also from
883 * inside spi_sync(); the queue extraction handling at the top of the
884 * function should deal with this safely.
885 */ 885 */
886static void spi_pump_messages(struct kthread_work *work) 886static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
887{ 887{
888 struct spi_master *master =
889 container_of(work, struct spi_master, pump_messages);
890 unsigned long flags; 888 unsigned long flags;
891 bool was_busy = false; 889 bool was_busy = false;
892 int ret; 890 int ret;
893 891
894 /* Lock queue and check for queue work */ 892 /* Lock queue */
895 spin_lock_irqsave(&master->queue_lock, flags); 893 spin_lock_irqsave(&master->queue_lock, flags);
894
895 /* Make sure we are not already running a message */
896 if (master->cur_msg) {
897 spin_unlock_irqrestore(&master->queue_lock, flags);
898 return;
899 }
900
901 /* If another context is idling the device then defer */
902 if (master->idling) {
903 queue_kthread_work(&master->kworker, &master->pump_messages);
904 spin_unlock_irqrestore(&master->queue_lock, flags);
905 return;
906 }
907
908 /* Check if the queue is idle */
896 if (list_empty(&master->queue) || !master->running) { 909 if (list_empty(&master->queue) || !master->running) {
897 if (!master->busy) { 910 if (!master->busy) {
898 spin_unlock_irqrestore(&master->queue_lock, flags); 911 spin_unlock_irqrestore(&master->queue_lock, flags);
899 return; 912 return;
900 } 913 }
914
915 /* Only do teardown in the thread */
916 if (!in_kthread) {
917 queue_kthread_work(&master->kworker,
918 &master->pump_messages);
919 spin_unlock_irqrestore(&master->queue_lock, flags);
920 return;
921 }
922
901 master->busy = false; 923 master->busy = false;
924 master->idling = true;
902 spin_unlock_irqrestore(&master->queue_lock, flags); 925 spin_unlock_irqrestore(&master->queue_lock, flags);
926
903 kfree(master->dummy_rx); 927 kfree(master->dummy_rx);
904 master->dummy_rx = NULL; 928 master->dummy_rx = NULL;
905 kfree(master->dummy_tx); 929 kfree(master->dummy_tx);
@@ -913,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work)
913 pm_runtime_put_autosuspend(master->dev.parent); 937 pm_runtime_put_autosuspend(master->dev.parent);
914 } 938 }
915 trace_spi_master_idle(master); 939 trace_spi_master_idle(master);
916 return;
917 }
918 940
919 /* Make sure we are not already running a message */ 941 spin_lock_irqsave(&master->queue_lock, flags);
920 if (master->cur_msg) { 942 master->idling = false;
921 spin_unlock_irqrestore(&master->queue_lock, flags); 943 spin_unlock_irqrestore(&master->queue_lock, flags);
922 return; 944 return;
923 } 945 }
946
924 /* Extract head of queue */ 947 /* Extract head of queue */
925 master->cur_msg = 948 master->cur_msg =
926 list_first_entry(&master->queue, struct spi_message, queue); 949 list_first_entry(&master->queue, struct spi_message, queue);
@@ -985,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work)
985 } 1008 }
986} 1009}
987 1010
1011/**
1012 * spi_pump_messages - kthread work function which processes spi message queue
1013 * @work: pointer to kthread work struct contained in the master struct
1014 */
1015static void spi_pump_messages(struct kthread_work *work)
1016{
1017 struct spi_master *master =
1018 container_of(work, struct spi_master, pump_messages);
1019
1020 __spi_pump_messages(master, true);
1021}
1022
988static int spi_init_queue(struct spi_master *master) 1023static int spi_init_queue(struct spi_master *master)
989{ 1024{
990 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1025 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
991 1026
992 INIT_LIST_HEAD(&master->queue);
993 spin_lock_init(&master->queue_lock);
994
995 master->running = false; 1027 master->running = false;
996 master->busy = false; 1028 master->busy = false;
997 1029
@@ -1161,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master)
1161 return 0; 1193 return 0;
1162} 1194}
1163 1195
1164/** 1196static int __spi_queued_transfer(struct spi_device *spi,
1165 * spi_queued_transfer - transfer function for queued transfers 1197 struct spi_message *msg,
1166 * @spi: spi device which is requesting transfer 1198 bool need_pump)
1167 * @msg: spi message which is to handled is queued to driver queue
1168 */
1169static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1170{ 1199{
1171 struct spi_master *master = spi->master; 1200 struct spi_master *master = spi->master;
1172 unsigned long flags; 1201 unsigned long flags;
@@ -1181,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1181 msg->status = -EINPROGRESS; 1210 msg->status = -EINPROGRESS;
1182 1211
1183 list_add_tail(&msg->queue, &master->queue); 1212 list_add_tail(&msg->queue, &master->queue);
1184 if (!master->busy) 1213 if (!master->busy && need_pump)
1185 queue_kthread_work(&master->kworker, &master->pump_messages); 1214 queue_kthread_work(&master->kworker, &master->pump_messages);
1186 1215
1187 spin_unlock_irqrestore(&master->queue_lock, flags); 1216 spin_unlock_irqrestore(&master->queue_lock, flags);
1188 return 0; 1217 return 0;
1189} 1218}
1190 1219
1220/**
1221 * spi_queued_transfer - transfer function for queued transfers
1222 * @spi: spi device which is requesting transfer
 1223 * @msg: spi message which is to be handled and queued to the driver queue
1224 */
1225static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1226{
1227 return __spi_queued_transfer(spi, msg, true);
1228}
1229
1191static int spi_master_initialize_queue(struct spi_master *master) 1230static int spi_master_initialize_queue(struct spi_master *master)
1192{ 1231{
1193 int ret; 1232 int ret;
@@ -1609,6 +1648,8 @@ int spi_register_master(struct spi_master *master)
1609 dynamic = 1; 1648 dynamic = 1;
1610 } 1649 }
1611 1650
1651 INIT_LIST_HEAD(&master->queue);
1652 spin_lock_init(&master->queue_lock);
1612 spin_lock_init(&master->bus_lock_spinlock); 1653 spin_lock_init(&master->bus_lock_spinlock);
1613 mutex_init(&master->bus_lock_mutex); 1654 mutex_init(&master->bus_lock_mutex);
1614 master->bus_lock_flag = 0; 1655 master->bus_lock_flag = 0;
@@ -2114,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2114 DECLARE_COMPLETION_ONSTACK(done); 2155 DECLARE_COMPLETION_ONSTACK(done);
2115 int status; 2156 int status;
2116 struct spi_master *master = spi->master; 2157 struct spi_master *master = spi->master;
2158 unsigned long flags;
2159
2160 status = __spi_validate(spi, message);
2161 if (status != 0)
2162 return status;
2117 2163
2118 message->complete = spi_complete; 2164 message->complete = spi_complete;
2119 message->context = &done; 2165 message->context = &done;
2166 message->spi = spi;
2120 2167
2121 if (!bus_locked) 2168 if (!bus_locked)
2122 mutex_lock(&master->bus_lock_mutex); 2169 mutex_lock(&master->bus_lock_mutex);
2123 2170
2124 status = spi_async_locked(spi, message); 2171 /* If we're not using the legacy transfer method then we will
2172 * try to transfer in the calling context so special case.
2173 * This code would be less tricky if we could remove the
2174 * support for driver implemented message queues.
2175 */
2176 if (master->transfer == spi_queued_transfer) {
2177 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2178
2179 trace_spi_message_submit(message);
2180
2181 status = __spi_queued_transfer(spi, message, false);
2182
2183 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2184 } else {
2185 status = spi_async_locked(spi, message);
2186 }
2125 2187
2126 if (!bus_locked) 2188 if (!bus_locked)
2127 mutex_unlock(&master->bus_lock_mutex); 2189 mutex_unlock(&master->bus_lock_mutex);
2128 2190
2129 if (status == 0) { 2191 if (status == 0) {
2192 /* Push out the messages in the calling context if we
2193 * can.
2194 */
2195 if (master->transfer == spi_queued_transfer)
2196 __spi_pump_messages(master, false);
2197
2130 wait_for_completion(&done); 2198 wait_for_completion(&done);
2131 status = message->status; 2199 status = message->status;
2132 } 2200 }
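
The __spi_sync() change above queues the message without kicking the pump kthread and, when the queueing succeeded, runs __spi_pump_messages() from the calling context; an idle bus therefore completes a sync transfer without a context switch, and the kthread is only involved for teardown or when another message is already in flight. A toy model of that fast path, with names of the example's own:

    #include <stdbool.h>
    #include <stdio.h>

    struct queue { int pending; bool busy; };

    static void pump(struct queue *q, bool in_kthread)
    {
            while (q->pending) {
                    q->pending--;
                    printf("message done in %s context\n",
                           in_kthread ? "kthread" : "calling");
            }
    }

    static void sync_transfer(struct queue *q)
    {
            q->pending++;           /* enqueue, but don't wake the worker */
            if (!q->busy)
                    pump(q, false); /* push it out ourselves */
    }

    int main(void)
    {
            struct queue q = { 0, false };
            sync_transfer(&q);
            return 0;
    }
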
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 6941e04afb8c..4eb7a980e670 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 17 */
22 18
23#include <linux/init.h> 19#include <linux/init.h>
@@ -317,6 +313,37 @@ done:
317 return status; 313 return status;
318} 314}
319 315
316static struct spi_ioc_transfer *
317spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
318 unsigned *n_ioc)
319{
320 struct spi_ioc_transfer *ioc;
321 u32 tmp;
322
323 /* Check type, command number and direction */
324 if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
325 || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
326 || _IOC_DIR(cmd) != _IOC_WRITE)
327 return ERR_PTR(-ENOTTY);
328
329 tmp = _IOC_SIZE(cmd);
330 if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
331 return ERR_PTR(-EINVAL);
332 *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
333 if (*n_ioc == 0)
334 return NULL;
335
336 /* copy into scratch area */
337 ioc = kmalloc(tmp, GFP_KERNEL);
338 if (!ioc)
339 return ERR_PTR(-ENOMEM);
340 if (__copy_from_user(ioc, u_ioc, tmp)) {
341 kfree(ioc);
342 return ERR_PTR(-EFAULT);
343 }
344 return ioc;
345}
346
320static long 347static long
321spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 348spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
322{ 349{
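
The new spidev_get_ioc_message() helper leans on the fact that SPI_IOC_MESSAGE(N) encodes the transfer-array size into the ioctl number, so the count falls straight out of _IOC_SIZE() after the modulus check. That round trip can be verified from userspace:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    int main(void)
    {
            unsigned int cmd = SPI_IOC_MESSAGE(3);
            unsigned int n = _IOC_SIZE(cmd) / sizeof(struct spi_ioc_transfer);

            printf("n_ioc = %u\n", n);      /* prints 3 */
            return 0;
    }
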
@@ -456,32 +483,15 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
456 483
457 default: 484 default:
458 /* segmented and/or full-duplex I/O request */ 485 /* segmented and/or full-duplex I/O request */
459 if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) 486 /* Check message and copy into scratch area */
460 || _IOC_DIR(cmd) != _IOC_WRITE) { 487 ioc = spidev_get_ioc_message(cmd,
461 retval = -ENOTTY; 488 (struct spi_ioc_transfer __user *)arg, &n_ioc);
462 break; 489 if (IS_ERR(ioc)) {
463 } 490 retval = PTR_ERR(ioc);
464
465 tmp = _IOC_SIZE(cmd);
466 if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
467 retval = -EINVAL;
468 break;
469 }
470 n_ioc = tmp / sizeof(struct spi_ioc_transfer);
471 if (n_ioc == 0)
472 break;
473
474 /* copy into scratch area */
475 ioc = kmalloc(tmp, GFP_KERNEL);
476 if (!ioc) {
477 retval = -ENOMEM;
478 break;
479 }
480 if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
481 kfree(ioc);
482 retval = -EFAULT;
483 break; 491 break;
484 } 492 }
493 if (!ioc)
494 break; /* n_ioc is also 0 */
485 495
486 /* translate to spi_message, execute */ 496 /* translate to spi_message, execute */
487 retval = spidev_message(spidev, ioc, n_ioc); 497 retval = spidev_message(spidev, ioc, n_ioc);
@@ -496,8 +506,67 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
496 506
497#ifdef CONFIG_COMPAT 507#ifdef CONFIG_COMPAT
498static long 508static long
509spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
510 unsigned long arg)
511{
512 struct spi_ioc_transfer __user *u_ioc;
513 int retval = 0;
514 struct spidev_data *spidev;
515 struct spi_device *spi;
516 unsigned n_ioc, n;
517 struct spi_ioc_transfer *ioc;
518
519 u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
520 if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
521 return -EFAULT;
522
523 /* guard against device removal before, or while,
524 * we issue this ioctl.
525 */
526 spidev = filp->private_data;
527 spin_lock_irq(&spidev->spi_lock);
528 spi = spi_dev_get(spidev->spi);
529 spin_unlock_irq(&spidev->spi_lock);
530
531 if (spi == NULL)
532 return -ESHUTDOWN;
533
534 /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
535 mutex_lock(&spidev->buf_lock);
536
537 /* Check message and copy into scratch area */
538 ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
539 if (IS_ERR(ioc)) {
540 retval = PTR_ERR(ioc);
541 goto done;
542 }
543 if (!ioc)
544 goto done; /* n_ioc is also 0 */
545
546 /* Convert buffer pointers */
547 for (n = 0; n < n_ioc; n++) {
548 ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
549 ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
550 }
551
552 /* translate to spi_message, execute */
553 retval = spidev_message(spidev, ioc, n_ioc);
554 kfree(ioc);
555
556done:
557 mutex_unlock(&spidev->buf_lock);
558 spi_dev_put(spi);
559 return retval;
560}
561
562static long
499spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 563spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
500{ 564{
565 if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
566 && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
567 && _IOC_DIR(cmd) == _IOC_WRITE)
568 return spidev_compat_ioc_message(filp, cmd, arg);
569
501 return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); 570 return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
502} 571}
503#else 572#else
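
spidev_compat_ioc_message() reuses the common helper and then walks the copied array once, widening each 32-bit user pointer with compat_ptr() before spidev_message() treats the fields as native pointers. The widening step modeled standalone; the struct mimics spi_ioc_transfer's __u64 buffer fields, and the zero-extension matches what compat_ptr() does on most architectures:

    #include <stdint.h>
    #include <stdio.h>

    struct xfer { uint64_t tx_buf, rx_buf; };

    int main(void)
    {
            struct xfer ioc[2] = { { 0x1000, 0x2000 }, { 0x3000, 0 } };
            unsigned int n;

            for (n = 0; n < 2; n++) {
                    /* a 32-bit task stored 32-bit handles: zero-extend
                     * them to native pointer width before dereference */
                    ioc[n].rx_buf = (uintptr_t)(uint32_t)ioc[n].rx_buf;
                    ioc[n].tx_buf = (uintptr_t)(uint32_t)ioc[n].tx_buf;
            }

            printf("tx_buf[0] = 0x%llx\n",
                   (unsigned long long)ioc[0].tx_buf);
            return 0;
    }
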
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 930f6010203e..65d610abe06e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
632 return 0; 632 return 0;
633 } 633 }
634 634
635 if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { 635 if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
636 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); 636 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
637 return -EFAULT; 637 return -EFAULT;
638 } 638 }
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 093535c6217b..120b70d72d79 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
85static const struct mfd_cell nvec_devices[] = { 85static const struct mfd_cell nvec_devices[] = {
86 { 86 {
87 .name = "nvec-kbd", 87 .name = "nvec-kbd",
88 .id = 1,
89 }, 88 },
90 { 89 {
91 .name = "nvec-mouse", 90 .name = "nvec-mouse",
92 .id = 1,
93 }, 91 },
94 { 92 {
95 .name = "nvec-power", 93 .name = "nvec-power",
96 .id = 1, 94 .id = 0,
97 }, 95 },
98 { 96 {
99 .name = "nvec-power", 97 .name = "nvec-power",
100 .id = 2, 98 .id = 1,
101 }, 99 },
102 { 100 {
103 .name = "nvec-paz00", 101 .name = "nvec-paz00",
104 .id = 1,
105 }, 102 },
106}; 103};
107 104
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
891 nvec_msg_free(nvec, msg); 888 nvec_msg_free(nvec, msg);
892 } 889 }
893 890
894 ret = mfd_add_devices(nvec->dev, -1, nvec_devices, 891 ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
895 ARRAY_SIZE(nvec_devices), NULL, 0, NULL); 892 ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
896 if (ret) 893 if (ret)
897 dev_err(nvec->dev, "error adding subdevices\n"); 894 dev_err(nvec->dev, "error adding subdevices\n");
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index de0c9c9d7091..a6315abe7b7c 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
55 le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) 55 le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
56 return 0; 56 return 0;
57 57
58 /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
59 if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
60 le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
61 return 1;
62
58 /* NOTE: can't use usb_match_id() since interface caches 63 /* NOTE: can't use usb_match_id() since interface caches
59 * aren't set up yet. this is cut/paste from that code. 64 * aren't set up yet. this is cut/paste from that code.
60 */ 65 */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 0ffb4ed0a945..41e510ae8c83 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
179 { USB_DEVICE(0x0b05, 0x17e0), .driver_info = 179 { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
180 USB_QUIRK_IGNORE_REMOTE_WAKEUP }, 180 USB_QUIRK_IGNORE_REMOTE_WAKEUP },
181 181
182 /* Protocol and OTG Electrical Test Device */
183 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
184 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
185
182 { } /* terminating entry must be last */ 186 { } /* terminating entry must be last */
183}; 187};
184 188
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index ad43c5bc1ef1..02e3e2d4ea56 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
476 u32 gintsts; 476 u32 gintsts;
477 irqreturn_t retval = IRQ_NONE; 477 irqreturn_t retval = IRQ_NONE;
478 478
479 spin_lock(&hsotg->lock);
480
479 if (!dwc2_is_controller_alive(hsotg)) { 481 if (!dwc2_is_controller_alive(hsotg)) {
480 dev_warn(hsotg->dev, "Controller is dead\n"); 482 dev_warn(hsotg->dev, "Controller is dead\n");
481 goto out; 483 goto out;
482 } 484 }
483 485
484 spin_lock(&hsotg->lock);
485
486 gintsts = dwc2_read_common_intr(hsotg); 486 gintsts = dwc2_read_common_intr(hsotg);
487 if (gintsts & ~GINTSTS_PRTINT) 487 if (gintsts & ~GINTSTS_PRTINT)
488 retval = IRQ_HANDLED; 488 retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
515 } 515 }
516 } 516 }
517 517
518 spin_unlock(&hsotg->lock);
519out: 518out:
519 spin_unlock(&hsotg->lock);
520 return retval; 520 return retval;
521} 521}
522EXPORT_SYMBOL_GPL(dwc2_handle_common_intr); 522EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
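
The dwc2 change pulls the controller-alive check under the spinlock, so it can no longer race against the rest of the interrupt handler, and moves the unlock past the out label so every path exits with the same lock state. The resulting single-exit shape, modeled with a pthread mutex (build with -pthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool alive;

    static int handle_irq(void)
    {
            int handled = 0;

            pthread_mutex_lock(&lock);      /* taken before any exit path */
            if (!alive) {
                    printf("controller is dead\n");
                    goto out;               /* out owns the lock on every path */
            }
            handled = 1;                    /* ... normal interrupt handling ... */
    out:
            pthread_mutex_unlock(&lock);
            return handled;
    }

    int main(void)
    {
            handle_irq();
            return 0;
    }
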
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index ccfdfb24b240..2f9735b35338 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list,
34 return phy; 34 return phy;
35 } 35 }
36 36
37 return ERR_PTR(-EPROBE_DEFER); 37 return ERR_PTR(-ENODEV);
38} 38}
39 39
40static struct usb_phy *__usb_find_phy_dev(struct device *dev, 40static struct usb_phy *__usb_find_phy_dev(struct device *dev,
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 11c7a9676441..d684b4b8108f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100,
507UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, 507UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999,
508 "SCM Microsystems", 508 "SCM Microsystems",
509 "eUSB SCSI Adapter (Bus Powered)", 509 "eUSB SCSI Adapter (Bus Powered)",
510 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, 510 USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
511 US_FL_SCM_MULT_TARG ), 511 US_FL_SCM_MULT_TARG ),
512 512
513UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, 513UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
1995 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1995 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1996 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), 1996 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
1997 1997
1998/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
1999UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
2000 "JMicron",
2001 "USB to ATA/ATAPI Bridge",
2002 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2003 US_FL_BROKEN_FUA ),
2004
1998/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) 2005/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
1999 * and Mac USB Dock USB-SCSI */ 2006 * and Mac USB Dock USB-SCSI */
2000UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, 2007UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 6df4357d9ee3..dbc00e56c7f5 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
140 "External HDD", 140 "External HDD",
141 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 141 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
142 US_FL_IGNORE_UAS), 142 US_FL_IGNORE_UAS),
143
144/* Reported-by: Richard Henderson <rth@redhat.com> */
145UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
146 "SimpleTech",
147 "External HDD",
148 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
149 US_FL_NO_REPORT_OPCODES),
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d415d69dc237..9484d5652ca5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -650,8 +650,10 @@ static void handle_rx(struct vhost_net *net)
650 break; 650 break;
651 } 651 }
652 /* TODO: Should check and handle checksum. */ 652 /* TODO: Should check and handle checksum. */
653
654 hdr.num_buffers = cpu_to_vhost16(vq, headcount);
653 if (likely(mergeable) && 655 if (likely(mergeable) &&
654 memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, 656 memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
655 offsetof(typeof(hdr), num_buffers), 657 offsetof(typeof(hdr), num_buffers),
656 sizeof hdr.num_buffers)) { 658 sizeof hdr.num_buffers)) {
657 vq_err(vq, "Failed num_buffers write"); 659 vq_err(vq, "Failed num_buffers write");
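
The vhost fix stores headcount through the typed hdr.num_buffers field via cpu_to_vhost16(), so the value is converted to the guest's virtio byte order before memcpy_toiovecend() copies raw bytes out; previously a native-endian u16 went out as-is, which is wrong whenever the guest's virtio byte order differs from the host's. The serialization concern in miniature, for a little-endian wire format:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t headcount = 3;
            uint8_t wire[2];

            /* serialize explicitly rather than copying the in-memory
             * representation, which depends on host endianness */
            wire[0] = headcount & 0xff;
            wire[1] = headcount >> 8;

            printf("on the ring: %02x %02x\n", wire[0], wire[1]);
            return 0;
    }
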
diff --git a/fs/aio.c b/fs/aio.c
index 1b7893ecc296..c428871f1093 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1140,6 +1140,13 @@ static long aio_read_events_ring(struct kioctx *ctx,
1140 long ret = 0; 1140 long ret = 0;
1141 int copy_ret; 1141 int copy_ret;
1142 1142
1143 /*
1144 * The mutex can block and wake us up and that will cause
1145 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1146 * and repeat. This should be rare enough that it doesn't cause
 1147 * performance issues. See the comment in read_events() for more detail.
1148 */
1149 sched_annotate_sleep();
1143 mutex_lock(&ctx->ring_lock); 1150 mutex_lock(&ctx->ring_lock);
1144 1151
1145 /* Access to ->ring_pages here is protected by ctx->ring_lock. */ 1152 /* Access to ->ring_pages here is protected by ctx->ring_lock. */
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index a66768ebc8d1..80e9c18ea64f 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -8,6 +8,7 @@ config BTRFS_FS
8 select LZO_DECOMPRESS 8 select LZO_DECOMPRESS
9 select RAID6_PQ 9 select RAID6_PQ
10 select XOR_BLOCKS 10 select XOR_BLOCKS
11 select SRCU
11 12
12 help 13 help
13 Btrfs is a general purpose copy-on-write filesystem with extents, 14 Btrfs is a general purpose copy-on-write filesystem with extents,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2f0fbc374e87..e427cb7ee12c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3065 path->search_commit_root = 1; 3065 path->search_commit_root = 1;
3066 path->skip_locking = 1; 3066 path->skip_locking = 1;
3067 3067
3068 ppath->search_commit_root = 1;
3069 ppath->skip_locking = 1;
3068 /* 3070 /*
3069 * trigger the readahead for extent tree csum tree and wait for 3071 * trigger the readahead for extent tree csum tree and wait for
3070 * completion. During readahead, the scrub is officially paused 3072 * completion. During readahead, the scrub is officially paused
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9a02da16f2be..1a9585d4380a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2591 } 2591 }
2592 2592
2593 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 2593 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2594 blk_finish_plug(&plug);
2594 mutex_unlock(&log_root_tree->log_mutex); 2595 mutex_unlock(&log_root_tree->log_mutex);
2595 ret = root_log_ctx.log_ret; 2596 ret = root_log_ctx.log_ret;
2596 goto out; 2597 goto out;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 9c56ef776407..7febcf2475c5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
606 *flags = CIFSSEC_MUST_NTLMV2; 606 *flags = CIFSSEC_MUST_NTLMV2;
607 else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM) 607 else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
608 *flags = CIFSSEC_MUST_NTLM; 608 *flags = CIFSSEC_MUST_NTLM;
609 else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) 609 else if (CIFSSEC_MUST_LANMAN &&
610 (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
610 *flags = CIFSSEC_MUST_LANMAN; 611 *flags = CIFSSEC_MUST_LANMAN;
611 else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) 612 else if (CIFSSEC_MUST_PLNTXT &&
613 (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
612 *flags = CIFSSEC_MUST_PLNTXT; 614 *flags = CIFSSEC_MUST_PLNTXT;
613 615
614 *flags |= signflags; 616 *flags |= signflags;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 96b7e9b7706d..74f12877493a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
366 struct cifsLockInfo *li, *tmp; 366 struct cifsLockInfo *li, *tmp;
367 struct cifs_fid fid; 367 struct cifs_fid fid;
368 struct cifs_pending_open open; 368 struct cifs_pending_open open;
369 bool oplock_break_cancelled;
369 370
370 spin_lock(&cifs_file_list_lock); 371 spin_lock(&cifs_file_list_lock);
371 if (--cifs_file->count > 0) { 372 if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
397 } 398 }
398 spin_unlock(&cifs_file_list_lock); 399 spin_unlock(&cifs_file_list_lock);
399 400
400 cancel_work_sync(&cifs_file->oplock_break); 401 oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
401 402
402 if (!tcon->need_reconnect && !cifs_file->invalidHandle) { 403 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
403 struct TCP_Server_Info *server = tcon->ses->server; 404 struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
409 _free_xid(xid); 410 _free_xid(xid);
410 } 411 }
411 412
413 if (oplock_break_cancelled)
414 cifs_done_oplock_break(cifsi);
415
412 cifs_del_pending_open(&open); 416 cifs_del_pending_open(&open);
413 417
414 /* 418 /*
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6c1566366a66..a4232ec4f2ba 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
221 } 221 }
222 222
223 rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); 223 rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
224 memset(wpwd, 0, 129 * sizeof(__le16)); 224 memzero_explicit(wpwd, sizeof(wpwd));
225 225
226 return rc; 226 return rc;
227} 227}
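
memzero_explicit() exists because a memset() of a buffer that is provably dead afterwards is fair game for dead-store elimination, which is exactly the wrong outcome for a password buffer; the helper adds a barrier so the stores survive. The same hunk also swaps the hard-coded 129 * sizeof(__le16) for sizeof(wpwd), letting the compiler keep the size honest. A userspace approximation of the helper, assuming a GCC/clang-style asm extension:

    #include <string.h>

    /* approximation: the empty asm consumes the pointer and clobbers
     * memory, so the preceding memset() cannot be elided */
    static void memzero_explicit_(void *s, size_t n)
    {
            memset(s, 0, n);
            __asm__ __volatile__("" : : "r" (s) : "memory");
    }

    int main(void)
    {
            char passwd[64] = "hunter2";

            memzero_explicit_(passwd, sizeof(passwd));
            return 0;
    }
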
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c8b148bbdc8b..3e193cb36996 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
667 667
668static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, 668static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
669 s64 change, struct gfs2_quota_data *qd, 669 s64 change, struct gfs2_quota_data *qd,
670 struct fs_disk_quota *fdq) 670 struct qc_dqblk *fdq)
671{ 671{
672 struct inode *inode = &ip->i_inode; 672 struct inode *inode = &ip->i_inode;
673 struct gfs2_sbd *sdp = GFS2_SB(inode); 673 struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
697 be64_add_cpu(&q.qu_value, change); 697 be64_add_cpu(&q.qu_value, change);
698 qd->qd_qb.qb_value = q.qu_value; 698 qd->qd_qb.qb_value = q.qu_value;
699 if (fdq) { 699 if (fdq) {
700 if (fdq->d_fieldmask & FS_DQ_BSOFT) { 700 if (fdq->d_fieldmask & QC_SPC_SOFT) {
701 q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift); 701 q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
702 qd->qd_qb.qb_warn = q.qu_warn; 702 qd->qd_qb.qb_warn = q.qu_warn;
703 } 703 }
704 if (fdq->d_fieldmask & FS_DQ_BHARD) { 704 if (fdq->d_fieldmask & QC_SPC_HARD) {
705 q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift); 705 q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
706 qd->qd_qb.qb_limit = q.qu_limit; 706 qd->qd_qb.qb_limit = q.qu_limit;
707 } 707 }
708 if (fdq->d_fieldmask & FS_DQ_BCOUNT) { 708 if (fdq->d_fieldmask & QC_SPACE) {
709 q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift); 709 q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
710 qd->qd_qb.qb_value = q.qu_value; 710 qd->qd_qb.qb_value = q.qu_value;
711 } 711 }
712 } 712 }
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
1497} 1497}
1498 1498
1499static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, 1499static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1500 struct fs_disk_quota *fdq) 1500 struct qc_dqblk *fdq)
1501{ 1501{
1502 struct gfs2_sbd *sdp = sb->s_fs_info; 1502 struct gfs2_sbd *sdp = sb->s_fs_info;
1503 struct gfs2_quota_lvb *qlvb; 1503 struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1505 struct gfs2_holder q_gh; 1505 struct gfs2_holder q_gh;
1506 int error; 1506 int error;
1507 1507
1508 memset(fdq, 0, sizeof(struct fs_disk_quota)); 1508 memset(fdq, 0, sizeof(*fdq));
1509 1509
1510 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 1510 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1511 return -ESRCH; /* Crazy XFS error code */ 1511 return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1522 goto out; 1522 goto out;
1523 1523
1524 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; 1524 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1525 fdq->d_version = FS_DQUOT_VERSION; 1525 fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1526 fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA; 1526 fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1527 fdq->d_id = from_kqid_munged(current_user_ns(), qid); 1527 fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1528 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1529 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1530 fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1531 1528
1532 gfs2_glock_dq_uninit(&q_gh); 1529 gfs2_glock_dq_uninit(&q_gh);
1533out: 1530out:
@@ -1536,10 +1533,10 @@ out:
1536} 1533}
1537 1534
1538/* GFS2 only supports a subset of the XFS fields */ 1535/* GFS2 only supports a subset of the XFS fields */
1539#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT) 1536#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1540 1537
1541static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, 1538static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1542 struct fs_disk_quota *fdq) 1539 struct qc_dqblk *fdq)
1543{ 1540{
1544 struct gfs2_sbd *sdp = sb->s_fs_info; 1541 struct gfs2_sbd *sdp = sb->s_fs_info;
1545 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 1542 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1583 goto out_i; 1580 goto out_i;
1584 1581
1585 /* If nothing has changed, this is a no-op */ 1582 /* If nothing has changed, this is a no-op */
1586 if ((fdq->d_fieldmask & FS_DQ_BSOFT) && 1583 if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1587 ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) 1584 ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1588 fdq->d_fieldmask ^= FS_DQ_BSOFT; 1585 fdq->d_fieldmask ^= QC_SPC_SOFT;
1589 1586
1590 if ((fdq->d_fieldmask & FS_DQ_BHARD) && 1587 if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1591 ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) 1588 ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1592 fdq->d_fieldmask ^= FS_DQ_BHARD; 1589 fdq->d_fieldmask ^= QC_SPC_HARD;
1593 1590
1594 if ((fdq->d_fieldmask & FS_DQ_BCOUNT) && 1591 if ((fdq->d_fieldmask & QC_SPACE) &&
1595 ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value))) 1592 ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1596 fdq->d_fieldmask ^= FS_DQ_BCOUNT; 1593 fdq->d_fieldmask ^= QC_SPACE;
1597 1594
1598 if (fdq->d_fieldmask == 0) 1595 if (fdq->d_fieldmask == 0)
1599 goto out_i; 1596 goto out_i;
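
The gfs2 conversion tracks the new ->get_dqblk/->set_dqblk interface, which hands limits around in bytes (struct qc_dqblk with QC_* masks) rather than in 512-byte basic blocks (struct fs_disk_quota with FS_DQ_* masks); hence every shift moves from sd_fsb2bb_shift to the filesystem's sb_bsize_shift. The conversion in isolation, assuming 4 KiB filesystem blocks:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int bsize_shift = 12;          /* 4 KiB fs blocks */
            uint64_t limit_fsblocks = 1000;         /* on-disk unit */

            uint64_t limit_bytes = limit_fsblocks << bsize_shift;   /* report */
            uint64_t stored = limit_bytes >> bsize_shift;           /* store */

            printf("%llu fs blocks <-> %llu bytes\n",
                   (unsigned long long)stored,
                   (unsigned long long)limit_bytes);
            return 0;
    }
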
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 10bf07280f4a..294692ff83b1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
212 */ 212 */
213ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) 213ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
214{ 214{
215 struct inode *inode = iocb->ki_filp->f_mapping->host;
216
217 /* we only support swap file calling nfs_direct_IO */
218 if (!IS_SWAPFILE(inode))
219 return 0;
220
215#ifndef CONFIG_NFS_SWAP 221#ifndef CONFIG_NFS_SWAP
216 dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n", 222 dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
217 iocb->ki_filp, (long long) pos, iter->nr_segs); 223 iocb->ki_filp, (long long) pos, iter->nr_segs);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 4bffe637ea32..2211f6ba8736 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
352 352
353 nfs_attr_check_mountpoint(sb, fattr); 353 nfs_attr_check_mountpoint(sb, fattr);
354 354
355 if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) && 355 if (nfs_attr_use_mounted_on_fileid(fattr))
356 !nfs_attr_use_mounted_on_fileid(fattr)) 356 fattr->fileid = fattr->mounted_on_fileid;
357 else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
357 goto out_no_inode; 358 goto out_no_inode;
358 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) 359 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
359 goto out_no_inode; 360 goto out_no_inode;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index efaa31c70fbe..b6f34bfa6fe8 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
31 (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) && 31 (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
32 ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0))) 32 ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
33 return 0; 33 return 0;
34
35 fattr->fileid = fattr->mounted_on_fileid;
36 return 1; 34 return 1;
37} 35}
38 36
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 953daa44a282..706ad10b8186 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
639 prev = pos; 639 prev = pos;
640 640
641 status = nfs_wait_client_init_complete(pos); 641 status = nfs_wait_client_init_complete(pos);
642 if (status == 0) { 642 if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
643 nfs4_schedule_lease_recovery(pos); 643 nfs4_schedule_lease_recovery(pos);
644 status = nfs4_wait_clnt_recover(pos); 644 status = nfs4_wait_clnt_recover(pos);
645 } 645 }
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 91093cd74f0d..385704027575 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -141,7 +141,6 @@ enum {
141 * @ti_save: Backup of journal_info field of task_struct 141 * @ti_save: Backup of journal_info field of task_struct
142 * @ti_flags: Flags 142 * @ti_flags: Flags
143 * @ti_count: Nest level 143 * @ti_count: Nest level
144 * @ti_garbage: List of inode to be put when releasing semaphore
145 */ 144 */
146struct nilfs_transaction_info { 145struct nilfs_transaction_info {
147 u32 ti_magic; 146 u32 ti_magic;
@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
150 one of other filesystems has a bug. */ 149 one of other filesystems has a bug. */
151 unsigned short ti_flags; 150 unsigned short ti_flags;
152 unsigned short ti_count; 151 unsigned short ti_count;
153 struct list_head ti_garbage;
154}; 152};
155 153
156/* ti_magic */ 154/* ti_magic */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 7ef18fc656c2..469086b9f99b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
305 ti->ti_count = 0; 305 ti->ti_count = 0;
306 ti->ti_save = cur_ti; 306 ti->ti_save = cur_ti;
307 ti->ti_magic = NILFS_TI_MAGIC; 307 ti->ti_magic = NILFS_TI_MAGIC;
308 INIT_LIST_HEAD(&ti->ti_garbage);
309 current->journal_info = ti; 308 current->journal_info = ti;
310 309
311 for (;;) { 310 for (;;) {
@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)
332 331
333 up_write(&nilfs->ns_segctor_sem); 332 up_write(&nilfs->ns_segctor_sem);
334 current->journal_info = ti->ti_save; 333 current->journal_info = ti->ti_save;
335 if (!list_empty(&ti->ti_garbage))
336 nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
337} 334}
338 335
339static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, 336static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
746 } 743 }
747} 744}
748 745
746static void nilfs_iput_work_func(struct work_struct *work)
747{
748 struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
749 sc_iput_work);
750 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
751
752 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
753}
754
749static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, 755static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
750 struct nilfs_root *root) 756 struct nilfs_root *root)
751{ 757{
@@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1900static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, 1906static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1901 struct the_nilfs *nilfs) 1907 struct the_nilfs *nilfs)
1902{ 1908{
1903 struct nilfs_transaction_info *ti = current->journal_info;
1904 struct nilfs_inode_info *ii, *n; 1909 struct nilfs_inode_info *ii, *n;
1910 int defer_iput = false;
1905 1911
1906 spin_lock(&nilfs->ns_inode_lock); 1912 spin_lock(&nilfs->ns_inode_lock);
1907 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { 1913 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
@@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
 		clear_bit(NILFS_I_BUSY, &ii->i_state);
 		brelse(ii->i_bh);
 		ii->i_bh = NULL;
-		list_move_tail(&ii->i_dirty, &ti->ti_garbage);
+		list_del_init(&ii->i_dirty);
+		if (!ii->vfs_inode.i_nlink) {
+			/*
+			 * Defer calling iput() to avoid a deadlock
+			 * over I_SYNC flag for inodes with i_nlink == 0
+			 */
+			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+			defer_iput = true;
+		} else {
+			spin_unlock(&nilfs->ns_inode_lock);
+			iput(&ii->vfs_inode);
+			spin_lock(&nilfs->ns_inode_lock);
+		}
 	}
 	spin_unlock(&nilfs->ns_inode_lock);
+
+	if (defer_iput)
+		schedule_work(&sci->sc_iput_work);
 }
 
 /*
@@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
 	INIT_LIST_HEAD(&sci->sc_segbufs);
 	INIT_LIST_HEAD(&sci->sc_write_logs);
 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
+	INIT_LIST_HEAD(&sci->sc_iput_queue);
+	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
 	init_timer(&sci->sc_timer);
 
 	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
 		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
 		nilfs_transaction_unlock(sci->sc_super);
 
+		flush_work(&sci->sc_iput_work);
+
 	} while (ret && retrycount-- > 0);
 }
 
@@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 		   || sci->sc_seq_request != sci->sc_seq_done);
 	spin_unlock(&sci->sc_state_lock);
 
+	if (flush_work(&sci->sc_iput_work))
+		flag = true;
+
 	if (flag || !nilfs_segctor_confirm(sci))
 		nilfs_segctor_write_out(sci);
 
@@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
 	}
 
+	if (!list_empty(&sci->sc_iput_queue)) {
+		nilfs_warning(sci->sc_super, __func__,
+			      "iput queue is not empty\n");
+		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
+	}
+
 	WARN_ON(!list_empty(&sci->sc_segbufs));
 	WARN_ON(!list_empty(&sci->sc_write_logs));
 
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 38a1d0013314..a48d6de1e02c 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
+#include <linux/workqueue.h>
 #include <linux/nilfs2_fs.h>
 #include "nilfs.h"
 
@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
  * @sc_nblk_inc: Block count of current generation
  * @sc_dirty_files: List of files to be written
  * @sc_gc_inodes: List of GC inodes having blocks to be written
+ * @sc_iput_queue: list of inodes for which iput should be done
+ * @sc_iput_work: work struct to defer iput call
  * @sc_freesegs: array of segment numbers to be freed
  * @sc_nfreesegs: number of segments on @sc_freesegs
  * @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -135,6 +138,8 @@ struct nilfs_sc_info {
 
 	struct list_head	sc_dirty_files;
 	struct list_head	sc_gc_inodes;
+	struct list_head	sc_iput_queue;
+	struct work_struct	sc_iput_work;
 
 	__u64			*sc_freesegs;
 	size_t			sc_nfreesegs;
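
The nilfs2 hunks above replace the per-transaction ti_garbage list with an sc_iput_queue that a work item drains, so iput() on zero-link inodes runs in plain process context instead of inside the segment constructor, where it could deadlock on the I_SYNC flag. A minimal sketch of the same defer-to-workqueue pattern follows; the names (iput_deferrer, deferred_inode, iput_worker) are hypothetical illustrations, not the nilfs2 structures:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct iput_deferrer {			/* hypothetical; nilfs2 embeds the   */
	spinlock_t lock;		/* queue and work in nilfs_sc_info   */
	struct list_head queue;
	struct work_struct work;
};

struct deferred_inode {
	struct list_head link;
	struct inode *inode;
};

static void iput_worker(struct work_struct *work)
{
	struct iput_deferrer *d = container_of(work, struct iput_deferrer, work);
	struct deferred_inode *di, *n;
	LIST_HEAD(batch);

	/* Splice the queue under the lock, then call iput() outside it. */
	spin_lock(&d->lock);
	list_splice_init(&d->queue, &batch);
	spin_unlock(&d->lock);

	list_for_each_entry_safe(di, n, &batch, link) {
		iput(di->inode);	/* process context, no locks held */
		kfree(di);
	}
}

As nilfs_segctor_destroy() does above, teardown must flush_work() before freeing the containing structure so the worker never runs against freed memory.
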
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 22c629eedd82..2a24249b30af 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,5 +1,6 @@
 config FSNOTIFY
 	def_bool n
+	select SRCU
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index c51df1dd237e..4a09975aac90 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -5,6 +5,7 @@
 config QUOTA
 	bool "Quota support"
 	select QUOTACTL
+	select SRCU
 	help
 	  If you say Y here, you will be able to set per user limits for disk
 	  usage (also called disk quotas). Currently, it works for the
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 8f0acef3d184..69df5b239844 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space)
 }
 
 /* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
 	struct mem_dqblk *dm = &dquot->dq_dqb;
 
 	memset(di, 0, sizeof(*di));
-	di->d_version = FS_DQUOT_VERSION;
-	di->d_flags = dquot->dq_id.type == USRQUOTA ?
-			FS_USER_QUOTA : FS_GROUP_QUOTA;
-	di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
 	spin_lock(&dq_data_lock);
-	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
-	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+	di->d_spc_hardlimit = dm->dqb_bhardlimit;
+	di->d_spc_softlimit = dm->dqb_bsoftlimit;
 	di->d_ino_hardlimit = dm->dqb_ihardlimit;
 	di->d_ino_softlimit = dm->dqb_isoftlimit;
-	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
-	di->d_icount = dm->dqb_curinodes;
-	di->d_btimer = dm->dqb_btime;
-	di->d_itimer = dm->dqb_itime;
+	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+	di->d_ino_count = dm->dqb_curinodes;
+	di->d_spc_timer = dm->dqb_btime;
+	di->d_ino_timer = dm->dqb_itime;
 	spin_unlock(&dq_data_lock);
 }
 
 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
-		    struct fs_disk_quota *di)
+		    struct qc_dqblk *di)
 {
 	struct dquot *dquot;
 
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
 }
 EXPORT_SYMBOL(dquot_get_dqblk);
 
-#define VFS_FS_DQ_MASK \
-	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
-	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
-	 FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+	 QC_SPC_TIMER | QC_INO_TIMER)
 
 /* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
 	struct mem_dqblk *dm = &dquot->dq_dqb;
 	int check_blim = 0, check_ilim = 0;
 	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
 
-	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+	if (di->d_fieldmask & ~VFS_QC_MASK)
 		return -EINVAL;
 
-	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
-	     (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
-	    ((di->d_fieldmask & FS_DQ_BHARD) &&
-	     (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
-	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
+	if (((di->d_fieldmask & QC_SPC_SOFT) &&
+	     stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+	    ((di->d_fieldmask & QC_SPC_HARD) &&
+	     stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+	    ((di->d_fieldmask & QC_INO_SOFT) &&
 	     (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
-	    ((di->d_fieldmask & FS_DQ_IHARD) &&
+	    ((di->d_fieldmask & QC_INO_HARD) &&
 	     (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
 		return -ERANGE;
 
 	spin_lock(&dq_data_lock);
-	if (di->d_fieldmask & FS_DQ_BCOUNT) {
-		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+	if (di->d_fieldmask & QC_SPACE) {
+		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_BSOFT)
-		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
-	if (di->d_fieldmask & FS_DQ_BHARD)
-		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
-	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+	if (di->d_fieldmask & QC_SPC_SOFT)
+		dm->dqb_bsoftlimit = di->d_spc_softlimit;
+	if (di->d_fieldmask & QC_SPC_HARD)
+		dm->dqb_bhardlimit = di->d_spc_hardlimit;
+	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ICOUNT) {
-		dm->dqb_curinodes = di->d_icount;
+	if (di->d_fieldmask & QC_INO_COUNT) {
+		dm->dqb_curinodes = di->d_ino_count;
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ISOFT)
+	if (di->d_fieldmask & QC_INO_SOFT)
 		dm->dqb_isoftlimit = di->d_ino_softlimit;
-	if (di->d_fieldmask & FS_DQ_IHARD)
+	if (di->d_fieldmask & QC_INO_HARD)
 		dm->dqb_ihardlimit = di->d_ino_hardlimit;
-	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_BTIMER) {
-		dm->dqb_btime = di->d_btimer;
+	if (di->d_fieldmask & QC_SPC_TIMER) {
+		dm->dqb_btime = di->d_spc_timer;
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ITIMER) {
-		dm->dqb_itime = di->d_itimer;
+	if (di->d_fieldmask & QC_INO_TIMER) {
+		dm->dqb_itime = di->d_ino_timer;
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
 	}
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
 			dm->dqb_btime = 0;
 			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
 			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
 	}
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
 			dm->dqb_itime = 0;
 			clear_bit(DQ_INODES_B, &dquot->dq_flags);
-		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+		} else if (!(di->d_fieldmask & QC_INO_TIMER))
 			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
 	}
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 }
 
 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
-		  struct fs_disk_quota *di)
+		  struct qc_dqblk *di)
 {
 	struct dquot *dquot;
 	int rc;
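
With struct qc_dqblk the VFS quota layer now carries space limits in bytes, and do_set_dqblk() above only converts back to 1 KiB quota blocks via stoqb() for the dqi_maxblimit range check. The helpers are the simple shifts defined in the fs/quota/quota.c hunk below; a standalone check of their rounding behaviour, assuming the in-tree value QIF_DQBLKSIZE_BITS == 10:

#include <assert.h>
#include <stdint.h>

#define QIF_DQBLKSIZE_BITS 10			/* 1 KiB quota blocks */
#define QIF_DQBLKSIZE (1 << QIF_DQBLKSIZE_BITS)

static uint64_t qbtos(uint64_t blocks) { return blocks << QIF_DQBLKSIZE_BITS; }
static uint64_t stoqb(uint64_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

int main(void)
{
	assert(stoqb(1) == 1);		/* any partial block counts as one */
	assert(stoqb(qbtos(8)) == 8);	/* exact round trip for whole blocks */
	return 0;
}

Rounding up means a byte limit can never be understated when compared against the block-denominated maximum.
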
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 2aa4151f99d2..6f3856328eea 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
 	return sb->s_qcop->set_info(sb, type, &info);
 }
 
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+	return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
 {
 	memset(dst, 0, sizeof(*dst));
-	dst->dqb_bhardlimit = src->d_blk_hardlimit;
-	dst->dqb_bsoftlimit = src->d_blk_softlimit;
-	dst->dqb_curspace = src->d_bcount;
+	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+	dst->dqb_curspace = src->d_space;
 	dst->dqb_ihardlimit = src->d_ino_hardlimit;
 	dst->dqb_isoftlimit = src->d_ino_softlimit;
-	dst->dqb_curinodes = src->d_icount;
-	dst->dqb_btime = src->d_btimer;
-	dst->dqb_itime = src->d_itimer;
+	dst->dqb_curinodes = src->d_ino_count;
+	dst->dqb_btime = src->d_spc_timer;
+	dst->dqb_itime = src->d_ino_timer;
 	dst->dqb_valid = QIF_ALL;
 }
 
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
 		void __user *addr)
 {
 	struct kqid qid;
-	struct fs_disk_quota fdq;
+	struct qc_dqblk fdq;
 	struct if_dqblk idq;
 	int ret;
 
@@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
 	return 0;
 }
 
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
 {
-	dst->d_blk_hardlimit = src->dqb_bhardlimit;
-	dst->d_blk_softlimit = src->dqb_bsoftlimit;
-	dst->d_bcount = src->dqb_curspace;
+	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+	dst->d_space = src->dqb_curspace;
 	dst->d_ino_hardlimit = src->dqb_ihardlimit;
 	dst->d_ino_softlimit = src->dqb_isoftlimit;
-	dst->d_icount = src->dqb_curinodes;
-	dst->d_btimer = src->dqb_btime;
-	dst->d_itimer = src->dqb_itime;
+	dst->d_ino_count = src->dqb_curinodes;
+	dst->d_spc_timer = src->dqb_btime;
+	dst->d_ino_timer = src->dqb_itime;
 
 	dst->d_fieldmask = 0;
 	if (src->dqb_valid & QIF_BLIMITS)
-		dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
 	if (src->dqb_valid & QIF_SPACE)
-		dst->d_fieldmask |= FS_DQ_BCOUNT;
+		dst->d_fieldmask |= QC_SPACE;
 	if (src->dqb_valid & QIF_ILIMITS)
-		dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
 	if (src->dqb_valid & QIF_INODES)
-		dst->d_fieldmask |= FS_DQ_ICOUNT;
+		dst->d_fieldmask |= QC_INO_COUNT;
 	if (src->dqb_valid & QIF_BTIME)
-		dst->d_fieldmask |= FS_DQ_BTIMER;
+		dst->d_fieldmask |= QC_SPC_TIMER;
 	if (src->dqb_valid & QIF_ITIME)
-		dst->d_fieldmask |= FS_DQ_ITIMER;
+		dst->d_fieldmask |= QC_INO_TIMER;
 }
 
 static int quota_setquota(struct super_block *sb, int type, qid_t id,
 		void __user *addr)
 {
-	struct fs_disk_quota fdq;
+	struct qc_dqblk fdq;
 	struct if_dqblk idq;
 	struct kqid qid;
 
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
 	return ret;
 }
 
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs rely on definitions being in that header file. So
+ * just define same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+	return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+	dst->d_ino_hardlimit = src->d_ino_hardlimit;
+	dst->d_ino_softlimit = src->d_ino_softlimit;
+	dst->d_space = quota_bbtob(src->d_bcount);
+	dst->d_ino_count = src->d_icount;
+	dst->d_ino_timer = src->d_itimer;
+	dst->d_spc_timer = src->d_btimer;
+	dst->d_ino_warns = src->d_iwarns;
+	dst->d_spc_warns = src->d_bwarns;
+	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+	dst->d_rt_spc_timer = src->d_rtbtimer;
+	dst->d_rt_spc_warns = src->d_rtbwarns;
+	dst->d_fieldmask = 0;
+	if (src->d_fieldmask & FS_DQ_ISOFT)
+		dst->d_fieldmask |= QC_INO_SOFT;
+	if (src->d_fieldmask & FS_DQ_IHARD)
+		dst->d_fieldmask |= QC_INO_HARD;
+	if (src->d_fieldmask & FS_DQ_BSOFT)
+		dst->d_fieldmask |= QC_SPC_SOFT;
+	if (src->d_fieldmask & FS_DQ_BHARD)
+		dst->d_fieldmask |= QC_SPC_HARD;
+	if (src->d_fieldmask & FS_DQ_RTBSOFT)
+		dst->d_fieldmask |= QC_RT_SPC_SOFT;
+	if (src->d_fieldmask & FS_DQ_RTBHARD)
+		dst->d_fieldmask |= QC_RT_SPC_HARD;
+	if (src->d_fieldmask & FS_DQ_BTIMER)
+		dst->d_fieldmask |= QC_SPC_TIMER;
+	if (src->d_fieldmask & FS_DQ_ITIMER)
+		dst->d_fieldmask |= QC_INO_TIMER;
+	if (src->d_fieldmask & FS_DQ_RTBTIMER)
+		dst->d_fieldmask |= QC_RT_SPC_TIMER;
+	if (src->d_fieldmask & FS_DQ_BWARNS)
+		dst->d_fieldmask |= QC_SPC_WARNS;
+	if (src->d_fieldmask & FS_DQ_IWARNS)
+		dst->d_fieldmask |= QC_INO_WARNS;
+	if (src->d_fieldmask & FS_DQ_RTBWARNS)
+		dst->d_fieldmask |= QC_RT_SPC_WARNS;
+	if (src->d_fieldmask & FS_DQ_BCOUNT)
+		dst->d_fieldmask |= QC_SPACE;
+	if (src->d_fieldmask & FS_DQ_ICOUNT)
+		dst->d_fieldmask |= QC_INO_COUNT;
+	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+		dst->d_fieldmask |= QC_RT_SPACE;
+}
+
 static int quota_setxquota(struct super_block *sb, int type, qid_t id,
 		void __user *addr)
 {
 	struct fs_disk_quota fdq;
+	struct qc_dqblk qdq;
 	struct kqid qid;
 
 	if (copy_from_user(&fdq, addr, sizeof(fdq)))
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
 	qid = make_kqid(current_user_ns(), type, id);
 	if (!qid_valid(qid))
 		return -EINVAL;
-	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+	copy_from_xfs_dqblk(&qdq, &fdq);
+	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+			      int type, qid_t id)
+{
+	memset(dst, 0, sizeof(*dst));
+	dst->d_version = FS_DQUOT_VERSION;
+	dst->d_id = id;
+	if (type == USRQUOTA)
+		dst->d_flags = FS_USER_QUOTA;
+	else if (type == PRJQUOTA)
+		dst->d_flags = FS_PROJ_QUOTA;
+	else
+		dst->d_flags = FS_GROUP_QUOTA;
+	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+	dst->d_ino_hardlimit = src->d_ino_hardlimit;
+	dst->d_ino_softlimit = src->d_ino_softlimit;
+	dst->d_bcount = quota_btobb(src->d_space);
+	dst->d_icount = src->d_ino_count;
+	dst->d_itimer = src->d_ino_timer;
+	dst->d_btimer = src->d_spc_timer;
+	dst->d_iwarns = src->d_ino_warns;
+	dst->d_bwarns = src->d_spc_warns;
+	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+	dst->d_rtbcount = quota_btobb(src->d_rt_space);
+	dst->d_rtbtimer = src->d_rt_spc_timer;
+	dst->d_rtbwarns = src->d_rt_spc_warns;
 }
 
 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
 		void __user *addr)
 {
 	struct fs_disk_quota fdq;
+	struct qc_dqblk qdq;
 	struct kqid qid;
 	int ret;
 
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
 	qid = make_kqid(current_user_ns(), type, id);
 	if (!qid_valid(qid))
 		return -EINVAL;
-	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
-	if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
+	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+	if (ret)
+		return ret;
+	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+	if (copy_to_user(addr, &fdq, sizeof(fdq)))
 		return -EFAULT;
 	return ret;
 }
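
The Q_XSETQUOTA/Q_XGETQUOTA paths above convert between the 512-byte basic blocks of struct fs_disk_quota and the byte counts of struct qc_dqblk with quota_bbtob()/quota_btobb(). The conversion is exact for whole blocks and rounds partial blocks up, so a limit can only become stricter across the boundary; a small standalone demonstration of the same arithmetic:

#include <assert.h>
#include <stdint.h>

#define XFS_BB_SHIFT 9				/* 512-byte basic blocks */

static uint64_t bbtob(uint64_t blocks) { return blocks << XFS_BB_SHIFT; }
static uint64_t btobb(uint64_t bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

int main(void)
{
	assert(btobb(bbtob(100)) == 100);	/* exact for whole blocks */
	assert(btobb(1) == 1);			/* partial blocks round up */
	assert(bbtob(btobb(513)) == 1024);	/* limits only get stricter */
	return 0;
}
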
diff --git a/fs/udf/file.c b/fs/udf/file.c
index bb15771b92ae..08f3555fbeac 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -224,7 +224,7 @@ out:
 static int udf_release_file(struct inode *inode, struct file *filp)
 {
 	if (filp->f_mode & FMODE_WRITE &&
-	    atomic_read(&inode->i_writecount) > 1) {
+	    atomic_read(&inode->i_writecount) == 1) {
 		/*
 		 * Grab i_mutex to avoid races with writes changing i_size
 		 * while we are running.
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 3a07a937e232..41f6c0b9d51c 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
 /* quota ops */
 extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
 extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
-					uint, struct fs_disk_quota *);
+					uint, struct qc_dqblk *);
 extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
-					struct fs_disk_quota *);
+					struct qc_dqblk *);
 extern int xfs_qm_scall_getqstat(struct xfs_mount *,
 					struct fs_quota_stat *);
 extern int xfs_qm_scall_getqstatv(struct xfs_mount *,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 74fca68e43b6..cb6168ec92c9 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
 STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
 					uint);
 STATIC uint	xfs_qm_export_flags(uint);
-STATIC uint	xfs_qm_export_qtype_flags(uint);
 
 /*
  * Turn off quota accounting and/or enforcement for all udquots and/or
@@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv(
 	return 0;
 }
 
-#define XFS_DQ_MASK \
-	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+#define XFS_QC_MASK \
+	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
 
 /*
  * Adjust quota limits, and start/stop timers accordingly.
@@ -584,7 +583,7 @@ xfs_qm_scall_setqlim(
 	struct xfs_mount	*mp,
 	xfs_dqid_t		id,
 	uint			type,
-	fs_disk_quota_t		*newlim)
+	struct qc_dqblk		*newlim)
 {
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	struct xfs_disk_dquot	*ddq;
@@ -593,9 +592,9 @@ xfs_qm_scall_setqlim(
 	int			error;
 	xfs_qcnt_t		hard, soft;
 
-	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+	if (newlim->d_fieldmask & ~XFS_QC_MASK)
 		return -EINVAL;
-	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
 		return 0;
 
 	/*
@@ -633,11 +632,11 @@ xfs_qm_scall_setqlim(
 	/*
 	 * Make sure that hardlimits are >= soft limits before changing.
 	 */
-	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
-		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
+		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
 			be64_to_cpu(ddq->d_blk_hardlimit);
-	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
-		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
+		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
 			be64_to_cpu(ddq->d_blk_softlimit);
 	if (hard == 0 || hard >= soft) {
 		ddq->d_blk_hardlimit = cpu_to_be64(hard);
@@ -650,11 +649,11 @@ xfs_qm_scall_setqlim(
 	} else {
 		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
 	}
-	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
-		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
+		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
 			be64_to_cpu(ddq->d_rtb_hardlimit);
-	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
-		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
+		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
 			be64_to_cpu(ddq->d_rtb_softlimit);
 	if (hard == 0 || hard >= soft) {
 		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
@@ -667,10 +666,10 @@ xfs_qm_scall_setqlim(
 		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
 	}
 
-	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
 		(xfs_qcnt_t) newlim->d_ino_hardlimit :
 			be64_to_cpu(ddq->d_ino_hardlimit);
-	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
 		(xfs_qcnt_t) newlim->d_ino_softlimit :
 			be64_to_cpu(ddq->d_ino_softlimit);
 	if (hard == 0 || hard >= soft) {
@@ -687,12 +686,12 @@ xfs_qm_scall_setqlim(
 	/*
 	 * Update warnings counter(s) if requested
 	 */
-	if (newlim->d_fieldmask & FS_DQ_BWARNS)
-		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
-	if (newlim->d_fieldmask & FS_DQ_IWARNS)
-		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
-	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
+	if (newlim->d_fieldmask & QC_SPC_WARNS)
+		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
+	if (newlim->d_fieldmask & QC_INO_WARNS)
+		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
+	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
 
 	if (id == 0) {
 		/*
@@ -702,24 +701,24 @@ xfs_qm_scall_setqlim(
 		 * soft and hard limit values (already done, above), and
 		 * for warnings.
 		 */
-		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
-			q->qi_btimelimit = newlim->d_btimer;
-			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
+		if (newlim->d_fieldmask & QC_SPC_TIMER) {
+			q->qi_btimelimit = newlim->d_spc_timer;
+			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
 		}
-		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
-			q->qi_itimelimit = newlim->d_itimer;
-			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
+		if (newlim->d_fieldmask & QC_INO_TIMER) {
+			q->qi_itimelimit = newlim->d_ino_timer;
+			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
 		}
-		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
-			q->qi_rtbtimelimit = newlim->d_rtbtimer;
-			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
+		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
+			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
+			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
 		}
-		if (newlim->d_fieldmask & FS_DQ_BWARNS)
-			q->qi_bwarnlimit = newlim->d_bwarns;
-		if (newlim->d_fieldmask & FS_DQ_IWARNS)
-			q->qi_iwarnlimit = newlim->d_iwarns;
-		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
+		if (newlim->d_fieldmask & QC_SPC_WARNS)
+			q->qi_bwarnlimit = newlim->d_spc_warns;
+		if (newlim->d_fieldmask & QC_INO_WARNS)
+			q->qi_iwarnlimit = newlim->d_ino_warns;
+		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
 	} else {
 		/*
 		 * If the user is now over quota, start the timelimit.
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota(
 	struct xfs_mount	*mp,
 	xfs_dqid_t		id,
 	uint			type,
-	struct fs_disk_quota	*dst)
+	struct qc_dqblk		*dst)
 {
 	struct xfs_dquot	*dqp;
 	int			error;
@@ -848,28 +847,25 @@ xfs_qm_scall_getquota(
 	}
 
 	memset(dst, 0, sizeof(*dst));
-	dst->d_version = FS_DQUOT_VERSION;
-	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
-	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
-	dst->d_blk_hardlimit =
-		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
-	dst->d_blk_softlimit =
-		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+	dst->d_spc_hardlimit =
+		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+	dst->d_spc_softlimit =
+		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
 	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
 	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
-	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
-	dst->d_icount = dqp->q_res_icount;
-	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
-	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
-	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
-	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
-	dst->d_rtb_hardlimit =
-		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
-	dst->d_rtb_softlimit =
-		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
-	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
-	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
-	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
+	dst->d_ino_count = dqp->q_res_icount;
+	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
+	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
+	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
+	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
+	dst->d_rt_spc_hardlimit =
+		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+	dst->d_rt_spc_softlimit =
+		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
+	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
 
 	/*
 	 * Internally, we don't reset all the timers when quota enforcement
@@ -882,23 +878,23 @@ xfs_qm_scall_getquota(
 	    dqp->q_core.d_flags == XFS_DQ_GROUP) ||
 	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
 	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
-		dst->d_btimer = 0;
-		dst->d_itimer = 0;
-		dst->d_rtbtimer = 0;
+		dst->d_spc_timer = 0;
+		dst->d_ino_timer = 0;
+		dst->d_rt_spc_timer = 0;
 	}
 
 #ifdef DEBUG
-	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
-	     (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
-	     (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
-	    dst->d_id != 0) {
-		if ((dst->d_bcount > dst->d_blk_softlimit) &&
-		    (dst->d_blk_softlimit > 0)) {
-			ASSERT(dst->d_btimer != 0);
+	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
+	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
+	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
+	    id != 0) {
+		if ((dst->d_space > dst->d_spc_softlimit) &&
+		    (dst->d_spc_softlimit > 0)) {
+			ASSERT(dst->d_spc_timer != 0);
 		}
-		if ((dst->d_icount > dst->d_ino_softlimit) &&
+		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
 		    (dst->d_ino_softlimit > 0)) {
-			ASSERT(dst->d_itimer != 0);
+			ASSERT(dst->d_ino_timer != 0);
 		}
 	}
 #endif
@@ -908,26 +904,6 @@ out_put:
 }
 
 STATIC uint
-xfs_qm_export_qtype_flags(
-	uint flags)
-{
-	/*
-	 * Can't be more than one, or none.
-	 */
-	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
-		(FS_PROJ_QUOTA | FS_USER_QUOTA));
-	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
-		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
-	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
-		(FS_USER_QUOTA | FS_GROUP_QUOTA));
-	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
-
-	return (flags & XFS_DQ_USER) ?
-		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
-			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
-}
-
-STATIC uint
 xfs_qm_export_flags(
 	uint flags)
 {
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 7542bbeca6a1..801a84c1cdc3 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -131,7 +131,7 @@ STATIC int
 xfs_fs_get_dqblk(
 	struct super_block	*sb,
 	struct kqid		qid,
-	struct fs_disk_quota	*fdq)
+	struct qc_dqblk		*qdq)
 {
 	struct xfs_mount	*mp = XFS_M(sb);
 
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk(
 		return -ESRCH;
 
 	return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
-				     xfs_quota_type(qid.type), fdq);
+				     xfs_quota_type(qid.type), qdq);
 }
 
 STATIC int
 xfs_fs_set_dqblk(
 	struct super_block	*sb,
 	struct kqid		qid,
-	struct fs_disk_quota	*fdq)
+	struct qc_dqblk		*qdq)
 {
 	struct xfs_mount	*mp = XFS_M(sb);
 
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk(
 		return -ESRCH;
 
 	return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
-				    xfs_quota_type(qid.type), qdq);
 }
 
 const struct quotactl_ops xfs_quotactl_operations = {
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 33063f872ee3..176bf816875e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -385,7 +385,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 
 /* Is this type a native word size -- useful for atomic operations */
 #ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
 /* Compile time object size, -1 for unknown */
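
Widening __native_word() to accept char and short lets compile-time checks built on it (such as the compiletime_assert_atomic_type() used by smp_load_acquire()/smp_store_release()) accept 8- and 16-bit fields. A userspace analogue of the updated size test, runnable with any C11 compiler; NATIVE_WORD is a stand-in name, not the kernel macro:

#include <stdio.h>

/* Userspace analogue of the widened __native_word() test. */
#define NATIVE_WORD(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
			sizeof(t) == sizeof(int)  || sizeof(t) == sizeof(long))

int main(void)
{
	_Static_assert(NATIVE_WORD(short), "short now passes the check");
	printf("char:%d short:%d int:%d long:%d\n",
	       (int)NATIVE_WORD(char), (int)NATIVE_WORD(short),
	       (int)NATIVE_WORD(int), (int)NATIVE_WORD(long));
	return 0;
}
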
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 0bebb5c348b8..d36f68b08acc 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -595,7 +595,7 @@ extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *perf_trace_buf_prepare(int size, unsigned short type,
-				    struct pt_regs *regs, int *rctxp);
+				    struct pt_regs **regs, int *rctxp);
 
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e3a1721c8354..7c7695940ddd 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -228,7 +228,9 @@ struct i2c_client {
 	struct device dev;		/* the device structure		*/
 	int irq;			/* irq issued by device		*/
 	struct list_head detected;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 	i2c_slave_cb_t slave_cb;	/* callback for slave mode	*/
+#endif
 };
 #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
 
@@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
 
 /* I2C slave support */
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 enum i2c_slave_event {
 	I2C_SLAVE_REQ_READ_START,
 	I2C_SLAVE_REQ_READ_END,
@@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
 {
 	return client->slave_cb(client, event, val);
 }
+#endif
 
 /**
  * struct i2c_board_info - template for device creation
@@ -404,8 +408,10 @@ struct i2c_algorithm {
 	/* To determine what the adapter supports */
 	u32 (*functionality) (struct i2c_adapter *);
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 	int (*reg_slave)(struct i2c_client *client);
 	int (*unreg_slave)(struct i2c_client *client);
+#endif
 };
 
 /**
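
The new IS_ENABLED(CONFIG_I2C_SLAVE) guards compile the slave hooks out entirely when the option is off. For context, a device using this interface supplies an i2c_slave_cb_t callback; a hedged sketch is below, assuming the i2c_slave_register() helper that accompanies this API and using only the two events visible in the hunk above (my_reg and my_slave_cb are hypothetical):

#include <linux/i2c.h>

static u8 my_reg;			/* hypothetical one-register backend */

static int my_slave_cb(struct i2c_client *client,
		       enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_REQ_READ_START:
		*val = my_reg;		/* first byte the master will read */
		break;
	case I2C_SLAVE_REQ_READ_END:
		break;
	default:			/* write/stop events elided here */
		break;
	}
	return 0;
}

/* Registration (assumed helper): i2c_slave_register(client, my_slave_cb); */
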
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 515a35e2a48a..960e666c51e4 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 /**
  * vlan_get_protocol - get protocol EtherType.
  * @skb: skbuff to query
+ * @type: first vlan protocol
+ * @depth: buffer to store length of eth and vlan tags in bytes
  *
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+					 int *depth)
 {
-	__be16 protocol = 0;
+	unsigned int vlan_depth = skb->mac_len;
 
-	if (vlan_tx_tag_present(skb) ||
-	    skb->protocol != cpu_to_be16(ETH_P_8021Q))
-		protocol = skb->protocol;
-	else {
-		__be16 proto, *protop;
-		protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
-						h_vlan_encapsulated_proto),
-						sizeof(proto), &proto);
-		if (likely(protop))
-			protocol = *protop;
+	/* if type is 802.1Q/AD then the header should already be
+	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+	 * ETH_HLEN otherwise
+	 */
+	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+		if (vlan_depth) {
+			if (WARN_ON(vlan_depth < VLAN_HLEN))
+				return 0;
+			vlan_depth -= VLAN_HLEN;
+		} else {
+			vlan_depth = ETH_HLEN;
+		}
+		do {
+			struct vlan_hdr *vh;
+
+			if (unlikely(!pskb_may_pull(skb,
+						    vlan_depth + VLAN_HLEN)))
+				return 0;
+
+			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+			type = vh->h_vlan_encapsulated_proto;
+			vlan_depth += VLAN_HLEN;
+		} while (type == htons(ETH_P_8021Q) ||
+			 type == htons(ETH_P_8021AD));
 	}
 
-	return protocol;
+	if (depth)
+		*depth = vlan_depth;
+
+	return type;
+}
+
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+{
+	return __vlan_get_protocol(skb, skb->protocol, NULL);
 }
 
 static inline void vlan_set_encap_proto(struct sk_buff *skb,
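
__vlan_get_protocol() above walks an arbitrary stack of 802.1Q/802.1ad tags instead of peeking at one fixed offset. The same walk over a raw frame buffer in plain C; frame_get_protocol() is a hypothetical userspace analogue, and the kernel version additionally handles skb geometry via mac_len and pskb_may_pull():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_HLEN     14
#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

static uint16_t frame_get_protocol(const uint8_t *frame, size_t len, int *depth)
{
	size_t off = ETH_HLEN - 2;	/* offset of the outer EtherType */
	uint16_t type;

	if (len < ETH_HLEN)
		return 0;
	memcpy(&type, frame + off, 2);
	type = ntohs(type);
	while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
		off += VLAN_HLEN;	/* skip TPID + TCI to the inner type */
		if (off + 2 > len)
			return 0;	/* truncated: like a failed pskb_may_pull() */
		memcpy(&type, frame + off, 2);
		type = ntohs(type);
	}
	if (depth)
		*depth = (int)(off + 2);	/* eth + vlan header bytes */
	return type;
}

int main(void)
{
	/* dst, src, one 802.1Q tag (TPID 0x8100, TCI 0), inner type IPv4 */
	uint8_t frame[] = { 0,0,0,0,0,0, 0,0,0,0,0,0,
			    0x81, 0x00, 0x00, 0x00, 0x08, 0x00 };
	int depth = 0;
	uint16_t type = frame_get_protocol(frame, sizeof(frame), &depth);

	printf("type=0x%04x depth=%d\n", type, depth);	/* 0x0800, 18 */
	return 0;
}
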
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5449d2f4a1ef..64ce58bee6f5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -176,7 +176,7 @@ extern int _cond_resched(void);
  */
 # define might_sleep() \
 	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
-# define sched_annotate_sleep()	__set_current_state(TASK_RUNNING)
+# define sched_annotate_sleep()	(current->task_state_change = 0)
 #else
   static inline void ___might_sleep(const char *file, int line,
 				   int preempt_offset) { }
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 25c791e295fd..5f3a9aa7225d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -97,7 +97,7 @@ enum {
 	MLX4_MAX_NUM_PF		= 16,
 	MLX4_MAX_NUM_VF		= 126,
 	MLX4_MAX_NUM_VF_P_PORT  = 64,
-	MLX4_MFUNC_MAX		= 80,
+	MLX4_MFUNC_MAX		= 128,
 	MLX4_MAX_EQ_NUM		= 1024,
 	MLX4_MFUNC_EQ_NUM	= 4,
 	MLX4_MFUNC_MAX_EQES     = 8,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80fc92a49649..dd5ea3016fc4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
 #define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
 #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
 
 #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
@@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page)
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
-#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
-			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+			 VM_FAULT_FALLBACK)
 
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
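
VM_FAULT_SIGSEGV gives ->fault handlers a way to kill the task with SIGSEGV rather than SIGBUS, and its inclusion in VM_FAULT_ERROR makes the generic fault path treat it as fatal. A hedged sketch of a driver fault handler that could use it; my_pages, MY_MAX_PAGES and my_fault are hypothetical:

#include <linux/mm.h>

#define MY_MAX_PAGES 16				/* hypothetical window size */
static struct page *my_pages[MY_MAX_PAGES];	/* hypothetical backing pages */

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	if (vmf->pgoff >= MY_MAX_PAGES)
		return VM_FAULT_SIGSEGV;	/* invalid offset: SIGSEGV */
	if (!my_pages[vmf->pgoff])
		return VM_FAULT_SIGBUS;		/* lost backing store: SIGBUS */
	get_page(my_pages[vmf->pgoff]);
	vmf->page = my_pages[vmf->pgoff];
	return 0;
}
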
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 90230d5811c5..3a6490e81b28 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -5,8 +5,11 @@
  * An MCS like lock especially tailored for optimistic spinning for sleeping
  * lock implementations (mutex, rwsem, etc).
  */
-
-#define OSQ_UNLOCKED_VAL (0)
+struct optimistic_spin_node {
+	struct optimistic_spin_node *next, *prev;
+	int locked; /* 1 if lock acquired */
+	int cpu; /* encoded CPU # + 1 value */
+};
 
 struct optimistic_spin_queue {
 	/*
@@ -16,6 +19,8 @@ struct optimistic_spin_queue {
 	atomic_t tail;
 };
 
+#define OSQ_UNLOCKED_VAL (0)
+
 /* Init macro and function. */
 #define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
 
@@ -24,4 +29,7 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
 	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
 }
 
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
+
 #endif
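
Moving struct optimistic_spin_node into this header and exporting osq_lock()/osq_unlock() makes the OSQ usable by any sleeping-lock implementation. The usual shape of a caller, sketched with a hypothetical lock type (the actual lock-acquisition attempt is elided):

#include <linux/osq_lock.h>

struct my_sleeping_lock {		/* hypothetical mutex-like lock */
	struct optimistic_spin_queue osq;
	/* owner field, wait list, ... */
};

static bool my_lock_optimistic_spin(struct my_sleeping_lock *lock)
{
	bool acquired = false;

	if (!osq_lock(&lock->osq))
		return false;		/* OSQ contended: take the slow path */

	/*
	 * At most one spinner per lock reaches this point at a time; it
	 * would spin on the owner here and try to take the lock itself.
	 */

	osq_unlock(&lock->osq);
	return acquired;
}
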
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4f7a61ca4b39..5cad0e6f3552 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -450,11 +450,6 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
-enum perf_event_context_type {
-	task_context,
-	cpu_context,
-};
-
 /**
  * struct perf_event_context - event context structure
  *
@@ -462,7 +457,6 @@ enum perf_event_context_type {
  */
 struct perf_event_context {
 	struct pmu			*pmu;
-	enum perf_event_context_type	type;
 	/*
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
@@ -475,6 +469,7 @@ struct perf_event_context {
 	 */
 	struct mutex			mutex;
 
+	struct list_head		active_ctx_list;
 	struct list_head		pinned_groups;
 	struct list_head		flexible_groups;
 	struct list_head		event_list;
@@ -525,7 +520,6 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct hrtimer			hrtimer;
 	ktime_t				hrtimer_interval;
-	struct list_head		rotation_list;
 	struct pmu			*unique_pmu;
 	struct perf_cgroup		*cgrp;
 };
@@ -665,6 +659,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
@@ -689,14 +684,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-	struct pt_regs hot_regs;
+	if (static_key_false(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
 
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
 	if (static_key_false(&perf_swevent_enabled[event_id])) {
-		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs);
-			regs = &hot_regs;
-		}
-		__perf_sw_event(event_id, nr, regs, addr);
+		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+		perf_fetch_caller_regs(regs);
+		___perf_sw_event(event_id, nr, regs, addr);
 	}
 }
 
@@ -712,7 +718,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
 static inline void perf_event_task_sched_out(struct task_struct *prev,
 					     struct task_struct *next)
 {
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
 	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
@@ -823,6 +829,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
 static inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
 static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
+static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
 static inline int perf_register_guest_info_callbacks
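
perf_sw_event_sched() above replaces the old on-stack struct pt_regs (hot_regs) with a per-CPU buffer, which is only safe because its scheduler callers cannot recurse. The general shape of that pattern, a static key keeping the disabled path to one patched-out branch plus a per-CPU register buffer, sketched with hypothetical names (my_event_enabled, my_regs, my_hook):

#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>

static struct static_key my_event_enabled = STATIC_KEY_INIT_FALSE;
static DEFINE_PER_CPU(struct pt_regs, my_regs);

/* Callers must not recurse (e.g. they run with preemption disabled),
 * or the per-CPU pt_regs buffer could be overwritten mid-use. */
static __always_inline void my_hook(u32 event_id, u64 nr)
{
	if (static_key_false(&my_event_enabled)) {
		struct pt_regs *regs = this_cpu_ptr(&my_regs);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, 0);
	}
}
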
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 77aed9ea1d26..dab545bb66b3 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -37,6 +37,7 @@
 #define SSDR		(0x10)  /* SSP Data Write/Data Read Register */
 
 #define SSTO		(0x28)  /* SSP Time Out Register */
+#define DDS_RATE	(0x28)  /* SSP DDS Clock Rate Register (Intel Quark) */
 #define SSPSP		(0x2C)  /* SSP Programmable Serial Protocol */
 #define SSTSA		(0x30)  /* SSP Tx Timeslot Active */
 #define SSRSA		(0x34)  /* SSP Rx Timeslot Active */
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 50978b781a19..097d7eb2441e 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -321,6 +321,49 @@ struct dquot_operations {
321 321
322struct path; 322struct path;
323 323
324/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
325struct qc_dqblk {
326 int d_fieldmask; /* mask of fields to change in ->set_dqblk() */
327 u64 d_spc_hardlimit; /* absolute limit on used space */
328 u64 d_spc_softlimit; /* preferred limit on used space */
329 u64 d_ino_hardlimit; /* maximum # allocated inodes */
330 u64 d_ino_softlimit; /* preferred inode limit */
331 u64 d_space; /* Space owned by the user */
332 u64 d_ino_count; /* # inodes owned by the user */
333 s64 d_ino_timer; /* zero if within inode limits */
334 /* if not, we refuse service */
335 s64 d_spc_timer; /* similar to above; for space */
336 int d_ino_warns; /* # warnings issued wrt num inodes */
337 int d_spc_warns; /* # warnings issued wrt used space */
338 u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
339 u64 d_rt_spc_softlimit; /* preferred limit on RT space */
340 u64 d_rt_space; /* realtime space owned */
341 s64 d_rt_spc_timer; /* similar to above; for RT space */
342 int d_rt_spc_warns; /* # warnings issued wrt RT space */
343};
344
345/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
346#define QC_INO_SOFT (1<<0)
347#define QC_INO_HARD (1<<1)
348#define QC_SPC_SOFT (1<<2)
349#define QC_SPC_HARD (1<<3)
350#define QC_RT_SPC_SOFT (1<<4)
351#define QC_RT_SPC_HARD (1<<5)
352#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
353 QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
354#define QC_SPC_TIMER (1<<6)
355#define QC_INO_TIMER (1<<7)
356#define QC_RT_SPC_TIMER (1<<8)
357#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
358#define QC_SPC_WARNS (1<<9)
359#define QC_INO_WARNS (1<<10)
360#define QC_RT_SPC_WARNS (1<<11)
361#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
362#define QC_SPACE (1<<12)
363#define QC_INO_COUNT (1<<13)
364#define QC_RT_SPACE (1<<14)
365#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
366
324/* Operations handling requests from userspace */ 367/* Operations handling requests from userspace */
325struct quotactl_ops { 368struct quotactl_ops {
326 int (*quota_on)(struct super_block *, int, int, struct path *); 369 int (*quota_on)(struct super_block *, int, int, struct path *);
@@ -329,8 +372,8 @@ struct quotactl_ops {
329 int (*quota_sync)(struct super_block *, int); 372 int (*quota_sync)(struct super_block *, int);
330 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 373 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
331 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 374 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
332 int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 375 int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
333 int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 376 int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
334 int (*get_xstate)(struct super_block *, struct fs_quota_stat *); 377 int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
335 int (*set_xstate)(struct super_block *, unsigned int, int); 378 int (*set_xstate)(struct super_block *, unsigned int, int);
336 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); 379 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
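[Editor's note] With ->get_dqblk()/->set_dqblk() now taking struct qc_dqblk, a filesystem-independent caller fills d_fieldmask with the QC_* bits for the fields it wants applied; the d_spc_* limits are in bytes. A hedged sketch (not from the patch; sb and qid are assumed to come from the surrounding quotactl path):

    struct qc_dqblk di = {
        .d_fieldmask     = QC_SPC_SOFT | QC_SPC_HARD,
        .d_spc_softlimit = 1024ULL * 1024 * 1024,      /* 1 GiB preferred */
        .d_spc_hardlimit = 2ULL * 1024 * 1024 * 1024,  /* 2 GiB absolute  */
    };
    int err;

    /* only the masked fields are changed; the rest are left alone */
    err = sb->s_qcop->set_dqblk(sb, qid, &di);
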
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f23538a6e411..29e3455f7d41 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
98int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 98int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
99int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 99int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
100int dquot_get_dqblk(struct super_block *sb, struct kqid id, 100int dquot_get_dqblk(struct super_block *sb, struct kqid id,
101 struct fs_disk_quota *di); 101 struct qc_dqblk *di);
102int dquot_set_dqblk(struct super_block *sb, struct kqid id, 102int dquot_set_dqblk(struct super_block *sb, struct kqid id,
103 struct fs_disk_quota *di); 103 struct qc_dqblk *di);
104 104
105int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); 105int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
106int dquot_transfer(struct inode *inode, struct iattr *iattr); 106int dquot_transfer(struct inode *inode, struct iattr *iattr);
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 529bc946f450..a18b16f1dc0e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
524 * @member: the name of the hlist_node within the struct. 524 * @member: the name of the hlist_node within the struct.
525 */ 525 */
526#define hlist_for_each_entry_continue_rcu(pos, member) \ 526#define hlist_for_each_entry_continue_rcu(pos, member) \
527 for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 527 for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
528 typeof(*(pos)), member); \ 528 &(pos)->member)), typeof(*(pos)), member); \
529 pos; \ 529 pos; \
530 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 530 pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
531 typeof(*(pos)), member)) 531 &(pos)->member)), typeof(*(pos)), member))
532 532
533/** 533/**
534 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point 534 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
536 * @member: the name of the hlist_node within the struct. 536 * @member: the name of the hlist_node within the struct.
537 */ 537 */
538#define hlist_for_each_entry_continue_rcu_bh(pos, member) \ 538#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
539 for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 539 for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
540 typeof(*(pos)), member); \ 540 &(pos)->member)), typeof(*(pos)), member); \
541 pos; \ 541 pos; \
542 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 542 pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
543 typeof(*(pos)), member)) 543 &(pos)->member)), typeof(*(pos)), member))
544 544
545/** 545/**
546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point 546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
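[Editor's note] The continue iterators now wrap the ->next load in hlist_next_rcu(), which appears intended to carry the __rcu address-space annotation for sparse, while rcu_dereference_raw() keeps lockdep quiet when continuing from an already-validated node. Usage is unchanged; a minimal sketch, assuming a hypothetical struct foo with an hlist_node member named node and a hypothetical process() helper:

    struct foo *pos = known_entry;   /* already found under RCU */

    rcu_read_lock();
    hlist_for_each_entry_continue_rcu(pos, node) {
        /* visits the entries after known_entry; loads are RCU-safe */
        process(pos);
    }
    rcu_read_unlock();
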
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ed4f5939a452..78097491cd99 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
331extern struct srcu_struct tasks_rcu_exit_srcu; 331extern struct srcu_struct tasks_rcu_exit_srcu;
332#define rcu_note_voluntary_context_switch(t) \ 332#define rcu_note_voluntary_context_switch(t) \
333 do { \ 333 do { \
334 rcu_all_qs(); \
334 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ 335 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
335 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ 336 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
336 } while (0) 337 } while (0)
337#else /* #ifdef CONFIG_TASKS_RCU */ 338#else /* #ifdef CONFIG_TASKS_RCU */
338#define TASKS_RCU(x) do { } while (0) 339#define TASKS_RCU(x) do { } while (0)
339#define rcu_note_voluntary_context_switch(t) do { } while (0) 340#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
340#endif /* #else #ifdef CONFIG_TASKS_RCU */ 341#endif /* #else #ifdef CONFIG_TASKS_RCU */
341 342
342/** 343/**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
582}) 583})
583#define __rcu_dereference_check(p, c, space) \ 584#define __rcu_dereference_check(p, c, space) \
584({ \ 585({ \
585 typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ 586 /* Dependency order vs. p above. */ \
587 typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
586 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ 588 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
587 rcu_dereference_sparse(p, space); \ 589 rcu_dereference_sparse(p, space); \
588 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ 590 ((typeof(*p) __force __kernel *)(________p1)); \
589 ((typeof(*p) __force __kernel *)(_________p1)); \
590}) 591})
591#define __rcu_dereference_protected(p, c, space) \ 592#define __rcu_dereference_protected(p, c, space) \
592({ \ 593({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
603}) 604})
604#define __rcu_dereference_index_check(p, c) \ 605#define __rcu_dereference_index_check(p, c) \
605({ \ 606({ \
606 typeof(p) _________p1 = ACCESS_ONCE(p); \ 607 /* Dependency order vs. p above. */ \
608 typeof(p) _________p1 = lockless_dereference(p); \
607 rcu_lockdep_assert(c, \ 609 rcu_lockdep_assert(c, \
608 "suspicious rcu_dereference_index_check() usage"); \ 610 "suspicious rcu_dereference_index_check() usage"); \
609 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
610 (_________p1); \ 611 (_________p1); \
611}) 612})
612 613
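[Editor's note] Both macros now funnel through lockless_dereference(), which pairs the ACCESS_ONCE() load with smp_read_barrier_depends() in one place instead of open-coding the barrier at every site. For reference, the helper this hunk relies on is defined elsewhere in the tree roughly as:

    #define lockless_dereference(p) \
    ({ \
        typeof(p) _________p1 = ACCESS_ONCE(p); \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
    })

This also explains the rename from _________p1 to ________p1 in __rcu_dereference_check(): the outer temporary loses an underscore so it cannot shadow the helper's own.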
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 0e5366200154..937edaeb150d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
92} 92}
93 93
94/* 94/*
95 * Return the number of grace periods. 95 * Return the number of grace periods started.
96 */ 96 */
97static inline long rcu_batches_completed(void) 97static inline unsigned long rcu_batches_started(void)
98{ 98{
99 return 0; 99 return 0;
100} 100}
101 101
102/* 102/*
103 * Return the number of bottom-half grace periods. 103 * Return the number of bottom-half grace periods started.
104 */ 104 */
105static inline long rcu_batches_completed_bh(void) 105static inline unsigned long rcu_batches_started_bh(void)
106{
107 return 0;
108}
109
110/*
111 * Return the number of sched grace periods started.
112 */
113static inline unsigned long rcu_batches_started_sched(void)
114{
115 return 0;
116}
117
118/*
119 * Return the number of grace periods completed.
120 */
121static inline unsigned long rcu_batches_completed(void)
122{
123 return 0;
124}
125
126/*
127 * Return the number of bottom-half grace periods completed.
128 */
129static inline unsigned long rcu_batches_completed_bh(void)
130{
131 return 0;
132}
133
134/*
135 * Return the number of sched grace periods completed.
136 */
137static inline unsigned long rcu_batches_completed_sched(void)
106{ 138{
107 return 0; 139 return 0;
108} 140}
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
154 return true; 186 return true;
155} 187}
156 188
157
158#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 189#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
159 190
191static inline void rcu_all_qs(void)
192{
193}
194
160#endif /* __LINUX_RCUTINY_H */ 195#endif /* __LINUX_RCUTINY_H */
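[Editor's note] Tiny RCU stubs all six counters to 0 and gives rcu_all_qs() an empty body; the split into *_started()/*_completed() accessors (now returning unsigned long) only carries information on Tree RCU. A hedged sketch of the rcutorture-style check these accessors support:

    unsigned long started, completed;

    started = rcu_batches_started();
    synchronize_rcu();
    completed = rcu_batches_completed();

    /*
     * On Tree RCU at least one grace period started and completed in
     * between (wrap-safe unsigned compare); on Tiny RCU both reads are
     * 0, so the counters prove nothing there.
     */
    if (completed - started >= 1)
        pr_info("observed a full grace period\n");
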
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 52953790dcca..d2e583a6aaca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
81 81
82extern unsigned long rcutorture_testseq; 82extern unsigned long rcutorture_testseq;
83extern unsigned long rcutorture_vernum; 83extern unsigned long rcutorture_vernum;
84long rcu_batches_completed(void); 84unsigned long rcu_batches_started(void);
85long rcu_batches_completed_bh(void); 85unsigned long rcu_batches_started_bh(void);
86long rcu_batches_completed_sched(void); 86unsigned long rcu_batches_started_sched(void);
87unsigned long rcu_batches_completed(void);
88unsigned long rcu_batches_completed_bh(void);
89unsigned long rcu_batches_completed_sched(void);
87void show_rcu_gp_kthreads(void); 90void show_rcu_gp_kthreads(void);
88 91
89void rcu_force_quiescent_state(void); 92void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
97 100
98bool rcu_is_watching(void); 101bool rcu_is_watching(void);
99 102
103void rcu_all_qs(void);
104
100#endif /* __LINUX_RCUTREE_H */ 105#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4419b99d8d6e..116655d92269 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -468,7 +468,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
468 * 468 *
469 * @reg: Offset of the register within the regmap bank 469 * @reg: Offset of the register within the regmap bank
470 * @lsb: lsb of the register field. 470 * @lsb: lsb of the register field.
471 * @reg: msb of the register field. 471 * @msb: msb of the register field.
472 * @id_size: port size if it has some ports 472 * @id_size: port size if it has some ports
473 * @id_offset: address offset for each ports 473 * @id_offset: address offset for each ports
474 */ 474 */
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5479394fefce..5dd65acc2a69 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -32,6 +32,8 @@ struct da9211_pdata {
32 * 2 : 2 phase 2 buck 32 * 2 : 2 phase 2 buck
33 */ 33 */
34 int num_buck; 34 int num_buck;
35 int gpio_ren[DA9211_MAX_REGULATORS];
36 struct device_node *reg_node[DA9211_MAX_REGULATORS];
35 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; 37 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
36}; 38};
37#endif 39#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 5f1e9ca47417..d4ad5b5a02bb 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -21,6 +21,7 @@
21 21
22struct regmap; 22struct regmap;
23struct regulator_dev; 23struct regulator_dev;
24struct regulator_config;
24struct regulator_init_data; 25struct regulator_init_data;
25struct regulator_enable_gpio; 26struct regulator_enable_gpio;
26 27
@@ -205,6 +206,15 @@ enum regulator_type {
205 * @supply_name: Identifying the regulator supply 206 * @supply_name: Identifying the regulator supply
206 * @of_match: Name used to identify regulator in DT. 207 * @of_match: Name used to identify regulator in DT.
207 * @regulators_node: Name of node containing regulator definitions in DT. 208 * @regulators_node: Name of node containing regulator definitions in DT.
209 * @of_parse_cb: Optional callback called only if of_match is present.
210 * Will be called for each regulator parsed from DT, during
211 * init_data parsing.
212 * The regulator_config passed as argument to the callback will
213 * be a copy of config passed to regulator_register, valid only
214 * for this particular call. Callback may freely change the
215 * config but it cannot store it for later usage.
216 * Callback should return 0 on success or negative ERRNO
217 * indicating failure.
208 * @id: Numerical identifier for the regulator. 218 * @id: Numerical identifier for the regulator.
209 * @ops: Regulator operations table. 219 * @ops: Regulator operations table.
210 * @irq: Interrupt number for the regulator. 220 * @irq: Interrupt number for the regulator.
@@ -251,6 +261,9 @@ struct regulator_desc {
251 const char *supply_name; 261 const char *supply_name;
252 const char *of_match; 262 const char *of_match;
253 const char *regulators_node; 263 const char *regulators_node;
264 int (*of_parse_cb)(struct device_node *,
265 const struct regulator_desc *,
266 struct regulator_config *);
254 int id; 267 int id;
255 bool continuous_voltage_range; 268 bool continuous_voltage_range;
256 unsigned n_voltages; 269 unsigned n_voltages;
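[Editor's note] A driver opts in by pointing regulator_desc.of_parse_cb at a parser; the core then calls it once per regulator node during init_data parsing, handing it a throwaway copy of the config, per the kerneldoc above. A minimal sketch, where my_reg_of_parse() and the "vendor,ena-gpio" property are hypothetical:

    static int my_reg_of_parse(struct device_node *np,
                               const struct regulator_desc *desc,
                               struct regulator_config *config)
    {
        /* pick up a board-specific enable GPIO from this node */
        config->ena_gpio = of_get_named_gpio(np, "vendor,ena-gpio", 0);
        if (config->ena_gpio < 0)
            config->ena_gpio = -EINVAL;  /* treat as "not wired" */
        return 0;  /* negative errno would fail registration */
    }

    static const struct regulator_desc my_desc = {
        .name            = "buck1",
        .of_match        = "buck1",
        .regulators_node = "regulators",
        .of_parse_cb     = my_reg_of_parse,
        /* ... ops, id, voltage table ... */
    };

Note the config passed to the callback is valid only for that call, so the callback may adjust it but must not stash the pointer.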
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0b08d05d470b..b07562e082c4 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -191,15 +191,22 @@ struct regulator_init_data {
191 void *driver_data; /* core does not touch this */ 191 void *driver_data; /* core does not touch this */
192}; 192};
193 193
194int regulator_suspend_prepare(suspend_state_t state);
195int regulator_suspend_finish(void);
196
197#ifdef CONFIG_REGULATOR 194#ifdef CONFIG_REGULATOR
198void regulator_has_full_constraints(void); 195void regulator_has_full_constraints(void);
196int regulator_suspend_prepare(suspend_state_t state);
197int regulator_suspend_finish(void);
199#else 198#else
200static inline void regulator_has_full_constraints(void) 199static inline void regulator_has_full_constraints(void)
201{ 200{
202} 201}
202static inline int regulator_suspend_prepare(suspend_state_t state)
203{
204 return 0;
205}
206static inline int regulator_suspend_finish(void)
207{
208 return 0;
209}
203#endif 210#endif
204 211
205#endif 212#endif
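[Editor's note] Moving the declarations under #ifdef CONFIG_REGULATOR and adding 0-returning stubs lets platform suspend code call these unconditionally. Sketch of the caller this enables (name hypothetical):

    static int my_platform_suspend_prepare(suspend_state_t state)
    {
        /* compiles to a no-op returning 0 when CONFIG_REGULATOR=n */
        return regulator_suspend_prepare(state);
    }
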
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644
index 000000000000..30cc5963e265
--- /dev/null
+++ b/include/linux/regulator/mt6397-regulator.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu <flora.fu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __LINUX_REGULATOR_MT6397_H
16#define __LINUX_REGULATOR_MT6397_H
17
18enum {
19 MT6397_ID_VPCA15 = 0,
20 MT6397_ID_VPCA7,
21 MT6397_ID_VSRAMCA15,
22 MT6397_ID_VSRAMCA7,
23 MT6397_ID_VCORE,
24 MT6397_ID_VGPU,
25 MT6397_ID_VDRM,
26 MT6397_ID_VIO18 = 7,
27 MT6397_ID_VTCXO,
28 MT6397_ID_VA28,
29 MT6397_ID_VCAMA,
30 MT6397_ID_VIO28,
31 MT6397_ID_VUSB,
32 MT6397_ID_VMC,
33 MT6397_ID_VMCH,
34 MT6397_ID_VEMC3V3,
35 MT6397_ID_VGP1,
36 MT6397_ID_VGP2,
37 MT6397_ID_VGP3,
38 MT6397_ID_VGP4,
39 MT6397_ID_VGP5,
40 MT6397_ID_VGP6,
41 MT6397_ID_VIBR,
42 MT6397_ID_RG_MAX,
43};
44
45#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX
46#define MT6397_REGULATOR_ID97 0x97
47#define MT6397_REGULATOR_ID91 0x91
48
49#endif /* __LINUX_REGULATOR_MT6397_H */
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index 364f7a7c43db..70c6c66c5bcf 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -49,6 +49,20 @@
49#define PFUZE200_VGEN5 11 49#define PFUZE200_VGEN5 11
50#define PFUZE200_VGEN6 12 50#define PFUZE200_VGEN6 12
51 51
52#define PFUZE3000_SW1A 0
53#define PFUZE3000_SW1B 1
54#define PFUZE3000_SW2 2
55#define PFUZE3000_SW3 3
56#define PFUZE3000_SWBST 4
57#define PFUZE3000_VSNVS 5
58#define PFUZE3000_VREFDDR 6
59#define PFUZE3000_VLDO1 7
60#define PFUZE3000_VLDO2 8
61#define PFUZE3000_VCCSD 9
62#define PFUZE3000_V33 10
63#define PFUZE3000_VLDO3 11
64#define PFUZE3000_VLDO4 12
65
52struct regulator_init_data; 66struct regulator_init_data;
53 67
54struct pfuze_regulator_platform_data { 68struct pfuze_regulator_platform_data {
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index b2b1afbb3202..cd519a11c2c6 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by: 15 * Written by:
20 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com> 16 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
21 */ 17 */
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
index bc8677c8eba9..e69e9b51b21a 100644
--- a/include/linux/spi/l4f00242t03.h
+++ b/include/linux/spi/l4f00242t03.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ 17#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
index 555d254e6606..fdd1d1d51da5 100644
--- a/include/linux/spi/lms283gf05.h
+++ b/include/linux/spi/lms283gf05.h
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18*/ 14*/
19 15
20#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ 16#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h
index 4835486f58e5..381d368b91b4 100644
--- a/include/linux/spi/mxs-spi.h
+++ b/include/linux/spi/mxs-spi.h
@@ -15,10 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */ 18 */
23 19
24#ifndef __LINUX_SPI_MXS_SPI_H__ 20#ifndef __LINUX_SPI_MXS_SPI_H__
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index d5a316550177..6d36dacec4ba 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 13 */
18#ifndef __linux_pxa2xx_spi_h 14#ifndef __linux_pxa2xx_spi_h
19#define __linux_pxa2xx_spi_h 15#define __linux_pxa2xx_spi_h
@@ -57,7 +53,6 @@ struct pxa2xx_spi_chip {
57#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) 53#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
58 54
59#include <linux/clk.h> 55#include <linux/clk.h>
60#include <mach/dma.h>
61 56
62extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); 57extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
63 58
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index e546b2ceb623..a693188cc08b 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -11,11 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */ 14 */
20 15
21#ifndef __LINUX_SPI_RENESAS_SPI_H__ 16#ifndef __LINUX_SPI_RENESAS_SPI_H__
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
index a1121f872ac1..aa0d440ab4f0 100644
--- a/include/linux/spi/sh_hspi.h
+++ b/include/linux/spi/sh_hspi.h
@@ -9,10 +9,6 @@
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details. 11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */ 12 */
17#ifndef SH_HSPI_H 13#ifndef SH_HSPI_H
18#define SH_HSPI_H 14#define SH_HSPI_H
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index 88a14d81c49e..b087a85f5f72 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -7,6 +7,8 @@ struct sh_msiof_spi_info {
7 u16 num_chipselect; 7 u16 num_chipselect;
8 unsigned int dma_tx_id; 8 unsigned int dma_tx_id;
9 unsigned int dma_rx_id; 9 unsigned int dma_rx_id;
10 u32 dtdl;
11 u32 syncdl;
10}; 12};
11 13
12#endif /* __SPI_SH_MSIOF_H__ */ 14#endif /* __SPI_SH_MSIOF_H__ */
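[Editor's note] The new dtdl/syncdl fields carry the MSIOF data-out and sync-signal delays through platform data, mirroring the sh-msiof DT properties added elsewhere in this series. A sketch, with the values assumed to follow the binding's convention (0, 50, 100, ... in half-clock steps):

    static struct sh_msiof_spi_info msiof0_info = {
        .num_chipselect = 1,
        .dtdl   = 100,  /* assumed: one-clock data delay per the binding */
        .syncdl = 50,   /* assumed: half-clock sync delay */
    };
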
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a6ef2a8e6de4..ed9489d893a4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 13 */
18 14
19#ifndef __LINUX_SPI_H 15#ifndef __LINUX_SPI_H
@@ -260,6 +256,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
260 * @pump_messages: work struct for scheduling work to the message pump 256 * @pump_messages: work struct for scheduling work to the message pump
261 * @queue_lock: spinlock to syncronise access to message queue 257 * @queue_lock: spinlock to syncronise access to message queue
262 * @queue: message queue 258 * @queue: message queue
259 * @idling: the device is entering idle state
263 * @cur_msg: the currently in-flight message 260 * @cur_msg: the currently in-flight message
264 * @cur_msg_prepared: spi_prepare_message was called for the currently 261 * @cur_msg_prepared: spi_prepare_message was called for the currently
265 * in-flight message 262 * in-flight message
@@ -425,6 +422,7 @@ struct spi_master {
425 spinlock_t queue_lock; 422 spinlock_t queue_lock;
426 struct list_head queue; 423 struct list_head queue;
427 struct spi_message *cur_msg; 424 struct spi_message *cur_msg;
425 bool idling;
428 bool busy; 426 bool busy;
429 bool running; 427 bool running;
430 bool rt; 428 bool rt;
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
index 60b59187e590..414c6fddfcf0 100644
--- a/include/linux/spi/tle62x0.h
+++ b/include/linux/spi/tle62x0.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21struct tle62x0_pdata { 17struct tle62x0_pdata {
diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
index 8f721e465e05..563b3b1799a8 100644
--- a/include/linux/spi/tsc2005.h
+++ b/include/linux/spi/tsc2005.h
@@ -12,11 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */ 15 */
21 16
22#ifndef _LINUX_SPI_TSC2005_H 17#ifndef _LINUX_SPI_TSC2005_H
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a2783cb5d275..9cfd9623fb03 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -45,7 +45,7 @@ struct rcu_batch {
45#define RCU_BATCH_INIT(name) { NULL, &(name.head) } 45#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
46 46
47struct srcu_struct { 47struct srcu_struct {
48 unsigned completed; 48 unsigned long completed;
49 struct srcu_struct_array __percpu *per_cpu_ref; 49 struct srcu_struct_array __percpu *per_cpu_ref;
50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */ 50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */
51 bool running; 51 bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
102 * define and init a srcu struct at build time. 102 * define and init a srcu struct at build time.
103 * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it. 103 * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
104 */ 104 */
105#define DEFINE_SRCU(name) \ 105#define __DEFINE_SRCU(name, is_static) \
106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ 106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
107 struct srcu_struct name = __SRCU_STRUCT_INIT(name); 107 is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
108 108#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
109#define DEFINE_STATIC_SRCU(name) \ 109#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
110 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
111 static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
112 110
113/** 111/**
114 * call_srcu() - Queue a callback for invocation after an SRCU grace period 112 * call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
135void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); 133void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
136void synchronize_srcu(struct srcu_struct *sp); 134void synchronize_srcu(struct srcu_struct *sp);
137void synchronize_srcu_expedited(struct srcu_struct *sp); 135void synchronize_srcu_expedited(struct srcu_struct *sp);
138long srcu_batches_completed(struct srcu_struct *sp); 136unsigned long srcu_batches_completed(struct srcu_struct *sp);
139void srcu_barrier(struct srcu_struct *sp); 137void srcu_barrier(struct srcu_struct *sp);
140 138
141#ifdef CONFIG_DEBUG_LOCK_ALLOC 139#ifdef CONFIG_DEBUG_LOCK_ALLOC
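[Editor's note] Folding both definers into __DEFINE_SRCU(name, is_static) removes the duplicated per-CPU array line; the expansion and the read/write API are unchanged. For reference, the usual pattern built on these macros:

    DEFINE_STATIC_SRCU(my_srcu);    /* my_srcu is a name of our choosing */

    static void reader(void)
    {
        int idx = srcu_read_lock(&my_srcu);
        /* sleepable read-side critical section; blocking is allowed */
        srcu_read_unlock(&my_srcu, idx);
    }

    static void writer(void)
    {
        /* after making the old state unreachable to new readers */
        synchronize_srcu(&my_srcu);
    }
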
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index e08e21e5f601..c72851328ca9 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -173,7 +173,7 @@ extern void syscall_unregfunc(void);
173 TP_PROTO(data_proto), \ 173 TP_PROTO(data_proto), \
174 TP_ARGS(data_args), \ 174 TP_ARGS(data_args), \
175 TP_CONDITION(cond),,); \ 175 TP_CONDITION(cond),,); \
176 if (IS_ENABLED(CONFIG_LOCKDEP)) { \ 176 if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
177 rcu_read_lock_sched_notrace(); \ 177 rcu_read_lock_sched_notrace(); \
178 rcu_dereference_sched(__tracepoint_##name.funcs);\ 178 rcu_dereference_sched(__tracepoint_##name.funcs);\
179 rcu_read_unlock_sched_notrace(); \ 179 rcu_read_unlock_sched_notrace(); \
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 7ee2df083542..dc8fd81412bf 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -22,9 +22,9 @@ struct flow_keys {
22 __be32 ports; 22 __be32 ports;
23 __be16 port16[2]; 23 __be16 port16[2];
24 }; 24 };
25 u16 thoff; 25 u16 thoff;
26 u16 n_proto; 26 __be16 n_proto;
27 u8 ip_proto; 27 u8 ip_proto;
28}; 28};
29 29
30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, 30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
diff --git a/include/net/ip.h b/include/net/ip.h
index 0bb620702929..09cf5aebb283 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -39,11 +39,12 @@ struct inet_skb_parm {
39 struct ip_options opt; /* Compiled IP options */ 39 struct ip_options opt; /* Compiled IP options */
40 unsigned char flags; 40 unsigned char flags;
41 41
42#define IPSKB_FORWARDED 1 42#define IPSKB_FORWARDED BIT(0)
43#define IPSKB_XFRM_TUNNEL_SIZE 2 43#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
44#define IPSKB_XFRM_TRANSFORMED 4 44#define IPSKB_XFRM_TRANSFORMED BIT(2)
45#define IPSKB_FRAG_COMPLETE 8 45#define IPSKB_FRAG_COMPLETE BIT(3)
46#define IPSKB_REROUTED 16 46#define IPSKB_REROUTED BIT(4)
47#define IPSKB_DOREDIRECT BIT(5)
47 48
48 u16 frag_max_size; 49 u16 frag_max_size;
49}; 50};
@@ -180,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
180 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; 181 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
181} 182}
182 183
183void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, 184void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
184 const struct ip_options *sopt, 185 const struct ip_options *sopt,
185 __be32 daddr, __be32 saddr, 186 __be32 daddr, __be32 saddr,
186 const struct ip_reply_arg *arg, 187 const struct ip_reply_arg *arg,
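[Editor's note] Rewriting the IPSKB_* flags with BIT() is cosmetic for the existing bits but makes room for the new IPSKB_DOREDIRECT at bit 5. Tests read as before; a hedged sketch of the kind of forward-path check the new flag serves:

    if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
        /* forwarding path decided an ICMP redirect is warranted */
        ip_rt_send_redirect(skb);
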
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4292929392b0..6e416f6d3e3c 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); 671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
672} 672}
673 673
674u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
675 struct in6_addr *src);
676void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
674void ipv6_proxy_select_ident(struct sk_buff *skb); 677void ipv6_proxy_select_ident(struct sk_buff *skb);
675 678
676int ip6_dst_hoplimit(struct dst_entry *dst); 679int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
708 __be32 flowlabel, bool autolabel) 711 __be32 flowlabel, bool autolabel)
709{ 712{
710 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) { 713 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
711 __be32 hash; 714 u32 hash;
712 715
713 hash = skb_get_hash(skb); 716 hash = skb_get_hash(skb);
714 717
@@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
718 */ 721 */
719 hash ^= hash >> 12; 722 hash ^= hash >> 12;
720 723
721 flowlabel = hash & IPV6_FLOWLABEL_MASK; 724 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
722 } 725 }
723 726
724 return flowlabel; 727 return flowlabel;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3ae969e3acf0..9eaaa7884586 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -530,6 +530,8 @@ enum nft_chain_type {
530 530
531int nft_chain_validate_dependency(const struct nft_chain *chain, 531int nft_chain_validate_dependency(const struct nft_chain *chain,
532 enum nft_chain_type type); 532 enum nft_chain_type type);
533int nft_chain_validate_hooks(const struct nft_chain *chain,
534 unsigned int hook_flags);
533 535
534struct nft_stats { 536struct nft_stats {
535 u64 bytes; 537 u64 bytes;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 24945cefc4fd..0ffef1a38efc 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -52,6 +52,7 @@ struct netns_ipv4 {
52 struct inet_peer_base *peers; 52 struct inet_peer_base *peers;
53 struct tcpm_hash_bucket *tcp_metrics_hash; 53 struct tcpm_hash_bucket *tcp_metrics_hash;
54 unsigned int tcp_metrics_hash_log; 54 unsigned int tcp_metrics_hash_log;
55 struct sock * __percpu *tcp_sk;
55 struct netns_frags frags; 56 struct netns_frags frags;
56#ifdef CONFIG_NETFILTER 57#ifdef CONFIG_NETFILTER
57 struct xt_table *iptable_filter; 58 struct xt_table *iptable_filter;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3d282cbb66bf..c605d305c577 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -79,6 +79,9 @@ struct Qdisc {
79 struct netdev_queue *dev_queue; 79 struct netdev_queue *dev_queue;
80 80
81 struct gnet_stats_rate_est64 rate_est; 81 struct gnet_stats_rate_est64 rate_est;
82 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
83 struct gnet_stats_queue __percpu *cpu_qstats;
84
82 struct Qdisc *next_sched; 85 struct Qdisc *next_sched;
83 struct sk_buff *gso_skb; 86 struct sk_buff *gso_skb;
84 /* 87 /*
@@ -86,15 +89,9 @@ struct Qdisc {
86 */ 89 */
87 unsigned long state; 90 unsigned long state;
88 struct sk_buff_head q; 91 struct sk_buff_head q;
89 union { 92 struct gnet_stats_basic_packed bstats;
90 struct gnet_stats_basic_packed bstats;
91 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
92 } __packed;
93 unsigned int __state; 93 unsigned int __state;
94 union { 94 struct gnet_stats_queue qstats;
95 struct gnet_stats_queue qstats;
96 struct gnet_stats_queue __percpu *cpu_qstats;
97 } __packed;
98 struct rcu_head rcu_head; 95 struct rcu_head rcu_head;
99 int padded; 96 int padded;
100 atomic_t refcnt; 97 atomic_t refcnt;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f50f29faf76f..9d9111ef43ae 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -834,8 +834,8 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
834void tcp_get_allowed_congestion_control(char *buf, size_t len); 834void tcp_get_allowed_congestion_control(char *buf, size_t len);
835int tcp_set_allowed_congestion_control(char *allowed); 835int tcp_set_allowed_congestion_control(char *allowed);
836int tcp_set_congestion_control(struct sock *sk, const char *name); 836int tcp_set_congestion_control(struct sock *sk, const char *name);
837void tcp_slow_start(struct tcp_sock *tp, u32 acked); 837u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
838void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); 838void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
839 839
840u32 tcp_reno_ssthresh(struct sock *sk); 840u32 tcp_reno_ssthresh(struct sock *sk);
841void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 841void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
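[Editor's note] tcp_slow_start() now returns the ACKed-packet credit left over after cwnd growth, and tcp_cong_avoid_ai() takes that credit as a third argument, so a congestion module can fall through from slow start into additive increase within a single ACK. Roughly how a Reno-style cong_avoid chains them (a sketch, not the patched tcp_cong.c itself):

    static void my_cong_avoid(struct sock *sk, u32 ack, u32 acked)
    {
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tcp_is_cwnd_limited(sk))
            return;

        if (tp->snd_cwnd <= tp->snd_ssthresh) {
            acked = tcp_slow_start(tp, acked);  /* leftover ACK credit */
            if (!acked)
                return;                         /* all spent in slow start */
        }
        /* additive increase, now credited per ACKed packet */
        tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
    }
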
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0d74f1de99aa..65994a19e840 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1707,10 +1707,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
1707 1707
1708static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 1708static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1709{ 1709{
1710 size_t copy_sz; 1710 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1711
1712 copy_sz = min_t(size_t, len, udata->outlen);
1713 return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
1714} 1711}
1715 1712
1716/** 1713/**
diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
index 2609048c1d44..3a34f6edc2d1 100644
--- a/include/sound/ak4113.h
+++ b/include/sound/ak4113.h
@@ -286,7 +286,7 @@ struct ak4113 {
286 ak4113_write_t *write; 286 ak4113_write_t *write;
287 ak4113_read_t *read; 287 ak4113_read_t *read;
288 void *private_data; 288 void *private_data;
289 unsigned int init:1; 289 atomic_t wq_processing;
290 spinlock_t lock; 290 spinlock_t lock;
291 unsigned char regmap[AK4113_WRITABLE_REGS]; 291 unsigned char regmap[AK4113_WRITABLE_REGS];
292 struct snd_kcontrol *kctls[AK4113_CONTROLS]; 292 struct snd_kcontrol *kctls[AK4113_CONTROLS];
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 52f02a60dba7..069299a88915 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -168,7 +168,7 @@ struct ak4114 {
168 ak4114_write_t * write; 168 ak4114_write_t * write;
169 ak4114_read_t * read; 169 ak4114_read_t * read;
170 void * private_data; 170 void * private_data;
171 unsigned int init: 1; 171 atomic_t wq_processing;
172 spinlock_t lock; 172 spinlock_t lock;
173 unsigned char regmap[6]; 173 unsigned char regmap[6];
174 unsigned char txcsb[5]; 174 unsigned char txcsb[5];
diff --git a/include/sound/soc.h b/include/sound/soc.h
index b4fca9aed2a2..ac8b333acb4d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -498,6 +498,7 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
498 unsigned int mask, unsigned int value); 498 unsigned int mask, unsigned int value);
499 499
500#ifdef CONFIG_SND_SOC_AC97_BUS 500#ifdef CONFIG_SND_SOC_AC97_BUS
501struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
501struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec); 502struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
502void snd_soc_free_ac97_codec(struct snd_ac97 *ac97); 503void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
503 504
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 13391d288107..0e7635765153 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -13,11 +13,13 @@
13 { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \ 13 { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \
14 { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" } 14 { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" }
15 15
16TRACE_EVENT(tlb_flush, 16TRACE_EVENT_CONDITION(tlb_flush,
17 17
18 TP_PROTO(int reason, unsigned long pages), 18 TP_PROTO(int reason, unsigned long pages),
19 TP_ARGS(reason, pages), 19 TP_ARGS(reason, pages),
20 20
21 TP_CONDITION(cpu_online(smp_processor_id())),
22
21 TP_STRUCT__entry( 23 TP_STRUCT__entry(
22 __field( int, reason) 24 __field( int, reason)
23 __field(unsigned long, pages) 25 __field(unsigned long, pages)
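[Editor's note] TRACE_EVENT_CONDITION() evaluates TP_CONDITION before doing any tracepoint work, so tlb_flush can no longer touch RCU-protected tracepoint data from a CPU that is going offline; together with the tracepoint.h hunk above, the lockdep-only rcu_dereference check is also skipped when the condition is false. The guard reduces conceptually to:

    /* conceptual expansion at the trace_tlb_flush() call site */
    if (cpu_online(smp_processor_id()))
        do_trace_tlb_flush(reason, pages);  /* hypothetical inner helper */
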
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 139b5067345b..27609dfcce25 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -763,7 +763,7 @@ perf_trace_##call(void *__data, proto) \
763 struct ftrace_event_call *event_call = __data; \ 763 struct ftrace_event_call *event_call = __data; \
764 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 764 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
765 struct ftrace_raw_##call *entry; \ 765 struct ftrace_raw_##call *entry; \
766 struct pt_regs __regs; \ 766 struct pt_regs *__regs; \
767 u64 __addr = 0, __count = 1; \ 767 u64 __addr = 0, __count = 1; \
768 struct task_struct *__task = NULL; \ 768 struct task_struct *__task = NULL; \
769 struct hlist_head *head; \ 769 struct hlist_head *head; \
@@ -782,18 +782,19 @@ perf_trace_##call(void *__data, proto) \
782 sizeof(u64)); \ 782 sizeof(u64)); \
783 __entry_size -= sizeof(u32); \ 783 __entry_size -= sizeof(u32); \
784 \ 784 \
785 perf_fetch_caller_regs(&__regs); \
786 entry = perf_trace_buf_prepare(__entry_size, \ 785 entry = perf_trace_buf_prepare(__entry_size, \
787 event_call->event.type, &__regs, &rctx); \ 786 event_call->event.type, &__regs, &rctx); \
788 if (!entry) \ 787 if (!entry) \
789 return; \ 788 return; \
790 \ 789 \
790 perf_fetch_caller_regs(__regs); \
791 \
791 tstruct \ 792 tstruct \
792 \ 793 \
793 { assign; } \ 794 { assign; } \
794 \ 795 \
795 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ 796 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
796 __count, &__regs, head, __task); \ 797 __count, __regs, head, __task); \
797} 798}
798 799
799/* 800/*
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 4275b961bf60..867cc5084afb 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,7 +90,6 @@ enum {
90}; 90};
91 91
92enum { 92enum {
93 IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
94 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 93 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
95 IB_USER_VERBS_EX_CMD_DESTROY_FLOW, 94 IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
96}; 95};
@@ -202,32 +201,6 @@ struct ib_uverbs_query_device_resp {
202 __u8 reserved[4]; 201 __u8 reserved[4];
203}; 202};
204 203
205enum {
206 IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0,
207};
208
209struct ib_uverbs_ex_query_device {
210 __u32 comp_mask;
211 __u32 reserved;
212};
213
214struct ib_uverbs_odp_caps {
215 __u64 general_caps;
216 struct {
217 __u32 rc_odp_caps;
218 __u32 uc_odp_caps;
219 __u32 ud_odp_caps;
220 } per_transport_caps;
221 __u32 reserved;
222};
223
224struct ib_uverbs_ex_query_device_resp {
225 struct ib_uverbs_query_device_resp base;
226 __u32 comp_mask;
227 __u32 reserved;
228 struct ib_uverbs_odp_caps odp_caps;
229};
230
231struct ib_uverbs_query_port { 204struct ib_uverbs_query_port {
232 __u64 response; 205 __u64 response;
233 __u8 port_num; 206 __u8 port_num;
diff --git a/init/Kconfig b/init/Kconfig
index 9afb971497f4..1354ac09b516 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -470,7 +470,6 @@ choice
470config TREE_RCU 470config TREE_RCU
471 bool "Tree-based hierarchical RCU" 471 bool "Tree-based hierarchical RCU"
472 depends on !PREEMPT && SMP 472 depends on !PREEMPT && SMP
473 select IRQ_WORK
474 help 473 help
475 This option selects the RCU implementation that is 474 This option selects the RCU implementation that is
476 designed for very large SMP system with hundreds or 475 designed for very large SMP system with hundreds or
@@ -480,7 +479,6 @@ config TREE_RCU
480config PREEMPT_RCU 479config PREEMPT_RCU
481 bool "Preemptible tree-based hierarchical RCU" 480 bool "Preemptible tree-based hierarchical RCU"
482 depends on PREEMPT 481 depends on PREEMPT
483 select IRQ_WORK
484 help 482 help
485 This option selects the RCU implementation that is 483 This option selects the RCU implementation that is
486 designed for very large SMP systems with hundreds or 484 designed for very large SMP systems with hundreds or
@@ -501,9 +499,17 @@ config TINY_RCU
501 499
502endchoice 500endchoice
503 501
502config SRCU
503 bool
504 help
505 This option selects the sleepable version of RCU. This version
506 permits arbitrary sleeping or blocking within RCU read-side critical
507 sections.
508
504config TASKS_RCU 509config TASKS_RCU
505 bool "Task_based RCU implementation using voluntary context switch" 510 bool "Task_based RCU implementation using voluntary context switch"
506 default n 511 default n
512 select SRCU
507 help 513 help
508 This option enables a task-based RCU implementation that uses 514 This option enables a task-based RCU implementation that uses
509 only voluntary context switch (not preemption!), idle, and 515 only voluntary context switch (not preemption!), idle, and
@@ -668,9 +674,10 @@ config RCU_BOOST
668 674
669config RCU_KTHREAD_PRIO 675config RCU_KTHREAD_PRIO
670 int "Real-time priority to use for RCU worker threads" 676 int "Real-time priority to use for RCU worker threads"
671 range 1 99 677 range 1 99 if RCU_BOOST
672 depends on RCU_BOOST 678 range 0 99 if !RCU_BOOST
673 default 1 679 default 1 if RCU_BOOST
680 default 0 if !RCU_BOOST
674 help 681 help
675 This option specifies the SCHED_FIFO priority value that will be 682 This option specifies the SCHED_FIFO priority value that will be
676 assigned to the rcuc/n and rcub/n threads and is also the value 683 assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1595,6 +1602,7 @@ config PERF_EVENTS
1595 depends on HAVE_PERF_EVENTS 1602 depends on HAVE_PERF_EVENTS
1596 select ANON_INODES 1603 select ANON_INODES
1597 select IRQ_WORK 1604 select IRQ_WORK
1605 select SRCU
1598 help 1606 help
1599 Enable kernel support for various performance events provided 1607 Enable kernel support for various performance events provided
1600 by software and hardware. 1608 by software and hardware.
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 76768ee812b2..08561f1acd13 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -231,6 +231,10 @@ config RWSEM_SPIN_ON_OWNER
231 def_bool y 231 def_bool y
232 depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW 232 depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
233 233
234config LOCK_SPIN_ON_OWNER
235 def_bool y
236 depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
237
234config ARCH_USE_QUEUE_RWLOCK 238config ARCH_USE_QUEUE_RWLOCK
235 bool 239 bool
236 240
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 088ac0b1b106..536edc2be307 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
150 int ufd = attr->map_fd; 150 int ufd = attr->map_fd;
151 struct fd f = fdget(ufd); 151 struct fd f = fdget(ufd);
152 struct bpf_map *map; 152 struct bpf_map *map;
153 void *key, *value; 153 void *key, *value, *ptr;
154 int err; 154 int err;
155 155
156 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) 156 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
169 if (copy_from_user(key, ukey, map->key_size) != 0) 169 if (copy_from_user(key, ukey, map->key_size) != 0)
170 goto free_key; 170 goto free_key;
171 171
172 err = -ENOENT; 172 err = -ENOMEM;
173 rcu_read_lock(); 173 value = kmalloc(map->value_size, GFP_USER);
174 value = map->ops->map_lookup_elem(map, key);
175 if (!value) 174 if (!value)
176 goto err_unlock; 175 goto free_key;
176
177 rcu_read_lock();
178 ptr = map->ops->map_lookup_elem(map, key);
179 if (ptr)
180 memcpy(value, ptr, map->value_size);
181 rcu_read_unlock();
182
183 err = -ENOENT;
184 if (!ptr)
185 goto free_value;
177 186
178 err = -EFAULT; 187 err = -EFAULT;
179 if (copy_to_user(uvalue, value, map->value_size) != 0) 188 if (copy_to_user(uvalue, value, map->value_size) != 0)
180 goto err_unlock; 189 goto free_value;
181 190
182 err = 0; 191 err = 0;
183 192
184err_unlock: 193free_value:
185 rcu_read_unlock(); 194 kfree(value);
186free_key: 195free_key:
187 kfree(key); 196 kfree(key);
188err_put: 197err_put:
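[Editor's note] map_lookup_elem() now copies the element into a kernel buffer while still under rcu_read_lock() and only calls copy_to_user() after unlocking — copy_to_user() may fault and sleep, which is illegal inside an RCU read-side critical section. The userspace syscall shape is unchanged; a minimal sketch (map_fd, key, and value_buf assumed set up by the caller):

    /* #include <linux/bpf.h>, <sys/syscall.h>, <unistd.h>, <stdio.h> */
    union bpf_attr attr = {
        .map_fd = map_fd,                        /* from BPF_MAP_CREATE */
        .key    = (__u64)(unsigned long)&key,
        .value  = (__u64)(unsigned long)value_buf,
    };

    if (syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)) < 0)
        perror("BPF_MAP_LOOKUP_ELEM");
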
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5d220234b3ca..1972b161c61e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,22 +58,23 @@ static int cpu_hotplug_disabled;
58 58
59static struct { 59static struct {
60 struct task_struct *active_writer; 60 struct task_struct *active_writer;
61 struct mutex lock; /* Synchronizes accesses to refcount, */ 61 /* wait queue to wake up the active_writer */
62 wait_queue_head_t wq;
63 /* verifies that no writer will get active while readers are active */
64 struct mutex lock;
62 /* 65 /*
63 * Also blocks the new readers during 66 * Also blocks the new readers during
64 * an ongoing cpu hotplug operation. 67 * an ongoing cpu hotplug operation.
65 */ 68 */
66 int refcount; 69 atomic_t refcount;
67 /* And allows lockless put_online_cpus(). */
68 atomic_t puts_pending;
69 70
70#ifdef CONFIG_DEBUG_LOCK_ALLOC 71#ifdef CONFIG_DEBUG_LOCK_ALLOC
71 struct lockdep_map dep_map; 72 struct lockdep_map dep_map;
72#endif 73#endif
73} cpu_hotplug = { 74} cpu_hotplug = {
74 .active_writer = NULL, 75 .active_writer = NULL,
76 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
75 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), 77 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
76 .refcount = 0,
77#ifdef CONFIG_DEBUG_LOCK_ALLOC 78#ifdef CONFIG_DEBUG_LOCK_ALLOC
78 .dep_map = {.name = "cpu_hotplug.lock" }, 79 .dep_map = {.name = "cpu_hotplug.lock" },
79#endif 80#endif
@@ -86,15 +87,6 @@ static struct {
86#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) 87#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
87#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) 88#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
88 89
89static void apply_puts_pending(int max)
90{
91 int delta;
92
93 if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
94 delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
95 cpu_hotplug.refcount -= delta;
96 }
97}
98 90
99void get_online_cpus(void) 91void get_online_cpus(void)
100{ 92{
@@ -103,8 +95,7 @@ void get_online_cpus(void)
103 return; 95 return;
104 cpuhp_lock_acquire_read(); 96 cpuhp_lock_acquire_read();
105 mutex_lock(&cpu_hotplug.lock); 97 mutex_lock(&cpu_hotplug.lock);
106 apply_puts_pending(65536); 98 atomic_inc(&cpu_hotplug.refcount);
107 cpu_hotplug.refcount++;
108 mutex_unlock(&cpu_hotplug.lock); 99 mutex_unlock(&cpu_hotplug.lock);
109} 100}
110EXPORT_SYMBOL_GPL(get_online_cpus); 101EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -116,8 +107,7 @@ bool try_get_online_cpus(void)
116 if (!mutex_trylock(&cpu_hotplug.lock)) 107 if (!mutex_trylock(&cpu_hotplug.lock))
117 return false; 108 return false;
118 cpuhp_lock_acquire_tryread(); 109 cpuhp_lock_acquire_tryread();
119 apply_puts_pending(65536); 110 atomic_inc(&cpu_hotplug.refcount);
120 cpu_hotplug.refcount++;
121 mutex_unlock(&cpu_hotplug.lock); 111 mutex_unlock(&cpu_hotplug.lock);
122 return true; 112 return true;
123} 113}
@@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus);
125 115
126void put_online_cpus(void) 116void put_online_cpus(void)
127{ 117{
118 int refcount;
119
128 if (cpu_hotplug.active_writer == current) 120 if (cpu_hotplug.active_writer == current)
129 return; 121 return;
130 if (!mutex_trylock(&cpu_hotplug.lock)) {
131 atomic_inc(&cpu_hotplug.puts_pending);
132 cpuhp_lock_release();
133 return;
134 }
135 122
136 if (WARN_ON(!cpu_hotplug.refcount)) 123 refcount = atomic_dec_return(&cpu_hotplug.refcount);
137 cpu_hotplug.refcount++; /* try to fix things up */ 124 if (WARN_ON(refcount < 0)) /* try to fix things up */
125 atomic_inc(&cpu_hotplug.refcount);
126
127 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
128 wake_up(&cpu_hotplug.wq);
138 129
139 if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
140 wake_up_process(cpu_hotplug.active_writer);
141 mutex_unlock(&cpu_hotplug.lock);
142 cpuhp_lock_release(); 130 cpuhp_lock_release();
143 131
144} 132}
@@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
168 */ 156 */
169void cpu_hotplug_begin(void) 157void cpu_hotplug_begin(void)
170{ 158{
171 cpu_hotplug.active_writer = current; 159 DEFINE_WAIT(wait);
172 160
161 cpu_hotplug.active_writer = current;
173 cpuhp_lock_acquire(); 162 cpuhp_lock_acquire();
163
174 for (;;) { 164 for (;;) {
175 mutex_lock(&cpu_hotplug.lock); 165 mutex_lock(&cpu_hotplug.lock);
176 apply_puts_pending(1); 166 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
177 if (likely(!cpu_hotplug.refcount)) 167 if (likely(!atomic_read(&cpu_hotplug.refcount)))
178 break; 168 break;
179 __set_current_state(TASK_UNINTERRUPTIBLE);
180 mutex_unlock(&cpu_hotplug.lock); 169 mutex_unlock(&cpu_hotplug.lock);
181 schedule(); 170 schedule();
182 } 171 }
172 finish_wait(&cpu_hotplug.wq, &wait);
183} 173}
184 174
185void cpu_hotplug_done(void) 175void cpu_hotplug_done(void)
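[Editor's note] Readers now bump a plain atomic_t and a writer sleeps on a real waitqueue, replacing the old puts_pending fixup dance; the reader-side API is untouched. Typical usage this protects (do_per_cpu_setup() is hypothetical):

    get_online_cpus();          /* blocks a concurrent cpu_hotplug_begin() */
    for_each_online_cpu(cpu)
        do_per_cpu_setup(cpu);  /* the online set stays stable in here */
    put_online_cpus();          /* may wake a writer parked on the wq */
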
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 882f835a0d85..7f2fbb8b5069 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -872,22 +872,32 @@ void perf_pmu_enable(struct pmu *pmu)
872 pmu->pmu_enable(pmu); 872 pmu->pmu_enable(pmu);
873} 873}
874 874
875static DEFINE_PER_CPU(struct list_head, rotation_list); 875static DEFINE_PER_CPU(struct list_head, active_ctx_list);
876 876
877/* 877/*
878 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized 878 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
879 * because they're strictly cpu affine and rotate_start is called with IRQs 879 * perf_event_task_tick() are fully serialized because they're strictly cpu
880 * disabled, while rotate_context is called from IRQ context. 880 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
881 * disabled, while perf_event_task_tick is called from IRQ context.
881 */ 882 */
882static void perf_pmu_rotate_start(struct pmu *pmu) 883static void perf_event_ctx_activate(struct perf_event_context *ctx)
883{ 884{
884 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 885 struct list_head *head = this_cpu_ptr(&active_ctx_list);
885 struct list_head *head = this_cpu_ptr(&rotation_list);
886 886
887 WARN_ON(!irqs_disabled()); 887 WARN_ON(!irqs_disabled());
888 888
889 if (list_empty(&cpuctx->rotation_list)) 889 WARN_ON(!list_empty(&ctx->active_ctx_list));
890 list_add(&cpuctx->rotation_list, head); 890
891 list_add(&ctx->active_ctx_list, head);
892}
893
894static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
895{
896 WARN_ON(!irqs_disabled());
897
898 WARN_ON(list_empty(&ctx->active_ctx_list));
899
900 list_del_init(&ctx->active_ctx_list);
891} 901}
892 902
893static void get_ctx(struct perf_event_context *ctx) 903static void get_ctx(struct perf_event_context *ctx)
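perf_event_ctx_activate()/perf_event_ctx_deactivate() keep a context on the per-CPU active list only while it has active events; their callers (event_sched_in()/event_sched_out(), further down) invoke them on the nr_active 0->1 and 1->0 transitions. A minimal model of that bookkeeping, with a hand-rolled list standing in for the kernel's list_head (illustrative names only):

    #include <assert.h>

    struct node { struct node *next, *prev; };
    struct ctx  { struct node link; int nr_active; };

    /* one head per CPU in the real code; a single head here */
    static struct node active = { &active, &active };

    static void node_add(struct node *n, struct node *h)
    {
            n->next = h->next;
            n->prev = h;
            h->next->prev = n;
            h->next = n;
    }

    static void node_del(struct node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            n->next = n->prev = n;
    }

    static void sched_in(struct ctx *c)
    {
            if (!c->nr_active++)          /* 0 -> 1: becomes active */
                    node_add(&c->link, &active);
    }

    static void sched_out(struct ctx *c)
    {
            assert(c->nr_active > 0);
            if (!--c->nr_active)          /* 1 -> 0: goes idle */
                    node_del(&c->link);
    }

perf_event_task_tick() can then walk only the contexts that actually have active events, instead of every cpuctx plus its task context as the old rotation_list required.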
@@ -907,6 +917,84 @@ static void put_ctx(struct perf_event_context *ctx)
907} 917}
908 918
909/* 919/*
920 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
921 * perf_pmu_migrate_context() we need some magic.
922 *
923 * Those places that change perf_event::ctx will hold both
924 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
925 *
926 * Lock ordering is by mutex address. There is one other site where
927 * perf_event_context::mutex nests and that is put_event(). But remember that
928 * that is a parent<->child context relation, and migration does not affect
929 * children, therefore these two orderings should not interact.
930 *
931 * The change in perf_event::ctx does not affect children (as claimed above)
932 * because the sys_perf_event_open() case will install a new event and break
933 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
934 * concerned with cpuctx and that doesn't have children.
935 *
936 * The places that change perf_event::ctx will issue:
937 *
938 * perf_remove_from_context();
939 * synchronize_rcu();
940 * perf_install_in_context();
941 *
942 * to effect the change. The remove_from_context() + synchronize_rcu() should
943 * quiesce the event, after which we can install it in the new location. This
944 * means that only external vectors (perf_fops, prctl) can perturb the event
945 * while in transit. Therefore all such accessors should also acquire
946 * perf_event_context::mutex to serialize against this.
947 *
948 * However, because event->ctx can change while we're waiting to acquire
949 * ctx->mutex, we must be careful and use the below perf_event_ctx_lock()
950 * function.
951 *
952 * Lock order:
953 * task_struct::perf_event_mutex
954 * perf_event_context::mutex
955 * perf_event_context::lock
956 * perf_event::child_mutex;
957 * perf_event::mmap_mutex
958 * mmap_sem
959 */
960static struct perf_event_context *
961perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
962{
963 struct perf_event_context *ctx;
964
965again:
966 rcu_read_lock();
967 ctx = ACCESS_ONCE(event->ctx);
968 if (!atomic_inc_not_zero(&ctx->refcount)) {
969 rcu_read_unlock();
970 goto again;
971 }
972 rcu_read_unlock();
973
974 mutex_lock_nested(&ctx->mutex, nesting);
975 if (event->ctx != ctx) {
976 mutex_unlock(&ctx->mutex);
977 put_ctx(ctx);
978 goto again;
979 }
980
981 return ctx;
982}
983
984static inline struct perf_event_context *
985perf_event_ctx_lock(struct perf_event *event)
986{
987 return perf_event_ctx_lock_nested(event, 0);
988}
989
990static void perf_event_ctx_unlock(struct perf_event *event,
991 struct perf_event_context *ctx)
992{
993 mutex_unlock(&ctx->mutex);
994 put_ctx(ctx);
995}
996
997/*
910 * This must be done under the ctx->lock, such as to serialize against 998 * This must be done under the ctx->lock, such as to serialize against
911 * context_equiv(), therefore we cannot call put_ctx() since that might end up 999 * context_equiv(), therefore we cannot call put_ctx() since that might end up
912 * calling scheduler related locks and ctx->lock nests inside those. 1000 * calling scheduler related locks and ctx->lock nests inside those.
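perf_event_ctx_lock_nested() is an instance of a general pattern for locking an object through a pointer that can change underneath you: take a reference, take the lock, then re-check that the pointer still points where it did, retrying otherwise. Stripped of RCU, the shape is roughly as below; this is a userspace sketch that glosses over lifetime by assuming ctx objects stay allocated, whereas the kernel needs rcu_read_lock() plus atomic_inc_not_zero() so the initial reference cannot land on freed memory:

    #include <pthread.h>
    #include <stdatomic.h>

    struct ctx {
            pthread_mutex_t mutex;
            atomic_int refcount;
    };

    struct event {
            struct ctx *_Atomic ctx;      /* may be swizzled concurrently */
    };

    static void get_ctx(struct ctx *c) { atomic_fetch_add(&c->refcount, 1); }
    static void put_ctx(struct ctx *c) { atomic_fetch_sub(&c->refcount, 1); }

    static struct ctx *event_ctx_lock(struct event *e)
    {
            struct ctx *c;

            for (;;) {
                    c = atomic_load(&e->ctx);       /* may already be stale */
                    get_ctx(c);
                    pthread_mutex_lock(&c->mutex);
                    if (atomic_load(&e->ctx) == c)
                            return c;               /* locked and pinned */
                    pthread_mutex_unlock(&c->mutex);
                    put_ctx(c);                     /* lost a race: retry */
            }
    }

Once event_ctx_lock() returns with ctx->mutex held, the swizzling sites (which hold both the old and the new ctx->mutex) cannot move the event until the caller unlocks.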
@@ -1155,8 +1243,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1155 ctx->nr_branch_stack++; 1243 ctx->nr_branch_stack++;
1156 1244
1157 list_add_rcu(&event->event_entry, &ctx->event_list); 1245 list_add_rcu(&event->event_entry, &ctx->event_list);
1158 if (!ctx->nr_events)
1159 perf_pmu_rotate_start(ctx->pmu);
1160 ctx->nr_events++; 1246 ctx->nr_events++;
1161 if (event->attr.inherit_stat) 1247 if (event->attr.inherit_stat)
1162 ctx->nr_stat++; 1248 ctx->nr_stat++;
@@ -1275,6 +1361,8 @@ static void perf_group_attach(struct perf_event *event)
1275 if (group_leader == event) 1361 if (group_leader == event)
1276 return; 1362 return;
1277 1363
1364 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1365
1278 if (group_leader->group_flags & PERF_GROUP_SOFTWARE && 1366 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1279 !is_software_event(event)) 1367 !is_software_event(event))
1280 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; 1368 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
@@ -1296,6 +1384,10 @@ static void
1296list_del_event(struct perf_event *event, struct perf_event_context *ctx) 1384list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1297{ 1385{
1298 struct perf_cpu_context *cpuctx; 1386 struct perf_cpu_context *cpuctx;
1387
1388 WARN_ON_ONCE(event->ctx != ctx);
1389 lockdep_assert_held(&ctx->lock);
1390
1299 /* 1391 /*
1300 * We can have double detach due to exit/hot-unplug + close. 1392 * We can have double detach due to exit/hot-unplug + close.
1301 */ 1393 */
@@ -1380,6 +1472,8 @@ static void perf_group_detach(struct perf_event *event)
1380 1472
1381 /* Inherit group flags from the previous leader */ 1473 /* Inherit group flags from the previous leader */
1382 sibling->group_flags = event->group_flags; 1474 sibling->group_flags = event->group_flags;
1475
1476 WARN_ON_ONCE(sibling->ctx != event->ctx);
1383 } 1477 }
1384 1478
1385out: 1479out:
@@ -1442,6 +1536,10 @@ event_sched_out(struct perf_event *event,
1442{ 1536{
1443 u64 tstamp = perf_event_time(event); 1537 u64 tstamp = perf_event_time(event);
1444 u64 delta; 1538 u64 delta;
1539
1540 WARN_ON_ONCE(event->ctx != ctx);
1541 lockdep_assert_held(&ctx->lock);
1542
1445 /* 1543 /*
1446 * An event which could not be activated because of 1544 * An event which could not be activated because of
1447 * filter mismatch still needs to have its timings 1545 * filter mismatch still needs to have its timings
@@ -1471,7 +1569,8 @@ event_sched_out(struct perf_event *event,
1471 1569
1472 if (!is_software_event(event)) 1570 if (!is_software_event(event))
1473 cpuctx->active_oncpu--; 1571 cpuctx->active_oncpu--;
1474 ctx->nr_active--; 1572 if (!--ctx->nr_active)
1573 perf_event_ctx_deactivate(ctx);
1475 if (event->attr.freq && event->attr.sample_freq) 1574 if (event->attr.freq && event->attr.sample_freq)
1476 ctx->nr_freq--; 1575 ctx->nr_freq--;
1477 if (event->attr.exclusive || !cpuctx->active_oncpu) 1576 if (event->attr.exclusive || !cpuctx->active_oncpu)
@@ -1654,7 +1753,7 @@ int __perf_event_disable(void *info)
1654 * is the current context on this CPU and preemption is disabled, 1753 * is the current context on this CPU and preemption is disabled,
1655 * hence we can't get into perf_event_task_sched_out for this context. 1754 * hence we can't get into perf_event_task_sched_out for this context.
1656 */ 1755 */
1657void perf_event_disable(struct perf_event *event) 1756static void _perf_event_disable(struct perf_event *event)
1658{ 1757{
1659 struct perf_event_context *ctx = event->ctx; 1758 struct perf_event_context *ctx = event->ctx;
1660 struct task_struct *task = ctx->task; 1759 struct task_struct *task = ctx->task;
@@ -1695,6 +1794,19 @@ retry:
1695 } 1794 }
1696 raw_spin_unlock_irq(&ctx->lock); 1795 raw_spin_unlock_irq(&ctx->lock);
1697} 1796}
1797
1798/*
1799 * Strictly speaking kernel users cannot create groups and therefore this
1800 * interface does not need the perf_event_ctx_lock() magic.
1801 */
1802void perf_event_disable(struct perf_event *event)
1803{
1804 struct perf_event_context *ctx;
1805
1806 ctx = perf_event_ctx_lock(event);
1807 _perf_event_disable(event);
1808 perf_event_ctx_unlock(event, ctx);
1809}
1698EXPORT_SYMBOL_GPL(perf_event_disable); 1810EXPORT_SYMBOL_GPL(perf_event_disable);
1699 1811
1700static void perf_set_shadow_time(struct perf_event *event, 1812static void perf_set_shadow_time(struct perf_event *event,
@@ -1782,7 +1894,8 @@ event_sched_in(struct perf_event *event,
1782 1894
1783 if (!is_software_event(event)) 1895 if (!is_software_event(event))
1784 cpuctx->active_oncpu++; 1896 cpuctx->active_oncpu++;
1785 ctx->nr_active++; 1897 if (!ctx->nr_active++)
1898 perf_event_ctx_activate(ctx);
1786 if (event->attr.freq && event->attr.sample_freq) 1899 if (event->attr.freq && event->attr.sample_freq)
1787 ctx->nr_freq++; 1900 ctx->nr_freq++;
1788 1901
@@ -2158,7 +2271,7 @@ unlock:
2158 * perf_event_for_each_child or perf_event_for_each as described 2271 * perf_event_for_each_child or perf_event_for_each as described
2159 * for perf_event_disable. 2272 * for perf_event_disable.
2160 */ 2273 */
2161void perf_event_enable(struct perf_event *event) 2274static void _perf_event_enable(struct perf_event *event)
2162{ 2275{
2163 struct perf_event_context *ctx = event->ctx; 2276 struct perf_event_context *ctx = event->ctx;
2164 struct task_struct *task = ctx->task; 2277 struct task_struct *task = ctx->task;
@@ -2214,9 +2327,21 @@ retry:
2214out: 2327out:
2215 raw_spin_unlock_irq(&ctx->lock); 2328 raw_spin_unlock_irq(&ctx->lock);
2216} 2329}
2330
2331/*
2332 * See perf_event_disable();
2333 */
2334void perf_event_enable(struct perf_event *event)
2335{
2336 struct perf_event_context *ctx;
2337
2338 ctx = perf_event_ctx_lock(event);
2339 _perf_event_enable(event);
2340 perf_event_ctx_unlock(event, ctx);
2341}
2217EXPORT_SYMBOL_GPL(perf_event_enable); 2342EXPORT_SYMBOL_GPL(perf_event_enable);
2218 2343
2219int perf_event_refresh(struct perf_event *event, int refresh) 2344static int _perf_event_refresh(struct perf_event *event, int refresh)
2220{ 2345{
2221 /* 2346 /*
2222 * not supported on inherited events 2347 * not supported on inherited events
@@ -2225,10 +2350,25 @@ int perf_event_refresh(struct perf_event *event, int refresh)
2225 return -EINVAL; 2350 return -EINVAL;
2226 2351
2227 atomic_add(refresh, &event->event_limit); 2352 atomic_add(refresh, &event->event_limit);
2228 perf_event_enable(event); 2353 _perf_event_enable(event);
2229 2354
2230 return 0; 2355 return 0;
2231} 2356}
2357
2358/*
2359 * See perf_event_disable()
2360 */
2361int perf_event_refresh(struct perf_event *event, int refresh)
2362{
2363 struct perf_event_context *ctx;
2364 int ret;
2365
2366 ctx = perf_event_ctx_lock(event);
2367 ret = _perf_event_refresh(event, refresh);
2368 perf_event_ctx_unlock(event, ctx);
2369
2370 return ret;
2371}
2232EXPORT_SYMBOL_GPL(perf_event_refresh); 2372EXPORT_SYMBOL_GPL(perf_event_refresh);
2233 2373
2234static void ctx_sched_out(struct perf_event_context *ctx, 2374static void ctx_sched_out(struct perf_event_context *ctx,
@@ -2612,12 +2752,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
2612 2752
2613 perf_pmu_enable(ctx->pmu); 2753 perf_pmu_enable(ctx->pmu);
2614 perf_ctx_unlock(cpuctx, ctx); 2754 perf_ctx_unlock(cpuctx, ctx);
2615
2616 /*
2617 * Since these rotations are per-cpu, we need to ensure the
2618 * cpu-context we got scheduled on is actually rotating.
2619 */
2620 perf_pmu_rotate_start(ctx->pmu);
2621} 2755}
2622 2756
2623/* 2757/*
@@ -2905,25 +3039,18 @@ static void rotate_ctx(struct perf_event_context *ctx)
2905 list_rotate_left(&ctx->flexible_groups); 3039 list_rotate_left(&ctx->flexible_groups);
2906} 3040}
2907 3041
2908/*
2909 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2910 * because they're strictly cpu affine and rotate_start is called with IRQs
2911 * disabled, while rotate_context is called from IRQ context.
2912 */
2913static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3042static int perf_rotate_context(struct perf_cpu_context *cpuctx)
2914{ 3043{
2915 struct perf_event_context *ctx = NULL; 3044 struct perf_event_context *ctx = NULL;
2916 int rotate = 0, remove = 1; 3045 int rotate = 0;
2917 3046
2918 if (cpuctx->ctx.nr_events) { 3047 if (cpuctx->ctx.nr_events) {
2919 remove = 0;
2920 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3048 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2921 rotate = 1; 3049 rotate = 1;
2922 } 3050 }
2923 3051
2924 ctx = cpuctx->task_ctx; 3052 ctx = cpuctx->task_ctx;
2925 if (ctx && ctx->nr_events) { 3053 if (ctx && ctx->nr_events) {
2926 remove = 0;
2927 if (ctx->nr_events != ctx->nr_active) 3054 if (ctx->nr_events != ctx->nr_active)
2928 rotate = 1; 3055 rotate = 1;
2929 } 3056 }
@@ -2947,8 +3074,6 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
2947 perf_pmu_enable(cpuctx->ctx.pmu); 3074 perf_pmu_enable(cpuctx->ctx.pmu);
2948 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3075 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2949done: 3076done:
2950 if (remove)
2951 list_del_init(&cpuctx->rotation_list);
2952 3077
2953 return rotate; 3078 return rotate;
2954} 3079}
@@ -2966,9 +3091,8 @@ bool perf_event_can_stop_tick(void)
2966 3091
2967void perf_event_task_tick(void) 3092void perf_event_task_tick(void)
2968{ 3093{
2969 struct list_head *head = this_cpu_ptr(&rotation_list); 3094 struct list_head *head = this_cpu_ptr(&active_ctx_list);
2970 struct perf_cpu_context *cpuctx, *tmp; 3095 struct perf_event_context *ctx, *tmp;
2971 struct perf_event_context *ctx;
2972 int throttled; 3096 int throttled;
2973 3097
2974 WARN_ON(!irqs_disabled()); 3098 WARN_ON(!irqs_disabled());
@@ -2976,14 +3100,8 @@ void perf_event_task_tick(void)
2976 __this_cpu_inc(perf_throttled_seq); 3100 __this_cpu_inc(perf_throttled_seq);
2977 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3101 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2978 3102
2979 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { 3103 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
2980 ctx = &cpuctx->ctx;
2981 perf_adjust_freq_unthr_context(ctx, throttled); 3104 perf_adjust_freq_unthr_context(ctx, throttled);
2982
2983 ctx = cpuctx->task_ctx;
2984 if (ctx)
2985 perf_adjust_freq_unthr_context(ctx, throttled);
2986 }
2987} 3105}
2988 3106
2989static int event_enable_on_exec(struct perf_event *event, 3107static int event_enable_on_exec(struct perf_event *event,
@@ -3142,6 +3260,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
3142{ 3260{
3143 raw_spin_lock_init(&ctx->lock); 3261 raw_spin_lock_init(&ctx->lock);
3144 mutex_init(&ctx->mutex); 3262 mutex_init(&ctx->mutex);
3263 INIT_LIST_HEAD(&ctx->active_ctx_list);
3145 INIT_LIST_HEAD(&ctx->pinned_groups); 3264 INIT_LIST_HEAD(&ctx->pinned_groups);
3146 INIT_LIST_HEAD(&ctx->flexible_groups); 3265 INIT_LIST_HEAD(&ctx->flexible_groups);
3147 INIT_LIST_HEAD(&ctx->event_list); 3266 INIT_LIST_HEAD(&ctx->event_list);
@@ -3421,7 +3540,16 @@ static void perf_remove_from_owner(struct perf_event *event)
3421 rcu_read_unlock(); 3540 rcu_read_unlock();
3422 3541
3423 if (owner) { 3542 if (owner) {
3424 mutex_lock(&owner->perf_event_mutex); 3543 /*
3544 * If we're here through perf_event_exit_task() we're already
3545 * holding ctx->mutex which would be an inversion wrt. the
3546 * normal lock order.
3547 *
3548 * However we can safely take this lock because its the child
3549 * ctx->mutex.
3550 */
3551 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3552
3425 /* 3553 /*
3426 * We have to re-check the event->owner field, if it is cleared 3554 * We have to re-check the event->owner field, if it is cleared
3427 * we raced with perf_event_exit_task(), acquiring the mutex 3555 * we raced with perf_event_exit_task(), acquiring the mutex
@@ -3440,7 +3568,7 @@ static void perf_remove_from_owner(struct perf_event *event)
3440 */ 3568 */
3441static void put_event(struct perf_event *event) 3569static void put_event(struct perf_event *event)
3442{ 3570{
3443 struct perf_event_context *ctx = event->ctx; 3571 struct perf_event_context *ctx;
3444 3572
3445 if (!atomic_long_dec_and_test(&event->refcount)) 3573 if (!atomic_long_dec_and_test(&event->refcount))
3446 return; 3574 return;
@@ -3448,7 +3576,6 @@ static void put_event(struct perf_event *event)
3448 if (!is_kernel_event(event)) 3576 if (!is_kernel_event(event))
3449 perf_remove_from_owner(event); 3577 perf_remove_from_owner(event);
3450 3578
3451 WARN_ON_ONCE(ctx->parent_ctx);
3452 /* 3579 /*
3453 * There are two ways this annotation is useful: 3580 * There are two ways this annotation is useful:
3454 * 3581 *
@@ -3461,7 +3588,8 @@ static void put_event(struct perf_event *event)
3461 * the last filedesc died, so there is no possibility 3588 * the last filedesc died, so there is no possibility
3462 * to trigger the AB-BA case. 3589 * to trigger the AB-BA case.
3463 */ 3590 */
3464 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); 3591 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
3592 WARN_ON_ONCE(ctx->parent_ctx);
3465 perf_remove_from_context(event, true); 3593 perf_remove_from_context(event, true);
3466 mutex_unlock(&ctx->mutex); 3594 mutex_unlock(&ctx->mutex);
3467 3595
@@ -3547,12 +3675,13 @@ static int perf_event_read_group(struct perf_event *event,
3547 u64 read_format, char __user *buf) 3675 u64 read_format, char __user *buf)
3548{ 3676{
3549 struct perf_event *leader = event->group_leader, *sub; 3677 struct perf_event *leader = event->group_leader, *sub;
3550 int n = 0, size = 0, ret = -EFAULT;
3551 struct perf_event_context *ctx = leader->ctx; 3678 struct perf_event_context *ctx = leader->ctx;
3552 u64 values[5]; 3679 int n = 0, size = 0, ret;
3553 u64 count, enabled, running; 3680 u64 count, enabled, running;
3681 u64 values[5];
3682
3683 lockdep_assert_held(&ctx->mutex);
3554 3684
3555 mutex_lock(&ctx->mutex);
3556 count = perf_event_read_value(leader, &enabled, &running); 3685 count = perf_event_read_value(leader, &enabled, &running);
3557 3686
3558 values[n++] = 1 + leader->nr_siblings; 3687 values[n++] = 1 + leader->nr_siblings;
@@ -3567,7 +3696,7 @@ static int perf_event_read_group(struct perf_event *event,
3567 size = n * sizeof(u64); 3696 size = n * sizeof(u64);
3568 3697
3569 if (copy_to_user(buf, values, size)) 3698 if (copy_to_user(buf, values, size))
3570 goto unlock; 3699 return -EFAULT;
3571 3700
3572 ret = size; 3701 ret = size;
3573 3702
@@ -3581,14 +3710,11 @@ static int perf_event_read_group(struct perf_event *event,
3581 size = n * sizeof(u64); 3710 size = n * sizeof(u64);
3582 3711
3583 if (copy_to_user(buf + ret, values, size)) { 3712 if (copy_to_user(buf + ret, values, size)) {
3584 ret = -EFAULT; 3713 return -EFAULT;
3585 goto unlock;
3586 } 3714 }
3587 3715
3588 ret += size; 3716 ret += size;
3589 } 3717 }
3590unlock:
3591 mutex_unlock(&ctx->mutex);
3592 3718
3593 return ret; 3719 return ret;
3594} 3720}
@@ -3660,8 +3786,14 @@ static ssize_t
3660perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3786perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3661{ 3787{
3662 struct perf_event *event = file->private_data; 3788 struct perf_event *event = file->private_data;
3789 struct perf_event_context *ctx;
3790 int ret;
3791
3792 ctx = perf_event_ctx_lock(event);
3793 ret = perf_read_hw(event, buf, count);
3794 perf_event_ctx_unlock(event, ctx);
3663 3795
3664 return perf_read_hw(event, buf, count); 3796 return ret;
3665} 3797}
3666 3798
3667static unsigned int perf_poll(struct file *file, poll_table *wait) 3799static unsigned int perf_poll(struct file *file, poll_table *wait)
@@ -3687,7 +3819,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
3687 return events; 3819 return events;
3688} 3820}
3689 3821
3690static void perf_event_reset(struct perf_event *event) 3822static void _perf_event_reset(struct perf_event *event)
3691{ 3823{
3692 (void)perf_event_read(event); 3824 (void)perf_event_read(event);
3693 local64_set(&event->count, 0); 3825 local64_set(&event->count, 0);
@@ -3706,6 +3838,7 @@ static void perf_event_for_each_child(struct perf_event *event,
3706 struct perf_event *child; 3838 struct perf_event *child;
3707 3839
3708 WARN_ON_ONCE(event->ctx->parent_ctx); 3840 WARN_ON_ONCE(event->ctx->parent_ctx);
3841
3709 mutex_lock(&event->child_mutex); 3842 mutex_lock(&event->child_mutex);
3710 func(event); 3843 func(event);
3711 list_for_each_entry(child, &event->child_list, child_list) 3844 list_for_each_entry(child, &event->child_list, child_list)
@@ -3719,14 +3852,13 @@ static void perf_event_for_each(struct perf_event *event,
3719 struct perf_event_context *ctx = event->ctx; 3852 struct perf_event_context *ctx = event->ctx;
3720 struct perf_event *sibling; 3853 struct perf_event *sibling;
3721 3854
3722 WARN_ON_ONCE(ctx->parent_ctx); 3855 lockdep_assert_held(&ctx->mutex);
3723 mutex_lock(&ctx->mutex); 3856
3724 event = event->group_leader; 3857 event = event->group_leader;
3725 3858
3726 perf_event_for_each_child(event, func); 3859 perf_event_for_each_child(event, func);
3727 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3860 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3728 perf_event_for_each_child(sibling, func); 3861 perf_event_for_each_child(sibling, func);
3729 mutex_unlock(&ctx->mutex);
3730} 3862}
3731 3863
3732static int perf_event_period(struct perf_event *event, u64 __user *arg) 3864static int perf_event_period(struct perf_event *event, u64 __user *arg)
@@ -3796,25 +3928,24 @@ static int perf_event_set_output(struct perf_event *event,
3796 struct perf_event *output_event); 3928 struct perf_event *output_event);
3797static int perf_event_set_filter(struct perf_event *event, void __user *arg); 3929static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3798 3930
3799static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 3931static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
3800{ 3932{
3801 struct perf_event *event = file->private_data;
3802 void (*func)(struct perf_event *); 3933 void (*func)(struct perf_event *);
3803 u32 flags = arg; 3934 u32 flags = arg;
3804 3935
3805 switch (cmd) { 3936 switch (cmd) {
3806 case PERF_EVENT_IOC_ENABLE: 3937 case PERF_EVENT_IOC_ENABLE:
3807 func = perf_event_enable; 3938 func = _perf_event_enable;
3808 break; 3939 break;
3809 case PERF_EVENT_IOC_DISABLE: 3940 case PERF_EVENT_IOC_DISABLE:
3810 func = perf_event_disable; 3941 func = _perf_event_disable;
3811 break; 3942 break;
3812 case PERF_EVENT_IOC_RESET: 3943 case PERF_EVENT_IOC_RESET:
3813 func = perf_event_reset; 3944 func = _perf_event_reset;
3814 break; 3945 break;
3815 3946
3816 case PERF_EVENT_IOC_REFRESH: 3947 case PERF_EVENT_IOC_REFRESH:
3817 return perf_event_refresh(event, arg); 3948 return _perf_event_refresh(event, arg);
3818 3949
3819 case PERF_EVENT_IOC_PERIOD: 3950 case PERF_EVENT_IOC_PERIOD:
3820 return perf_event_period(event, (u64 __user *)arg); 3951 return perf_event_period(event, (u64 __user *)arg);
@@ -3861,6 +3992,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3861 return 0; 3992 return 0;
3862} 3993}
3863 3994
3995static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3996{
3997 struct perf_event *event = file->private_data;
3998 struct perf_event_context *ctx;
3999 long ret;
4000
4001 ctx = perf_event_ctx_lock(event);
4002 ret = _perf_ioctl(event, cmd, arg);
4003 perf_event_ctx_unlock(event, ctx);
4004
4005 return ret;
4006}
4007
3864#ifdef CONFIG_COMPAT 4008#ifdef CONFIG_COMPAT
3865static long perf_compat_ioctl(struct file *file, unsigned int cmd, 4009static long perf_compat_ioctl(struct file *file, unsigned int cmd,
3866 unsigned long arg) 4010 unsigned long arg)
@@ -3883,11 +4027,15 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,
3883 4027
3884int perf_event_task_enable(void) 4028int perf_event_task_enable(void)
3885{ 4029{
4030 struct perf_event_context *ctx;
3886 struct perf_event *event; 4031 struct perf_event *event;
3887 4032
3888 mutex_lock(&current->perf_event_mutex); 4033 mutex_lock(&current->perf_event_mutex);
3889 list_for_each_entry(event, &current->perf_event_list, owner_entry) 4034 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
3890 perf_event_for_each_child(event, perf_event_enable); 4035 ctx = perf_event_ctx_lock(event);
4036 perf_event_for_each_child(event, _perf_event_enable);
4037 perf_event_ctx_unlock(event, ctx);
4038 }
3891 mutex_unlock(&current->perf_event_mutex); 4039 mutex_unlock(&current->perf_event_mutex);
3892 4040
3893 return 0; 4041 return 0;
@@ -3895,11 +4043,15 @@ int perf_event_task_enable(void)
3895 4043
3896int perf_event_task_disable(void) 4044int perf_event_task_disable(void)
3897{ 4045{
4046 struct perf_event_context *ctx;
3898 struct perf_event *event; 4047 struct perf_event *event;
3899 4048
3900 mutex_lock(&current->perf_event_mutex); 4049 mutex_lock(&current->perf_event_mutex);
3901 list_for_each_entry(event, &current->perf_event_list, owner_entry) 4050 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
3902 perf_event_for_each_child(event, perf_event_disable); 4051 ctx = perf_event_ctx_lock(event);
4052 perf_event_for_each_child(event, _perf_event_disable);
4053 perf_event_ctx_unlock(event, ctx);
4054 }
3903 mutex_unlock(&current->perf_event_mutex); 4055 mutex_unlock(&current->perf_event_mutex);
3904 4056
3905 return 0; 4057 return 0;
@@ -5889,6 +6041,8 @@ end:
5889 rcu_read_unlock(); 6041 rcu_read_unlock();
5890} 6042}
5891 6043
6044DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
6045
5892int perf_swevent_get_recursion_context(void) 6046int perf_swevent_get_recursion_context(void)
5893{ 6047{
5894 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6048 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
@@ -5904,21 +6058,30 @@ inline void perf_swevent_put_recursion_context(int rctx)
5904 put_recursion_context(swhash->recursion, rctx); 6058 put_recursion_context(swhash->recursion, rctx);
5905} 6059}
5906 6060
5907void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6061void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
5908{ 6062{
5909 struct perf_sample_data data; 6063 struct perf_sample_data data;
5910 int rctx;
5911 6064
5912 preempt_disable_notrace(); 6065 if (WARN_ON_ONCE(!regs))
5913 rctx = perf_swevent_get_recursion_context();
5914 if (rctx < 0)
5915 return; 6066 return;
5916 6067
5917 perf_sample_data_init(&data, addr, 0); 6068 perf_sample_data_init(&data, addr, 0);
5918
5919 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6069 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
6070}
6071
6072void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6073{
6074 int rctx;
6075
6076 preempt_disable_notrace();
6077 rctx = perf_swevent_get_recursion_context();
6078 if (unlikely(rctx < 0))
6079 goto fail;
6080
6081 ___perf_sw_event(event_id, nr, regs, addr);
5920 6082
5921 perf_swevent_put_recursion_context(rctx); 6083 perf_swevent_put_recursion_context(rctx);
6084fail:
5922 preempt_enable_notrace(); 6085 preempt_enable_notrace();
5923} 6086}
5924 6087
@@ -6776,12 +6939,10 @@ skip_type:
6776 __perf_event_init_context(&cpuctx->ctx); 6939 __perf_event_init_context(&cpuctx->ctx);
6777 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 6940 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
6778 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 6941 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
6779 cpuctx->ctx.type = cpu_context;
6780 cpuctx->ctx.pmu = pmu; 6942 cpuctx->ctx.pmu = pmu;
6781 6943
6782 __perf_cpu_hrtimer_init(cpuctx, cpu); 6944 __perf_cpu_hrtimer_init(cpuctx, cpu);
6783 6945
6784 INIT_LIST_HEAD(&cpuctx->rotation_list);
6785 cpuctx->unique_pmu = pmu; 6946 cpuctx->unique_pmu = pmu;
6786 } 6947 }
6787 6948
@@ -6854,6 +7015,20 @@ void perf_pmu_unregister(struct pmu *pmu)
6854} 7015}
6855EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7016EXPORT_SYMBOL_GPL(perf_pmu_unregister);
6856 7017
7018static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
7019{
7020 int ret;
7021
7022 if (!try_module_get(pmu->module))
7023 return -ENODEV;
7024 event->pmu = pmu;
7025 ret = pmu->event_init(event);
7026 if (ret)
7027 module_put(pmu->module);
7028
7029 return ret;
7030}
7031
6857struct pmu *perf_init_event(struct perf_event *event) 7032struct pmu *perf_init_event(struct perf_event *event)
6858{ 7033{
6859 struct pmu *pmu = NULL; 7034 struct pmu *pmu = NULL;
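perf_try_init_event() factors out the acquire/init/release-on-error sequence that perf_init_event() previously open-coded twice: pin the PMU's module, attempt the init, and drop the pin again on failure so no reference leaks. The same shape in miniature, where resource_get()/resource_put() are hypothetical stand-ins for try_module_get()/module_put():

    #include <errno.h>
    #include <stdbool.h>

    static bool resource_get(void *owner) { (void)owner; return true; }
    static void resource_put(void *owner) { (void)owner; }

    static int try_init(void *owner, int (*init)(void))
    {
            int ret;

            if (!resource_get(owner))     /* pin the provider first */
                    return -ENODEV;
            ret = init();
            if (ret)
                    resource_put(owner);  /* failure: leave no dangling pin */
            return ret;                   /* success: pin dropped at teardown */
    }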
@@ -6866,24 +7041,14 @@ struct pmu *perf_init_event(struct perf_event *event)
6866 pmu = idr_find(&pmu_idr, event->attr.type); 7041 pmu = idr_find(&pmu_idr, event->attr.type);
6867 rcu_read_unlock(); 7042 rcu_read_unlock();
6868 if (pmu) { 7043 if (pmu) {
6869 if (!try_module_get(pmu->module)) { 7044 ret = perf_try_init_event(pmu, event);
6870 pmu = ERR_PTR(-ENODEV);
6871 goto unlock;
6872 }
6873 event->pmu = pmu;
6874 ret = pmu->event_init(event);
6875 if (ret) 7045 if (ret)
6876 pmu = ERR_PTR(ret); 7046 pmu = ERR_PTR(ret);
6877 goto unlock; 7047 goto unlock;
6878 } 7048 }
6879 7049
6880 list_for_each_entry_rcu(pmu, &pmus, entry) { 7050 list_for_each_entry_rcu(pmu, &pmus, entry) {
6881 if (!try_module_get(pmu->module)) { 7051 ret = perf_try_init_event(pmu, event);
6882 pmu = ERR_PTR(-ENODEV);
6883 goto unlock;
6884 }
6885 event->pmu = pmu;
6886 ret = pmu->event_init(event);
6887 if (!ret) 7052 if (!ret)
6888 goto unlock; 7053 goto unlock;
6889 7054
@@ -7247,6 +7412,15 @@ out:
7247 return ret; 7412 return ret;
7248} 7413}
7249 7414
7415static void mutex_lock_double(struct mutex *a, struct mutex *b)
7416{
7417 if (b < a)
7418 swap(a, b);
7419
7420 mutex_lock(a);
7421 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
7422}
7423
7250/** 7424/**
7251 * sys_perf_event_open - open a performance event, associate it to a task/cpu 7425 * sys_perf_event_open - open a performance event, associate it to a task/cpu
7252 * 7426 *
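mutex_lock_double() sidesteps AB-BA deadlock by imposing a global order on the pair: whichever mutex has the lower address is always taken first, so two tasks locking the same two mutexes can never each hold one and wait on the other. The trick is portable; a pthread rendering, illustrative only:

    #include <pthread.h>

    static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (b < a) {                  /* normalize to address order */
                    pthread_mutex_t *t = a;
                    a = b;
                    b = t;
            }
            pthread_mutex_lock(a);        /* lower address first */
            pthread_mutex_lock(b);
    }

    static void unlock_double(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            pthread_mutex_unlock(a);      /* release order is irrelevant */
            pthread_mutex_unlock(b);
    }

lock_double(&m1, &m2) and lock_double(&m2, &m1) acquire the pair in the same order, which is what lets the move_group path below hold gctx->mutex and ctx->mutex simultaneously.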
@@ -7262,7 +7436,7 @@ SYSCALL_DEFINE5(perf_event_open,
7262 struct perf_event *group_leader = NULL, *output_event = NULL; 7436 struct perf_event *group_leader = NULL, *output_event = NULL;
7263 struct perf_event *event, *sibling; 7437 struct perf_event *event, *sibling;
7264 struct perf_event_attr attr; 7438 struct perf_event_attr attr;
7265 struct perf_event_context *ctx; 7439 struct perf_event_context *ctx, *uninitialized_var(gctx);
7266 struct file *event_file = NULL; 7440 struct file *event_file = NULL;
7267 struct fd group = {NULL, 0}; 7441 struct fd group = {NULL, 0};
7268 struct task_struct *task = NULL; 7442 struct task_struct *task = NULL;
@@ -7420,7 +7594,19 @@ SYSCALL_DEFINE5(perf_event_open,
7420 * task or CPU context: 7594 * task or CPU context:
7421 */ 7595 */
7422 if (move_group) { 7596 if (move_group) {
7423 if (group_leader->ctx->type != ctx->type) 7597 /*
7598 * Make sure we're both on the same task, or both
7599 * per-cpu events.
7600 */
7601 if (group_leader->ctx->task != ctx->task)
7602 goto err_context;
7603
7604 /*
7605 * Make sure we're both events for the same CPU;
7606 * grouping events for different CPUs is broken, since
7607 * you can never concurrently schedule them anyhow.
7608 */
7609 if (group_leader->cpu != event->cpu)
7424 goto err_context; 7610 goto err_context;
7425 } else { 7611 } else {
7426 if (group_leader->ctx != ctx) 7612 if (group_leader->ctx != ctx)
@@ -7448,43 +7634,68 @@ SYSCALL_DEFINE5(perf_event_open,
7448 } 7634 }
7449 7635
7450 if (move_group) { 7636 if (move_group) {
7451 struct perf_event_context *gctx = group_leader->ctx; 7637 gctx = group_leader->ctx;
7452
7453 mutex_lock(&gctx->mutex);
7454 perf_remove_from_context(group_leader, false);
7455 7638
7456 /* 7639 /*
7457 * Removing from the context ends up with disabled 7640 * See perf_event_ctx_lock() for comments on the details
7458 * event. What we want here is the event in the initial 7641 * of swizzling perf_event::ctx.
7459 * startup state, ready to be added into the new context.
7460 */ 7642 */
7461 perf_event__state_init(group_leader); 7643 mutex_lock_double(&gctx->mutex, &ctx->mutex);
7644
7645 perf_remove_from_context(group_leader, false);
7646
7462 list_for_each_entry(sibling, &group_leader->sibling_list, 7647 list_for_each_entry(sibling, &group_leader->sibling_list,
7463 group_entry) { 7648 group_entry) {
7464 perf_remove_from_context(sibling, false); 7649 perf_remove_from_context(sibling, false);
7465 perf_event__state_init(sibling);
7466 put_ctx(gctx); 7650 put_ctx(gctx);
7467 } 7651 }
7468 mutex_unlock(&gctx->mutex); 7652 } else {
7469 put_ctx(gctx); 7653 mutex_lock(&ctx->mutex);
7470 } 7654 }
7471 7655
7472 WARN_ON_ONCE(ctx->parent_ctx); 7656 WARN_ON_ONCE(ctx->parent_ctx);
7473 mutex_lock(&ctx->mutex);
7474 7657
7475 if (move_group) { 7658 if (move_group) {
7659 /*
7660 * Wait for everybody to stop referencing the events through
7661 * the old lists, before installing them on the new lists.
7662 */
7476 synchronize_rcu(); 7663 synchronize_rcu();
7477 perf_install_in_context(ctx, group_leader, group_leader->cpu); 7664
7478 get_ctx(ctx); 7665 /*
7666 * Install the group siblings before the group leader.
7667 *
7668 * Because a group leader will try and install the entire group
7669 * (through the sibling list, which is still intact), we can
7670 * end up with siblings installed in the wrong context.
7671 *
7672 * By installing siblings first we NO-OP because they're not
7673 * reachable through the group lists.
7674 */
7479 list_for_each_entry(sibling, &group_leader->sibling_list, 7675 list_for_each_entry(sibling, &group_leader->sibling_list,
7480 group_entry) { 7676 group_entry) {
7677 perf_event__state_init(sibling);
7481 perf_install_in_context(ctx, sibling, sibling->cpu); 7678 perf_install_in_context(ctx, sibling, sibling->cpu);
7482 get_ctx(ctx); 7679 get_ctx(ctx);
7483 } 7680 }
7681
7682 /*
7683 * Removing from the context ends up with disabled
7684 * event. What we want here is the event in the initial
7685 * startup state, ready to be added into the new context.
7686 */
7687 perf_event__state_init(group_leader);
7688 perf_install_in_context(ctx, group_leader, group_leader->cpu);
7689 get_ctx(ctx);
7484 } 7690 }
7485 7691
7486 perf_install_in_context(ctx, event, event->cpu); 7692 perf_install_in_context(ctx, event, event->cpu);
7487 perf_unpin_context(ctx); 7693 perf_unpin_context(ctx);
7694
7695 if (move_group) {
7696 mutex_unlock(&gctx->mutex);
7697 put_ctx(gctx);
7698 }
7488 mutex_unlock(&ctx->mutex); 7699 mutex_unlock(&ctx->mutex);
7489 7700
7490 put_online_cpus(); 7701 put_online_cpus();
@@ -7592,7 +7803,11 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7592 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 7803 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7593 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 7804 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7594 7805
7595 mutex_lock(&src_ctx->mutex); 7806 /*
7807 * See perf_event_ctx_lock() for comments on the details
7808 * of swizzling perf_event::ctx.
7809 */
7810 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
7596 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 7811 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7597 event_entry) { 7812 event_entry) {
7598 perf_remove_from_context(event, false); 7813 perf_remove_from_context(event, false);
@@ -7600,11 +7815,36 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7600 put_ctx(src_ctx); 7815 put_ctx(src_ctx);
7601 list_add(&event->migrate_entry, &events); 7816 list_add(&event->migrate_entry, &events);
7602 } 7817 }
7603 mutex_unlock(&src_ctx->mutex);
7604 7818
7819 /*
7820 * Wait for the events to quiesce before re-instating them.
7821 */
7605 synchronize_rcu(); 7822 synchronize_rcu();
7606 7823
7607 mutex_lock(&dst_ctx->mutex); 7824 /*
7825 * Re-instate events in 2 passes.
7826 *
7827 * Skip over group leaders and only install siblings on this first
7828 * pass; siblings will not get enabled without a leader. However, a
7829 * leader will enable its siblings, even if those are still on the old
7830 * context.
7831 */
7832 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7833 if (event->group_leader == event)
7834 continue;
7835
7836 list_del(&event->migrate_entry);
7837 if (event->state >= PERF_EVENT_STATE_OFF)
7838 event->state = PERF_EVENT_STATE_INACTIVE;
7839 account_event_cpu(event, dst_cpu);
7840 perf_install_in_context(dst_ctx, event, dst_cpu);
7841 get_ctx(dst_ctx);
7842 }
7843
7844 /*
7845 * Once all the siblings are set up properly, install the group
7846 * to make it go.
7847 */
7608 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 7848 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7609 list_del(&event->migrate_entry); 7849 list_del(&event->migrate_entry);
7610 if (event->state >= PERF_EVENT_STATE_OFF) 7850 if (event->state >= PERF_EVENT_STATE_OFF)
@@ -7614,6 +7854,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7614 get_ctx(dst_ctx); 7854 get_ctx(dst_ctx);
7615 } 7855 }
7616 mutex_unlock(&dst_ctx->mutex); 7856 mutex_unlock(&dst_ctx->mutex);
7857 mutex_unlock(&src_ctx->mutex);
7617} 7858}
7618EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); 7859EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
7619 7860
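The two-pass re-instate loop above exists because installing a group leader has a side effect: the leader walks its sibling list and schedules the whole group. If a leader went first, it could drag in siblings that are still queued on the old context. Reduced to its control flow, with hypothetical types and 'installed' standing in for perf_install_in_context():

    struct ev {
            struct ev *leader;            /* leader == self for group leaders */
            struct ev *next;
            int installed;
    };

    static void install_all(struct ev *list)
    {
            struct ev *e;

            /* pass 1: siblings only; they stay idle without their leader */
            for (e = list; e; e = e->next)
                    if (e->leader != e)
                            e->installed = 1;

            /* pass 2: leaders; any group walk now finds its siblings
             * already sitting in the destination context */
            for (e = list; e; e = e->next)
                    if (e->leader == e)
                            e->installed = 1;
    }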
@@ -7800,14 +8041,19 @@ static void perf_free_event(struct perf_event *event,
7800 8041
7801 put_event(parent); 8042 put_event(parent);
7802 8043
8044 raw_spin_lock_irq(&ctx->lock);
7803 perf_group_detach(event); 8045 perf_group_detach(event);
7804 list_del_event(event, ctx); 8046 list_del_event(event, ctx);
8047 raw_spin_unlock_irq(&ctx->lock);
7805 free_event(event); 8048 free_event(event);
7806} 8049}
7807 8050
7808/* 8051/*
7809 * free an unexposed, unused context as created by inheritance by 8052 * Free an unexposed, unused context as created by inheritance by
7810 * perf_event_init_task below, used by fork() in case of failure. 8053 * perf_event_init_task below, used by fork() in case of failure.
8054 *
8055 * Not all locks are strictly required, but take them anyway to be nice and
8056 * help out with the lockdep assertions.
7811 */ 8057 */
7812void perf_event_free_task(struct task_struct *task) 8058void perf_event_free_task(struct task_struct *task)
7813{ 8059{
@@ -8126,7 +8372,7 @@ static void __init perf_event_init_all_cpus(void)
8126 for_each_possible_cpu(cpu) { 8372 for_each_possible_cpu(cpu) {
8127 swhash = &per_cpu(swevent_htable, cpu); 8373 swhash = &per_cpu(swevent_htable, cpu);
8128 mutex_init(&swhash->hlist_mutex); 8374 mutex_init(&swhash->hlist_mutex);
8129 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); 8375 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
8130 } 8376 }
8131} 8377}
8132 8378
@@ -8147,22 +8393,11 @@ static void perf_event_init_cpu(int cpu)
8147} 8393}
8148 8394
8149#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC 8395#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
8150static void perf_pmu_rotate_stop(struct pmu *pmu)
8151{
8152 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
8153
8154 WARN_ON(!irqs_disabled());
8155
8156 list_del_init(&cpuctx->rotation_list);
8157}
8158
8159static void __perf_event_exit_context(void *__info) 8396static void __perf_event_exit_context(void *__info)
8160{ 8397{
8161 struct remove_event re = { .detach_group = true }; 8398 struct remove_event re = { .detach_group = true };
8162 struct perf_event_context *ctx = __info; 8399 struct perf_event_context *ctx = __info;
8163 8400
8164 perf_pmu_rotate_stop(ctx->pmu);
8165
8166 rcu_read_lock(); 8401 rcu_read_lock();
8167 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) 8402 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
8168 __perf_remove_from_context(&re); 8403 __perf_remove_from_context(&re);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 146a5792b1d2..eadb95ce7aac 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -13,12 +13,13 @@
13#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/circ_buf.h> 15#include <linux/circ_buf.h>
16#include <linux/poll.h>
16 17
17#include "internal.h" 18#include "internal.h"
18 19
19static void perf_output_wakeup(struct perf_output_handle *handle) 20static void perf_output_wakeup(struct perf_output_handle *handle)
20{ 21{
21 atomic_set(&handle->rb->poll, POLL_IN); 22 atomic_set(&handle->rb->poll, POLLIN);
22 23
23 handle->event->pending_wakeup = 1; 24 handle->event->pending_wakeup = 1;
24 irq_work_queue(&handle->event->pending); 25 irq_work_queue(&handle->event->pending);
diff --git a/kernel/futex.c b/kernel/futex.c
index 63678b573d61..4eeb63de7e54 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2258,7 +2258,7 @@ static long futex_wait_restart(struct restart_block *restart)
2258 * if there are waiters then it will block, it does PI, etc. (Due to 2258 * if there are waiters then it will block, it does PI, etc. (Due to
2259 * races the kernel might see a 0 value of the futex too.) 2259 * races the kernel might see a 0 value of the futex too.)
2260 */ 2260 */
2261static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect, 2261static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2262 ktime_t *time, int trylock) 2262 ktime_t *time, int trylock)
2263{ 2263{
2264 struct hrtimer_sleeper timeout, *to = NULL; 2264 struct hrtimer_sleeper timeout, *to = NULL;
@@ -2953,11 +2953,11 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2953 case FUTEX_WAKE_OP: 2953 case FUTEX_WAKE_OP:
2954 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); 2954 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2955 case FUTEX_LOCK_PI: 2955 case FUTEX_LOCK_PI:
2956 return futex_lock_pi(uaddr, flags, val, timeout, 0); 2956 return futex_lock_pi(uaddr, flags, timeout, 0);
2957 case FUTEX_UNLOCK_PI: 2957 case FUTEX_UNLOCK_PI:
2958 return futex_unlock_pi(uaddr, flags); 2958 return futex_unlock_pi(uaddr, flags);
2959 case FUTEX_TRYLOCK_PI: 2959 case FUTEX_TRYLOCK_PI:
2960 return futex_lock_pi(uaddr, flags, 0, timeout, 1); 2960 return futex_lock_pi(uaddr, flags, NULL, 1);
2961 case FUTEX_WAIT_REQUEUE_PI: 2961 case FUTEX_WAIT_REQUEUE_PI:
2962 val3 = FUTEX_BITSET_MATCH_ANY; 2962 val3 = FUTEX_BITSET_MATCH_ANY;
2963 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, 2963 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 8541bfdfd232..4ca8eb151975 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -1,5 +1,5 @@
1 1
2obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o 2obj-y += mutex.o semaphore.o rwsem.o
3 3
4ifdef CONFIG_FUNCTION_TRACER 4ifdef CONFIG_FUNCTION_TRACER
5CFLAGS_REMOVE_lockdep.o = -pg 5CFLAGS_REMOVE_lockdep.o = -pg
@@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y)
14obj-$(CONFIG_LOCKDEP) += lockdep_proc.o 14obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
15endif 15endif
16obj-$(CONFIG_SMP) += spinlock.o 16obj-$(CONFIG_SMP) += spinlock.o
17obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
17obj-$(CONFIG_SMP) += lglock.o 18obj-$(CONFIG_SMP) += lglock.o
18obj-$(CONFIG_PROVE_LOCKING) += spinlock.o 19obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
19obj-$(CONFIG_RT_MUTEXES) += rtmutex.o 20obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 4d60986fcbee..d1fe2ba5bac9 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -108,20 +108,4 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
108 arch_mcs_spin_unlock_contended(&next->locked); 108 arch_mcs_spin_unlock_contended(&next->locked);
109} 109}
110 110
111/*
112 * Cancellable version of the MCS lock above.
113 *
114 * Intended for adaptive spinning of sleeping locks:
115 * mutex_lock()/rwsem_down_{read,write}() etc.
116 */
117
118struct optimistic_spin_node {
119 struct optimistic_spin_node *next, *prev;
120 int locked; /* 1 if lock acquired */
121 int cpu; /* encoded CPU # value */
122};
123
124extern bool osq_lock(struct optimistic_spin_queue *lock);
125extern void osq_unlock(struct optimistic_spin_queue *lock);
126
127#endif /* __LINUX_MCS_SPINLOCK_H */ 111#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 5b49f652a3cf..94674e5919cb 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
147} 147}
148 148
149/* 149/*
150 * after acquiring lock with fastpath or when we lost out in contested 150 * After acquiring lock with fastpath or when we lost out in contested
151 * slowpath, set ctx and wake up any waiters so they can recheck. 151 * slowpath, set ctx and wake up any waiters so they can recheck.
152 * 152 *
153 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set, 153 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,19 +191,32 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
191 spin_unlock_mutex(&lock->base.wait_lock, flags); 191 spin_unlock_mutex(&lock->base.wait_lock, flags);
192} 192}
193 193
194
195#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
196/* 194/*
197 * In order to avoid a stampede of mutex spinners from acquiring the mutex 195 * After acquiring the lock in the slowpath, set ctx and wake up any
198 * more or less simultaneously, the spinners need to acquire a MCS lock 196 * waiters so they can recheck.
199 * first before spinning on the owner field.
200 * 197 *
198 * Callers must hold the mutex wait_lock.
201 */ 199 */
200static __always_inline void
201ww_mutex_set_context_slowpath(struct ww_mutex *lock,
202 struct ww_acquire_ctx *ctx)
203{
204 struct mutex_waiter *cur;
202 205
203/* 206 ww_mutex_lock_acquired(lock, ctx);
204 * Mutex spinning code migrated from kernel/sched/core.c 207 lock->ctx = ctx;
205 */ 208
209 /*
210 * Give any possible sleeping processes the chance to wake up,
211 * so they can recheck if they have to back off.
212 */
213 list_for_each_entry(cur, &lock->base.wait_list, list) {
214 debug_mutex_wake_waiter(&lock->base, cur);
215 wake_up_process(cur->task);
216 }
217}
206 218
219#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
207static inline bool owner_running(struct mutex *lock, struct task_struct *owner) 220static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
208{ 221{
209 if (lock->owner != owner) 222 if (lock->owner != owner)
@@ -307,6 +320,11 @@ static bool mutex_optimistic_spin(struct mutex *lock,
307 if (!mutex_can_spin_on_owner(lock)) 320 if (!mutex_can_spin_on_owner(lock))
308 goto done; 321 goto done;
309 322
323 /*
324 * In order to avoid a stampede of mutex spinners trying to
325 * acquire the mutex all at once, the spinners need to take a
326 * MCS (queued) lock first before spinning on the owner field.
327 */
310 if (!osq_lock(&lock->osq)) 328 if (!osq_lock(&lock->osq))
311 goto done; 329 goto done;
312 330
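The comment above captures why the optimistic-spin path queues its spinners: with a plain spin on lock->owner, every waiter hammers the same cache line, and an unlock triggers a stampede. An MCS-style queue gives each waiter its own node to spin on and hands the lock off node to node. A generic C11 sketch of such a queue lock follows; the kernel's osq variant additionally encodes per-CPU node indices and can unqueue a spinner that must reschedule, which this sketch omits:

    #include <stdatomic.h>
    #include <stddef.h>

    struct mcs_node {
            struct mcs_node *_Atomic next;
            atomic_bool locked;
    };

    static struct mcs_node *_Atomic tail;

    static void mcs_lock(struct mcs_node *me)
    {
            struct mcs_node *prev;

            atomic_store(&me->next, NULL);
            atomic_store(&me->locked, false);
            prev = atomic_exchange(&tail, me);      /* join the queue */
            if (!prev)
                    return;                         /* queue was empty: ours */
            atomic_store(&prev->next, me);
            while (!atomic_load(&me->locked))
                    ;                               /* spin on our own node */
    }

    static void mcs_unlock(struct mcs_node *me)
    {
            struct mcs_node *next = atomic_load(&me->next);

            if (!next) {
                    struct mcs_node *expected = me;
                    if (atomic_compare_exchange_strong(&tail, &expected, NULL))
                            return;                 /* nobody queued behind us */
                    while (!(next = atomic_load(&me->next)))
                            ;                       /* successor mid-enqueue */
            }
            atomic_store(&next->locked, true);      /* hand off to successor */
    }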
@@ -469,7 +487,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
469EXPORT_SYMBOL(ww_mutex_unlock); 487EXPORT_SYMBOL(ww_mutex_unlock);
470 488
471static inline int __sched 489static inline int __sched
472__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) 490__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
473{ 491{
474 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 492 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
475 struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); 493 struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
@@ -557,7 +575,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
557 } 575 }
558 576
559 if (use_ww_ctx && ww_ctx->acquired > 0) { 577 if (use_ww_ctx && ww_ctx->acquired > 0) {
560 ret = __mutex_lock_check_stamp(lock, ww_ctx); 578 ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
561 if (ret) 579 if (ret)
562 goto err; 580 goto err;
563 } 581 }
@@ -569,6 +587,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
569 schedule_preempt_disabled(); 587 schedule_preempt_disabled();
570 spin_lock_mutex(&lock->wait_lock, flags); 588 spin_lock_mutex(&lock->wait_lock, flags);
571 } 589 }
590 __set_task_state(task, TASK_RUNNING);
591
572 mutex_remove_waiter(lock, &waiter, current_thread_info()); 592 mutex_remove_waiter(lock, &waiter, current_thread_info());
573 /* set it to 0 if there are no waiters left: */ 593 /* set it to 0 if there are no waiters left: */
574 if (likely(list_empty(&lock->wait_list))) 594 if (likely(list_empty(&lock->wait_list)))
@@ -582,23 +602,7 @@ skip_wait:
582 602
583 if (use_ww_ctx) { 603 if (use_ww_ctx) {
584 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 604 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
585 struct mutex_waiter *cur; 605 ww_mutex_set_context_slowpath(ww, ww_ctx);
586
587 /*
588 * This branch gets optimized out for the common case,
589 * and is only important for ww_mutex_lock.
590 */
591 ww_mutex_lock_acquired(ww, ww_ctx);
592 ww->ctx = ww_ctx;
593
594 /*
595 * Give any possible sleeping processes the chance to wake up,
596 * so they can recheck if they have to back off.
597 */
598 list_for_each_entry(cur, &lock->wait_list, list) {
599 debug_mutex_wake_waiter(lock, cur);
600 wake_up_process(cur->task);
601 }
602 } 606 }
603 607
604 spin_unlock_mutex(&lock->wait_lock, flags); 608 spin_unlock_mutex(&lock->wait_lock, flags);
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/osq_lock.c
index 9887a905a762..c112d00341b0 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/osq_lock.c
@@ -1,8 +1,6 @@
1#include <linux/percpu.h> 1#include <linux/percpu.h>
2#include <linux/sched.h> 2#include <linux/sched.h>
3#include "mcs_spinlock.h" 3#include <linux/osq_lock.h>
4
5#ifdef CONFIG_SMP
6 4
7/* 5/*
8 * An MCS-like lock especially tailored for optimistic spinning for sleeping 6 * An MCS-like lock especially tailored for optimistic spinning for sleeping
@@ -111,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
111 * cmpxchg in an attempt to undo our queueing. 109 * cmpxchg in an attempt to undo our queueing.
112 */ 110 */
113 111
114 while (!smp_load_acquire(&node->locked)) { 112 while (!ACCESS_ONCE(node->locked)) {
115 /* 113 /*
116 * If we need to reschedule bail... so we can block. 114 * If we need to reschedule bail... so we can block.
117 */ 115 */
@@ -203,6 +201,3 @@ void osq_unlock(struct optimistic_spin_queue *lock)
203 if (next) 201 if (next)
204 ACCESS_ONCE(next->locked) = 1; 202 ACCESS_ONCE(next->locked) = 1;
205} 203}
206
207#endif
208
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7c98873a3077..3059bc2f022d 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1130,6 +1130,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
1130 set_current_state(state); 1130 set_current_state(state);
1131 } 1131 }
1132 1132
1133 __set_current_state(TASK_RUNNING);
1133 return ret; 1134 return ret;
1134} 1135}
1135 1136
@@ -1188,10 +1189,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
1188 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); 1189 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
1189 1190
1190 if (likely(!ret)) 1191 if (likely(!ret))
1192 /* sleep on the mutex */
1191 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); 1193 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
1192 1194
1193 set_current_state(TASK_RUNNING);
1194
1195 if (unlikely(ret)) { 1195 if (unlikely(ret)) {
1196 remove_waiter(lock, &waiter); 1196 remove_waiter(lock, &waiter);
1197 rt_mutex_handle_deadlock(ret, chwalk, &waiter); 1197 rt_mutex_handle_deadlock(ret, chwalk, &waiter);
@@ -1626,10 +1626,9 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1626 1626
1627 set_current_state(TASK_INTERRUPTIBLE); 1627 set_current_state(TASK_INTERRUPTIBLE);
1628 1628
1629 /* sleep on the mutex */
1629 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); 1630 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1630 1631
1631 set_current_state(TASK_RUNNING);
1632
1633 if (unlikely(ret)) 1632 if (unlikely(ret))
1634 remove_waiter(lock, waiter); 1633 remove_waiter(lock, waiter);
1635 1634
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2c93571162cb..2555ae15ec14 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -154,7 +154,7 @@ void __sched __down_read(struct rw_semaphore *sem)
154 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 154 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
155 } 155 }
156 156
157 tsk->state = TASK_RUNNING; 157 __set_task_state(tsk, TASK_RUNNING);
158 out: 158 out:
159 ; 159 ;
160} 160}
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 7628c3fc37ca..2f7cc4076f50 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -242,8 +242,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
242 schedule(); 242 schedule();
243 } 243 }
244 244
245 tsk->state = TASK_RUNNING; 245 __set_task_state(tsk, TASK_RUNNING);
246
247 return sem; 246 return sem;
248} 247}
249EXPORT_SYMBOL(rwsem_down_read_failed); 248EXPORT_SYMBOL(rwsem_down_read_failed);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4803da6eab62..ae9fc7cc360e 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
402} 402}
403EXPORT_SYMBOL_GPL(raw_notifier_call_chain); 403EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
404 404
405#ifdef CONFIG_SRCU
405/* 406/*
406 * SRCU notifier chain routines. Registration and unregistration 407 * SRCU notifier chain routines. Registration and unregistration
407 * use a mutex, and call_chain is synchronized by SRCU (no locks). 408 * use a mutex, and call_chain is synchronized by SRCU (no locks).
@@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh)
528} 529}
529EXPORT_SYMBOL_GPL(srcu_init_notifier_head); 530EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
530 531
532#endif /* CONFIG_SRCU */
533
531static ATOMIC_NOTIFIER_HEAD(die_chain); 534static ATOMIC_NOTIFIER_HEAD(die_chain);
532 535
533int notrace notify_die(enum die_val val, const char *str, 536int notrace notify_die(enum die_val val, const char *str,
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 48b28d387c7f..7e01f78f0417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -251,6 +251,7 @@ config APM_EMULATION
251 251
252config PM_OPP 252config PM_OPP
253 bool 253 bool
254 select SRCU
254 ---help--- 255 ---help---
255 SOCs have a standard set of tuples consisting of frequency and 256 SOCs have a standard set of tuples consisting of frequency and
256 voltage pairs that the device will support per voltage domain. This 257 voltage pairs that the device will support per voltage domain. This
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index e6fae503d1bc..50a808424b06 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,4 +1,5 @@
1obj-y += update.o srcu.o 1obj-y += update.o
2obj-$(CONFIG_SRCU) += srcu.o
2obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o 3obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
3obj-$(CONFIG_TREE_RCU) += tree.o 4obj-$(CONFIG_TREE_RCU) += tree.o
4obj-$(CONFIG_PREEMPT_RCU) += tree.o 5obj-$(CONFIG_PREEMPT_RCU) += tree.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 07bb02eda844..80adef7d4c3d 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void);
137 137
138void rcu_early_boot_tests(void); 138void rcu_early_boot_tests(void);
139 139
140/*
141 * This function really isn't for public consumption, but RCU is special in
142 * that context switches can allow the state machine to make progress.
143 */
144extern void resched_cpu(int cpu);
145
140#endif /* __LINUX_RCU_H */ 146#endif /* __LINUX_RCU_H */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 4d559baf06e0..30d42aa55d83 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -244,7 +244,8 @@ struct rcu_torture_ops {
244 int (*readlock)(void); 244 int (*readlock)(void);
245 void (*read_delay)(struct torture_random_state *rrsp); 245 void (*read_delay)(struct torture_random_state *rrsp);
246 void (*readunlock)(int idx); 246 void (*readunlock)(int idx);
247 int (*completed)(void); 247 unsigned long (*started)(void);
248 unsigned long (*completed)(void);
248 void (*deferred_free)(struct rcu_torture *p); 249 void (*deferred_free)(struct rcu_torture *p);
249 void (*sync)(void); 250 void (*sync)(void);
250 void (*exp_sync)(void); 251 void (*exp_sync)(void);
@@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU)
296 rcu_read_unlock(); 297 rcu_read_unlock();
297} 298}
298 299
299static int rcu_torture_completed(void)
300{
301 return rcu_batches_completed();
302}
303
304/* 300/*
305 * Update callback in the pipe. This should be invoked after a grace period. 301 * Update callback in the pipe. This should be invoked after a grace period.
306 */ 302 */
@@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p)
356 cur_ops->deferred_free(rp); 352 cur_ops->deferred_free(rp);
357} 353}
358 354
359static int rcu_no_completed(void) 355static unsigned long rcu_no_completed(void)
360{ 356{
361 return 0; 357 return 0;
362} 358}
@@ -377,7 +373,8 @@ static struct rcu_torture_ops rcu_ops = {
377 .readlock = rcu_torture_read_lock, 373 .readlock = rcu_torture_read_lock,
378 .read_delay = rcu_read_delay, 374 .read_delay = rcu_read_delay,
379 .readunlock = rcu_torture_read_unlock, 375 .readunlock = rcu_torture_read_unlock,
380 .completed = rcu_torture_completed, 376 .started = rcu_batches_started,
377 .completed = rcu_batches_completed,
381 .deferred_free = rcu_torture_deferred_free, 378 .deferred_free = rcu_torture_deferred_free,
382 .sync = synchronize_rcu, 379 .sync = synchronize_rcu,
383 .exp_sync = synchronize_rcu_expedited, 380 .exp_sync = synchronize_rcu_expedited,
@@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
407 rcu_read_unlock_bh(); 404 rcu_read_unlock_bh();
408} 405}
409 406
410static int rcu_bh_torture_completed(void)
411{
412 return rcu_batches_completed_bh();
413}
414
415static void rcu_bh_torture_deferred_free(struct rcu_torture *p) 407static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
416{ 408{
417 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); 409 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
@@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
423 .readlock = rcu_bh_torture_read_lock, 415 .readlock = rcu_bh_torture_read_lock,
424 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 416 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
425 .readunlock = rcu_bh_torture_read_unlock, 417 .readunlock = rcu_bh_torture_read_unlock,
426 .completed = rcu_bh_torture_completed, 418 .started = rcu_batches_started_bh,
419 .completed = rcu_batches_completed_bh,
427 .deferred_free = rcu_bh_torture_deferred_free, 420 .deferred_free = rcu_bh_torture_deferred_free,
428 .sync = synchronize_rcu_bh, 421 .sync = synchronize_rcu_bh,
429 .exp_sync = synchronize_rcu_bh_expedited, 422 .exp_sync = synchronize_rcu_bh_expedited,
@@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
466 .readlock = rcu_torture_read_lock, 459 .readlock = rcu_torture_read_lock,
467 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 460 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
468 .readunlock = rcu_torture_read_unlock, 461 .readunlock = rcu_torture_read_unlock,
462 .started = rcu_no_completed,
469 .completed = rcu_no_completed, 463 .completed = rcu_no_completed,
470 .deferred_free = rcu_busted_torture_deferred_free, 464 .deferred_free = rcu_busted_torture_deferred_free,
471 .sync = synchronize_rcu_busted, 465 .sync = synchronize_rcu_busted,
@@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
510 srcu_read_unlock(&srcu_ctl, idx); 504 srcu_read_unlock(&srcu_ctl, idx);
511} 505}
512 506
513static int srcu_torture_completed(void) 507static unsigned long srcu_torture_completed(void)
514{ 508{
515 return srcu_batches_completed(&srcu_ctl); 509 return srcu_batches_completed(&srcu_ctl);
516} 510}
@@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
564 .readlock = srcu_torture_read_lock, 558 .readlock = srcu_torture_read_lock,
565 .read_delay = srcu_read_delay, 559 .read_delay = srcu_read_delay,
566 .readunlock = srcu_torture_read_unlock, 560 .readunlock = srcu_torture_read_unlock,
561 .started = NULL,
567 .completed = srcu_torture_completed, 562 .completed = srcu_torture_completed,
568 .deferred_free = srcu_torture_deferred_free, 563 .deferred_free = srcu_torture_deferred_free,
569 .sync = srcu_torture_synchronize, 564 .sync = srcu_torture_synchronize,
@@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = {
600 .readlock = sched_torture_read_lock, 595 .readlock = sched_torture_read_lock,
601 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 596 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
602 .readunlock = sched_torture_read_unlock, 597 .readunlock = sched_torture_read_unlock,
603 .completed = rcu_no_completed, 598 .started = rcu_batches_started_sched,
599 .completed = rcu_batches_completed_sched,
604 .deferred_free = rcu_sched_torture_deferred_free, 600 .deferred_free = rcu_sched_torture_deferred_free,
605 .sync = synchronize_sched, 601 .sync = synchronize_sched,
606 .exp_sync = synchronize_sched_expedited, 602 .exp_sync = synchronize_sched_expedited,
@@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
638 .readlock = tasks_torture_read_lock, 634 .readlock = tasks_torture_read_lock,
639 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 635 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
640 .readunlock = tasks_torture_read_unlock, 636 .readunlock = tasks_torture_read_unlock,
637 .started = rcu_no_completed,
641 .completed = rcu_no_completed, 638 .completed = rcu_no_completed,
642 .deferred_free = rcu_tasks_torture_deferred_free, 639 .deferred_free = rcu_tasks_torture_deferred_free,
643 .sync = synchronize_rcu_tasks, 640 .sync = synchronize_rcu_tasks,
@@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void)
1015static void rcu_torture_timer(unsigned long unused) 1012static void rcu_torture_timer(unsigned long unused)
1016{ 1013{
1017 int idx; 1014 int idx;
1018 int completed; 1015 unsigned long started;
1019 int completed_end; 1016 unsigned long completed;
1020 static DEFINE_TORTURE_RANDOM(rand); 1017 static DEFINE_TORTURE_RANDOM(rand);
1021 static DEFINE_SPINLOCK(rand_lock); 1018 static DEFINE_SPINLOCK(rand_lock);
1022 struct rcu_torture *p; 1019 struct rcu_torture *p;
@@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
1024 unsigned long long ts; 1021 unsigned long long ts;
1025 1022
1026 idx = cur_ops->readlock(); 1023 idx = cur_ops->readlock();
1027 completed = cur_ops->completed(); 1024 if (cur_ops->started)
1025 started = cur_ops->started();
1026 else
1027 started = cur_ops->completed();
1028 ts = rcu_trace_clock_local(); 1028 ts = rcu_trace_clock_local();
1029 p = rcu_dereference_check(rcu_torture_current, 1029 p = rcu_dereference_check(rcu_torture_current,
1030 rcu_read_lock_bh_held() || 1030 rcu_read_lock_bh_held() ||
@@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
1047 /* Should not happen, but... */ 1047 /* Should not happen, but... */
1048 pipe_count = RCU_TORTURE_PIPE_LEN; 1048 pipe_count = RCU_TORTURE_PIPE_LEN;
1049 } 1049 }
1050 completed_end = cur_ops->completed(); 1050 completed = cur_ops->completed();
1051 if (pipe_count > 1) { 1051 if (pipe_count > 1) {
1052 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, 1052 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
1053 completed, completed_end); 1053 started, completed);
1054 rcutorture_trace_dump(); 1054 rcutorture_trace_dump();
1055 } 1055 }
1056 __this_cpu_inc(rcu_torture_count[pipe_count]); 1056 __this_cpu_inc(rcu_torture_count[pipe_count]);
1057 completed = completed_end - completed; 1057 completed = completed - started;
1058 if (cur_ops->started)
1059 completed++;
1058 if (completed > RCU_TORTURE_PIPE_LEN) { 1060 if (completed > RCU_TORTURE_PIPE_LEN) {
1059 /* Should not happen, but... */ 1061 /* Should not happen, but... */
1060 completed = RCU_TORTURE_PIPE_LEN; 1062 completed = RCU_TORTURE_PIPE_LEN;
@@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
1073static int 1075static int
1074rcu_torture_reader(void *arg) 1076rcu_torture_reader(void *arg)
1075{ 1077{
1076 int completed; 1078 unsigned long started;
1077 int completed_end; 1079 unsigned long completed;
1078 int idx; 1080 int idx;
1079 DEFINE_TORTURE_RANDOM(rand); 1081 DEFINE_TORTURE_RANDOM(rand);
1080 struct rcu_torture *p; 1082 struct rcu_torture *p;
@@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg)
1093 mod_timer(&t, jiffies + 1); 1095 mod_timer(&t, jiffies + 1);
1094 } 1096 }
1095 idx = cur_ops->readlock(); 1097 idx = cur_ops->readlock();
1096 completed = cur_ops->completed(); 1098 if (cur_ops->started)
1099 started = cur_ops->started();
1100 else
1101 started = cur_ops->completed();
1097 ts = rcu_trace_clock_local(); 1102 ts = rcu_trace_clock_local();
1098 p = rcu_dereference_check(rcu_torture_current, 1103 p = rcu_dereference_check(rcu_torture_current,
1099 rcu_read_lock_bh_held() || 1104 rcu_read_lock_bh_held() ||
@@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg)
1114 /* Should not happen, but... */ 1119 /* Should not happen, but... */
1115 pipe_count = RCU_TORTURE_PIPE_LEN; 1120 pipe_count = RCU_TORTURE_PIPE_LEN;
1116 } 1121 }
1117 completed_end = cur_ops->completed(); 1122 completed = cur_ops->completed();
1118 if (pipe_count > 1) { 1123 if (pipe_count > 1) {
1119 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1124 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1120 ts, completed, completed_end); 1125 ts, started, completed);
1121 rcutorture_trace_dump(); 1126 rcutorture_trace_dump();
1122 } 1127 }
1123 __this_cpu_inc(rcu_torture_count[pipe_count]); 1128 __this_cpu_inc(rcu_torture_count[pipe_count]);
1124 completed = completed_end - completed; 1129 completed = completed - started;
1130 if (cur_ops->started)
1131 completed++;
1125 if (completed > RCU_TORTURE_PIPE_LEN) { 1132 if (completed > RCU_TORTURE_PIPE_LEN) {
1126 /* Should not happen, but... */ 1133 /* Should not happen, but... */
1127 completed = RCU_TORTURE_PIPE_LEN; 1134 completed = RCU_TORTURE_PIPE_LEN;
@@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg)
1420 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 1427 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
1421 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 1428 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1422 n_rcu_torture_barrier_error++; 1429 n_rcu_torture_barrier_error++;
1430 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1431 atomic_read(&barrier_cbs_invoked),
1432 n_barrier_cbs);
1423 WARN_ON_ONCE(1); 1433 WARN_ON_ONCE(1);
1424 } 1434 }
1425 n_barrier_successes++; 1435 n_barrier_successes++;
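With the ->started()/->completed() split, each reader brackets its critical section by snapshotting the count of grace periods started beforehand and the count completed afterward. The arithmetic that both reader loops above now share can be summarized as follows (the helper is illustrative, not part of the patch):

	/* Illustrative: grace periods known to have elapsed around a reader. */
	static unsigned long gps_elapsed(struct rcu_torture_ops *ops,
					 unsigned long started,
					 unsigned long completed)
	{
		unsigned long n = completed - started;	/* unsigned: wrap-safe */

		/*
		 * ->started() counts grace periods *begun* and so runs ahead
		 * of ->completed(); a grace period already in flight at the
		 * first snapshot may have finished by the second, hence the
		 * extra count when a ->started() method is available.
		 */
		if (ops->started)
			n++;
		return n;
	}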
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index e037f3eb2f7b..445bf8ffe3fb 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier);
546 * Report the number of batches, correlated with, but not necessarily 546 * Report the number of batches, correlated with, but not necessarily
547 * precisely the same as, the number of grace periods that have elapsed. 547 * precisely the same as, the number of grace periods that have elapsed.
548 */ 548 */
549long srcu_batches_completed(struct srcu_struct *sp) 549unsigned long srcu_batches_completed(struct srcu_struct *sp)
550{ 550{
551 return sp->completed; 551 return sp->completed;
552} 552}
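Returning unsigned long rather than long lets callers compare snapshots with the wrap-tolerant ULONG_CMP macros instead of signed arithmetic. A hedged usage sketch, with a hypothetical srcu_struct named my_srcu:

	unsigned long snap = srcu_batches_completed(&my_srcu);

	/* ... time passes, updates run ... */

	/*
	 * True once at least two further batches have completed, even if
	 * the counter wrapped in the meantime; with a signed long, the
	 * subtraction could overflow into undefined behavior.
	 */
	if (ULONG_CMP_GE(srcu_batches_completed(&my_srcu), snap + 2))
		pr_info("batches have certainly advanced\n");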
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 0db5649f8817..cc9ceca7bde1 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
47 void (*func)(struct rcu_head *rcu), 47 void (*func)(struct rcu_head *rcu),
48 struct rcu_ctrlblk *rcp); 48 struct rcu_ctrlblk *rcp);
49 49
50static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
51
52#include "tiny_plugin.h" 50#include "tiny_plugin.h"
53 51
54/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
55static void rcu_idle_enter_common(long long newval)
56{
57 if (newval) {
58 RCU_TRACE(trace_rcu_dyntick(TPS("--="),
59 rcu_dynticks_nesting, newval));
60 rcu_dynticks_nesting = newval;
61 return;
62 }
63 RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
64 rcu_dynticks_nesting, newval));
65 if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
66 struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
67
68 RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
69 rcu_dynticks_nesting, newval));
70 ftrace_dump(DUMP_ALL);
71 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
72 current->pid, current->comm,
73 idle->pid, idle->comm); /* must be idle task! */
74 }
75 rcu_sched_qs(); /* implies rcu_bh_inc() */
76 barrier();
77 rcu_dynticks_nesting = newval;
78}
79
80/* 52/*
81 * Enter idle, which is an extended quiescent state if we have fully 53 * Enter idle, which is an extended quiescent state if we have fully
82 * entered that mode (i.e., if the new value of dynticks_nesting is zero). 54 * entered that mode.
83 */ 55 */
84void rcu_idle_enter(void) 56void rcu_idle_enter(void)
85{ 57{
86 unsigned long flags;
87 long long newval;
88
89 local_irq_save(flags);
90 WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
91 if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
92 DYNTICK_TASK_NEST_VALUE)
93 newval = 0;
94 else
95 newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
96 rcu_idle_enter_common(newval);
97 local_irq_restore(flags);
98} 58}
99EXPORT_SYMBOL_GPL(rcu_idle_enter); 59EXPORT_SYMBOL_GPL(rcu_idle_enter);
100 60
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
103 */ 63 */
104void rcu_irq_exit(void) 64void rcu_irq_exit(void)
105{ 65{
106 unsigned long flags;
107 long long newval;
108
109 local_irq_save(flags);
110 newval = rcu_dynticks_nesting - 1;
111 WARN_ON_ONCE(newval < 0);
112 rcu_idle_enter_common(newval);
113 local_irq_restore(flags);
114} 66}
115EXPORT_SYMBOL_GPL(rcu_irq_exit); 67EXPORT_SYMBOL_GPL(rcu_irq_exit);
116 68
117/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
118static void rcu_idle_exit_common(long long oldval)
119{
120 if (oldval) {
121 RCU_TRACE(trace_rcu_dyntick(TPS("++="),
122 oldval, rcu_dynticks_nesting));
123 return;
124 }
125 RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
126 if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
127 struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
128
129 RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
130 oldval, rcu_dynticks_nesting));
131 ftrace_dump(DUMP_ALL);
132 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
133 current->pid, current->comm,
134 idle->pid, idle->comm); /* must be idle task! */
135 }
136}
137
138/* 69/*
139 * Exit idle, so that we are no longer in an extended quiescent state. 70 * Exit idle, so that we are no longer in an extended quiescent state.
140 */ 71 */
141void rcu_idle_exit(void) 72void rcu_idle_exit(void)
142{ 73{
143 unsigned long flags;
144 long long oldval;
145
146 local_irq_save(flags);
147 oldval = rcu_dynticks_nesting;
148 WARN_ON_ONCE(rcu_dynticks_nesting < 0);
149 if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
150 rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
151 else
152 rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
153 rcu_idle_exit_common(oldval);
154 local_irq_restore(flags);
155} 74}
156EXPORT_SYMBOL_GPL(rcu_idle_exit); 75EXPORT_SYMBOL_GPL(rcu_idle_exit);
157 76
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
160 */ 79 */
161void rcu_irq_enter(void) 80void rcu_irq_enter(void)
162{ 81{
163 unsigned long flags;
164 long long oldval;
165
166 local_irq_save(flags);
167 oldval = rcu_dynticks_nesting;
168 rcu_dynticks_nesting++;
169 WARN_ON_ONCE(rcu_dynticks_nesting == 0);
170 rcu_idle_exit_common(oldval);
171 local_irq_restore(flags);
172} 82}
173EXPORT_SYMBOL_GPL(rcu_irq_enter); 83EXPORT_SYMBOL_GPL(rcu_irq_enter);
174 84
@@ -179,23 +89,13 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
179 */ 89 */
180bool notrace __rcu_is_watching(void) 90bool notrace __rcu_is_watching(void)
181{ 91{
182 return rcu_dynticks_nesting; 92 return true;
183} 93}
184EXPORT_SYMBOL(__rcu_is_watching); 94EXPORT_SYMBOL(__rcu_is_watching);
185 95
186#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 96#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
187 97
188/* 98/*
189 * Test whether the current CPU was interrupted from idle. Nested
190 * interrupts don't count, we must be running at the first interrupt
191 * level.
192 */
193static int rcu_is_cpu_rrupt_from_idle(void)
194{
195 return rcu_dynticks_nesting <= 1;
196}
197
198/*
199 * Helper function for rcu_sched_qs() and rcu_bh_qs(). 99 * Helper function for rcu_sched_qs() and rcu_bh_qs().
200 * Also irqs are disabled to avoid confusion due to interrupt handlers 100 * Also irqs are disabled to avoid confusion due to interrupt handlers
201 * invoking call_rcu(). 101 * invoking call_rcu().
@@ -250,7 +150,7 @@ void rcu_bh_qs(void)
250void rcu_check_callbacks(int user) 150void rcu_check_callbacks(int user)
251{ 151{
252 RCU_TRACE(check_cpu_stalls()); 152 RCU_TRACE(check_cpu_stalls());
253 if (user || rcu_is_cpu_rrupt_from_idle()) 153 if (user)
254 rcu_sched_qs(); 154 rcu_sched_qs();
255 else if (!in_softirq()) 155 else if (!in_softirq())
256 rcu_bh_qs(); 156 rcu_bh_qs();
@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
357 rcp->curtail = &head->next; 257 rcp->curtail = &head->next;
358 RCU_TRACE(rcp->qlen++); 258 RCU_TRACE(rcp->qlen++);
359 local_irq_restore(flags); 259 local_irq_restore(flags);
260
261 if (unlikely(is_idle_task(current))) {
262 /* force scheduling for rcu_sched_qs() */
263 resched_cpu(0);
264 }
360} 265}
361 266
362/* 267/*
@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
383void __init rcu_init(void) 288void __init rcu_init(void)
384{ 289{
385 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 290 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
291 RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
292 RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
386 293
387 rcu_early_boot_tests(); 294 rcu_early_boot_tests();
388} 295}
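The resched_cpu(0) call compensates for the dyntick bookkeeping removed above: Tiny RCU (a uniprocessor-only implementation, hence CPU 0) now makes progress only via context switches and user-mode ticks, so queueing a callback from the idle task must force a reschedule or the callback could be delayed indefinitely. The call_rcu() API itself is unchanged; the standard usage pattern, with hypothetical names and assuming <linux/slab.h> and <linux/rcupdate.h>:

	struct demo_obj {
		int payload;
		struct rcu_head rh;
	};

	static void demo_free_cb(struct rcu_head *head)
	{
		kfree(container_of(head, struct demo_obj, rh));
	}

	static void demo_retire(struct demo_obj *p)
	{
		/* Now safe even from the idle loop: __call_rcu() kicks CPU 0. */
		call_rcu(&p->rh, demo_free_cb);
	}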
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 858c56569127..f94e209a10d6 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
145 rcp->ticks_this_gp++; 145 rcp->ticks_this_gp++;
146 j = jiffies; 146 j = jiffies;
147 js = ACCESS_ONCE(rcp->jiffies_stall); 147 js = ACCESS_ONCE(rcp->jiffies_stall);
148 if (*rcp->curtail && ULONG_CMP_GE(j, js)) { 148 if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
149 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n", 149 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
150 rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting, 150 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
151 jiffies - rcp->gp_start, rcp->qlen); 151 jiffies - rcp->gp_start, rcp->qlen);
152 dump_stack(); 152 dump_stack();
153 }
154 if (*rcp->curtail && ULONG_CMP_GE(j, js))
155 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + 153 ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
156 3 * rcu_jiffies_till_stall_check() + 3; 154 3 * rcu_jiffies_till_stall_check() + 3;
157 else if (ULONG_CMP_GE(j, js)) 155 } else if (ULONG_CMP_GE(j, js)) {
158 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check(); 156 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
157 }
159} 158}
160 159
161static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp) 160static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
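Both branches of the restructured stall check compare jiffies against rcp->jiffies_stall with ULONG_CMP_GE() rather than a plain >=, because both are free-running unsigned counters. The macros are defined along these lines (a sketch of the rcupdate.h definitions):

	/* True iff a is at or after b, modulo ULONG_MAX + 1. */
	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

	/* Worked example near the wrap point: */
	unsigned long js = ULONG_MAX - 5;	/* deadline just before wrap */
	unsigned long j  = 4;			/* "now", ten ticks later   */

	/*
	 * j >= js is false, but ULONG_CMP_GE(j, js) is true:
	 * (j - js) wraps to 10, which is well under ULONG_MAX / 2.
	 */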
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7680fc275036..48d640ca1a05 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
156static void invoke_rcu_core(void); 156static void invoke_rcu_core(void);
157static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); 157static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
158 158
159/* rcuc/rcub kthread realtime priority */
160static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
161module_param(kthread_prio, int, 0644);
162
159/* 163/*
160 * Track the rcutorture test sequence number and the update version 164 * Track the rcutorture test sequence number and the update version
161 * number within a given test. The rcutorture_testseq is incremented 165 * number within a given test. The rcutorture_testseq is incremented
@@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
215#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 219#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
216}; 220};
217 221
222DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
223EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
224
218/* 225/*
219 * Let the RCU core know that this CPU has gone through the scheduler, 226 * Let the RCU core know that this CPU has gone through the scheduler,
220 * which is a quiescent state. This is called when the need for a 227 * which is a quiescent state. This is called when the need for a
@@ -284,6 +291,22 @@ void rcu_note_context_switch(void)
284} 291}
285EXPORT_SYMBOL_GPL(rcu_note_context_switch); 292EXPORT_SYMBOL_GPL(rcu_note_context_switch);
286 293
294/*
 295 * Register a quiescent state for all RCU flavors. If there is an
296 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
297 * dyntick-idle quiescent state visible to other CPUs (but only for those
 298 * RCU flavors in desperate need of a quiescent state, which will normally
299 * be none of them). Either way, do a lightweight quiescent state for
300 * all RCU flavors.
301 */
302void rcu_all_qs(void)
303{
304 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
305 rcu_momentary_dyntick_idle();
306 this_cpu_inc(rcu_qs_ctr);
307}
308EXPORT_SYMBOL_GPL(rcu_all_qs);
309
287static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ 310static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
288static long qhimark = 10000; /* If this many pending, ignore blimit. */ 311static long qhimark = 10000; /* If this many pending, ignore blimit. */
289static long qlowmark = 100; /* Once only this many pending, use blimit. */ 312static long qlowmark = 100; /* Once only this many pending, use blimit. */
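The new rcu_qs_ctr pairs with the rcu_qs_ctr_snap field added to struct rcu_data later in this patch: the grace-period machinery snapshots a CPU's counter when it starts waiting on that CPU, and any later difference proves the CPU went through rcu_all_qs() (typically via cond_resched_rcu_qs()). In sketch form, with the locking and ACCESS_ONCE details elided:

	/* At grace-period start: remember where this CPU's counter stood. */
	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);

	/* Later, when checking whether the CPU reached a quiescent state: */
	if (rdp->rcu_qs_ctr_snap != per_cpu(rcu_qs_ctr, cpu)) {
		/*
		 * The counter moved, so the CPU executed rcu_all_qs() at
		 * least once since the snapshot: report a quiescent state
		 * even though rdp->passed_quiesce was never set.
		 */
	}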
@@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp);
315static int rcu_pending(void); 338static int rcu_pending(void);
316 339
317/* 340/*
318 * Return the number of RCU-sched batches processed thus far for debug & stats. 341 * Return the number of RCU batches started thus far for debug & stats.
342 */
343unsigned long rcu_batches_started(void)
344{
345 return rcu_state_p->gpnum;
346}
347EXPORT_SYMBOL_GPL(rcu_batches_started);
348
349/*
350 * Return the number of RCU-sched batches started thus far for debug & stats.
351 */
352unsigned long rcu_batches_started_sched(void)
353{
354 return rcu_sched_state.gpnum;
355}
356EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
357
358/*
359 * Return the number of RCU BH batches started thus far for debug & stats.
319 */ 360 */
320long rcu_batches_completed_sched(void) 361unsigned long rcu_batches_started_bh(void)
362{
363 return rcu_bh_state.gpnum;
364}
365EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
366
367/*
368 * Return the number of RCU batches completed thus far for debug & stats.
369 */
370unsigned long rcu_batches_completed(void)
371{
372 return rcu_state_p->completed;
373}
374EXPORT_SYMBOL_GPL(rcu_batches_completed);
375
376/*
377 * Return the number of RCU-sched batches completed thus far for debug & stats.
378 */
379unsigned long rcu_batches_completed_sched(void)
321{ 380{
322 return rcu_sched_state.completed; 381 return rcu_sched_state.completed;
323} 382}
324EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); 383EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
325 384
326/* 385/*
327 * Return the number of RCU BH batches processed thus far for debug & stats. 386 * Return the number of RCU BH batches completed thus far for debug & stats.
328 */ 387 */
329long rcu_batches_completed_bh(void) 388unsigned long rcu_batches_completed_bh(void)
330{ 389{
331 return rcu_bh_state.completed; 390 return rcu_bh_state.completed;
332} 391}
@@ -759,39 +818,71 @@ void rcu_irq_enter(void)
759/** 818/**
760 * rcu_nmi_enter - inform RCU of entry to NMI context 819 * rcu_nmi_enter - inform RCU of entry to NMI context
761 * 820 *
762 * If the CPU was idle with dynamic ticks active, and there is no 821 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
763 * irq handler running, this updates rdtp->dynticks_nmi to let the 822 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
764 * RCU grace-period handling know that the CPU is active. 823 * that the CPU is active. This implementation permits nested NMIs, as
824 * long as the nesting level does not overflow an int. (You will probably
825 * run out of stack space first.)
765 */ 826 */
766void rcu_nmi_enter(void) 827void rcu_nmi_enter(void)
767{ 828{
768 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 829 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
830 int incby = 2;
769 831
770 if (rdtp->dynticks_nmi_nesting == 0 && 832 /* Complain about underflow. */
771 (atomic_read(&rdtp->dynticks) & 0x1)) 833 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
772 return; 834
773 rdtp->dynticks_nmi_nesting++; 835 /*
774 smp_mb__before_atomic(); /* Force delay from prior write. */ 836 * If idle from RCU viewpoint, atomically increment ->dynticks
775 atomic_inc(&rdtp->dynticks); 837 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
776 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ 838 * Otherwise, increment ->dynticks_nmi_nesting by two. This means
777 smp_mb__after_atomic(); /* See above. */ 839 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
778 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); 840 * to be in the outermost NMI handler that interrupted an RCU-idle
841 * period (observation due to Andy Lutomirski).
842 */
843 if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
844 smp_mb__before_atomic(); /* Force delay from prior write. */
845 atomic_inc(&rdtp->dynticks);
846 /* atomic_inc() before later RCU read-side crit sects */
847 smp_mb__after_atomic(); /* See above. */
848 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
849 incby = 1;
850 }
851 rdtp->dynticks_nmi_nesting += incby;
852 barrier();
779} 853}
780 854
781/** 855/**
782 * rcu_nmi_exit - inform RCU of exit from NMI context 856 * rcu_nmi_exit - inform RCU of exit from NMI context
783 * 857 *
784 * If the CPU was idle with dynamic ticks active, and there is no 858 * If we are returning from the outermost NMI handler that interrupted an
785 * irq handler running, this updates rdtp->dynticks_nmi to let the 859 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
786 * RCU grace-period handling know that the CPU is no longer active. 860 * to let the RCU grace-period handling know that the CPU is back to
861 * being RCU-idle.
787 */ 862 */
788void rcu_nmi_exit(void) 863void rcu_nmi_exit(void)
789{ 864{
790 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 865 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
791 866
792 if (rdtp->dynticks_nmi_nesting == 0 || 867 /*
793 --rdtp->dynticks_nmi_nesting != 0) 868 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
869 * (We are exiting an NMI handler, so RCU better be paying attention
870 * to us!)
871 */
872 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
873 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
874
875 /*
876 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
877 * leave it in non-RCU-idle state.
878 */
879 if (rdtp->dynticks_nmi_nesting != 1) {
880 rdtp->dynticks_nmi_nesting -= 2;
794 return; 881 return;
882 }
883
884 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
885 rdtp->dynticks_nmi_nesting = 0;
795 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ 886 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
796 smp_mb__before_atomic(); /* See above. */ 887 smp_mb__before_atomic(); /* See above. */
797 atomic_inc(&rdtp->dynticks); 888 atomic_inc(&rdtp->dynticks);
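The increment-by-one-or-two scheme makes ->dynticks_nmi_nesting odd exactly when the outermost active NMI interrupted an RCU-idle period, which is what rcu_nmi_exit() tests with its nesting == 1 check. A self-contained userspace model of the counter, with the atomics and barriers elided:

	#include <assert.h>
	#include <stdbool.h>

	static int nesting;	/* models rdtp->dynticks_nmi_nesting */
	static bool watching;	/* models the low bit of rdtp->dynticks */

	static void model_nmi_enter(void)
	{
		int incby = 2;

		if (!watching) {	/* NMI arrived during an idle period */
			watching = true;
			incby = 1;
		}
		nesting += incby;
	}

	static void model_nmi_exit(void)
	{
		if (nesting != 1) {	/* nested, or CPU was not idle */
			nesting -= 2;
			return;
		}
		nesting = 0;		/* outermost NMI over idle: go idle */
		watching = false;
	}

	int main(void)
	{
		model_nmi_enter(); assert(nesting == 1);  /* from idle */
		model_nmi_enter(); assert(nesting == 3);  /* nested NMI */
		model_nmi_exit();  assert(nesting == 1);
		model_nmi_exit();  assert(nesting == 0 && !watching);
		return 0;
	}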
@@ -898,17 +989,14 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
898 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); 989 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
899 return 1; 990 return 1;
900 } else { 991 } else {
992 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
993 rdp->mynode->gpnum))
994 ACCESS_ONCE(rdp->gpwrap) = true;
901 return 0; 995 return 0;
902 } 996 }
903} 997}
904 998
905/* 999/*
906 * This function really isn't for public consumption, but RCU is special in
907 * that context switches can allow the state machine to make progress.
908 */
909extern void resched_cpu(int cpu);
910
911/*
912 * Return true if the specified CPU has passed through a quiescent 1000 * Return true if the specified CPU has passed through a quiescent
 913 * state by virtue of being in or having passed through a dynticks 1001 * state by virtue of being in or having passed through a dynticks
914 * idle state since the last call to dyntick_save_progress_counter() 1002 * idle state since the last call to dyntick_save_progress_counter()
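The new guard sets ->gpwrap when a CPU's cached ->gpnum has fallen more than a quarter of the counter space behind its rcu_node's, which can only happen if the CPU slept across a counter wrap; later hunks use the flag to force resynchronization even when the stale numbers happen to compare equal. A worked instance of the test, with illustrative values:

	unsigned long cpu_gpnum  = 10;	/* stale per-CPU snapshot */
	unsigned long node_gpnum = 10 + ULONG_MAX / 4 + 1;	/* far ahead */

	/* ULONG_CMP_LT(a, b): a is "before" b, modulo wrap. */
	if (ULONG_CMP_LT(cpu_gpnum + ULONG_MAX / 4, node_gpnum)) {
		/* Triggers here: mark gpnum/completed as untrustworthy. */
	}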
@@ -1011,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
1011 j1 = rcu_jiffies_till_stall_check(); 1099 j1 = rcu_jiffies_till_stall_check();
1012 ACCESS_ONCE(rsp->jiffies_stall) = j + j1; 1100 ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
1013 rsp->jiffies_resched = j + j1 / 2; 1101 rsp->jiffies_resched = j + j1 / 2;
1102 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
1103}
1104
1105/*
1106 * Complain about starvation of grace-period kthread.
1107 */
1108static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
1109{
1110 unsigned long gpa;
1111 unsigned long j;
1112
1113 j = jiffies;
1114 gpa = ACCESS_ONCE(rsp->gp_activity);
1115 if (j - gpa > 2 * HZ)
1116 pr_err("%s kthread starved for %ld jiffies!\n",
1117 rsp->name, j - gpa);
1014} 1118}
1015 1119
1016/* 1120/*
@@ -1033,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1033 } 1137 }
1034} 1138}
1035 1139
1036static void print_other_cpu_stall(struct rcu_state *rsp) 1140static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1037{ 1141{
1038 int cpu; 1142 int cpu;
1039 long delta; 1143 long delta;
1040 unsigned long flags; 1144 unsigned long flags;
1145 unsigned long gpa;
1146 unsigned long j;
1041 int ndetected = 0; 1147 int ndetected = 0;
1042 struct rcu_node *rnp = rcu_get_root(rsp); 1148 struct rcu_node *rnp = rcu_get_root(rsp);
1043 long totqlen = 0; 1149 long totqlen = 0;
@@ -1075,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
1075 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1181 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1076 } 1182 }
1077 1183
1078 /*
1079 * Now rat on any tasks that got kicked up to the root rcu_node
1080 * due to CPU offlining.
1081 */
1082 rnp = rcu_get_root(rsp);
1083 raw_spin_lock_irqsave(&rnp->lock, flags);
1084 ndetected += rcu_print_task_stall(rnp);
1085 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1086
1087 print_cpu_stall_info_end(); 1184 print_cpu_stall_info_end();
1088 for_each_possible_cpu(cpu) 1185 for_each_possible_cpu(cpu)
1089 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; 1186 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1090 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n", 1187 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1091 smp_processor_id(), (long)(jiffies - rsp->gp_start), 1188 smp_processor_id(), (long)(jiffies - rsp->gp_start),
1092 (long)rsp->gpnum, (long)rsp->completed, totqlen); 1189 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1093 if (ndetected == 0) 1190 if (ndetected) {
1094 pr_err("INFO: Stall ended before state dump start\n");
1095 else
1096 rcu_dump_cpu_stacks(rsp); 1191 rcu_dump_cpu_stacks(rsp);
1192 } else {
1193 if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
1194 ACCESS_ONCE(rsp->completed) == gpnum) {
1195 pr_err("INFO: Stall ended before state dump start\n");
1196 } else {
1197 j = jiffies;
1198 gpa = ACCESS_ONCE(rsp->gp_activity);
1199 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
1200 rsp->name, j - gpa, j, gpa,
1201 jiffies_till_next_fqs);
1202 /* In this case, the current CPU might be at fault. */
1203 sched_show_task(current);
1204 }
1205 }
1097 1206
1098 /* Complain about tasks blocking the grace period. */ 1207 /* Complain about tasks blocking the grace period. */
1099
1100 rcu_print_detail_task_stall(rsp); 1208 rcu_print_detail_task_stall(rsp);
1101 1209
1210 rcu_check_gp_kthread_starvation(rsp);
1211
1102 force_quiescent_state(rsp); /* Kick them all. */ 1212 force_quiescent_state(rsp); /* Kick them all. */
1103} 1213}
1104 1214
@@ -1123,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
1123 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n", 1233 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1124 jiffies - rsp->gp_start, 1234 jiffies - rsp->gp_start,
1125 (long)rsp->gpnum, (long)rsp->completed, totqlen); 1235 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1236
1237 rcu_check_gp_kthread_starvation(rsp);
1238
1126 rcu_dump_cpu_stacks(rsp); 1239 rcu_dump_cpu_stacks(rsp);
1127 1240
1128 raw_spin_lock_irqsave(&rnp->lock, flags); 1241 raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1193,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1193 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) { 1306 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1194 1307
1195 /* They had a few time units to dump stack, so complain. */ 1308 /* They had a few time units to dump stack, so complain. */
1196 print_other_cpu_stall(rsp); 1309 print_other_cpu_stall(rsp, gpnum);
1197 } 1310 }
1198} 1311}
1199 1312
@@ -1530,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1530 bool ret; 1643 bool ret;
1531 1644
1532 /* Handle the ends of any preceding grace periods first. */ 1645 /* Handle the ends of any preceding grace periods first. */
1533 if (rdp->completed == rnp->completed) { 1646 if (rdp->completed == rnp->completed &&
1647 !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
1534 1648
1535 /* No grace period end, so just accelerate recent callbacks. */ 1649 /* No grace period end, so just accelerate recent callbacks. */
1536 ret = rcu_accelerate_cbs(rsp, rnp, rdp); 1650 ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1545,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1545 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); 1659 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1546 } 1660 }
1547 1661
1548 if (rdp->gpnum != rnp->gpnum) { 1662 if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
1549 /* 1663 /*
1550 * If the current grace period is waiting for this CPU, 1664 * If the current grace period is waiting for this CPU,
1551 * set up to detect a quiescent state, otherwise don't 1665 * set up to detect a quiescent state, otherwise don't
@@ -1554,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1554 rdp->gpnum = rnp->gpnum; 1668 rdp->gpnum = rnp->gpnum;
1555 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); 1669 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1556 rdp->passed_quiesce = 0; 1670 rdp->passed_quiesce = 0;
1671 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
1557 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); 1672 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1558 zero_cpu_stall_ticks(rdp); 1673 zero_cpu_stall_ticks(rdp);
1674 ACCESS_ONCE(rdp->gpwrap) = false;
1559 } 1675 }
1560 return ret; 1676 return ret;
1561} 1677}
@@ -1569,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1569 local_irq_save(flags); 1685 local_irq_save(flags);
1570 rnp = rdp->mynode; 1686 rnp = rdp->mynode;
1571 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && 1687 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1572 rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */ 1688 rdp->completed == ACCESS_ONCE(rnp->completed) &&
1689 !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
1573 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ 1690 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1574 local_irq_restore(flags); 1691 local_irq_restore(flags);
1575 return; 1692 return;
@@ -1589,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
1589 struct rcu_data *rdp; 1706 struct rcu_data *rdp;
1590 struct rcu_node *rnp = rcu_get_root(rsp); 1707 struct rcu_node *rnp = rcu_get_root(rsp);
1591 1708
1709 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1592 rcu_bind_gp_kthread(); 1710 rcu_bind_gp_kthread();
1593 raw_spin_lock_irq(&rnp->lock); 1711 raw_spin_lock_irq(&rnp->lock);
1594 smp_mb__after_unlock_lock(); 1712 smp_mb__after_unlock_lock();
@@ -1649,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
1649 rnp->grphi, rnp->qsmask); 1767 rnp->grphi, rnp->qsmask);
1650 raw_spin_unlock_irq(&rnp->lock); 1768 raw_spin_unlock_irq(&rnp->lock);
1651 cond_resched_rcu_qs(); 1769 cond_resched_rcu_qs();
1770 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1652 } 1771 }
1653 1772
1654 mutex_unlock(&rsp->onoff_mutex); 1773 mutex_unlock(&rsp->onoff_mutex);
@@ -1665,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1665 unsigned long maxj; 1784 unsigned long maxj;
1666 struct rcu_node *rnp = rcu_get_root(rsp); 1785 struct rcu_node *rnp = rcu_get_root(rsp);
1667 1786
1787 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1668 rsp->n_force_qs++; 1788 rsp->n_force_qs++;
1669 if (fqs_state == RCU_SAVE_DYNTICK) { 1789 if (fqs_state == RCU_SAVE_DYNTICK) {
1670 /* Collect dyntick-idle snapshots. */ 1790 /* Collect dyntick-idle snapshots. */
@@ -1703,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
1703 struct rcu_data *rdp; 1823 struct rcu_data *rdp;
1704 struct rcu_node *rnp = rcu_get_root(rsp); 1824 struct rcu_node *rnp = rcu_get_root(rsp);
1705 1825
1826 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1706 raw_spin_lock_irq(&rnp->lock); 1827 raw_spin_lock_irq(&rnp->lock);
1707 smp_mb__after_unlock_lock(); 1828 smp_mb__after_unlock_lock();
1708 gp_duration = jiffies - rsp->gp_start; 1829 gp_duration = jiffies - rsp->gp_start;
@@ -1739,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
1739 nocb += rcu_future_gp_cleanup(rsp, rnp); 1860 nocb += rcu_future_gp_cleanup(rsp, rnp);
1740 raw_spin_unlock_irq(&rnp->lock); 1861 raw_spin_unlock_irq(&rnp->lock);
1741 cond_resched_rcu_qs(); 1862 cond_resched_rcu_qs();
1863 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1742 } 1864 }
1743 rnp = rcu_get_root(rsp); 1865 rnp = rcu_get_root(rsp);
1744 raw_spin_lock_irq(&rnp->lock); 1866 raw_spin_lock_irq(&rnp->lock);
@@ -1788,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
1788 if (rcu_gp_init(rsp)) 1910 if (rcu_gp_init(rsp))
1789 break; 1911 break;
1790 cond_resched_rcu_qs(); 1912 cond_resched_rcu_qs();
1913 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1791 WARN_ON(signal_pending(current)); 1914 WARN_ON(signal_pending(current));
1792 trace_rcu_grace_period(rsp->name, 1915 trace_rcu_grace_period(rsp->name,
1793 ACCESS_ONCE(rsp->gpnum), 1916 ACCESS_ONCE(rsp->gpnum),
@@ -1831,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
1831 ACCESS_ONCE(rsp->gpnum), 1954 ACCESS_ONCE(rsp->gpnum),
1832 TPS("fqsend")); 1955 TPS("fqsend"));
1833 cond_resched_rcu_qs(); 1956 cond_resched_rcu_qs();
1957 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1834 } else { 1958 } else {
1835 /* Deal with stray signal. */ 1959 /* Deal with stray signal. */
1836 cond_resched_rcu_qs(); 1960 cond_resched_rcu_qs();
1961 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1837 WARN_ON(signal_pending(current)); 1962 WARN_ON(signal_pending(current));
1838 trace_rcu_grace_period(rsp->name, 1963 trace_rcu_grace_period(rsp->name,
1839 ACCESS_ONCE(rsp->gpnum), 1964 ACCESS_ONCE(rsp->gpnum),
@@ -2010,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2010 rnp = rdp->mynode; 2135 rnp = rdp->mynode;
2011 raw_spin_lock_irqsave(&rnp->lock, flags); 2136 raw_spin_lock_irqsave(&rnp->lock, flags);
2012 smp_mb__after_unlock_lock(); 2137 smp_mb__after_unlock_lock();
2013 if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum || 2138 if ((rdp->passed_quiesce == 0 &&
2014 rnp->completed == rnp->gpnum) { 2139 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
2140 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
2141 rdp->gpwrap) {
2015 2142
2016 /* 2143 /*
2017 * The grace period in which this quiescent state was 2144 * The grace period in which this quiescent state was
@@ -2020,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2020 * within the current grace period. 2147 * within the current grace period.
2021 */ 2148 */
2022 rdp->passed_quiesce = 0; /* need qs for new gp. */ 2149 rdp->passed_quiesce = 0; /* need qs for new gp. */
2150 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
2023 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2151 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2024 return; 2152 return;
2025 } 2153 }
@@ -2064,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2064 * Was there a quiescent state since the beginning of the grace 2192 * Was there a quiescent state since the beginning of the grace
2065 * period? If no, then exit and wait for the next call. 2193 * period? If no, then exit and wait for the next call.
2066 */ 2194 */
2067 if (!rdp->passed_quiesce) 2195 if (!rdp->passed_quiesce &&
2196 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
2068 return; 2197 return;
2069 2198
2070 /* 2199 /*
@@ -2195,6 +2324,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2195} 2324}
2196 2325
2197/* 2326/*
2327 * All CPUs for the specified rcu_node structure have gone offline,
2328 * and all tasks that were preempted within an RCU read-side critical
2329 * section while running on one of those CPUs have since exited their RCU
2330 * read-side critical section. Some other CPU is reporting this fact with
2331 * the specified rcu_node structure's ->lock held and interrupts disabled.
2332 * This function therefore goes up the tree of rcu_node structures,
2333 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2334 * the leaf rcu_node structure's ->qsmaskinit field has already been
2335 * updated.
2336 *
2337 * This function does check that the specified rcu_node structure has
2338 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2339 * prematurely. That said, invoking it after the fact will cost you
2340 * a needless lock acquisition. So once it has done its work, don't
2341 * invoke it again.
2342 */
2343static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2344{
2345 long mask;
2346 struct rcu_node *rnp = rnp_leaf;
2347
2348 if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2349 return;
2350 for (;;) {
2351 mask = rnp->grpmask;
2352 rnp = rnp->parent;
2353 if (!rnp)
2354 break;
2355 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
2356 smp_mb__after_unlock_lock(); /* GP memory ordering. */
2357 rnp->qsmaskinit &= ~mask;
2358 if (rnp->qsmaskinit) {
2359 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2360 return;
2361 }
2362 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2363 }
2364}
2365
2366/*
2198 * The CPU has been completely removed, and some other CPU is reporting 2367 * The CPU has been completely removed, and some other CPU is reporting
2199 * this fact from process context. Do the remainder of the cleanup, 2368 * this fact from process context. Do the remainder of the cleanup,
2200 * including orphaning the outgoing CPU's RCU callbacks, and also 2369 * including orphaning the outgoing CPU's RCU callbacks, and also
@@ -2204,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2204static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2373static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2205{ 2374{
2206 unsigned long flags; 2375 unsigned long flags;
2207 unsigned long mask;
2208 int need_report = 0;
2209 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 2376 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2210 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2377 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2211 2378
@@ -2219,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2219 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */ 2386 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2220 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); 2387 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2221 rcu_adopt_orphan_cbs(rsp, flags); 2388 rcu_adopt_orphan_cbs(rsp, flags);
2389 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
2222 2390
2223 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ 2391 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
2224 mask = rdp->grpmask; /* rnp->grplo is constant. */ 2392 raw_spin_lock_irqsave(&rnp->lock, flags);
2225 do { 2393 smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */
2226 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 2394 rnp->qsmaskinit &= ~rdp->grpmask;
2227 smp_mb__after_unlock_lock(); 2395 if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
2228 rnp->qsmaskinit &= ~mask; 2396 rcu_cleanup_dead_rnp(rnp);
2229 if (rnp->qsmaskinit != 0) { 2397 rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
2230 if (rnp != rdp->mynode)
2231 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2232 break;
2233 }
2234 if (rnp == rdp->mynode)
2235 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
2236 else
2237 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2238 mask = rnp->grpmask;
2239 rnp = rnp->parent;
2240 } while (rnp != NULL);
2241
2242 /*
2243 * We still hold the leaf rcu_node structure lock here, and
2244 * irqs are still disabled. The reason for this subterfuge is
2245 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
2246 * held leads to deadlock.
2247 */
2248 raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
2249 rnp = rdp->mynode;
2250 if (need_report & RCU_OFL_TASKS_NORM_GP)
2251 rcu_report_unblock_qs_rnp(rnp, flags);
2252 else
2253 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2254 if (need_report & RCU_OFL_TASKS_EXP_GP)
2255 rcu_report_exp_rnp(rsp, rnp, true);
2256 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, 2398 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2257 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", 2399 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2258 cpu, rdp->qlen, rdp->nxtlist); 2400 cpu, rdp->qlen, rdp->nxtlist);
@@ -2268,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2268{ 2410{
2269} 2411}
2270 2412
2413static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2414{
2415}
2416
2271static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2417static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2272{ 2418{
2273} 2419}
@@ -2464,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
2464 } 2610 }
2465 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2611 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2466 } 2612 }
2467 rnp = rcu_get_root(rsp);
2468 if (rnp->qsmask == 0) {
2469 raw_spin_lock_irqsave(&rnp->lock, flags);
2470 smp_mb__after_unlock_lock();
2471 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2472 }
2473} 2613}
2474 2614
2475/* 2615/*
@@ -2569,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
2569 * Schedule RCU callback invocation. If the specified type of RCU 2709 * Schedule RCU callback invocation. If the specified type of RCU
2570 * does not support RCU priority boosting, just do a direct call, 2710 * does not support RCU priority boosting, just do a direct call,
2571 * otherwise wake up the per-CPU kernel kthread. Note that because we 2711 * otherwise wake up the per-CPU kernel kthread. Note that because we
2572 * are running on the current CPU with interrupts disabled, the 2712 * are running on the current CPU with softirqs disabled, the
2573 * rcu_cpu_kthread_task cannot disappear out from under us. 2713 * rcu_cpu_kthread_task cannot disappear out from under us.
2574 */ 2714 */
2575static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) 2715static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -3109,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3109 3249
3110 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3250 /* Is the RCU core waiting for a quiescent state from this CPU? */
3111 if (rcu_scheduler_fully_active && 3251 if (rcu_scheduler_fully_active &&
3112 rdp->qs_pending && !rdp->passed_quiesce) { 3252 rdp->qs_pending && !rdp->passed_quiesce &&
3253 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
3113 rdp->n_rp_qs_pending++; 3254 rdp->n_rp_qs_pending++;
3114 } else if (rdp->qs_pending && rdp->passed_quiesce) { 3255 } else if (rdp->qs_pending &&
3256 (rdp->passed_quiesce ||
3257 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
3115 rdp->n_rp_report_qs++; 3258 rdp->n_rp_report_qs++;
3116 return 1; 3259 return 1;
3117 } 3260 }
@@ -3135,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3135 } 3278 }
3136 3279
3137 /* Has a new RCU grace period started? */ 3280 /* Has a new RCU grace period started? */
3138 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ 3281 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
3282 unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
3139 rdp->n_rp_gp_started++; 3283 rdp->n_rp_gp_started++;
3140 return 1; 3284 return 1;
3141 } 3285 }
@@ -3318,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
3318 } else { 3462 } else {
3319 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, 3463 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3320 rsp->n_barrier_done); 3464 rsp->n_barrier_done);
3465 smp_mb__before_atomic();
3321 atomic_inc(&rsp->barrier_cpu_count); 3466 atomic_inc(&rsp->barrier_cpu_count);
3322 __call_rcu(&rdp->barrier_head, 3467 __call_rcu(&rdp->barrier_head,
3323 rcu_barrier_callback, rsp, cpu, 0); 3468 rcu_barrier_callback, rsp, cpu, 0);
@@ -3385,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3385 /* Set up local state, ensuring consistent view of global state. */ 3530 /* Set up local state, ensuring consistent view of global state. */
3386 raw_spin_lock_irqsave(&rnp->lock, flags); 3531 raw_spin_lock_irqsave(&rnp->lock, flags);
3387 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); 3532 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3388 init_callback_list(rdp);
3389 rdp->qlen_lazy = 0;
3390 ACCESS_ONCE(rdp->qlen) = 0;
3391 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); 3533 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3392 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); 3534 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3393 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); 3535 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -3444,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3444 rdp->gpnum = rnp->completed; 3586 rdp->gpnum = rnp->completed;
3445 rdp->completed = rnp->completed; 3587 rdp->completed = rnp->completed;
3446 rdp->passed_quiesce = 0; 3588 rdp->passed_quiesce = 0;
3589 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
3447 rdp->qs_pending = 0; 3590 rdp->qs_pending = 0;
3448 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); 3591 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3449 } 3592 }
@@ -3535,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self,
3535static int __init rcu_spawn_gp_kthread(void) 3678static int __init rcu_spawn_gp_kthread(void)
3536{ 3679{
3537 unsigned long flags; 3680 unsigned long flags;
3681 int kthread_prio_in = kthread_prio;
3538 struct rcu_node *rnp; 3682 struct rcu_node *rnp;
3539 struct rcu_state *rsp; 3683 struct rcu_state *rsp;
3684 struct sched_param sp;
3540 struct task_struct *t; 3685 struct task_struct *t;
3541 3686
3687 /* Force priority into range. */
3688 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3689 kthread_prio = 1;
3690 else if (kthread_prio < 0)
3691 kthread_prio = 0;
3692 else if (kthread_prio > 99)
3693 kthread_prio = 99;
3694 if (kthread_prio != kthread_prio_in)
3695 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3696 kthread_prio, kthread_prio_in);
3697
3542 rcu_scheduler_fully_active = 1; 3698 rcu_scheduler_fully_active = 1;
3543 for_each_rcu_flavor(rsp) { 3699 for_each_rcu_flavor(rsp) {
3544 t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name); 3700 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3545 BUG_ON(IS_ERR(t)); 3701 BUG_ON(IS_ERR(t));
3546 rnp = rcu_get_root(rsp); 3702 rnp = rcu_get_root(rsp);
3547 raw_spin_lock_irqsave(&rnp->lock, flags); 3703 raw_spin_lock_irqsave(&rnp->lock, flags);
3548 rsp->gp_kthread = t; 3704 rsp->gp_kthread = t;
3705 if (kthread_prio) {
3706 sp.sched_priority = kthread_prio;
3707 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3708 }
3709 wake_up_process(t);
3549 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3710 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3550 } 3711 }
3551 rcu_spawn_nocb_kthreads(); 3712 rcu_spawn_nocb_kthreads();
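Replacing kthread_run() with kthread_create() plus an explicit wake_up_process() is what makes the priority setting race-free: kthread_run() would let the thread start executing at its default priority before sched_setscheduler_nocheck() ran. The general recipe, independent of RCU and with hypothetical names:

	struct sched_param sp = { .sched_priority = prio };	/* 1..99 FIFO */
	struct task_struct *t;

	t = kthread_create(worker_fn, arg, "demo/%d", id);	/* starts stopped */
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (prio)
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t);		/* first run already at new priority */

The new rcutree.kthread_prio= boot parameter feeds this path, clamped to 0..99, and to at least 1 when RCU_BOOST is configured, since priority boosting requires a real-time priority.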
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8e7b1843896e..119de399eb2f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -27,7 +27,6 @@
27#include <linux/threads.h> 27#include <linux/threads.h>
28#include <linux/cpumask.h> 28#include <linux/cpumask.h>
29#include <linux/seqlock.h> 29#include <linux/seqlock.h>
30#include <linux/irq_work.h>
31 30
32/* 31/*
33 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and 32 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -172,11 +171,6 @@ struct rcu_node {
172 /* queued on this rcu_node structure that */ 171 /* queued on this rcu_node structure that */
173 /* are blocking the current grace period, */ 172 /* are blocking the current grace period, */
174 /* there can be no such task. */ 173 /* there can be no such task. */
175 struct completion boost_completion;
176 /* Used to ensure that the rt_mutex used */
177 /* to carry out the boosting is fully */
178 /* released with no future boostee accesses */
179 /* before that rt_mutex is re-initialized. */
180 struct rt_mutex boost_mtx; 174 struct rt_mutex boost_mtx;
181 /* Used only for the priority-boosting */ 175 /* Used only for the priority-boosting */
182 /* side effect, not as a lock. */ 176 /* side effect, not as a lock. */
@@ -257,9 +251,12 @@ struct rcu_data {
257 /* in order to detect GP end. */ 251 /* in order to detect GP end. */
258 unsigned long gpnum; /* Highest gp number that this CPU */ 252 unsigned long gpnum; /* Highest gp number that this CPU */
259 /* is aware of having started. */ 253 /* is aware of having started. */
254 unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
255 /* for rcu_all_qs() invocations. */
260 bool passed_quiesce; /* User-mode/idle loop etc. */ 256 bool passed_quiesce; /* User-mode/idle loop etc. */
261 bool qs_pending; /* Core waits for quiesc state. */ 257 bool qs_pending; /* Core waits for quiesc state. */
262 bool beenonline; /* CPU online at least once. */ 258 bool beenonline; /* CPU online at least once. */
259 bool gpwrap; /* Possible gpnum/completed wrap. */
263 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ 260 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
264 unsigned long grpmask; /* Mask to apply to leaf qsmask. */ 261 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
265#ifdef CONFIG_RCU_CPU_STALL_INFO 262#ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -340,14 +337,10 @@ struct rcu_data {
340#ifdef CONFIG_RCU_NOCB_CPU 337#ifdef CONFIG_RCU_NOCB_CPU
341 struct rcu_head *nocb_head; /* CBs waiting for kthread. */ 338 struct rcu_head *nocb_head; /* CBs waiting for kthread. */
342 struct rcu_head **nocb_tail; 339 struct rcu_head **nocb_tail;
343 atomic_long_t nocb_q_count; /* # CBs waiting for kthread */ 340 atomic_long_t nocb_q_count; /* # CBs waiting for nocb */
344 atomic_long_t nocb_q_count_lazy; /* (approximate). */ 341 atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
345 struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ 342 struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
346 struct rcu_head **nocb_follower_tail; 343 struct rcu_head **nocb_follower_tail;
347 atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
348 atomic_long_t nocb_follower_count_lazy; /* (approximate). */
349 int nocb_p_count; /* # CBs being invoked by kthread */
350 int nocb_p_count_lazy; /* (approximate). */
351 wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ 344 wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
352 struct task_struct *nocb_kthread; 345 struct task_struct *nocb_kthread;
353 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ 346 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
@@ -356,8 +349,6 @@ struct rcu_data {
356 struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp; 349 struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
357 /* CBs waiting for GP. */ 350 /* CBs waiting for GP. */
358 struct rcu_head **nocb_gp_tail; 351 struct rcu_head **nocb_gp_tail;
359 long nocb_gp_count;
360 long nocb_gp_count_lazy;
361 bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */ 352 bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */
362 struct rcu_data *nocb_next_follower; 353 struct rcu_data *nocb_next_follower;
363 /* Next follower in wakeup chain. */ 354 /* Next follower in wakeup chain. */
@@ -488,10 +479,14 @@ struct rcu_state {
488 /* due to no GP active. */ 479 /* due to no GP active. */
489 unsigned long gp_start; /* Time at which GP started, */ 480 unsigned long gp_start; /* Time at which GP started, */
490 /* but in jiffies. */ 481 /* but in jiffies. */
482 unsigned long gp_activity; /* Time of last GP kthread */
483 /* activity in jiffies. */
491 unsigned long jiffies_stall; /* Time at which to check */ 484 unsigned long jiffies_stall; /* Time at which to check */
492 /* for CPU stalls. */ 485 /* for CPU stalls. */
493 unsigned long jiffies_resched; /* Time at which to resched */ 486 unsigned long jiffies_resched; /* Time at which to resched */
494 /* a reluctant CPU. */ 487 /* a reluctant CPU. */
488 unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
489 /* GP start. */
495 unsigned long gp_max; /* Maximum GP duration in */ 490 unsigned long gp_max; /* Maximum GP duration in */
496 /* jiffies. */ 491 /* jiffies. */
497 const char *name; /* Name of structure. */ 492 const char *name; /* Name of structure. */
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors;
514#define for_each_rcu_flavor(rsp) \ 509#define for_each_rcu_flavor(rsp) \
515 list_for_each_entry((rsp), &rcu_struct_flavors, flavors) 510 list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
516 511
517/* Return values for rcu_preempt_offline_tasks(). */
518
519#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
520 /* GP were moved to root. */
521#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
522 /* GP were moved to root. */
523
524/* 512/*
525 * RCU implementation internal declarations: 513 * RCU implementation internal declarations:
526 */ 514 */
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
546 534
547/* Forward declarations for rcutree_plugin.h */ 535/* Forward declarations for rcutree_plugin.h */
548static void rcu_bootup_announce(void); 536static void rcu_bootup_announce(void);
549long rcu_batches_completed(void);
550static void rcu_preempt_note_context_switch(void); 537static void rcu_preempt_note_context_switch(void);
551static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); 538static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
552#ifdef CONFIG_HOTPLUG_CPU 539#ifdef CONFIG_HOTPLUG_CPU
553static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, 540static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
554 unsigned long flags);
555#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 541#endif /* #ifdef CONFIG_HOTPLUG_CPU */
556static void rcu_print_detail_task_stall(struct rcu_state *rsp); 542static void rcu_print_detail_task_stall(struct rcu_state *rsp);
557static int rcu_print_task_stall(struct rcu_node *rnp); 543static int rcu_print_task_stall(struct rcu_node *rnp);
558static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 544static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
559#ifdef CONFIG_HOTPLUG_CPU
560static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
561 struct rcu_node *rnp,
562 struct rcu_data *rdp);
563#endif /* #ifdef CONFIG_HOTPLUG_CPU */
564static void rcu_preempt_check_callbacks(void); 545static void rcu_preempt_check_callbacks(void);
565void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 546void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
566#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
567static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
568 bool wake);
569#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
570static void __init __rcu_init_preempt(void); 547static void __init __rcu_init_preempt(void);
571static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 548static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
572static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); 549static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void);
622#endif /* #ifndef RCU_TREE_NONCORE */ 599#endif /* #ifndef RCU_TREE_NONCORE */
623 600
624#ifdef CONFIG_RCU_TRACE 601#ifdef CONFIG_RCU_TRACE
625#ifdef CONFIG_RCU_NOCB_CPU 602/* Read out queue lengths for tracing. */
626/* Sum up queue lengths for tracing. */
627static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll) 603static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
628{ 604{
629 *ql = atomic_long_read(&rdp->nocb_q_count) + 605#ifdef CONFIG_RCU_NOCB_CPU
630 rdp->nocb_p_count + 606 *ql = atomic_long_read(&rdp->nocb_q_count);
631 atomic_long_read(&rdp->nocb_follower_count) + 607 *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
632 rdp->nocb_p_count + rdp->nocb_gp_count;
633 *qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
634 rdp->nocb_p_count_lazy +
635 atomic_long_read(&rdp->nocb_follower_count_lazy) +
636 rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
637}
638#else /* #ifdef CONFIG_RCU_NOCB_CPU */ 608#else /* #ifdef CONFIG_RCU_NOCB_CPU */
639static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
640{
641 *ql = 0; 609 *ql = 0;
642 *qll = 0; 610 *qll = 0;
643}
644#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ 611#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
612}
645#endif /* #ifdef CONFIG_RCU_TRACE */ 613#endif /* #ifdef CONFIG_RCU_TRACE */
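
Net effect of the tree.h hunks: the per-stage no-CBs counters (nocb_follower_count, nocb_p_count, nocb_gp_count, and their _lazy twins) collapse into the single nocb_q_count/nocb_q_count_lazy pair, and the tracing helper keeps one definition with the #ifdef moved inside rather than two full copies. As it reads after the patch:

	static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
	{
	#ifdef CONFIG_RCU_NOCB_CPU
		*ql = atomic_long_read(&rdp->nocb_q_count);
		*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
	#else /* #ifdef CONFIG_RCU_NOCB_CPU */
		*ql = 0;
		*qll = 0;
	#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
	}
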
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3ec85cb5d544..2e850a51bb8f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -34,10 +34,6 @@
34 34
35#include "../locking/rtmutex_common.h" 35#include "../locking/rtmutex_common.h"
36 36
37/* rcuc/rcub kthread realtime priority */
38static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
39module_param(kthread_prio, int, 0644);
40
41/* 37/*
42 * Control variables for per-CPU and per-rcu_node kthreads. These 38 * Control variables for per-CPU and per-rcu_node kthreads. These
43 * handle all flavors of RCU. 39 * handle all flavors of RCU.
@@ -103,6 +99,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
103static struct rcu_state *rcu_state_p = &rcu_preempt_state; 99static struct rcu_state *rcu_state_p = &rcu_preempt_state;
104 100
105static int rcu_preempted_readers_exp(struct rcu_node *rnp); 101static int rcu_preempted_readers_exp(struct rcu_node *rnp);
102static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
103 bool wake);
106 104
107/* 105/*
108 * Tell them what RCU they are running. 106 * Tell them what RCU they are running.
@@ -114,25 +112,6 @@ static void __init rcu_bootup_announce(void)
114} 112}
115 113
116/* 114/*
117 * Return the number of RCU-preempt batches processed thus far
118 * for debug and statistics.
119 */
120static long rcu_batches_completed_preempt(void)
121{
122 return rcu_preempt_state.completed;
123}
124EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
125
126/*
127 * Return the number of RCU batches processed thus far for debug & stats.
128 */
129long rcu_batches_completed(void)
130{
131 return rcu_batches_completed_preempt();
132}
133EXPORT_SYMBOL_GPL(rcu_batches_completed);
134
135/*
136 * Record a preemptible-RCU quiescent state for the specified CPU. Note 115 * Record a preemptible-RCU quiescent state for the specified CPU. Note
137 * that this just means that the task currently running on the CPU is 116 * that this just means that the task currently running on the CPU is
138 * not in a quiescent state. There might be any number of tasks blocked 117 * not in a quiescent state. There might be any number of tasks blocked
@@ -307,15 +286,25 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
307} 286}
308 287
309/* 288/*
289 * Return true if the specified rcu_node structure has tasks that were
290 * preempted within an RCU read-side critical section.
291 */
292static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
293{
294 return !list_empty(&rnp->blkd_tasks);
295}
296
297/*
310 * Handle special cases during rcu_read_unlock(), such as needing to 298 * Handle special cases during rcu_read_unlock(), such as needing to
311 * notify RCU core processing or task having blocked during the RCU 299 * notify RCU core processing or task having blocked during the RCU
312 * read-side critical section. 300 * read-side critical section.
313 */ 301 */
314void rcu_read_unlock_special(struct task_struct *t) 302void rcu_read_unlock_special(struct task_struct *t)
315{ 303{
316 int empty; 304 bool empty;
317 int empty_exp; 305 bool empty_exp;
318 int empty_exp_now; 306 bool empty_norm;
307 bool empty_exp_now;
319 unsigned long flags; 308 unsigned long flags;
320 struct list_head *np; 309 struct list_head *np;
321#ifdef CONFIG_RCU_BOOST 310#ifdef CONFIG_RCU_BOOST
@@ -367,7 +356,8 @@ void rcu_read_unlock_special(struct task_struct *t)
367 break; 356 break;
368 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 357 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
369 } 358 }
370 empty = !rcu_preempt_blocked_readers_cgp(rnp); 359 empty = !rcu_preempt_has_tasks(rnp);
360 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
371 empty_exp = !rcu_preempted_readers_exp(rnp); 361 empty_exp = !rcu_preempted_readers_exp(rnp);
372 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 362 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
373 np = rcu_next_node_entry(t, rnp); 363 np = rcu_next_node_entry(t, rnp);
@@ -387,13 +377,21 @@ void rcu_read_unlock_special(struct task_struct *t)
387#endif /* #ifdef CONFIG_RCU_BOOST */ 377#endif /* #ifdef CONFIG_RCU_BOOST */
388 378
389 /* 379 /*
380 * If this was the last task on the list, go see if we
381 * need to propagate ->qsmaskinit bit clearing up the
382 * rcu_node tree.
383 */
384 if (!empty && !rcu_preempt_has_tasks(rnp))
385 rcu_cleanup_dead_rnp(rnp);
386
387 /*
390 * If this was the last task on the current list, and if 388 * If this was the last task on the current list, and if
391 * we aren't waiting on any CPUs, report the quiescent state. 389 * we aren't waiting on any CPUs, report the quiescent state.
392 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, 390 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
393 * so we must take a snapshot of the expedited state. 391 * so we must take a snapshot of the expedited state.
394 */ 392 */
395 empty_exp_now = !rcu_preempted_readers_exp(rnp); 393 empty_exp_now = !rcu_preempted_readers_exp(rnp);
396 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { 394 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
397 trace_rcu_quiescent_state_report(TPS("preempt_rcu"), 395 trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
398 rnp->gpnum, 396 rnp->gpnum,
399 0, rnp->qsmask, 397 0, rnp->qsmask,
@@ -408,10 +406,8 @@ void rcu_read_unlock_special(struct task_struct *t)
408 406
409#ifdef CONFIG_RCU_BOOST 407#ifdef CONFIG_RCU_BOOST
410 /* Unboost if we were boosted. */ 408 /* Unboost if we were boosted. */
411 if (drop_boost_mutex) { 409 if (drop_boost_mutex)
412 rt_mutex_unlock(&rnp->boost_mtx); 410 rt_mutex_unlock(&rnp->boost_mtx);
413 complete(&rnp->boost_completion);
414 }
415#endif /* #ifdef CONFIG_RCU_BOOST */ 411#endif /* #ifdef CONFIG_RCU_BOOST */
416 412
417 /* 413 /*
@@ -519,99 +515,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
519static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 515static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
520{ 516{
521 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); 517 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
522 if (!list_empty(&rnp->blkd_tasks)) 518 if (rcu_preempt_has_tasks(rnp))
523 rnp->gp_tasks = rnp->blkd_tasks.next; 519 rnp->gp_tasks = rnp->blkd_tasks.next;
524 WARN_ON_ONCE(rnp->qsmask); 520 WARN_ON_ONCE(rnp->qsmask);
525} 521}
526 522
527#ifdef CONFIG_HOTPLUG_CPU 523#ifdef CONFIG_HOTPLUG_CPU
528 524
529/*
530 * Handle tasklist migration for case in which all CPUs covered by the
531 * specified rcu_node have gone offline. Move them up to the root
532 * rcu_node. The reason for not just moving them to the immediate
533 * parent is to remove the need for rcu_read_unlock_special() to
534 * make more than two attempts to acquire the target rcu_node's lock.
535 * Returns true if there were tasks blocking the current RCU grace
536 * period.
537 *
538 * Returns 1 if there was previously a task blocking the current grace
539 * period on the specified rcu_node structure.
540 *
541 * The caller must hold rnp->lock with irqs disabled.
542 */
543static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
544 struct rcu_node *rnp,
545 struct rcu_data *rdp)
546{
547 struct list_head *lp;
548 struct list_head *lp_root;
549 int retval = 0;
550 struct rcu_node *rnp_root = rcu_get_root(rsp);
551 struct task_struct *t;
552
553 if (rnp == rnp_root) {
554 WARN_ONCE(1, "Last CPU thought to be offlined?");
555 return 0; /* Shouldn't happen: at least one CPU online. */
556 }
557
558 /* If we are on an internal node, complain bitterly. */
559 WARN_ON_ONCE(rnp != rdp->mynode);
560
561 /*
562 * Move tasks up to root rcu_node. Don't try to get fancy for
563 * this corner-case operation -- just put this node's tasks
564 * at the head of the root node's list, and update the root node's
565 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
566 * if non-NULL. This might result in waiting for more tasks than
567 * absolutely necessary, but this is a good performance/complexity
568 * tradeoff.
569 */
570 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
571 retval |= RCU_OFL_TASKS_NORM_GP;
572 if (rcu_preempted_readers_exp(rnp))
573 retval |= RCU_OFL_TASKS_EXP_GP;
574 lp = &rnp->blkd_tasks;
575 lp_root = &rnp_root->blkd_tasks;
576 while (!list_empty(lp)) {
577 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
578 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
579 smp_mb__after_unlock_lock();
580 list_del(&t->rcu_node_entry);
581 t->rcu_blocked_node = rnp_root;
582 list_add(&t->rcu_node_entry, lp_root);
583 if (&t->rcu_node_entry == rnp->gp_tasks)
584 rnp_root->gp_tasks = rnp->gp_tasks;
585 if (&t->rcu_node_entry == rnp->exp_tasks)
586 rnp_root->exp_tasks = rnp->exp_tasks;
587#ifdef CONFIG_RCU_BOOST
588 if (&t->rcu_node_entry == rnp->boost_tasks)
589 rnp_root->boost_tasks = rnp->boost_tasks;
590#endif /* #ifdef CONFIG_RCU_BOOST */
591 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
592 }
593
594 rnp->gp_tasks = NULL;
595 rnp->exp_tasks = NULL;
596#ifdef CONFIG_RCU_BOOST
597 rnp->boost_tasks = NULL;
598 /*
599 * In case root is being boosted and leaf was not. Make sure
600 * that we boost the tasks blocking the current grace period
601 * in this case.
602 */
603 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
604 smp_mb__after_unlock_lock();
605 if (rnp_root->boost_tasks != NULL &&
606 rnp_root->boost_tasks != rnp_root->gp_tasks &&
607 rnp_root->boost_tasks != rnp_root->exp_tasks)
608 rnp_root->boost_tasks = rnp_root->gp_tasks;
609 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
610#endif /* #ifdef CONFIG_RCU_BOOST */
611
612 return retval;
613}
614
615#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 525#endif /* #ifdef CONFIG_HOTPLUG_CPU */
616 526
617/* 527/*
@@ -771,7 +681,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
771 681
772 raw_spin_lock_irqsave(&rnp->lock, flags); 682 raw_spin_lock_irqsave(&rnp->lock, flags);
773 smp_mb__after_unlock_lock(); 683 smp_mb__after_unlock_lock();
774 if (list_empty(&rnp->blkd_tasks)) { 684 if (!rcu_preempt_has_tasks(rnp)) {
775 raw_spin_unlock_irqrestore(&rnp->lock, flags); 685 raw_spin_unlock_irqrestore(&rnp->lock, flags);
776 } else { 686 } else {
777 rnp->exp_tasks = rnp->blkd_tasks.next; 687 rnp->exp_tasks = rnp->blkd_tasks.next;
@@ -933,15 +843,6 @@ static void __init rcu_bootup_announce(void)
933} 843}
934 844
935/* 845/*
936 * Return the number of RCU batches processed thus far for debug & stats.
937 */
938long rcu_batches_completed(void)
939{
940 return rcu_batches_completed_sched();
941}
942EXPORT_SYMBOL_GPL(rcu_batches_completed);
943
944/*
945 * Because preemptible RCU does not exist, we never have to check for 846 * Because preemptible RCU does not exist, we never have to check for
946 * CPUs being in quiescent states. 847 * CPUs being in quiescent states.
947 */ 848 */
@@ -960,11 +861,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
960 861
961#ifdef CONFIG_HOTPLUG_CPU 862#ifdef CONFIG_HOTPLUG_CPU
962 863
963/* Because preemptible RCU does not exist, no quieting of tasks. */ 864/*
964static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 865 * Because there is no preemptible RCU, there can be no readers blocked.
965 __releases(rnp->lock) 866 */
867static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
966{ 868{
967 raw_spin_unlock_irqrestore(&rnp->lock, flags); 869 return false;
968} 870}
969 871
970#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 872#endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -996,23 +898,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
996 WARN_ON_ONCE(rnp->qsmask); 898 WARN_ON_ONCE(rnp->qsmask);
997} 899}
998 900
999#ifdef CONFIG_HOTPLUG_CPU
1000
1001/*
1002 * Because preemptible RCU does not exist, it never needs to migrate
1003 * tasks that were blocked within RCU read-side critical sections, and
1004 * such non-existent tasks cannot possibly have been blocking the current
1005 * grace period.
1006 */
1007static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1008 struct rcu_node *rnp,
1009 struct rcu_data *rdp)
1010{
1011 return 0;
1012}
1013
1014#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1015
1016/* 901/*
1017 * Because preemptible RCU does not exist, it never has any callbacks 902 * Because preemptible RCU does not exist, it never has any callbacks
1018 * to check. 903 * to check.
@@ -1031,20 +916,6 @@ void synchronize_rcu_expedited(void)
1031} 916}
1032EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 917EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1033 918
1034#ifdef CONFIG_HOTPLUG_CPU
1035
1036/*
1037 * Because preemptible RCU does not exist, there is never any need to
1038 * report on tasks preempted in RCU read-side critical sections during
1039 * expedited RCU grace periods.
1040 */
1041static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1042 bool wake)
1043{
1044}
1045
1046#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1047
1048/* 919/*
1049 * Because preemptible RCU does not exist, rcu_barrier() is just 920 * Because preemptible RCU does not exist, rcu_barrier() is just
1050 * another name for rcu_barrier_sched(). 921 * another name for rcu_barrier_sched().
@@ -1080,7 +951,7 @@ void exit_rcu(void)
1080 951
1081static void rcu_initiate_boost_trace(struct rcu_node *rnp) 952static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1082{ 953{
1083 if (list_empty(&rnp->blkd_tasks)) 954 if (!rcu_preempt_has_tasks(rnp))
1084 rnp->n_balk_blkd_tasks++; 955 rnp->n_balk_blkd_tasks++;
1085 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) 956 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1086 rnp->n_balk_exp_gp_tasks++; 957 rnp->n_balk_exp_gp_tasks++;
@@ -1127,7 +998,8 @@ static int rcu_boost(struct rcu_node *rnp)
1127 struct task_struct *t; 998 struct task_struct *t;
1128 struct list_head *tb; 999 struct list_head *tb;
1129 1000
1130 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) 1001 if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
1002 ACCESS_ONCE(rnp->boost_tasks) == NULL)
1131 return 0; /* Nothing left to boost. */ 1003 return 0; /* Nothing left to boost. */
1132 1004
1133 raw_spin_lock_irqsave(&rnp->lock, flags); 1005 raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1175,15 +1047,11 @@ static int rcu_boost(struct rcu_node *rnp)
1175 */ 1047 */
1176 t = container_of(tb, struct task_struct, rcu_node_entry); 1048 t = container_of(tb, struct task_struct, rcu_node_entry);
1177 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); 1049 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1178 init_completion(&rnp->boost_completion);
1179 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1050 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1180 /* Lock only for side effect: boosts task t's priority. */ 1051 /* Lock only for side effect: boosts task t's priority. */
1181 rt_mutex_lock(&rnp->boost_mtx); 1052 rt_mutex_lock(&rnp->boost_mtx);
1182 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ 1053 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
1183 1054
1184 /* Wait for boostee to be done w/boost_mtx before reinitializing. */
1185 wait_for_completion(&rnp->boost_completion);
1186
1187 return ACCESS_ONCE(rnp->exp_tasks) != NULL || 1055 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1188 ACCESS_ONCE(rnp->boost_tasks) != NULL; 1056 ACCESS_ONCE(rnp->boost_tasks) != NULL;
1189} 1057}
@@ -1416,12 +1284,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1416 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) 1284 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1417 if ((mask & 0x1) && cpu != outgoingcpu) 1285 if ((mask & 0x1) && cpu != outgoingcpu)
1418 cpumask_set_cpu(cpu, cm); 1286 cpumask_set_cpu(cpu, cm);
1419 if (cpumask_weight(cm) == 0) { 1287 if (cpumask_weight(cm) == 0)
1420 cpumask_setall(cm); 1288 cpumask_setall(cm);
1421 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1422 cpumask_clear_cpu(cpu, cm);
1423 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1424 }
1425 set_cpus_allowed_ptr(t, cm); 1289 set_cpus_allowed_ptr(t, cm);
1426 free_cpumask_var(cm); 1290 free_cpumask_var(cm);
1427} 1291}
@@ -1446,12 +1310,8 @@ static void __init rcu_spawn_boost_kthreads(void)
1446 for_each_possible_cpu(cpu) 1310 for_each_possible_cpu(cpu)
1447 per_cpu(rcu_cpu_has_work, cpu) = 0; 1311 per_cpu(rcu_cpu_has_work, cpu) = 0;
1448 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); 1312 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1449 rnp = rcu_get_root(rcu_state_p); 1313 rcu_for_each_leaf_node(rcu_state_p, rnp)
1450 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); 1314 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1451 if (NUM_RCU_NODES > 1) {
1452 rcu_for_each_leaf_node(rcu_state_p, rnp)
1453 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1454 }
1455} 1315}
1456 1316
1457static void rcu_prepare_kthreads(int cpu) 1317static void rcu_prepare_kthreads(int cpu)
@@ -1605,7 +1465,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
1605 * completed since we last checked and there are 1465 * completed since we last checked and there are
1606 * callbacks not yet ready to invoke. 1466 * callbacks not yet ready to invoke.
1607 */ 1467 */
1608 if (rdp->completed != rnp->completed && 1468 if ((rdp->completed != rnp->completed ||
1469 unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
1609 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]) 1470 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1610 note_gp_changes(rsp, rdp); 1471 note_gp_changes(rsp, rdp);
1611 1472
@@ -1898,11 +1759,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1898 ticks_value = rsp->gpnum - rdp->gpnum; 1759 ticks_value = rsp->gpnum - rdp->gpnum;
1899 } 1760 }
1900 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1761 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1901 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", 1762 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
1902 cpu, ticks_value, ticks_title, 1763 cpu, ticks_value, ticks_title,
1903 atomic_read(&rdtp->dynticks) & 0xfff, 1764 atomic_read(&rdtp->dynticks) & 0xfff,
1904 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, 1765 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1905 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 1766 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1767 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
1906 fast_no_hz); 1768 fast_no_hz);
1907} 1769}
1908 1770
@@ -2056,9 +1918,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
2056static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) 1918static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2057{ 1919{
2058 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1920 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1921 unsigned long ret;
1922#ifdef CONFIG_PROVE_RCU
2059 struct rcu_head *rhp; 1923 struct rcu_head *rhp;
1924#endif /* #ifdef CONFIG_PROVE_RCU */
2060 1925
2061 /* No-CBs CPUs might have callbacks on any of three lists. */ 1926 /*
1927 * Check count of all no-CBs callbacks awaiting invocation.
1928 * There needs to be a barrier before this function is called,
1929 * but associated with a prior determination that no more
1930 * callbacks would be posted. In the worst case, the first
1931 * barrier in _rcu_barrier() suffices (but the caller cannot
1932 * necessarily rely on this, not a substitute for the caller
1933 * getting the concurrency design right!). There must also be
1934 * a barrier between the following load an posting of a callback
1935 * (if a callback is in fact needed). This is associated with an
1936 * atomic_inc() in the caller.
1937 */
1938 ret = atomic_long_read(&rdp->nocb_q_count);
1939
1940#ifdef CONFIG_PROVE_RCU
2062 rhp = ACCESS_ONCE(rdp->nocb_head); 1941 rhp = ACCESS_ONCE(rdp->nocb_head);
2063 if (!rhp) 1942 if (!rhp)
2064 rhp = ACCESS_ONCE(rdp->nocb_gp_head); 1943 rhp = ACCESS_ONCE(rdp->nocb_gp_head);
@@ -2072,8 +1951,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2072 cpu, rhp->func); 1951 cpu, rhp->func);
2073 WARN_ON_ONCE(1); 1952 WARN_ON_ONCE(1);
2074 } 1953 }
1954#endif /* #ifdef CONFIG_PROVE_RCU */
2075 1955
2076 return !!rhp; 1956 return !!ret;
2077} 1957}
2078 1958
2079/* 1959/*
@@ -2095,9 +1975,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2095 struct task_struct *t; 1975 struct task_struct *t;
2096 1976
2097 /* Enqueue the callback on the nocb list and update counts. */ 1977 /* Enqueue the callback on the nocb list and update counts. */
1978 atomic_long_add(rhcount, &rdp->nocb_q_count);
1979 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
2098 old_rhpp = xchg(&rdp->nocb_tail, rhtp); 1980 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2099 ACCESS_ONCE(*old_rhpp) = rhp; 1981 ACCESS_ONCE(*old_rhpp) = rhp;
2100 atomic_long_add(rhcount, &rdp->nocb_q_count);
2101 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); 1982 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2102 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */ 1983 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
2103 1984
@@ -2288,9 +2169,6 @@ wait_again:
2288 /* Move callbacks to wait-for-GP list, which is empty. */ 2169 /* Move callbacks to wait-for-GP list, which is empty. */
2289 ACCESS_ONCE(rdp->nocb_head) = NULL; 2170 ACCESS_ONCE(rdp->nocb_head) = NULL;
2290 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); 2171 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2291 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
2292 rdp->nocb_gp_count_lazy =
2293 atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2294 gotcbs = true; 2172 gotcbs = true;
2295 } 2173 }
2296 2174
@@ -2338,9 +2216,6 @@ wait_again:
2338 /* Append callbacks to follower's "done" list. */ 2216 /* Append callbacks to follower's "done" list. */
2339 tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail); 2217 tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
2340 *tail = rdp->nocb_gp_head; 2218 *tail = rdp->nocb_gp_head;
2341 atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
2342 atomic_long_add(rdp->nocb_gp_count_lazy,
2343 &rdp->nocb_follower_count_lazy);
2344 smp_mb__after_atomic(); /* Store *tail before wakeup. */ 2219 smp_mb__after_atomic(); /* Store *tail before wakeup. */
2345 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) { 2220 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2346 /* 2221 /*
@@ -2415,13 +2290,11 @@ static int rcu_nocb_kthread(void *arg)
2415 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); 2290 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
2416 ACCESS_ONCE(rdp->nocb_follower_head) = NULL; 2291 ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
2417 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); 2292 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
2418 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
2419 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
2420 rdp->nocb_p_count += c;
2421 rdp->nocb_p_count_lazy += cl;
2422 2293
2423 /* Each pass through the following loop invokes a callback. */ 2294 /* Each pass through the following loop invokes a callback. */
2424 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1); 2295 trace_rcu_batch_start(rdp->rsp->name,
2296 atomic_long_read(&rdp->nocb_q_count_lazy),
2297 atomic_long_read(&rdp->nocb_q_count), -1);
2425 c = cl = 0; 2298 c = cl = 0;
2426 while (list) { 2299 while (list) {
2427 next = list->next; 2300 next = list->next;
@@ -2443,9 +2316,9 @@ static int rcu_nocb_kthread(void *arg)
2443 list = next; 2316 list = next;
2444 } 2317 }
2445 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); 2318 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2446 ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c; 2319 smp_mb__before_atomic(); /* _add after CB invocation. */
2447 ACCESS_ONCE(rdp->nocb_p_count_lazy) = 2320 atomic_long_add(-c, &rdp->nocb_q_count);
2448 rdp->nocb_p_count_lazy - cl; 2321 atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
2449 rdp->n_nocbs_invoked += c; 2322 rdp->n_nocbs_invoked += c;
2450 } 2323 }
2451 return 0; 2324 return 0;
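
Two ordering points carry the counter consolidation in tree_plugin.h. On enqueue, the count is added before the tail xchg publishes the callback, so a concurrent rcu_barrier() that can see the callback also sees a nonzero count; on the invocation side, the subtraction happens only after the callbacks have run, with a barrier in between. Pulled out of the hunks above:

	/* __call_rcu_nocb_enqueue(): count before publish. */
	atomic_long_add(rhcount, &rdp->nocb_q_count);
	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
	old_rhpp = xchg(&rdp->nocb_tail, rhtp);

	/* rcu_nocb_kthread(): invoke first, then subtract. */
	smp_mb__before_atomic();	/* _add after CB invocation. */
	atomic_long_add(-c, &rdp->nocb_q_count);
	atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
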
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 5cdc62e1beeb..fbb6240509ea 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -46,6 +46,8 @@
46#define RCU_TREE_NONCORE 46#define RCU_TREE_NONCORE
47#include "tree.h" 47#include "tree.h"
48 48
49DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
50
49static int r_open(struct inode *inode, struct file *file, 51static int r_open(struct inode *inode, struct file *file,
50 const struct seq_operations *op) 52 const struct seq_operations *op)
51{ 53{
@@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
115 117
116 if (!rdp->beenonline) 118 if (!rdp->beenonline)
117 return; 119 return;
118 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d", 120 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d",
119 rdp->cpu, 121 rdp->cpu,
120 cpu_is_offline(rdp->cpu) ? '!' : ' ', 122 cpu_is_offline(rdp->cpu) ? '!' : ' ',
121 ulong2long(rdp->completed), ulong2long(rdp->gpnum), 123 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
122 rdp->passed_quiesce, rdp->qs_pending); 124 rdp->passed_quiesce,
125 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
126 rdp->qs_pending);
123 seq_printf(m, " dt=%d/%llx/%d df=%lu", 127 seq_printf(m, " dt=%d/%llx/%d df=%lu",
124 atomic_read(&rdp->dynticks->dynticks), 128 atomic_read(&rdp->dynticks->dynticks),
125 rdp->dynticks->dynticks_nesting, 129 rdp->dynticks->dynticks_nesting,
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 607f852b4d04..7052d3fd4e7b 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -268,6 +268,15 @@ bool try_wait_for_completion(struct completion *x)
268 unsigned long flags; 268 unsigned long flags;
269 int ret = 1; 269 int ret = 1;
270 270
271 /*
272 * Since x->done will need to be locked only
273 * in the non-blocking case, we check x->done
274 * first without taking the lock so we can
275 * return early in the blocking case.
276 */
277 if (!ACCESS_ONCE(x->done))
278 return 0;
279
271 spin_lock_irqsave(&x->wait.lock, flags); 280 spin_lock_irqsave(&x->wait.lock, flags);
272 if (!x->done) 281 if (!x->done)
273 ret = 0; 282 ret = 0;
@@ -288,13 +297,6 @@ EXPORT_SYMBOL(try_wait_for_completion);
288 */ 297 */
289bool completion_done(struct completion *x) 298bool completion_done(struct completion *x)
290{ 299{
291 unsigned long flags; 300 return !!ACCESS_ONCE(x->done);
292 int ret = 1;
293
294 spin_lock_irqsave(&x->wait.lock, flags);
295 if (!x->done)
296 ret = 0;
297 spin_unlock_irqrestore(&x->wait.lock, flags);
298 return ret;
299} 301}
300EXPORT_SYMBOL(completion_done); 302EXPORT_SYMBOL(completion_done);
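
Both completion.c changes rest on the same observation: a racy read of x->done is safe when the answer is only advisory. A minimal userspace analog of the lockless fast path (hypothetical names; C11 atomics standing in for ACCESS_ONCE()):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct completion_like {
		atomic_uint done;	/* stand-in for x->done */
	};

	/* Lockless check, like the rewritten completion_done() above. */
	static bool completion_like_done(struct completion_like *x)
	{
		return atomic_load_explicit(&x->done, memory_order_relaxed) != 0;
	}

	int main(void)
	{
		struct completion_like c = { .done = 0 };
		printf("%d\n", completion_like_done(&c));	/* 0 */
		atomic_fetch_add(&c.done, 1);			/* complete() */
		printf("%d\n", completion_like_done(&c));	/* 1 */
		return 0;
	}
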
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b269e8a2a516..1f37fe7f77a4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1089,7 +1089,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1089 if (p->sched_class->migrate_task_rq) 1089 if (p->sched_class->migrate_task_rq)
1090 p->sched_class->migrate_task_rq(p, new_cpu); 1090 p->sched_class->migrate_task_rq(p, new_cpu);
1091 p->se.nr_migrations++; 1091 p->se.nr_migrations++;
1092 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); 1092 perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
1093 } 1093 }
1094 1094
1095 __set_task_cpu(p, new_cpu); 1095 __set_task_cpu(p, new_cpu);
@@ -7346,13 +7346,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
7346 * since we will exit with TASK_RUNNING make sure we enter with it, 7346 * since we will exit with TASK_RUNNING make sure we enter with it,
7347 * otherwise we will destroy state. 7347 * otherwise we will destroy state.
7348 */ 7348 */
7349 if (WARN_ONCE(current->state != TASK_RUNNING, 7349 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
7350 "do not call blocking ops when !TASK_RUNNING; " 7350 "do not call blocking ops when !TASK_RUNNING; "
7351 "state=%lx set at [<%p>] %pS\n", 7351 "state=%lx set at [<%p>] %pS\n",
7352 current->state, 7352 current->state,
7353 (void *)current->task_state_change, 7353 (void *)current->task_state_change,
7354 (void *)current->task_state_change)) 7354 (void *)current->task_state_change);
7355 __set_current_state(TASK_RUNNING);
7356 7355
7357 ___might_sleep(file, line, preempt_offset); 7356 ___might_sleep(file, line, preempt_offset);
7358} 7357}
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index f032fb5284e3..40190f28db35 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -280,6 +280,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
280 unsigned int cpu; 280 unsigned int cpu;
281 int ret = 0; 281 int ret = 0;
282 282
283 get_online_cpus();
283 mutex_lock(&smpboot_threads_lock); 284 mutex_lock(&smpboot_threads_lock);
284 for_each_online_cpu(cpu) { 285 for_each_online_cpu(cpu) {
285 ret = __smpboot_create_thread(plug_thread, cpu); 286 ret = __smpboot_create_thread(plug_thread, cpu);
@@ -292,6 +293,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
292 list_add(&plug_thread->list, &hotplug_threads); 293 list_add(&plug_thread->list, &hotplug_threads);
293out: 294out:
294 mutex_unlock(&smpboot_threads_lock); 295 mutex_unlock(&smpboot_threads_lock);
296 put_online_cpus();
295 return ret; 297 return ret;
296} 298}
297EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread); 299EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
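
The smpboot fix is the standard hotplug-exclusion bracket: hold off CPU hotplug for the whole for_each_online_cpu() walk so a CPU cannot come or go between thread creation and registration. Shape of the pattern (kernel idiom, not standalone code):

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		/* ... unbind and list_add on success ... */
	}
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
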
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 501baa9ac1be..479e4436f787 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -114,8 +114,12 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
114 trace_softirqs_off(ip); 114 trace_softirqs_off(ip);
115 raw_local_irq_restore(flags); 115 raw_local_irq_restore(flags);
116 116
117 if (preempt_count() == cnt) 117 if (preempt_count() == cnt) {
118#ifdef CONFIG_DEBUG_PREEMPT
119 current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
120#endif
118 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 121 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
122 }
119} 123}
120EXPORT_SYMBOL(__local_bh_disable_ip); 124EXPORT_SYMBOL(__local_bh_disable_ip);
121#endif /* CONFIG_TRACE_IRQFLAGS */ 125#endif /* CONFIG_TRACE_IRQFLAGS */
@@ -656,9 +660,8 @@ static void run_ksoftirqd(unsigned int cpu)
656 * in the task stack here. 660 * in the task stack here.
657 */ 661 */
658 __do_softirq(); 662 __do_softirq();
659 rcu_note_context_switch();
660 local_irq_enable(); 663 local_irq_enable();
661 cond_resched(); 664 cond_resched_rcu_qs();
662 return; 665 return;
663 } 666 }
664 local_irq_enable(); 667 local_irq_enable();
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 37e50aadd471..d8c724cda37b 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -122,7 +122,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
122 mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai); 122 mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
123 boot = ktime_add(mono, off_boot); 123 boot = ktime_add(mono, off_boot);
124 xtim = ktime_add(mono, off_real); 124 xtim = ktime_add(mono, off_real);
125 tai = ktime_add(xtim, off_tai); 125 tai = ktime_add(mono, off_tai);
126 126
127 base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; 127 base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
128 base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; 128 base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
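
The one-word hrtimer fix removes a double-applied offset: TAI time is mono + off_tai, but the old code derived it from xtim, which already includes off_real. With hypothetical offsets, in plain C:

	#include <stdio.h>

	int main(void)
	{
		long long mono = 1000, off_real = 500, off_tai = 35;
		long long xtim = mono + off_real;		/* realtime */
		printf("buggy tai = %lld\n", xtim + off_tai);	/* 1535: off_real leaks in */
		printf("fixed tai = %lld\n", mono + off_tai);	/* 1035 */
		return 0;
	}
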
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 4b9c114ee9de..6fa484de2ba1 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -261,7 +261,7 @@ void perf_trace_del(struct perf_event *p_event, int flags)
261} 261}
262 262
263void *perf_trace_buf_prepare(int size, unsigned short type, 263void *perf_trace_buf_prepare(int size, unsigned short type,
264 struct pt_regs *regs, int *rctxp) 264 struct pt_regs **regs, int *rctxp)
265{ 265{
266 struct trace_entry *entry; 266 struct trace_entry *entry;
267 unsigned long flags; 267 unsigned long flags;
@@ -280,6 +280,8 @@ void *perf_trace_buf_prepare(int size, unsigned short type,
280 if (*rctxp < 0) 280 if (*rctxp < 0)
281 return NULL; 281 return NULL;
282 282
283 if (regs)
284 *regs = this_cpu_ptr(&__perf_regs[*rctxp]);
283 raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]); 285 raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
284 286
285 /* zero the dead bytes from align to not leak stack to user */ 287 /* zero the dead bytes from align to not leak stack to user */
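
With the signature change, perf_trace_buf_prepare() can hand back a per-recursion-context pt_regs buffer through its third argument; callers that already hold their own regs (the kprobe, syscall, and uprobe hunks below) simply pass NULL. A hedged sketch of a caller that wants the buffer:

	struct pt_regs *regs;
	int rctx;
	void *entry;

	entry = perf_trace_buf_prepare(size, event_type, &regs, &rctx);
	if (!entry)
		return;
	/* regs now points at this_cpu_ptr(&__perf_regs[rctx]). */
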
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5edb518be345..296079ae6583 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1148,7 +1148,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1148 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1148 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1149 size -= sizeof(u32); 1149 size -= sizeof(u32);
1150 1150
1151 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1151 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1152 if (!entry) 1152 if (!entry)
1153 return; 1153 return;
1154 1154
@@ -1179,7 +1179,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1179 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1179 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1180 size -= sizeof(u32); 1180 size -= sizeof(u32);
1181 1181
1182 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1182 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1183 if (!entry) 1183 if (!entry)
1184 return; 1184 return;
1185 1185
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index c6ee36fcbf90..f97f6e3a676c 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -574,7 +574,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
574 size -= sizeof(u32); 574 size -= sizeof(u32);
575 575
576 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, 576 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
577 sys_data->enter_event->event.type, regs, &rctx); 577 sys_data->enter_event->event.type, NULL, &rctx);
578 if (!rec) 578 if (!rec)
579 return; 579 return;
580 580
@@ -647,7 +647,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
647 size -= sizeof(u32); 647 size -= sizeof(u32);
648 648
649 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, 649 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
650 sys_data->exit_event->event.type, regs, &rctx); 650 sys_data->exit_event->event.type, NULL, &rctx);
651 if (!rec) 651 if (!rec)
652 return; 652 return;
653 653
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 8520acc34b18..b11441321e7a 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1111,7 +1111,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
1111 if (hlist_empty(head)) 1111 if (hlist_empty(head))
1112 goto out; 1112 goto out;
1113 1113
1114 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1114 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1115 if (!entry) 1115 if (!entry)
1116 goto out; 1116 goto out;
1117 1117
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5f2ce616c046..a2ca213c71ca 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST
1215 tristate "torture tests for RCU" 1215 tristate "torture tests for RCU"
1216 depends on DEBUG_KERNEL 1216 depends on DEBUG_KERNEL
1217 select TORTURE_TEST 1217 select TORTURE_TEST
1218 select SRCU
1218 default n 1219 default n
1219 help 1220 help
1220 This option provides a kernel module that runs torture tests 1221 This option provides a kernel module that runs torture tests
@@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT
1257config RCU_CPU_STALL_INFO 1258config RCU_CPU_STALL_INFO
1258 bool "Print additional diagnostics on RCU CPU stall" 1259 bool "Print additional diagnostics on RCU CPU stall"
1259 depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL 1260 depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
1260 default n 1261 default y
1261 help 1262 help
1262 For each stalled CPU that is aware of the current RCU grace 1263 For each stalled CPU that is aware of the current RCU grace
1263 period, print out additional per-CPU diagnostic information 1264 period, print out additional per-CPU diagnostic information
diff --git a/lib/checksum.c b/lib/checksum.c
index 129775eb6de6..8b39e86dbab5 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
181EXPORT_SYMBOL(csum_partial_copy); 181EXPORT_SYMBOL(csum_partial_copy);
182 182
183#ifndef csum_tcpudp_nofold 183#ifndef csum_tcpudp_nofold
184static inline u32 from64to32(u64 x)
185{
186 /* add up 32-bit and 32-bit for 32+c bit */
187 x = (x & 0xffffffff) + (x >> 32);
188 /* add up carry.. */
189 x = (x & 0xffffffff) + (x >> 32);
190 return (u32)x;
191}
192
184__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 193__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
185 unsigned short len, 194 unsigned short len,
186 unsigned short proto, 195 unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
195#else 204#else
196 s += (proto + len) << 8; 205 s += (proto + len) << 8;
197#endif 206#endif
198 s += (s >> 32); 207 return (__force __wsum)from64to32(s);
199 return (__force __wsum)s;
200} 208}
201EXPORT_SYMBOL(csum_tcpudp_nofold); 209EXPORT_SYMBOL(csum_tcpudp_nofold);
202#endif 210#endif
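
The new from64to32() folds a 64-bit accumulator down to 32 bits; two folds are needed because the first addition can itself carry into bit 32. A standalone copy to see the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t from64to32(uint64_t x)
	{
		x = (x & 0xffffffff) + (x >> 32);	/* fold high half in */
		x = (x & 0xffffffff) + (x >> 32);	/* fold the new carry */
		return (uint32_t)x;
	}

	int main(void)
	{
		/* 0xffffffff00000001: the first fold yields 0x100000000,
		 * still over 32 bits; the second fold brings it to 1. */
		printf("%#x\n", from64to32(0xffffffff00000001ULL));
		return 0;
	}
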
diff --git a/mm/Kconfig b/mm/Kconfig
index 1d1ae6b078fd..4395b12869c8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -325,6 +325,7 @@ config VIRT_TO_BUS
325 325
326config MMU_NOTIFIER 326config MMU_NOTIFIER
327 bool 327 bool
328 select SRCU
328 329
329config KSM 330config KSM
330 bool "Enable KSM for page merging" 331 bool "Enable KSM for page merging"
diff --git a/mm/gup.c b/mm/gup.c
index a900759cc807..8dd50ce6326f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
296 return -ENOMEM; 296 return -ENOMEM;
297 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 297 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
298 return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; 298 return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
299 if (ret & VM_FAULT_SIGBUS) 299 if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
300 return -EFAULT; 300 return -EFAULT;
301 BUG(); 301 BUG();
302 } 302 }
@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
571 return -ENOMEM; 571 return -ENOMEM;
572 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 572 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
573 return -EHWPOISON; 573 return -EHWPOISON;
574 if (ret & VM_FAULT_SIGBUS) 574 if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
575 return -EFAULT; 575 return -EFAULT;
576 BUG(); 576 BUG();
577 } 577 }
diff --git a/mm/ksm.c b/mm/ksm.c
index d247efab5073..15647fb0394f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
376 else 376 else
377 ret = VM_FAULT_WRITE; 377 ret = VM_FAULT_WRITE;
378 put_page(page); 378 put_page(page);
379 } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); 379 } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
380 /* 380 /*
381 * We must loop because handle_mm_fault() may back out if there's 381 * We must loop because handle_mm_fault() may back out if there's
382 * any difficulty e.g. if pte accessed bit gets updated concurrently. 382 * any difficulty e.g. if pte accessed bit gets updated concurrently.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 683b4782019b..2f6893c2f01b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5773,7 +5773,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
5773 * mem_cgroup_migrate - migrate a charge to another page 5773 * mem_cgroup_migrate - migrate a charge to another page
5774 * @oldpage: currently charged page 5774 * @oldpage: currently charged page
5775 * @newpage: page to transfer the charge to 5775 * @newpage: page to transfer the charge to
5776 * @lrucare: both pages might be on the LRU already 5776 * @lrucare: either or both pages might be on the LRU already
5777 * 5777 *
5778 * Migrate the charge from @oldpage to @newpage. 5778 * Migrate the charge from @oldpage to @newpage.
5779 * 5779 *
diff --git a/mm/memory.c b/mm/memory.c
index 54f3a9b00956..2c3536cc6c63 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2632,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2632 2632
2633 /* Check if we need to add a guard page to the stack */ 2633 /* Check if we need to add a guard page to the stack */
2634 if (check_stack_guard_page(vma, address) < 0) 2634 if (check_stack_guard_page(vma, address) < 0)
2635 return VM_FAULT_SIGBUS; 2635 return VM_FAULT_SIGSEGV;
2636 2636
2637 /* Use the zero-page for reads */ 2637 /* Use the zero-page for reads */
2638 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { 2638 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
diff --git a/mm/nommu.c b/mm/nommu.c
index b51eadf6d952..28bd8c4dff6f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -59,6 +59,7 @@
59#endif 59#endif
60 60
61void *high_memory; 61void *high_memory;
62EXPORT_SYMBOL(high_memory);
62struct page *mem_map; 63struct page *mem_map;
63unsigned long max_mapnr; 64unsigned long max_mapnr;
64unsigned long highest_memmap_pfn; 65unsigned long highest_memmap_pfn;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index ad83195521f2..b264bda46e1b 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end,
199 */ 199 */
200 if ((vma->vm_start <= addr) && 200 if ((vma->vm_start <= addr) &&
201 (vma->vm_flags & VM_PFNMAP)) { 201 (vma->vm_flags & VM_PFNMAP)) {
202 next = vma->vm_end; 202 if (walk->pte_hole)
203 err = walk->pte_hole(addr, next, walk);
204 if (err)
205 break;
203 pgd = pgd_offset(walk->mm, next); 206 pgd = pgd_offset(walk->mm, next);
204 continue; 207 continue;
205 } 208 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 73ba1df7c8ba..993e6ba689cc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1013,7 +1013,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1013 */ 1013 */
1014 oldpage = newpage; 1014 oldpage = newpage;
1015 } else { 1015 } else {
1016 mem_cgroup_migrate(oldpage, newpage, false); 1016 mem_cgroup_migrate(oldpage, newpage, true);
1017 lru_cache_add_anon(newpage); 1017 lru_cache_add_anon(newpage);
1018 *pagep = newpage; 1018 *pagep = newpage;
1019 } 1019 }
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index b0330aecbf97..3244aead0926 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -265,22 +265,12 @@ out:
265 data[NFT_REG_VERDICT].verdict = NF_DROP; 265 data[NFT_REG_VERDICT].verdict = NF_DROP;
266} 266}
267 267
268static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) 268static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
269 const struct nft_expr *expr,
270 const struct nft_data **data)
269{ 271{
270 struct nft_base_chain *basechain; 272 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
271 273 (1 << NF_BR_LOCAL_IN));
272 if (chain->flags & NFT_BASE_CHAIN) {
273 basechain = nft_base_chain(chain);
274
275 switch (basechain->ops[0].hooknum) {
276 case NF_BR_PRE_ROUTING:
277 case NF_BR_LOCAL_IN:
278 break;
279 default:
280 return -EOPNOTSUPP;
281 }
282 }
283 return 0;
284} 274}
285 275
286static int nft_reject_bridge_init(const struct nft_ctx *ctx, 276static int nft_reject_bridge_init(const struct nft_ctx *ctx,
@@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
290 struct nft_reject *priv = nft_expr_priv(expr); 280 struct nft_reject *priv = nft_expr_priv(expr);
291 int icmp_code, err; 281 int icmp_code, err;
292 282
293 err = nft_reject_bridge_validate_hooks(ctx->chain); 283 err = nft_reject_bridge_validate(ctx, expr, NULL);
294 if (err < 0) 284 if (err < 0)
295 return err; 285 return err;
296 286
@@ -341,13 +331,6 @@ nla_put_failure:
341 return -1; 331 return -1;
342} 332}
343 333
344static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
345 const struct nft_expr *expr,
346 const struct nft_data **data)
347{
348 return nft_reject_bridge_validate_hooks(ctx->chain);
349}
350
351static struct nft_expr_type nft_reject_bridge_type; 334static struct nft_expr_type nft_reject_bridge_type;
352static const struct nft_expr_ops nft_reject_bridge_ops = { 335static const struct nft_expr_ops nft_reject_bridge_ops = {
353 .type = &nft_reject_bridge_type, 336 .type = &nft_reject_bridge_type,
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4589ff67bfa9..67a4a36febd1 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
470 ASSERT_RTNL(); 470 ASSERT_RTNL();
471 caifdev = netdev_priv(dev); 471 caifdev = netdev_priv(dev);
472 caif_netlink_parms(data, &caifdev->conn_req); 472 caif_netlink_parms(data, &caifdev->conn_req);
473 dev_net_set(caifdev->netdev, src_net);
474 473
475 ret = register_netdevice(dev); 474 ret = register_netdevice(dev);
476 if (ret) 475 if (ret)
diff --git a/net/core/dev.c b/net/core/dev.c
index 171420e75b03..7fe82929f509 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2352,7 +2352,6 @@ EXPORT_SYMBOL(skb_checksum_help);
2352 2352
2353__be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2353__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2354{ 2354{
2355 unsigned int vlan_depth = skb->mac_len;
2356 __be16 type = skb->protocol; 2355 __be16 type = skb->protocol;
2357 2356
2358 /* Tunnel gso handlers can set protocol to ethernet. */ 2357 /* Tunnel gso handlers can set protocol to ethernet. */
@@ -2366,35 +2365,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2366 type = eth->h_proto; 2365 type = eth->h_proto;
2367 } 2366 }
2368 2367
2369 /* if skb->protocol is 802.1Q/AD then the header should already be 2368 return __vlan_get_protocol(skb, type, depth);
2370 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2371 * ETH_HLEN otherwise
2372 */
2373 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2374 if (vlan_depth) {
2375 if (WARN_ON(vlan_depth < VLAN_HLEN))
2376 return 0;
2377 vlan_depth -= VLAN_HLEN;
2378 } else {
2379 vlan_depth = ETH_HLEN;
2380 }
2381 do {
2382 struct vlan_hdr *vh;
2383
2384 if (unlikely(!pskb_may_pull(skb,
2385 vlan_depth + VLAN_HLEN)))
2386 return 0;
2387
2388 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2389 type = vh->h_vlan_encapsulated_proto;
2390 vlan_depth += VLAN_HLEN;
2391 } while (type == htons(ETH_P_8021Q) ||
2392 type == htons(ETH_P_8021AD));
2393 }
2394
2395 *depth = vlan_depth;
2396
2397 return type;
2398} 2369}
2399 2370
2400/** 2371/**
@@ -5323,7 +5294,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
5323} 5294}
5324EXPORT_SYMBOL(netdev_upper_dev_unlink); 5295EXPORT_SYMBOL(netdev_upper_dev_unlink);
5325 5296
5326void netdev_adjacent_add_links(struct net_device *dev) 5297static void netdev_adjacent_add_links(struct net_device *dev)
5327{ 5298{
5328 struct netdev_adjacent *iter; 5299 struct netdev_adjacent *iter;
5329 5300
@@ -5348,7 +5319,7 @@ void netdev_adjacent_add_links(struct net_device *dev)
5348 } 5319 }
5349} 5320}
5350 5321
5351void netdev_adjacent_del_links(struct net_device *dev) 5322static void netdev_adjacent_del_links(struct net_device *dev)
5352{ 5323{
5353 struct netdev_adjacent *iter; 5324 struct netdev_adjacent *iter;
5354 5325
@@ -6656,7 +6627,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6656 if (!queue) 6627 if (!queue)
6657 return NULL; 6628 return NULL;
6658 netdev_init_one_queue(dev, queue, NULL); 6629 netdev_init_one_queue(dev, queue, NULL);
6659 queue->qdisc = &noop_qdisc; 6630 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
6660 queue->qdisc_sleeping = &noop_qdisc; 6631 queue->qdisc_sleeping = &noop_qdisc;
6661 rcu_assign_pointer(dev->ingress_queue, queue); 6632 rcu_assign_pointer(dev->ingress_queue, queue);
6662#endif 6633#endif
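
Worth noting on the last hunk: RCU_INIT_POINTER() suffices for queue->qdisc because &noop_qdisc is a pre-existing static object, so no ordering against its initialization is needed, while dev->ingress_queue keeps rcu_assign_pointer(), whose release semantics are what safely publish the freshly initialized queue:

	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);	/* static target */
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);	/* publish w/ barrier */
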
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9cf6fe9ddc0c..446cbaf81185 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2895,12 +2895,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
 		goto errout;
 	}
 
+	if (!skb->len)
+		goto errout;
+
 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
 	return 0;
 errout:
 	WARN_ON(err == -EMSGSIZE);
 	kfree_skb(skb);
-	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+	if (err)
+		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 	return err;
 }
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 515569ffde8a..589aafd01fc5 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
 	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
 			ds->index, ds->pd->sw_addr);
 	ds->slave_mii_bus->parent = ds->master_dev;
+	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 }
 
 
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3a83ce5efa80..787b3c294ce6 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
 	 * We now generate an ICMP HOST REDIRECT giving the route
 	 * we calculated.
 	 */
-	if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
+	if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+	    !skb_sec_path(skb))
 		ip_rt_send_redirect(skb);
 
 	skb->priority = rt_tos2priority(iph->tos);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b50861b22b6b..c373c0708d97 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1506,23 +1506,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
 /*
  *	Generic function to send a packet as reply to another packet.
  *	Used to send some TCP resets/acks so far.
- *
- *	Use a fake percpu inet socket to avoid false sharing and contention.
  */
-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
-	.sk = {
-		.__sk_common = {
-			.skc_refcnt = ATOMIC_INIT(1),
-		},
-		.sk_wmem_alloc	= ATOMIC_INIT(1),
-		.sk_allocation	= GFP_ATOMIC,
-		.sk_flags	= (1UL << SOCK_USE_WRITE_QUEUE),
-	},
-	.pmtudisc	= IP_PMTUDISC_WANT,
-	.uc_ttl		= -1,
-};
-
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 			   const struct ip_options *sopt,
 			   __be32 daddr, __be32 saddr,
 			   const struct ip_reply_arg *arg,
@@ -1532,9 +1517,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
 	struct ipcm_cookie ipc;
 	struct flowi4 fl4;
 	struct rtable *rt = skb_rtable(skb);
+	struct net *net = sock_net(sk);
 	struct sk_buff *nskb;
-	struct sock *sk;
-	struct inet_sock *inet;
 	int err;
 
 	if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1565,15 +1549,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
 	if (IS_ERR(rt))
 		return;
 
-	inet = &get_cpu_var(unicast_sock);
+	inet_sk(sk)->tos = arg->tos;
 
-	inet->tos = arg->tos;
-	sk = &inet->sk;
 	sk->sk_priority = skb->priority;
 	sk->sk_protocol = ip_hdr(skb)->protocol;
 	sk->sk_bound_dev_if = arg->bound_dev_if;
-	sock_net_set(sk, net);
-	__skb_queue_head_init(&sk->sk_write_queue);
 	sk->sk_sndbuf = sysctl_wmem_default;
 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1589,13 +1569,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
 		nskb->ip_summed = CHECKSUM_NONE;
-		skb_orphan(nskb);
 		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
 		ip_push_pending_frames(sk, &fl4);
 	}
 out:
-	put_cpu_var(unicast_sock);
-
 	ip_rt_put(rt);
 }
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c0d82f78d364..2a3720fb5a5f 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb)
 
 	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
 	if (sk != NULL) {
+		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
 		pr_debug("rcv on socket %p\n", sk);
-		ping_queue_rcv_skb(sk, skb_get(skb));
+		if (skb2)
+			ping_queue_rcv_skb(sk, skb2);
 		sock_put(sk);
 		return true;
 	}
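
The ping hunk replaces skb_get() (a reference-count bump on a buffer another path still owns) with skb_clone() (a private copy the receive queue can own outright, which may fail under memory pressure and is therefore NULL-checked). A rough userspace sketch of that ownership distinction, with invented buf helpers standing in for the skb API:

#include <stdlib.h>
#include <string.h>

struct buf {
	int refcnt;
	char data[64];
};

static struct buf *buf_get(struct buf *b)	/* ~ skb_get(): shares state */
{
	b->refcnt++;
	return b;
}

static struct buf *buf_clone(const struct buf *b)	/* ~ skb_clone() */
{
	struct buf *c = malloc(sizeof(*c));

	if (c) {
		memcpy(c->data, b->data, sizeof(c->data));
		c->refcnt = 1;	/* caller owns this copy outright */
	}
	return c;		/* may be NULL, so callers must check */
}

int main(void)
{
	struct buf orig = { .refcnt = 1, .data = "echo reply" };
	struct buf *queued = buf_clone(&orig);	/* safe to hand to a queue */

	if (queued)
		free(queued);
	(void)buf_get(&orig);	/* by contrast, this only shares the buffer */
	return 0;
}
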
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6a2155b02602..52e1f2bf0ca2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 	if (dst->dev->mtu < mtu)
 		return;
 
+	if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+		return;
+
 	if (mtu < ip_rt_min_pmtu)
 		mtu = ip_rt_min_pmtu;
 
@@ -1554,11 +1557,10 @@ static int __mkroute_input(struct sk_buff *skb,
 
 	do_cache = res->fi && !itag;
 	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+	    skb->protocol == htons(ETH_P_IP) &&
 	    (IN_DEV_SHARED_MEDIA(out_dev) ||
-	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
-		flags |= RTCF_DOREDIRECT;
-		do_cache = false;
-	}
+	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+		IPCB(skb)->flags |= IPSKB_DOREDIRECT;
 
 	if (skb->protocol != htons(ETH_P_IP)) {
 		/* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2305,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
 	if (rt->rt_flags & RTCF_NOTIFY)
 		r->rtm_flags |= RTM_F_NOTIFY;
+	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
+		r->rtm_flags |= RTCF_DOREDIRECT;
 
 	if (nla_put_be32(skb, RTA_DST, dst))
 		goto nla_put_failure;
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index bb395d46a389..c037644eafb7 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		tcp_slow_start(tp, acked);
 	else {
 		bictcp_update(ca, tp->snd_cwnd);
-		tcp_cong_avoid_ai(tp, ca->cnt);
+		tcp_cong_avoid_ai(tp, ca->cnt, 1);
 	}
 }
 
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 27ead0dd16bc..8670e68e2ce6 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,26 +291,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
  * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
  * returns the leftover acks to adjust cwnd in congestion avoidance mode.
  */
-void tcp_slow_start(struct tcp_sock *tp, u32 acked)
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
 	u32 cwnd = tp->snd_cwnd + acked;
 
 	if (cwnd > tp->snd_ssthresh)
 		cwnd = tp->snd_ssthresh + 1;
+	acked -= cwnd - tp->snd_cwnd;
 	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+
+	return acked;
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
-/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
+ * for every packet that was ACKed.
+ */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+	tp->snd_cwnd_cnt += acked;
 	if (tp->snd_cwnd_cnt >= w) {
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-		tp->snd_cwnd_cnt = 0;
-	} else {
-		tp->snd_cwnd_cnt++;
+		u32 delta = tp->snd_cwnd_cnt / w;
+
+		tp->snd_cwnd_cnt -= delta * w;
+		tp->snd_cwnd += delta;
 	}
+	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 
@@ -329,11 +335,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		return;
 
 	/* In "safe" area, increase. */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp, acked);
+	if (tp->snd_cwnd <= tp->snd_ssthresh) {
+		acked = tcp_slow_start(tp, acked);
+		if (!acked)
+			return;
+	}
 	/* In dangerous area, increase slowly. */
-	else
-		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
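
The two helpers above carry the arithmetic of the stretch-ACK handling: slow start now consumes only as many ACKed packets as it can use before crossing ssthresh and returns the leftover, and additive increase credits every remaining packet in one pass instead of one per call. A minimal standalone C sketch of the same math (the struct below is a reduced stand-in for the few tcp_sock fields involved, not the kernel type):

#include <stdint.h>
#include <stdio.h>

struct tp {
	uint32_t snd_cwnd;
	uint32_t snd_cwnd_cnt;
	uint32_t snd_ssthresh;
	uint32_t snd_cwnd_clamp;
};

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Grow cwnd toward ssthresh, return the ACKed packets left over. */
static uint32_t slow_start(struct tp *tp, uint32_t acked)
{
	uint32_t cwnd = tp->snd_cwnd + acked;

	if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh + 1;
	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min_u32(cwnd, tp->snd_cwnd_clamp);
	return acked;
}

/* Additive increase crediting every ACKed packet, as in the patch. */
static void cong_avoid_ai(struct tp *tp, uint32_t w, uint32_t acked)
{
	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		uint32_t delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min_u32(tp->snd_cwnd, tp->snd_cwnd_clamp);
}

int main(void)
{
	struct tp tp = { .snd_cwnd = 8, .snd_ssthresh = 10,
			 .snd_cwnd_clamp = 1000 };
	uint32_t acked = 8;		/* one stretch ACK covering 8 packets */

	acked = slow_start(&tp, acked);	/* cwnd 8 -> 11, 5 acks left over */
	if (acked)
		cong_avoid_ai(&tp, tp.snd_cwnd, acked);
	printf("cwnd=%u cnt=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);
	return 0;
}
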
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 6b6002416a73..4b276d1ed980 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -93,9 +93,7 @@ struct bictcp {
 	u32	epoch_start;	/* beginning of an epoch */
 	u32	ack_cnt;	/* number of acks */
 	u32	tcp_cwnd;	/* estimated tcp cwnd */
-#define ACK_RATIO_SHIFT	4
-#define ACK_RATIO_LIMIT	(32u << ACK_RATIO_SHIFT)
-	u16	delayed_ack;	/* estimate the ratio of Packets/ACKs << 4 */
+	u16	unused;
 	u8	sample_cnt;	/* number of samples to decide curr_rtt */
 	u8	found;		/* the exit point is found? */
 	u32	round_start;	/* beginning of each round */
@@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 	ca->bic_K = 0;
 	ca->delay_min = 0;
 	ca->epoch_start = 0;
-	ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
 	ca->ack_cnt = 0;
 	ca->tcp_cwnd = 0;
 	ca->found = 0;
@@ -205,23 +202,30 @@ static u32 cubic_root(u64 a)
 /*
  * Compute congestion window to use.
  */
-static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
 {
 	u32 delta, bic_target, max_cnt;
 	u64 offs, t;
 
-	ca->ack_cnt++;	/* count the number of ACKs */
+	ca->ack_cnt += acked;	/* count the number of ACKed packets */
 
 	if (ca->last_cwnd == cwnd &&
 	    (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
 		return;
 
+	/* The CUBIC function can update ca->cnt at most once per jiffy.
+	 * On all cwnd reduction events, ca->epoch_start is set to 0,
+	 * which will force a recalculation of ca->cnt.
+	 */
+	if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+		goto tcp_friendliness;
+
 	ca->last_cwnd = cwnd;
 	ca->last_time = tcp_time_stamp;
 
 	if (ca->epoch_start == 0) {
 		ca->epoch_start = tcp_time_stamp;	/* record beginning */
-		ca->ack_cnt = 1;			/* start counting */
+		ca->ack_cnt = acked;			/* start counting */
 		ca->tcp_cwnd = cwnd;			/* syn with cubic */
 
 		if (ca->last_max_cwnd <= cwnd) {
@@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
 		ca->cnt = 20;	/* increase cwnd 5% per RTT */
 
+tcp_friendliness:
 	/* TCP Friendly */
 	if (tcp_friendliness) {
 		u32 scale = beta_scale;
@@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		}
 	}
 
-	ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
 	if (ca->cnt == 0)			/* cannot be zero */
 		ca->cnt = 1;
 }
@@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
 		if (hystart && after(ack, ca->end_seq))
 			bictcp_hystart_reset(sk);
-		tcp_slow_start(tp, acked);
-	} else {
-		bictcp_update(ca, tp->snd_cwnd);
-		tcp_cong_avoid_ai(tp, ca->cnt);
+		acked = tcp_slow_start(tp, acked);
+		if (!acked)
+			return;
 	}
+	bictcp_update(ca, tp->snd_cwnd, acked);
+	tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
 static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay)
  */
 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
-	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 	u32 delay;
 
-	if (icsk->icsk_ca_state == TCP_CA_Open) {
-		u32 ratio = ca->delayed_ack;
-
-		ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
-		ratio += cnt;
-
-		ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
-	}
-
 	/* Some calls are for duplicates without timetamps */
 	if (rtt_us < 0)
 		return;
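
The goto added to bictcp_update() above caps the expensive cubic recalculation at once per jiffy while still running the TCP-friendliness tail on every call. A standalone sketch of that guard, assuming a 100 Hz tick and a placeholder computation in place of the real cubic math (now_jiffies() is an invented stand-in for tcp_time_stamp):

#include <stdint.h>
#include <time.h>

static uint32_t now_jiffies(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint32_t)(ts.tv_sec * 100 + ts.tv_nsec / 10000000);
}

struct ca {
	uint32_t epoch_start;
	uint32_t last_time;
	uint32_t cnt;		/* expensive result, cached per tick */
	uint32_t ack_cnt;	/* cheap per-ACK bookkeeping */
};

static void update(struct ca *ca, uint32_t acked)
{
	ca->ack_cnt += acked;		/* always runs, like the patch */

	if (ca->epoch_start && now_jiffies() == ca->last_time)
		goto friendliness;	/* cached ca->cnt is still valid */

	ca->last_time = now_jiffies();
	if (!ca->epoch_start)
		ca->epoch_start = ca->last_time;
	ca->cnt = ca->ack_cnt / 2 + 1;	/* placeholder for the cubic math */

friendliness:
	if (ca->cnt == 0)		/* cheap tail runs on every call */
		ca->cnt = 1;
}

int main(void)
{
	struct ca ca = { 0 };

	for (int i = 0; i < 1000; i++)
		update(&ca, 1);
	return ca.cnt == 0;
}
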
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a3f72d7fc06c..d22f54482bab 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.bound_dev_if = sk->sk_bound_dev_if;
 
 	arg.tos = ip_hdr(skb)->tos;
-	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
 
@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 	if (oif)
 		arg.bound_dev_if = oif;
 	arg.tos = tos;
-	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
 
@@ -2428,14 +2430,39 @@ struct proto tcp_prot = {
 };
 EXPORT_SYMBOL(tcp_prot);
 
+static void __net_exit tcp_sk_exit(struct net *net)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+	free_percpu(net->ipv4.tcp_sk);
+}
+
 static int __net_init tcp_sk_init(struct net *net)
 {
+	int res, cpu;
+
+	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+	if (!net->ipv4.tcp_sk)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		struct sock *sk;
+
+		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+					   IPPROTO_TCP, net);
+		if (res)
+			goto fail;
+		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+	}
 	net->ipv4.sysctl_tcp_ecn = 2;
 	return 0;
-}
 
-static void __net_exit tcp_sk_exit(struct net *net)
-{
+fail:
+	tcp_sk_exit(net);
+
+	return res;
 }
 
 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
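
tcp_sk_init()/tcp_sk_exit() above follow the usual init/unwind pairing: the exit helper is reused from the failure path, so it must tolerate slots that were never filled. A simplified userspace sketch of the idiom (an ordinary array stands in for the per-cpu allocation, malloc/free for the ctl-socket calls; the kernel version additionally frees the percpu array itself):

#include <stdlib.h>

#define NR_CPUS 8

struct sock;

static struct sock *socks[NR_CPUS];

static struct sock *make_sock(void)
{
	return malloc(1);	/* placeholder for a real control socket */
}

static void destroy_sock(struct sock *sk)
{
	free(sk);		/* free(NULL) is a no-op, like the kernel helper */
}

static void tcp_sk_exit(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		destroy_sock(socks[cpu]);
		socks[cpu] = NULL;
	}
}

static int tcp_sk_init(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		socks[cpu] = make_sock();
		if (!socks[cpu])
			goto fail;
	}
	return 0;
fail:
	tcp_sk_exit();		/* unwind only the slots filled so far */
	return -1;
}

int main(void)
{
	int res = tcp_sk_init();

	tcp_sk_exit();
	return res;
}
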
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 6824afb65d93..333bcb2415ff 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
 		tcp_slow_start(tp, acked);
 	else
-		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
+		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+				  1);
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index a4d2d2d88dca..112151eeee45 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			/* In the "non-congestive state", increase cwnd
 			 * every rtt.
 			 */
-			tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+			tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
 		} else {
 			/* In the "congestive state", increase cwnd
 			 * every other rtt.
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index cd7273218598..17d35662930d 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
 	} else {
 		/* Reno */
-		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+		tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
 	}
 
 	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 7927db0a9279..4a000f1dd757 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
 	s_slot = cb->args[0];
 	num = s_num = cb->args[1];
 
-	for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
 		struct sock *sk;
 		struct hlist_nulls_node *node;
 		struct udp_hslot *hslot = &table->hash[slot];
 
+		num = 0;
+
 		if (hlist_nulls_empty(&hslot->head))
 			continue;
 
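
The udp_diag hunk separates the two counters the dump loop needs: num restarts at 0 for every hash slot walked, while s_num, the resume cursor, applies only to the first (resumed) slot and is cleared once that slot is finished. A standalone sketch of the same resume pattern, with invented table sizes and emit() standing in for the dump callback:

#include <stdio.h>

#define SLOTS 4
#define PER_SLOT 3

static void emit(int slot, int num)
{
	printf("dump slot=%d entry=%d\n", slot, num);
}

static void dump_from(int s_slot, int saved_num)
{
	int s_num = saved_num;

	for (int slot = s_slot; slot < SLOTS; s_num = 0, slot++) {
		int num = 0;	/* per-slot position, restarts every slot */

		for (int i = 0; i < PER_SLOT; i++) {
			if (num < s_num) {	/* already sent last time */
				num++;
				continue;
			}
			emit(slot, num);
			num++;
		}
	}
}

int main(void)
{
	dump_from(1, 2);	/* resume at slot 1, entry 2 */
	return 0;
}
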
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b2d1838897c9..f1c6d5e98322 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,
 	return 0;
 }
 
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+			  struct net *net)
+{
+	if (atomic_read(&rt->rt6i_ref) != 1) {
+		/* This route is used as dummy address holder in some split
+		 * nodes. It is not leaked, but it still holds other resources,
+		 * which must be released in time. So, scan ascendant nodes
+		 * and replace dummy references to this route with references
+		 * to still alive ones.
+		 */
+		while (fn) {
+			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+				fn->leaf = fib6_find_prefix(net, fn);
+				atomic_inc(&fn->leaf->rt6i_ref);
+				rt6_release(rt);
+			}
+			fn = fn->parent;
+		}
+		/* No more references are possible at this point. */
+		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+	}
+}
+
 /*
  *	Insert routing information in a node.
  */
@@ -807,11 +830,12 @@ add:
 		rt->dst.rt6_next = iter->dst.rt6_next;
 		atomic_inc(&rt->rt6i_ref);
 		inet6_rt_notify(RTM_NEWROUTE, rt, info);
-		rt6_release(iter);
 		if (!(fn->fn_flags & RTN_RTINFO)) {
 			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
 			fn->fn_flags |= RTN_RTINFO;
 		}
+		fib6_purge_rt(iter, fn, info->nl_net);
+		rt6_release(iter);
 	}
 
 	return 0;
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
 		fn = fib6_repair_tree(net, fn);
 	}
 
-	if (atomic_read(&rt->rt6i_ref) != 1) {
-		/* This route is used as dummy address holder in some split
-		 * nodes. It is not leaked, but it still holds other resources,
-		 * which must be released in time. So, scan ascendant nodes
-		 * and replace dummy references to this route with references
-		 * to still alive ones.
-		 */
-		while (fn) {
-			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
-				fn->leaf = fib6_find_prefix(net, fn);
-				atomic_inc(&fn->leaf->rt6i_ref);
-				rt6_release(rt);
-			}
-			fn = fn->parent;
-		}
-		/* No more references are possible at this point. */
-		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
-	}
+	fib6_purge_rt(rt, fn, net);
 
 	inet6_rt_notify(RTM_DELROUTE, rt, info);
 	rt6_release(rt);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 13cda4c6313b..01ccc28a686f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		if (code == ICMPV6_HDR_FIELD)
 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
-		if (teli && teli == info - 2) {
+		if (teli && teli == be32_to_cpu(info) - 2) {
 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
 			if (tel->encap_limit == 0) {
 				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
@@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		}
 		break;
 	case ICMPV6_PKT_TOOBIG:
-		mtu = info - offset;
+		mtu = be32_to_cpu(info) - offset;
 		if (mtu < IPV6_MIN_MTU)
 			mtu = IPV6_MIN_MTU;
 		t->dev->mtu = mtu;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ce69a12ae48c..d28f2a2efb32 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	skb_copy_secmark(to, from);
 }
 
-static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
-	static u32 ip6_idents_hashrnd __read_mostly;
-	u32 hash, id;
-
-	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
-
-	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
-	hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
-
-	id = ip_idents_reserve(hash, 1);
-	fhdr->identification = htonl(id);
-}
-
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
 	struct sk_buff *frag;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 97f41a3e68d9..54520a0bd5e3 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -9,6 +9,24 @@
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
+u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
+{
+	u32 hash, id;
+
+	hash = __ipv6_addr_jhash(dst, hashrnd);
+	hash = __ipv6_addr_jhash(src, hash);
+
+	/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
+	 * set the hight order instead thus minimizing possible future
+	 * collisions.
+	 */
+	id = ip_idents_reserve(hash, 1);
+	if (unlikely(!id))
+		id = 1 << 31;
+
+	return id;
+}
+
 /* This function exists only for tap drivers that must support broken
  * clients requesting UFO without specifying an IPv6 fragment ID.
  *
@@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
 	static u32 ip6_proxy_idents_hashrnd __read_mostly;
 	struct in6_addr buf[2];
 	struct in6_addr *addrs;
-	u32 hash, id;
+	u32 id;
 
 	addrs = skb_header_pointer(skb,
 				   skb_network_offset(skb) +
@@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
 	net_get_random_once(&ip6_proxy_idents_hashrnd,
 			    sizeof(ip6_proxy_idents_hashrnd));
 
-	hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
-	hash = __ipv6_addr_jhash(&addrs[0], hash);
-
-	id = ip_idents_reserve(hash, 1);
-	skb_shinfo(skb)->ip6_frag_id = htonl(id);
+	id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+				 &addrs[1], &addrs[0]);
+	skb_shinfo(skb)->ip6_frag_id = id;
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+	static u32 ip6_idents_hashrnd __read_mostly;
+	u32 id;
+
+	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+
+	id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
+				 &rt->rt6i_src.addr);
+	fhdr->identification = htonl(id);
+}
+EXPORT_SYMBOL(ipv6_select_ident);
+
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
 	u16 offset = sizeof(struct ipv6hdr);
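
__ipv6_select_ident() above reserves the value 0 to mean "no fragment id chosen yet", remapping an unlucky 0 from the reservation step to a value with the high bit set. A tiny standalone sketch of that convention (next_id() is an invented stand-in for ip_idents_reserve(); a real caller hashes the addresses first):

#include <stdint.h>
#include <stdio.h>

static uint32_t counter = 0xfffffffe;

static uint32_t next_id(void)
{
	return counter++;	/* wraps through 0 eventually */
}

static uint32_t select_ident(void)
{
	uint32_t id = next_id();

	if (id == 0)
		id = (uint32_t)1 << 31;	/* keep 0 free as the "unset" mark */
	return id;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("id=%08x\n", select_ident());	/* ... 0 becomes 80000000 */
	return 0;
}
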
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 166e33bed222..495965358d22 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1242,12 +1242,16 @@ restart:
 		rt = net->ipv6.ip6_null_entry;
 	else if (rt->dst.error) {
 		rt = net->ipv6.ip6_null_entry;
-	} else if (rt == net->ipv6.ip6_null_entry) {
+		goto out;
+	}
+
+	if (rt == net->ipv6.ip6_null_entry) {
 		fn = fib6_backtrack(fn, &fl6->saddr);
 		if (fn)
 			goto restart;
 	}
 
+out:
 	dst_hold(&rt->dst);
 
 	read_unlock_bh(&table->tb6_lock);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 213546bd6d5d..cdbfe5af6187 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[],
 
 	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
 		ret = true;
-		ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
 	}
 
 	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
 		ret = true;
-		ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
 	}
 
 	return ret;
@@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
 			tunnel->encap.type) ||
-	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
+	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
 			tunnel->encap.sport) ||
-	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
+	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
 			tunnel->encap.dport) ||
 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
 			tunnel->encap.flags))
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b6aa8ed18257..a56276996b72 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 
 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
+		/* Set the IPv6 fragment id if not set yet */
+		if (!skb_shinfo(skb)->ip6_frag_id)
+			ipv6_proxy_select_ident(skb);
+
 		segs = NULL;
 		goto out;
 	}
@@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
 	fptr->nexthdr = nexthdr;
 	fptr->reserved = 0;
-	fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+	if (skb_shinfo(skb)->ip6_frag_id)
+		fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+	else
+		ipv6_select_ident(fptr,
+				  (struct rt6_info *)skb_dst(skb));
 
 	/* Fragment the skb. ipv6 header and the remaining fields of the
 	 * fragment header are updated in ipv6_gso_segment()
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 5f983644373a..48bf5a06847b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
 	struct flowi6 *fl6 = &fl->u.ip6;
 	int onlyproto = 0;
-	u16 offset = skb_network_header_len(skb);
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
+	u16 offset = sizeof(*hdr);
 	struct ipv6_opt_hdr *exthdr;
 	const unsigned char *nh = skb_network_header(skb);
-	u8 nexthdr = nh[IP6CB(skb)->nhoff];
+	u16 nhoff = IP6CB(skb)->nhoff;
 	int oif = 0;
+	u8 nexthdr;
+
+	if (!nhoff)
+		nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+	nexthdr = nh[nhoff];
 
 	if (skb_dst(skb))
 		oif = skb_dst(skb)->dev->ifindex;
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index 612a5ddaf93b..799bafc2af39 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
 	{
 		.procname	= "ack",
 		.data		= &sysctl_llc2_ack_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_ack_timeout),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "busy",
 		.data		= &sysctl_llc2_busy_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_busy_timeout),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "p",
 		.data		= &sysctl_llc2_p_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_p_timeout),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "rej",
 		.data		= &sysctl_llc2_rej_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_rej_timeout),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
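
The llc2 entries above replace a hand-written sizeof(long) with sizeof(variable), so the table's maxlen can no longer drift from the declaration the integer handler actually reads. A minimal C sketch of the pairing, with a reduced invented stand-in for the kernel's ctl_table:

#include <stddef.h>

static int ack_timeout;		/* an int handler must see an int-sized box */

struct table_entry {
	const char *procname;
	void *data;
	size_t maxlen;
};

static struct table_entry entry = {
	.procname = "ack",
	.data = &ack_timeout,
	.maxlen = sizeof(ack_timeout),	/* follows the variable, not a guess */
};

int main(void)
{
	return entry.maxlen == sizeof(int) ? 0 : 1;
}
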
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 4c5192e0d66c..4a95fe3cffbc 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 		}
 	}
 
-	/* tear down aggregation sessions and remove STAs */
-	mutex_lock(&local->sta_mtx);
-	list_for_each_entry(sta, &local->sta_list, list) {
-		if (sta->uploaded) {
-			enum ieee80211_sta_state state;
-
-			state = sta->sta_state;
-			for (; state > IEEE80211_STA_NOTEXIST; state--)
-				WARN_ON(drv_sta_state(local, sta->sdata, sta,
-						      state, state - 1));
-		}
-	}
-	mutex_unlock(&local->sta_mtx);
-
 	/* remove all interfaces that were created in the driver */
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 		case NL80211_IFTYPE_STATION:
 			ieee80211_mgd_quiesce(sdata);
 			break;
+		case NL80211_IFTYPE_WDS:
+			/* tear down aggregation sessions and remove STAs */
+			mutex_lock(&local->sta_mtx);
+			sta = sdata->u.wds.sta;
+			if (sta && sta->uploaded) {
+				enum ieee80211_sta_state state;
+
+				state = sta->sta_state;
+				for (; state > IEEE80211_STA_NOTEXIST; state--)
+					WARN_ON(drv_sta_state(local, sta->sdata,
+							      sta, state,
+							      state - 1));
+			}
+			mutex_unlock(&local->sta_mtx);
+			break;
 		default:
 			break;
 		}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 683b10f46505..d69ca513848e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
 		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
 	else if (rate)
-		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
 	else
 		channel_flags |= IEEE80211_CHAN_2GHZ;
 	put_unaligned_le16(channel_flags, pos);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 990decba1fe4..b87ca32efa0b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
 	return err;
 }
 
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
+				 unsigned int hooknum)
 {
+	if (!sysctl_snat_reroute(skb))
+		return 0;
+	/* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
+	if (NF_INET_LOCAL_IN == hooknum)
+		return 0;
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
-		if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
+		struct dst_entry *dst = skb_dst(skb);
+
+		if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
+		    ip6_route_me_harder(skb) != 0)
 			return 1;
 	} else
 #endif
-		if ((sysctl_snat_reroute(skb) ||
-		     skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
 		    ip_route_me_harder(skb, RTN_LOCAL) != 0)
 			return 1;
 
@@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 				union nf_inet_addr *snet,
 				__u8 protocol, struct ip_vs_conn *cp,
 				struct ip_vs_protocol *pp,
-				unsigned int offset, unsigned int ihl)
+				unsigned int offset, unsigned int ihl,
+				unsigned int hooknum)
 {
 	unsigned int verdict = NF_DROP;
 
@@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 #endif
 		ip_vs_nat_icmp(skb, pp, cp, 1);
 
-	if (ip_vs_route_me_harder(af, skb))
+	if (ip_vs_route_me_harder(af, skb, hooknum))
 		goto out;
 
 	/* do the statistics and put it back */
@@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
 
 	snet.ip = iph->saddr;
 	return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
-				    pp, ciph.len, ihl);
+				    pp, ciph.len, ihl, hooknum);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
 	snet.in6 = ciph.saddr.in6;
 	writable = ciph.len;
 	return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
-				    pp, writable, sizeof(struct ipv6hdr));
+				    pp, writable, sizeof(struct ipv6hdr),
+				    hooknum);
 }
 #endif
 
@@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
  */
 static unsigned int
 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
+		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
+		unsigned int hooknum)
 {
 	struct ip_vs_protocol *pp = pd->pp;
 
@@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
 	 * if it came from this machine itself. So re-compute
 	 * the routing information.
 	 */
-	if (ip_vs_route_me_harder(af, skb))
+	if (ip_vs_route_me_harder(af, skb, hooknum))
 		goto drop;
 
 	IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
 	cp = pp->conn_out_get(af, skb, &iph, 0);
 
 	if (likely(cp))
-		return handle_response(af, skb, pd, cp, &iph);
+		return handle_response(af, skb, pd, cp, &iph, hooknum);
 	if (sysctl_nat_icmp_send(net) &&
 	    (pp->protocol == IPPROTO_TCP ||
 	     pp->protocol == IPPROTO_UDP ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 3b3ddb4fb9ee..1ff04bcd4871 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1134,9 +1134,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 	/* Restore old counters on this cpu, no problem. Per-cpu statistics
 	 * are not exposed to userspace.
 	 */
+	preempt_disable();
 	stats = this_cpu_ptr(newstats);
 	stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
 	stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+	preempt_enable();
 
 	return newstats;
 }
@@ -1262,8 +1264,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 		nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
 		trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
 					sizeof(struct nft_trans_chain));
-		if (trans == NULL)
+		if (trans == NULL) {
+			free_percpu(stats);
 			return -ENOMEM;
+		}
 
 		nft_trans_chain_stats(trans) = stats;
 		nft_trans_chain_update(trans) = true;
@@ -1319,8 +1323,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 		hookfn = type->hooks[hooknum];
 
 		basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
-		if (basechain == NULL)
+		if (basechain == NULL) {
+			module_put(type->owner);
 			return -ENOMEM;
+		}
 
 		if (nla[NFTA_CHAIN_COUNTERS]) {
 			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
@@ -3753,6 +3759,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
 }
 EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
 
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+			     unsigned int hook_flags)
+{
+	struct nft_base_chain *basechain;
+
+	if (chain->flags & NFT_BASE_CHAIN) {
+		basechain = nft_base_chain(chain);
+
+		if ((1 << basechain->ops[0].hooknum) & hook_flags)
+			return 0;
+
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+
 /*
  * Loop detection - walk through the ruleset beginning at the destination chain
  * of a new jump until either the source chain is reached (loop) or all
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index d1ffd5eb3a9b..9aea747b43ea 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_masq_policy);
 
+int nft_masq_validate(const struct nft_ctx *ctx,
+		      const struct nft_expr *expr,
+		      const struct nft_data **data)
+{
+	int err;
+
+	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+	if (err < 0)
+		return err;
+
+	return nft_chain_validate_hooks(ctx->chain,
+					(1 << NF_INET_POST_ROUTING));
+}
+EXPORT_SYMBOL_GPL(nft_masq_validate);
+
 int nft_masq_init(const struct nft_ctx *ctx,
 		  const struct nft_expr *expr,
 		  const struct nlattr * const tb[])
@@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
 	struct nft_masq *priv = nft_expr_priv(expr);
 	int err;
 
-	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-	if (err < 0)
+	err = nft_masq_validate(ctx, expr, NULL);
+	if (err)
 		return err;
 
 	if (tb[NFTA_MASQ_FLAGS] == NULL)
@@ -60,12 +75,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_masq_dump);
 
-int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-		      const struct nft_data **data)
-{
-	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_masq_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index aff54fb1c8a0..a0837c6c9283 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
 	[NFTA_NAT_FLAGS]	 = { .type = NLA_U32 },
 };
 
-static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
-			const struct nlattr * const tb[])
+static int nft_nat_validate(const struct nft_ctx *ctx,
+			    const struct nft_expr *expr,
+			    const struct nft_data **data)
 {
 	struct nft_nat *priv = nft_expr_priv(expr);
-	u32 family;
 	int err;
 
 	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
 	if (err < 0)
 		return err;
 
+	switch (priv->type) {
+	case NFT_NAT_SNAT:
+		err = nft_chain_validate_hooks(ctx->chain,
+					       (1 << NF_INET_POST_ROUTING) |
+					       (1 << NF_INET_LOCAL_IN));
+		break;
+	case NFT_NAT_DNAT:
+		err = nft_chain_validate_hooks(ctx->chain,
+					       (1 << NF_INET_PRE_ROUTING) |
+					       (1 << NF_INET_LOCAL_OUT));
+		break;
+	}
+
+	return err;
+}
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+			const struct nlattr * const tb[])
+{
+	struct nft_nat *priv = nft_expr_priv(expr);
+	u32 family;
+	int err;
+
 	if (tb[NFTA_NAT_TYPE] == NULL ||
 	    (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
 	     tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
@@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		return -EINVAL;
 	}
 
+	err = nft_nat_validate(ctx, expr, NULL);
+	if (err < 0)
+		return err;
+
 	if (tb[NFTA_NAT_FAMILY] == NULL)
 		return -EINVAL;
 
@@ -219,13 +246,6 @@ nla_put_failure:
 	return -1;
 }
 
-static int nft_nat_validate(const struct nft_ctx *ctx,
-			    const struct nft_expr *expr,
-			    const struct nft_data **data)
-{
-	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-
 static struct nft_expr_type nft_nat_type;
 static const struct nft_expr_ops nft_nat_ops = {
 	.type           = &nft_nat_type,
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 9e8093f28311..d7e9e93a4e90 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_redir_policy);
 
+int nft_redir_validate(const struct nft_ctx *ctx,
+		       const struct nft_expr *expr,
+		       const struct nft_data **data)
+{
+	int err;
+
+	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+	if (err < 0)
+		return err;
+
+	return nft_chain_validate_hooks(ctx->chain,
+					(1 << NF_INET_PRE_ROUTING) |
+					(1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_redir_validate);
+
 int nft_redir_init(const struct nft_ctx *ctx,
 		   const struct nft_expr *expr,
 		   const struct nlattr * const tb[])
@@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
 	struct nft_redir *priv = nft_expr_priv(expr);
 	int err;
 
-	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+	err = nft_redir_validate(ctx, expr, NULL);
 	if (err < 0)
 		return err;
 
@@ -88,12 +104,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_redir_dump);
 
-int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-		       const struct nft_data **data)
-{
-	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_redir_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 02fdde28dada..75532efa51cd 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1438,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups,
 
 	for (undo = 0; undo < group; undo++)
 		if (test_bit(undo, &groups))
-			nlk->netlink_unbind(sock_net(sk), undo);
+			nlk->netlink_unbind(sock_net(sk), undo + 1);
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1476,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 		for (group = 0; group < nlk->ngroups; group++) {
 			if (!test_bit(group, &groups))
 				continue;
-			err = nlk->netlink_bind(net, group);
+			err = nlk->netlink_bind(net, group + 1);
 			if (!err)
 				continue;
 			netlink_undo_bind(group, groups, sk);
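
Both hunks above add the +1 at the bitmap boundary: netlink multicast groups are numbered from 1, while the bits tracking them are indexed from 0, so every bind/unbind callback must see bit + 1. A small standalone demo of the corrected mapping (bind_group() is an invented stand-in for the callback):

#include <stdio.h>

static void bind_group(unsigned int group)	/* expects 1-based ids */
{
	printf("bound group %u\n", group);
}

int main(void)
{
	unsigned long groups = 0x5;	/* bits 0 and 2 set */

	for (unsigned int bit = 0; bit < 32; bit++)
		if (groups & (1UL << bit))
			bind_group(bit + 1);	/* groups 1 and 3 */
	return 0;
}
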
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index c3b0cd43eb56..c173f69e1479 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
 	{
 		.procname       = "max_unacked_packets",
 		.data		= &rds_sysctl_max_unacked_packets,
-		.maxlen         = sizeof(unsigned long),
+		.maxlen         = sizeof(int),
 		.mode           = 0644,
 		.proc_handler   = proc_dointvec,
 	},
 	{
 		.procname       = "max_unacked_bytes",
 		.data		= &rds_sysctl_max_unacked_bytes,
-		.maxlen         = sizeof(unsigned long),
+		.maxlen         = sizeof(int),
 		.mode           = 0644,
 		.proc_handler   = proc_dointvec,
 	},
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index aad6a679fb13..baef987fe2c0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 }
 EXPORT_SYMBOL(tcf_exts_change);
 
 #define tcf_exts_first_act(ext) \
-	list_first_entry(&(exts)->actions, struct tc_action, list)
+	list_first_entry_or_null(&(exts)->actions, \
+				 struct tc_action, list)
 
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
@@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
 	struct tc_action *a = tcf_exts_first_act(exts);
-	if (tcf_action_copy_stats(skb, a, 1) < 0)
+	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
 		return -1;
 #endif
 	return 0;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 84c8219c3e1c..f59adf8a4cd7 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	}
 
 	bpf_size = bpf_len * sizeof(*bpf_ops);
+	if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
+		ret = -EINVAL;
+		goto errout;
+	}
+
 	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
 	if (bpf_ops == NULL) {
 		ret = -ENOMEM;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
215 struct cls_bpf_head *head) 220 struct cls_bpf_head *head)
216{ 221{
217 unsigned int i = 0x80000000; 222 unsigned int i = 0x80000000;
223 u32 handle;
218 224
219 do { 225 do {
220 if (++head->hgen == 0x7FFFFFFF) 226 if (++head->hgen == 0x7FFFFFFF)
221 head->hgen = 1; 227 head->hgen = 1;
222 } while (--i > 0 && cls_bpf_get(tp, head->hgen)); 228 } while (--i > 0 && cls_bpf_get(tp, head->hgen));
223 if (i == 0) 229
230 if (unlikely(i == 0)) {
224 pr_err("Insufficient number of handles\n"); 231 pr_err("Insufficient number of handles\n");
232 handle = 0;
233 } else {
234 handle = head->hgen;
235 }
225 236
226 return i; 237 return handle;
227} 238}
228 239
229static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, 240static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
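
Two fixes in this file: the instruction count claimed by userspace is now
checked against the actual netlink payload length before anything is
copied, and cls_bpf_grab_new_handle() returns the allocated handle rather
than the exhausted loop counter i. A sketch of the length validation;
struct bpf_op is an illustrative stand-in for struct sock_filter:

    #include <stdio.h>
    #include <stdint.h>

    struct bpf_op { uint16_t code; uint8_t jt, jf; uint32_t k; };

    /* Reject a declared count that disagrees with the payload length. */
    static int validate_ops(unsigned int bpf_len, size_t payload_len)
    {
        size_t bpf_size = bpf_len * sizeof(struct bpf_op);

        if (bpf_size != payload_len)
            return -1;    /* -EINVAL in the kernel */
        return 0;
    }

    int main(void)
    {
        /* claims 4 ops but only carries 3 ops' worth of bytes */
        printf("%d\n", validate_ops(4, 3 * sizeof(struct bpf_op)));
        return 0;
    }
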
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 9b05924cc386..333cd94ba381 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -670,8 +670,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
670 if (tb[TCA_FQ_FLOW_PLIMIT]) 670 if (tb[TCA_FQ_FLOW_PLIMIT])
671 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); 671 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
672 672
673 if (tb[TCA_FQ_QUANTUM]) 673 if (tb[TCA_FQ_QUANTUM]) {
674 q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 674 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
675
676 if (quantum > 0)
677 q->quantum = quantum;
678 else
679 err = -EINVAL;
680 }
675 681
676 if (tb[TCA_FQ_INITIAL_QUANTUM]) 682 if (tb[TCA_FQ_INITIAL_QUANTUM])
677 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); 683 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
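
A zero TCA_FQ_QUANTUM would leave flows with no credit to ever dequeue, so
the value is now parsed into a local, validated, and only then committed.
The parse-validate-commit shape, standalone:

    #include <stdio.h>
    #include <stdint.h>

    static int set_quantum(uint32_t *dst, uint32_t requested)
    {
        if (requested == 0)
            return -1;    /* -EINVAL: never commit a zero quantum */
        *dst = requested;
        return 0;
    }

    int main(void)
    {
        uint32_t quantum = 1514;

        if (set_quantum(&quantum, 0) < 0)
            printf("rejected, quantum stays %u\n", quantum);
        return 0;
    }
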
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f791edd64d6c..26d06dbcc1c8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
1182 asoc->peer.peer_hmacs = new->peer.peer_hmacs; 1182 asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1183 new->peer.peer_hmacs = NULL; 1183 new->peer.peer_hmacs = NULL;
1184 1184
1185 sctp_auth_key_put(asoc->asoc_shared_key);
1186 sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); 1185 sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1187} 1186}
1188 1187
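
sctp_auth_asoc_init_active_key() already drops the reference on the old
active key when it installs the new one, so the removed
sctp_auth_key_put() was a second put that could free a key still in use.
A toy refcount illustration of why the extra put is fatal:

    #include <stdio.h>
    #include <stdlib.h>

    struct key { int refcnt; };

    static void key_put(struct key *k)
    {
        if (--k->refcnt == 0)
            free(k);    /* any further put is a use-after-free */
    }

    /* Stand-in for sctp_auth_asoc_init_active_key(): puts the old key. */
    static struct key *install_new_key(struct key *old)
    {
        struct key *fresh = calloc(1, sizeof(*fresh));

        fresh->refcnt = 1;
        if (old)
            key_put(old);
        return fresh;
    }

    int main(void)
    {
        struct key *active = calloc(1, sizeof(*active));

        active->refcnt = 1;
        /* The deleted line amounted to an extra key_put(active) here, */
        /* dropping the same reference install_new_key() drops.        */
        active = install_new_key(active);
        printf("refcnt=%d\n", active->refcnt);
        key_put(active);
        return 0;
    }
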
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e49e231cef52..06320c8c1c86 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2608,7 +2608,7 @@ do_addr_param:
2608 2608
2609 addr_param = param.v + sizeof(sctp_addip_param_t); 2609 addr_param = param.v + sizeof(sctp_addip_param_t);
2610 2610
2611 af = sctp_get_af_specific(param_type2af(param.p->type)); 2611 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
2612 if (af == NULL) 2612 if (af == NULL)
2613 break; 2613 break;
2614 2614
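
The address family must be derived from the embedded address parameter's
own type, not from the outer wrapper parameter, or a crafted chunk can
steer the lookup to the wrong family. Sketched below with illustrative
parameter constants, not the real SCTP values:

    #include <stdio.h>
    #include <stdint.h>

    enum { PARAM_IPV4 = 5, PARAM_IPV6 = 6, PARAM_ASCONF = 0xc004 };

    struct tlv { uint16_t type; uint16_t len; };

    static int param_type2af(uint16_t type)
    {
        switch (type) {
        case PARAM_IPV4: return 4;
        case PARAM_IPV6: return 6;
        default:         return 0;    /* unknown: caller must bail out */
        }
    }

    int main(void)
    {
        struct tlv outer = { PARAM_ASCONF, 12 };
        struct tlv inner = { PARAM_IPV6, 8 };

        printf("wrong: af=%d  right: af=%d\n",
               param_type2af(outer.type),    /* the old, buggy lookup */
               param_type2af(inner.type));   /* the fixed lookup */
        return 0;
    }
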
diff --git a/net/socket.c b/net/socket.c
index a2c33a4dc7ba..418795caa897 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
869static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, 869static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
870 struct sock_iocb *siocb) 870 struct sock_iocb *siocb)
871{ 871{
872 if (!is_sync_kiocb(iocb))
873 BUG();
874
875 siocb->kiocb = iocb; 872 siocb->kiocb = iocb;
876 iocb->private = siocb; 873 iocb->private = siocb;
877 return siocb; 874 return siocb;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7ca4b5133123..8887c6e5fca8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2854 if (!rdev->ops->get_key) 2854 if (!rdev->ops->get_key)
2855 return -EOPNOTSUPP; 2855 return -EOPNOTSUPP;
2856 2856
2857 if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
2858 return -ENOENT;
2859
2857 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2860 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2858 if (!msg) 2861 if (!msg)
2859 return -ENOMEM; 2862 return -ENOMEM;
@@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2873 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) 2876 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
2874 goto nla_put_failure; 2877 goto nla_put_failure;
2875 2878
2876 if (pairwise && mac_addr &&
2877 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
2878 return -ENOENT;
2879
2880 err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, 2879 err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
2881 get_key_callback); 2880 get_key_callback);
2882 2881
@@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
3047 wdev_lock(dev->ieee80211_ptr); 3046 wdev_lock(dev->ieee80211_ptr);
3048 err = nl80211_key_allowed(dev->ieee80211_ptr); 3047 err = nl80211_key_allowed(dev->ieee80211_ptr);
3049 3048
3050 if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr && 3049 if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
3051 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) 3050 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
3052 err = -ENOENT; 3051 err = -ENOENT;
3053 3052
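
Two things move here: the IBSS_RSN capability check now runs before
nlmsg_new(), so the -ENOENT path no longer leaks the freshly allocated
message, and both get_key and del_key now key the check off group keys
(!pairwise, NL80211_KEYTYPE_GROUP) rather than pairwise ones. The
validate-before-allocate ordering, standalone, with error values written
out for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    /* Validate before allocating so error returns cannot leak. */
    static int get_key(int pairwise, int have_mac, int ibss_rsn)
    {
        void *msg;

        if (!pairwise && have_mac && !ibss_rsn)
            return -2;    /* -ENOENT, nothing allocated yet */

        msg = malloc(128);
        if (!msg)
            return -12;   /* -ENOMEM */
        /* ... fill and send msg ... */
        free(msg);
        return 0;
    }

    int main(void)
    {
        printf("%d\n", get_key(0, 1, 0));    /* rejected up front, no leak */
        return 0;
    }
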
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d0ac795445b7..5488c3662f7d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
308 goto out; 308 goto out;
309 } 309 }
310 310
311 if (ieee80211_is_mgmt(fc)) {
312 if (ieee80211_has_order(fc))
313 hdrlen += IEEE80211_HT_CTL_LEN;
314 goto out;
315 }
316
311 if (ieee80211_is_ctl(fc)) { 317 if (ieee80211_is_ctl(fc)) {
312 /* 318 /*
313 * ACK and CTS are 10 bytes, all others 16. To see how 319 * ACK and CTS are 10 bytes, all others 16. To see how
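
ieee80211_hdrlen() previously ignored the HT Control field that management
frames carry when the Order bit is set; the new branch adds
IEEE80211_HT_CTL_LEN (4 bytes) for that case. A standalone sketch of the
decode; the bit positions follow the 802.11 frame control layout, the
constants are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define FTYPE_MGMT  0x0
    #define FCTL_ORDER  0x8000    /* Order bit in frame control */
    #define HT_CTL_LEN  4

    static unsigned int hdrlen(uint16_t fc)
    {
        unsigned int len = 24;    /* base management header */

        if (((fc >> 2) & 0x3) == FTYPE_MGMT && (fc & FCTL_ORDER))
            len += HT_CTL_LEN;    /* +HT Control, the missing case */
        return len;
    }

    int main(void)
    {
        printf("%u %u\n", hdrlen(0x0000), hdrlen(0x8000));
        return 0;
    }
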
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c
index e286b42307f3..6299ee95cd11 100644
--- a/samples/bpf/test_maps.c
+++ b/samples/bpf/test_maps.c
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)
69 69
70 /* iterate over two elements */ 70 /* iterate over two elements */
71 assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 && 71 assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
72 next_key == 2); 72 (next_key == 1 || next_key == 2));
73 assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 && 73 assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
74 next_key == 1); 74 (next_key == 1 || next_key == 2));
75 assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 && 75 assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
76 errno == ENOENT); 76 errno == ENOENT);
77 77
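
Hash map iteration order is unspecified, so the test now accepts the two
keys in either order while still requiring that both show up and that the
third call ends with ENOENT. The order-agnostic assertion pattern,
standalone:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* Whatever order a hash table happens to yield... */
        int visit[2] = { 2, 1 };
        int seen1 = 0, seen2 = 0, i;

        for (i = 0; i < 2; i++) {
            assert(visit[i] == 1 || visit[i] == 2);
            seen1 |= (visit[i] == 1);
            seen2 |= (visit[i] == 2);
        }
        assert(seen1 && seen2);    /* ...both elements appear exactly once */
        printf("ok\n");
        return 0;
    }
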
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 8eb779b9d77f..604e718d68d3 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -5,6 +5,7 @@ config SECURITY_TOMOYO
5 select SECURITYFS 5 select SECURITYFS
6 select SECURITY_PATH 6 select SECURITY_PATH
7 select SECURITY_NETWORK 7 select SECURITY_NETWORK
8 select SRCU
8 default n 9 default n
9 help 10 help
10 This selects TOMOYO Linux, pathname-based access control. 11 This selects TOMOYO Linux, pathname-based access control.
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index ec667f158f19..5d905d90d504 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
82static int my_client = -1; 82static int my_client = -1;
83 83
84/* 84/*
85 * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
86 * to subscribers.
87 * Note: this callback is called only after all subscribers are removed.
88 */
89static int
90dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
91{
92 struct snd_seq_dummy_port *p;
93 int i;
94 struct snd_seq_event ev;
95
96 p = private_data;
97 memset(&ev, 0, sizeof(ev));
98 if (p->duplex)
99 ev.source.port = p->connect;
100 else
101 ev.source.port = p->port;
102 ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
103 ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
104 for (i = 0; i < 16; i++) {
105 ev.data.control.channel = i;
106 ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
107 snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
108 ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
109 snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
110 }
111 return 0;
112}
113
114/*
115 * event input callback - just redirect events to subscribers 85 * event input callback - just redirect events to subscribers
116 */ 86 */
117static int 87static int
@@ -175,7 +145,6 @@ create_port(int idx, int type)
175 | SNDRV_SEQ_PORT_TYPE_PORT; 145 | SNDRV_SEQ_PORT_TYPE_PORT;
176 memset(&pcb, 0, sizeof(pcb)); 146 memset(&pcb, 0, sizeof(pcb));
177 pcb.owner = THIS_MODULE; 147 pcb.owner = THIS_MODULE;
178 pcb.unuse = dummy_unuse;
179 pcb.event_input = dummy_input; 148 pcb.event_input = dummy_input;
180 pcb.private_free = dummy_free; 149 pcb.private_free = dummy_free;
181 pcb.private_data = rec; 150 pcb.private_data = rec;
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 1a3a6fa27158..c6bba99a90b2 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
56 56
57static void snd_ak4113_free(struct ak4113 *chip) 57static void snd_ak4113_free(struct ak4113 *chip)
58{ 58{
59 chip->init = 1; /* don't schedule new work */ 59 atomic_inc(&chip->wq_processing); /* don't schedule new work */
60 mb();
61 cancel_delayed_work_sync(&chip->work); 60 cancel_delayed_work_sync(&chip->work);
62 kfree(chip); 61 kfree(chip);
63} 62}
@@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
89 chip->write = write; 88 chip->write = write;
90 chip->private_data = private_data; 89 chip->private_data = private_data;
91 INIT_DELAYED_WORK(&chip->work, ak4113_stats); 90 INIT_DELAYED_WORK(&chip->work, ak4113_stats);
91 atomic_set(&chip->wq_processing, 0);
92 92
93 for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++) 93 for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
94 chip->regmap[reg] = pgm[reg]; 94 chip->regmap[reg] = pgm[reg];
@@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip)
139 139
140void snd_ak4113_reinit(struct ak4113 *chip) 140void snd_ak4113_reinit(struct ak4113 *chip)
141{ 141{
142 chip->init = 1; 142 if (atomic_inc_return(&chip->wq_processing) == 1)
143 mb(); 143 cancel_delayed_work_sync(&chip->work);
144 flush_delayed_work(&chip->work);
145 ak4113_init_regs(chip); 144 ak4113_init_regs(chip);
146 /* bring up statistics / event queueing */ 145 /* bring up statistics / event queueing */
147 chip->init = 0; 146 if (atomic_dec_and_test(&chip->wq_processing))
148 if (chip->kctls[0])
149 schedule_delayed_work(&chip->work, HZ / 10); 147 schedule_delayed_work(&chip->work, HZ / 10);
150} 148}
151EXPORT_SYMBOL_GPL(snd_ak4113_reinit); 149EXPORT_SYMBOL_GPL(snd_ak4113_reinit);
@@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work)
632{ 630{
633 struct ak4113 *chip = container_of(work, struct ak4113, work.work); 631 struct ak4113 *chip = container_of(work, struct ak4113, work.work);
634 632
635 if (!chip->init) 633 if (atomic_inc_return(&chip->wq_processing) == 1)
636 snd_ak4113_check_rate_and_errors(chip, chip->check_flags); 634 snd_ak4113_check_rate_and_errors(chip, chip->check_flags);
637 635
638 schedule_delayed_work(&chip->work, HZ / 10); 636 if (atomic_dec_and_test(&chip->wq_processing))
637 schedule_delayed_work(&chip->work, HZ / 10);
639} 638}
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index c7f56339415d..b70e6eccbd03 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114)
66 66
67static void snd_ak4114_free(struct ak4114 *chip) 67static void snd_ak4114_free(struct ak4114 *chip)
68{ 68{
69 chip->init = 1; /* don't schedule new work */ 69 atomic_inc(&chip->wq_processing); /* don't schedule new work */
70 mb();
71 cancel_delayed_work_sync(&chip->work); 70 cancel_delayed_work_sync(&chip->work);
72 kfree(chip); 71 kfree(chip);
73} 72}
@@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card,
100 chip->write = write; 99 chip->write = write;
101 chip->private_data = private_data; 100 chip->private_data = private_data;
102 INIT_DELAYED_WORK(&chip->work, ak4114_stats); 101 INIT_DELAYED_WORK(&chip->work, ak4114_stats);
102 atomic_set(&chip->wq_processing, 0);
103 103
104 for (reg = 0; reg < 6; reg++) 104 for (reg = 0; reg < 6; reg++)
105 chip->regmap[reg] = pgm[reg]; 105 chip->regmap[reg] = pgm[reg];
@@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip)
152 152
153void snd_ak4114_reinit(struct ak4114 *chip) 153void snd_ak4114_reinit(struct ak4114 *chip)
154{ 154{
155 chip->init = 1; 155 if (atomic_inc_return(&chip->wq_processing) == 1)
156 mb(); 156 cancel_delayed_work_sync(&chip->work);
157 flush_delayed_work(&chip->work);
158 ak4114_init_regs(chip); 157 ak4114_init_regs(chip);
159 /* bring up statistics / event queueing */ 158 /* bring up statistics / event queueing */
160 chip->init = 0; 159 if (atomic_dec_and_test(&chip->wq_processing))
161 if (chip->kctls[0])
162 schedule_delayed_work(&chip->work, HZ / 10); 160 schedule_delayed_work(&chip->work, HZ / 10);
163} 161}
164 162
@@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work)
612{ 610{
613 struct ak4114 *chip = container_of(work, struct ak4114, work.work); 611 struct ak4114 *chip = container_of(work, struct ak4114, work.work);
614 612
615 if (!chip->init) 613 if (atomic_inc_return(&chip->wq_processing) == 1)
616 snd_ak4114_check_rate_and_errors(chip, chip->check_flags); 614 snd_ak4114_check_rate_and_errors(chip, chip->check_flags);
617 615 if (atomic_dec_and_test(&chip->wq_processing))
618 schedule_delayed_work(&chip->work, HZ / 10); 616 schedule_delayed_work(&chip->work, HZ / 10);
619} 617}
620 618
621EXPORT_SYMBOL(snd_ak4114_create); 619EXPORT_SYMBOL(snd_ak4114_create);
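
Both ak4113 and ak4114 replace the racy init flag plus mb() with an atomic
wq_processing counter: the first context in (increment to 1) may cancel or
run the work body, and only the last one out (decrement to 0) re-arms the
delayed work. A userspace sketch of the guard, with C11 atomics standing
in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int wq_processing;

    static void work_fn(void)
    {
        /* run the body only if nobody else is mid-reinit */
        if (atomic_fetch_add(&wq_processing, 1) + 1 == 1)
            puts("polling hardware stats");
        if (atomic_fetch_sub(&wq_processing, 1) - 1 == 0)
            puts("re-arming delayed work");
    }

    static void reinit(void)
    {
        if (atomic_fetch_add(&wq_processing, 1) + 1 == 1)
            puts("cancel_delayed_work_sync()");
        puts("re-programming registers");
        if (atomic_fetch_sub(&wq_processing, 1) - 1 == 0)
            puts("re-arming delayed work");
    }

    int main(void)
    {
        work_fn();
        reinit();
        return 0;
    }
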
diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c
index 7752860f7230..4c23381727a1 100644
--- a/sound/soc/adi/axi-i2s.c
+++ b/sound/soc/adi/axi-i2s.c
@@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev)
240 if (ret) 240 if (ret)
241 goto err_clk_disable; 241 goto err_clk_disable;
242 242
243 return 0;
244
243err_clk_disable: 245err_clk_disable:
244 clk_disable_unprepare(i2s->clk); 246 clk_disable_unprepare(i2s->clk);
245 return ret; 247 return ret;
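
Classic goto-cleanup bug: the probe function fell through into
err_clk_disable on success, so every successful probe immediately disabled
its own clock. The added return 0 separates the success exit from the
unwind path, as in this standalone shape:

    #include <stdio.h>

    static int clk_ok, dev_ok;

    static int probe(void)
    {
        int ret = 0;

        clk_ok = 1;    /* clk_prepare_enable() */
        dev_ok = 1;    /* component registration */
        if (!dev_ok) {
            ret = -1;
            goto err_clk_disable;
        }

        return 0;    /* the line the patch adds */

    err_clk_disable:
        clk_ok = 0;    /* clk_disable_unprepare() */
        return ret;
    }

    int main(void)
    {
        printf("ret=%d clk=%d\n", probe(), clk_ok);
        return 0;
    }
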
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 99ff35e2a25d..35e44e463cfe 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -348,7 +348,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
348 struct atmel_pcm_dma_params *dma_params; 348 struct atmel_pcm_dma_params *dma_params;
349 int dir, channels, bits; 349 int dir, channels, bits;
350 u32 tfmr, rfmr, tcmr, rcmr; 350 u32 tfmr, rfmr, tcmr, rcmr;
351 int start_event;
352 int ret; 351 int ret;
353 int fslen, fslen_ext; 352 int fslen, fslen_ext;
354 353
@@ -457,19 +456,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
457 * The SSC transmit clock is obtained from the BCLK signal on 456 * The SSC transmit clock is obtained from the BCLK signal on
458 * on the TK line, and the SSC receive clock is 457 * on the TK line, and the SSC receive clock is
459 * generated from the transmit clock. 458 * generated from the transmit clock.
460 *
461 * For single channel data, one sample is transferred
462 * on the falling edge of the LRC clock.
463 * For two channel data, one sample is
464 * transferred on both edges of the LRC clock.
465 */ 459 */
466 start_event = ((channels == 1)
467 ? SSC_START_FALLING_RF
468 : SSC_START_EDGE_RF);
469
470 rcmr = SSC_BF(RCMR_PERIOD, 0) 460 rcmr = SSC_BF(RCMR_PERIOD, 0)
471 | SSC_BF(RCMR_STTDLY, START_DELAY) 461 | SSC_BF(RCMR_STTDLY, START_DELAY)
472 | SSC_BF(RCMR_START, start_event) 462 | SSC_BF(RCMR_START, SSC_START_FALLING_RF)
473 | SSC_BF(RCMR_CKI, SSC_CKI_RISING) 463 | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
474 | SSC_BF(RCMR_CKO, SSC_CKO_NONE) 464 | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
475 | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ? 465 | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
@@ -478,14 +468,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
478 rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE) 468 rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
479 | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE) 469 | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
480 | SSC_BF(RFMR_FSLEN, 0) 470 | SSC_BF(RFMR_FSLEN, 0)
481 | SSC_BF(RFMR_DATNB, 0) 471 | SSC_BF(RFMR_DATNB, (channels - 1))
482 | SSC_BIT(RFMR_MSBF) 472 | SSC_BIT(RFMR_MSBF)
483 | SSC_BF(RFMR_LOOP, 0) 473 | SSC_BF(RFMR_LOOP, 0)
484 | SSC_BF(RFMR_DATLEN, (bits - 1)); 474 | SSC_BF(RFMR_DATLEN, (bits - 1));
485 475
486 tcmr = SSC_BF(TCMR_PERIOD, 0) 476 tcmr = SSC_BF(TCMR_PERIOD, 0)
487 | SSC_BF(TCMR_STTDLY, START_DELAY) 477 | SSC_BF(TCMR_STTDLY, START_DELAY)
488 | SSC_BF(TCMR_START, start_event) 478 | SSC_BF(TCMR_START, SSC_START_FALLING_RF)
489 | SSC_BF(TCMR_CKI, SSC_CKI_FALLING) 479 | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
490 | SSC_BF(TCMR_CKO, SSC_CKO_NONE) 480 | SSC_BF(TCMR_CKO, SSC_CKO_NONE)
491 | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ? 481 | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ?
@@ -495,7 +485,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
495 | SSC_BF(TFMR_FSDEN, 0) 485 | SSC_BF(TFMR_FSDEN, 0)
496 | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE) 486 | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
497 | SSC_BF(TFMR_FSLEN, 0) 487 | SSC_BF(TFMR_FSLEN, 0)
498 | SSC_BF(TFMR_DATNB, 0) 488 | SSC_BF(TFMR_DATNB, (channels - 1))
499 | SSC_BIT(TFMR_MSBF) 489 | SSC_BIT(TFMR_MSBF)
500 | SSC_BF(TFMR_DATDEF, 0) 490 | SSC_BF(TFMR_DATDEF, 0)
501 | SSC_BF(TFMR_DATLEN, (bits - 1)); 491 | SSC_BF(TFMR_DATLEN, (bits - 1));
@@ -512,7 +502,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
512 rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period) 502 rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
513 | SSC_BF(RCMR_STTDLY, 1) 503 | SSC_BF(RCMR_STTDLY, 1)
514 | SSC_BF(RCMR_START, SSC_START_RISING_RF) 504 | SSC_BF(RCMR_START, SSC_START_RISING_RF)
515 | SSC_BF(RCMR_CKI, SSC_CKI_RISING) 505 | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
516 | SSC_BF(RCMR_CKO, SSC_CKO_NONE) 506 | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
517 | SSC_BF(RCMR_CKS, SSC_CKS_DIV); 507 | SSC_BF(RCMR_CKS, SSC_CKS_DIV);
518 508
@@ -527,7 +517,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
527 tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period) 517 tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
528 | SSC_BF(TCMR_STTDLY, 1) 518 | SSC_BF(TCMR_STTDLY, 1)
529 | SSC_BF(TCMR_START, SSC_START_RISING_RF) 519 | SSC_BF(TCMR_START, SSC_START_RISING_RF)
530 | SSC_BF(TCMR_CKI, SSC_CKI_RISING) 520 | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
531 | SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS) 521 | SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
532 | SSC_BF(TCMR_CKS, SSC_CKS_DIV); 522 | SSC_BF(TCMR_CKS, SSC_CKS_DIV);
533 523
@@ -556,7 +546,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
556 rcmr = SSC_BF(RCMR_PERIOD, 0) 546 rcmr = SSC_BF(RCMR_PERIOD, 0)
557 | SSC_BF(RCMR_STTDLY, START_DELAY) 547 | SSC_BF(RCMR_STTDLY, START_DELAY)
558 | SSC_BF(RCMR_START, SSC_START_RISING_RF) 548 | SSC_BF(RCMR_START, SSC_START_RISING_RF)
559 | SSC_BF(RCMR_CKI, SSC_CKI_RISING) 549 | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
560 | SSC_BF(RCMR_CKO, SSC_CKO_NONE) 550 | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
561 | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ? 551 | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
562 SSC_CKS_PIN : SSC_CKS_CLOCK); 552 SSC_CKS_PIN : SSC_CKS_CLOCK);
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index e5f2fb884bf3..30c673cdc12e 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
188static const char * const pcm512x_dsp_program_texts[] = { 188static const char * const pcm512x_dsp_program_texts[] = {
189 "FIR interpolation with de-emphasis", 189 "FIR interpolation with de-emphasis",
190 "Low latency IIR with de-emphasis", 190 "Low latency IIR with de-emphasis",
191 "Fixed process flow",
192 "High attenuation with de-emphasis", 191 "High attenuation with de-emphasis",
192 "Fixed process flow",
193 "Ringing-less low latency FIR", 193 "Ringing-less low latency FIR",
194}; 194};
195 195
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 2cd4fe463102..1d1c7f8a9af2 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,
861 RT286_I2S_CTRL1, 0x0018, d_len_code << 3); 861 RT286_I2S_CTRL1, 0x0018, d_len_code << 3);
862 dev_dbg(codec->dev, "format val = 0x%x\n", val); 862 dev_dbg(codec->dev, "format val = 0x%x\n", val);
863 863
864 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 864 snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
865 snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); 865 snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
866 else
867 snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
868 866
869 return 0; 867 return 0;
870} 868}
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index c3f2decd643c..1ff726c29249 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2124,6 +2124,7 @@ MODULE_DEVICE_TABLE(of, rt5640_of_match);
2124static struct acpi_device_id rt5640_acpi_match[] = { 2124static struct acpi_device_id rt5640_acpi_match[] = {
2125 { "INT33CA", 0 }, 2125 { "INT33CA", 0 },
2126 { "10EC5640", 0 }, 2126 { "10EC5640", 0 },
2127 { "10EC5642", 0 },
2127 { }, 2128 { },
2128}; 2129};
2129MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match); 2130MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index c0fbe1881439..918ada9738b0 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -2083,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,
2083 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 2083 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
2084 2084
2085 switch (event) { 2085 switch (event) {
2086 case SND_SOC_DAPM_POST_PMU: 2086 case SND_SOC_DAPM_PRE_PMU:
2087 regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2); 2087 regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2);
2088 break;
2089
2090 case SND_SOC_DAPM_POST_PMU:
2088 regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0); 2091 regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0);
2089 break; 2092 break;
2093
2090 default: 2094 default:
2091 return 0; 2095 return 0;
2092 } 2096 }
@@ -2101,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,
2101 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 2105 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
2102 2106
2103 switch (event) { 2107 switch (event) {
2104 case SND_SOC_DAPM_POST_PMU: 2108 case SND_SOC_DAPM_PRE_PMU:
2105 regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2); 2109 regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2);
2110 break;
2111
2112 case SND_SOC_DAPM_POST_PMU:
2106 regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0); 2113 regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0);
2107 break; 2114 break;
2115
2108 default: 2116 default:
2109 return 0; 2117 return 0;
2110 } 2118 }
@@ -2212,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
2212 2220
2213static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { 2221static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
2214 SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, 2222 SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
2215 0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU), 2223 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
2224 SND_SOC_DAPM_POST_PMU),
2216 SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT, 2225 SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT,
2217 0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU), 2226 0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU |
2227 SND_SOC_DAPM_POST_PMU),
2218 2228
2219 /* Input Side */ 2229 /* Input Side */
2220 /* micbias */ 2230 /* micbias */
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 29cf7ce610f4..aa98be32bb60 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -483,21 +483,21 @@ static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
483 /* setting i2s data format */ 483 /* setting i2s data format */
484 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 484 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
485 case SND_SOC_DAIFMT_DSP_A: 485 case SND_SOC_DAIFMT_DSP_A:
486 i2sctl |= SGTL5000_I2S_MODE_PCM; 486 i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
487 break; 487 break;
488 case SND_SOC_DAIFMT_DSP_B: 488 case SND_SOC_DAIFMT_DSP_B:
489 i2sctl |= SGTL5000_I2S_MODE_PCM; 489 i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
490 i2sctl |= SGTL5000_I2S_LRALIGN; 490 i2sctl |= SGTL5000_I2S_LRALIGN;
491 break; 491 break;
492 case SND_SOC_DAIFMT_I2S: 492 case SND_SOC_DAIFMT_I2S:
493 i2sctl |= SGTL5000_I2S_MODE_I2S_LJ; 493 i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
494 break; 494 break;
495 case SND_SOC_DAIFMT_RIGHT_J: 495 case SND_SOC_DAIFMT_RIGHT_J:
496 i2sctl |= SGTL5000_I2S_MODE_RJ; 496 i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT;
497 i2sctl |= SGTL5000_I2S_LRPOL; 497 i2sctl |= SGTL5000_I2S_LRPOL;
498 break; 498 break;
499 case SND_SOC_DAIFMT_LEFT_J: 499 case SND_SOC_DAIFMT_LEFT_J:
500 i2sctl |= SGTL5000_I2S_MODE_I2S_LJ; 500 i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
501 i2sctl |= SGTL5000_I2S_LRALIGN; 501 i2sctl |= SGTL5000_I2S_LRALIGN;
502 break; 502 break;
503 default: 503 default:
@@ -1462,6 +1462,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
1462 if (ret) 1462 if (ret)
1463 return ret; 1463 return ret;
1464 1464
1465 /* Need 8 clocks before I2C accesses */
1466 udelay(1);
1467
1465 /* read chip information */ 1468 /* read chip information */
1466 ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg); 1469 ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
1467 if (ret) 1470 if (ret)
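
The SGTL5000_I2S_MODE_* constants are field values, not masks already
positioned at bit 0, so OR-ing them in without the shift programmed the
wrong bits of the I2S control register. Value versus position, standalone;
the shift and value below are illustrative, not the real register layout:

    #include <stdio.h>
    #include <stdint.h>

    #define I2S_MODE_SHIFT  2      /* illustrative field position */
    #define I2S_MODE_PCM    0x2    /* a field *value*, not a mask */

    int main(void)
    {
        uint16_t wrong = 0, right = 0;

        wrong |= I2S_MODE_PCM;                     /* lands in bits 0-1 */
        right |= I2S_MODE_PCM << I2S_MODE_SHIFT;   /* lands in the field */
        printf("wrong=0x%02x right=0x%02x\n", wrong, right);
        return 0;
    }
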
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index b7ebce054b4e..dd222b10ce13 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1046,7 +1046,7 @@ static int aic3x_prepare(struct snd_pcm_substream *substream,
1046 delay += aic3x->tdm_delay; 1046 delay += aic3x->tdm_delay;
1047 1047
1048 /* Configure data delay */ 1048 /* Configure data delay */
1049 snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, aic3x->tdm_delay); 1049 snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay);
1050 1050
1051 return 0; 1051 return 0;
1052} 1052}
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 1d1205702d23..9f2dced046de 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
254 struct ts3a227e *ts3a227e; 254 struct ts3a227e *ts3a227e;
255 struct device *dev = &i2c->dev; 255 struct device *dev = &i2c->dev;
256 int ret; 256 int ret;
257 unsigned int acc_reg;
257 258
258 ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL); 259 ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);
259 if (ts3a227e == NULL) 260 if (ts3a227e == NULL)
@@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
283 INTB_DISABLE | ADC_COMPLETE_INT_DISABLE, 284 INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,
284 ADC_COMPLETE_INT_DISABLE); 285 ADC_COMPLETE_INT_DISABLE);
285 286
287 /* Read jack status because chip might not trigger interrupt at boot. */
288 regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
289 ts3a227e_new_jack_state(ts3a227e, acc_reg);
290 ts3a227e_jack_report(ts3a227e);
291
286 return 0; 292 return 0;
287} 293}
288 294
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index b9211b42f6e9..b115ed815db9 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -717,6 +717,8 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
717 if (wm8731 == NULL) 717 if (wm8731 == NULL)
718 return -ENOMEM; 718 return -ENOMEM;
719 719
720 mutex_init(&wm8731->lock);
721
720 wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap); 722 wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
721 if (IS_ERR(wm8731->regmap)) { 723 if (IS_ERR(wm8731->regmap)) {
722 ret = PTR_ERR(wm8731->regmap); 724 ret = PTR_ERR(wm8731->regmap);
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 4d2d2b1380d5..75b87c5c0f04 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
1076 { "Right Capture PGA", NULL, "Right Capture Mux" }, 1076 { "Right Capture PGA", NULL, "Right Capture Mux" },
1077 { "Right Capture PGA", NULL, "Right Capture Inverting Mux" }, 1077 { "Right Capture PGA", NULL, "Right Capture Inverting Mux" },
1078 1078
1079 { "AIFOUTL", "Left", "ADCL" }, 1079 { "AIFOUTL Mux", "Left", "ADCL" },
1080 { "AIFOUTL", "Right", "ADCR" }, 1080 { "AIFOUTL Mux", "Right", "ADCR" },
1081 { "AIFOUTR", "Left", "ADCL" }, 1081 { "AIFOUTR Mux", "Left", "ADCL" },
1082 { "AIFOUTR", "Right", "ADCR" }, 1082 { "AIFOUTR Mux", "Right", "ADCR" },
1083
1084 { "AIFOUTL", NULL, "AIFOUTL Mux" },
1085 { "AIFOUTR", NULL, "AIFOUTR Mux" },
1083 1086
1084 { "ADCL", NULL, "CLK_DSP" }, 1087 { "ADCL", NULL, "CLK_DSP" },
1085 { "ADCL", NULL, "Left Capture PGA" }, 1088 { "ADCL", NULL, "Left Capture PGA" },
@@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
1089}; 1092};
1090 1093
1091static const struct snd_soc_dapm_route dac_intercon[] = { 1094static const struct snd_soc_dapm_route dac_intercon[] = {
1092 { "DACL", "Right", "AIFINR" }, 1095 { "DACL Mux", "Left", "AIFINL" },
1093 { "DACL", "Left", "AIFINL" }, 1096 { "DACL Mux", "Right", "AIFINR" },
1097
1098 { "DACR Mux", "Left", "AIFINL" },
1099 { "DACR Mux", "Right", "AIFINR" },
1100
1101 { "DACL", NULL, "DACL Mux" },
1094 { "DACL", NULL, "CLK_DSP" }, 1102 { "DACL", NULL, "CLK_DSP" },
1095 1103
1096 { "DACR", "Right", "AIFINR" }, 1104 { "DACR", NULL, "DACR Mux" },
1097 { "DACR", "Left", "AIFINL" },
1098 { "DACR", NULL, "CLK_DSP" }, 1105 { "DACR", NULL, "CLK_DSP" },
1099 1106
1100 { "Charge pump", NULL, "SYSCLK" }, 1107 { "Charge pump", NULL, "SYSCLK" },
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 031a1ae71d94..a96eb497a379 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -556,7 +556,7 @@ static struct {
556 { 22050, 2 }, 556 { 22050, 2 },
557 { 24000, 2 }, 557 { 24000, 2 },
558 { 16000, 3 }, 558 { 16000, 3 },
559 { 11250, 4 }, 559 { 11025, 4 },
560 { 12000, 4 }, 560 { 12000, 4 },
561 { 8000, 5 }, 561 { 8000, 5 },
562}; 562};
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 3eddb18fefd1..5cc457ef8894 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -344,23 +344,27 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec)
344 struct snd_ac97 *ac97; 344 struct snd_ac97 *ac97;
345 int ret = 0; 345 int ret = 0;
346 346
347 ac97 = snd_soc_new_ac97_codec(codec); 347 ac97 = snd_soc_alloc_ac97_codec(codec);
348 if (IS_ERR(ac97)) { 348 if (IS_ERR(ac97)) {
349 ret = PTR_ERR(ac97); 349 ret = PTR_ERR(ac97);
350 dev_err(codec->dev, "Failed to register AC97 codec\n"); 350 dev_err(codec->dev, "Failed to register AC97 codec\n");
351 return ret; 351 return ret;
352 } 352 }
353 353
354 snd_soc_codec_set_drvdata(codec, ac97);
355
356 ret = wm9705_reset(codec); 354 ret = wm9705_reset(codec);
357 if (ret) 355 if (ret)
358 goto reset_err; 356 goto err_put_device;
357
358 ret = device_add(&ac97->dev);
359 if (ret)
360 goto err_put_device;
361
362 snd_soc_codec_set_drvdata(codec, ac97);
359 363
360 return 0; 364 return 0;
361 365
362reset_err: 366err_put_device:
363 snd_soc_free_ac97_codec(ac97); 367 put_device(&ac97->dev);
364 return ret; 368 return ret;
365} 369}
366 370
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index e04643d2bb24..9517571e820d 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -666,7 +666,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
666 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); 666 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
667 int ret = 0; 667 int ret = 0;
668 668
669 wm9712->ac97 = snd_soc_new_ac97_codec(codec); 669 wm9712->ac97 = snd_soc_alloc_ac97_codec(codec);
670 if (IS_ERR(wm9712->ac97)) { 670 if (IS_ERR(wm9712->ac97)) {
671 ret = PTR_ERR(wm9712->ac97); 671 ret = PTR_ERR(wm9712->ac97);
672 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret); 672 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -675,15 +675,19 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
675 675
676 ret = wm9712_reset(codec, 0); 676 ret = wm9712_reset(codec, 0);
677 if (ret < 0) 677 if (ret < 0)
678 goto reset_err; 678 goto err_put_device;
679
680 ret = device_add(&wm9712->ac97->dev);
681 if (ret)
682 goto err_put_device;
679 683
680 /* set alc mux to none */ 684 /* set alc mux to none */
681 ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000); 685 ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
682 686
683 return 0; 687 return 0;
684 688
685reset_err: 689err_put_device:
686 snd_soc_free_ac97_codec(wm9712->ac97); 690 put_device(&wm9712->ac97->dev);
687 return ret; 691 return ret;
688} 692}
689 693
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 71b9d5b0734d..6ab1122a3872 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1225,7 +1225,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
1225 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); 1225 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
1226 int ret = 0, reg; 1226 int ret = 0, reg;
1227 1227
1228 wm9713->ac97 = snd_soc_new_ac97_codec(codec); 1228 wm9713->ac97 = snd_soc_alloc_ac97_codec(codec);
1229 if (IS_ERR(wm9713->ac97)) 1229 if (IS_ERR(wm9713->ac97))
1230 return PTR_ERR(wm9713->ac97); 1230 return PTR_ERR(wm9713->ac97);
1231 1231
@@ -1234,7 +1234,11 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
1234 wm9713_reset(codec, 0); 1234 wm9713_reset(codec, 0);
1235 ret = wm9713_reset(codec, 1); 1235 ret = wm9713_reset(codec, 1);
1236 if (ret < 0) 1236 if (ret < 0)
1237 goto reset_err; 1237 goto err_put_device;
1238
1239 ret = device_add(&wm9713->ac97->dev);
1240 if (ret)
1241 goto err_put_device;
1238 1242
1239 /* unmute the adc - move to kcontrol */ 1243 /* unmute the adc - move to kcontrol */
1240 reg = ac97_read(codec, AC97_CD) & 0x7fff; 1244 reg = ac97_read(codec, AC97_CD) & 0x7fff;
@@ -1242,8 +1246,8 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
1242 1246
1243 return 0; 1247 return 0;
1244 1248
1245reset_err: 1249err_put_device:
1246 snd_soc_free_ac97_codec(wm9713->ac97); 1250 put_device(&wm9713->ac97->dev);
1247 return ret; 1251 return ret;
1248} 1252}
1249 1253
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
index 91a550f4a10d..5e793bbb6b02 100644
--- a/sound/soc/fsl/fsl_esai.h
+++ b/sound/soc/fsl/fsl_esai.h
@@ -302,7 +302,7 @@
302#define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT) 302#define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
303#define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK) 303#define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
304#define ESAI_xCCR_xDC_SHIFT 9 304#define ESAI_xCCR_xDC_SHIFT 9
305#define ESAI_xCCR_xDC_WIDTH 4 305#define ESAI_xCCR_xDC_WIDTH 5
306#define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT) 306#define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
307#define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK) 307#define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
308#define ESAI_xCCR_xPSR_SHIFT 8 308#define ESAI_xCCR_xPSR_SHIFT 8
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index a65f17d57ffb..059496ed9ad7 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1362 } 1362 }
1363 1363
1364 ssi_private->irq = platform_get_irq(pdev, 0); 1364 ssi_private->irq = platform_get_irq(pdev, 0);
1365 if (!ssi_private->irq) { 1365 if (ssi_private->irq < 0) {
1366 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); 1366 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
1367 return -ENXIO; 1367 return ssi_private->irq;
1368 } 1368 }
1369 1369
1370 /* Are the RX and the TX clocks locked? */ 1370 /* Are the RX and the TX clocks locked? */
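
platform_get_irq() reports failure as a negative errno, so the old !irq
test missed real errors, and the hard-coded -ENXIO threw away the original
code (which also breaks probe deferral). The propagate-the-callee's-error
shape, standalone:

    #include <stdio.h>

    #define EPROBE_DEFER 517    /* illustrative errno value */

    /* Illustrative stand-in for platform_get_irq(). */
    static int get_irq(int fail)
    {
        return fail ? -EPROBE_DEFER : 42;
    }

    static int probe(int fail)
    {
        int irq = get_irq(fail);

        if (irq < 0)        /* not !irq: errors are negative */
            return irq;     /* propagate, don't invent -ENXIO */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", probe(0), probe(1));
        return 0;
    }
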
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 4caacb05a623..cd146d4fa805 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
257 if (ret) 257 if (ret)
258 goto clk_fail; 258 goto clk_fail;
259 data->card.num_links = 1; 259 data->card.num_links = 1;
260 data->card.owner = THIS_MODULE;
260 data->card.dai_link = &data->dai; 261 data->card.dai_link = &data->dai;
261 data->card.dapm_widgets = imx_wm8962_dapm_widgets; 262 data->card.dapm_widgets = imx_wm8962_dapm_widgets;
262 data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets); 263 data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index fb9240fdc9b7..7fe3009b1c43 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
452} 452}
453 453
454/* Decrease the reference count of the device nodes */ 454/* Decrease the reference count of the device nodes */
455static int asoc_simple_card_unref(struct platform_device *pdev) 455static int asoc_simple_card_unref(struct snd_soc_card *card)
456{ 456{
457 struct snd_soc_card *card = platform_get_drvdata(pdev);
458 struct snd_soc_dai_link *dai_link; 457 struct snd_soc_dai_link *dai_link;
459 int num_links; 458 int num_links;
460 459
@@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
556 return ret; 555 return ret;
557 556
558err: 557err:
559 asoc_simple_card_unref(pdev); 558 asoc_simple_card_unref(&priv->snd_card);
560 return ret; 559 return ret;
561} 560}
562 561
@@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
572 snd_soc_jack_free_gpios(&simple_card_mic_jack, 1, 571 snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
573 &simple_card_mic_jack_gpio); 572 &simple_card_mic_jack_gpio);
574 573
575 return asoc_simple_card_unref(pdev); 574 return asoc_simple_card_unref(card);
576} 575}
577 576
578static const struct of_device_id asoc_simple_of_match[] = { 577static const struct of_device_id asoc_simple_of_match[] = {
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index ef2e8b5766a1..b3f9489794a6 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
706 struct list_head *block_list) 706 struct list_head *block_list)
707{ 707{
708 struct sst_mem_block *block, *tmp; 708 struct sst_mem_block *block, *tmp;
709 struct sst_block_allocator ba_tmp = *ba;
709 u32 end = ba->offset + ba->size, block_end; 710 u32 end = ba->offset + ba->size, block_end;
710 int err; 711 int err;
711 712
@@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
730 if (ba->offset >= block->offset && ba->offset < block_end) { 731 if (ba->offset >= block->offset && ba->offset < block_end) {
731 732
732 /* align ba to block boundary */ 733 /* align ba to block boundary */
733 ba->size -= block_end - ba->offset; 734 ba_tmp.size -= block_end - ba->offset;
734 ba->offset = block_end; 735 ba_tmp.offset = block_end;
735 err = block_alloc_contiguous(dsp, ba, block_list); 736 err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
736 if (err < 0) 737 if (err < 0)
737 return -ENOMEM; 738 return -ENOMEM;
738 739
@@ -767,10 +768,10 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
767 list_move(&block->list, &dsp->used_block_list); 768 list_move(&block->list, &dsp->used_block_list);
768 list_add(&block->module_list, block_list); 769 list_add(&block->module_list, block_list);
769 /* align ba to block boundary */ 770 /* align ba to block boundary */
770 ba->size -= block_end - ba->offset; 771 ba_tmp.size -= block_end - ba->offset;
771 ba->offset = block_end; 772 ba_tmp.offset = block_end;
772 773
773 err = block_alloc_contiguous(dsp, ba, block_list); 774 err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
774 if (err < 0) 775 if (err < 0)
775 return -ENOMEM; 776 return -ENOMEM;
776 777
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 3f8c48231364..8156cc1accb7 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -651,11 +651,11 @@ static void hsw_notification_work(struct work_struct *work)
651 } 651 }
652 652
653 /* tell DSP that notification has been handled */ 653 /* tell DSP that notification has been handled */
654 sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IPCD, 654 sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD,
655 SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE); 655 SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE);
656 656
657 /* unmask busy interrupt */ 657 /* unmask busy interrupt */
658 sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0); 658 sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
659} 659}
660 660
661static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header) 661static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header)
@@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1228 struct sst_dsp *sst = hsw->dsp; 1228 struct sst_dsp *sst = hsw->dsp;
1229 unsigned long flags; 1229 unsigned long flags;
1230 1230
1231 if (!stream) {
1232 dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n");
1233 return 0;
1234 }
1235
1231 /* don't free DSP streams that are not committed */ 1236 /* don't free DSP streams that are not committed */
1232 if (!stream->commited) 1237 if (!stream->commited)
1233 goto out; 1238 goto out;
@@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1415 u32 header; 1420 u32 header;
1416 int ret; 1421 int ret;
1417 1422
1423 if (!stream) {
1424 dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n");
1425 return 0;
1426 }
1427
1428 if (stream->commited) {
1429 dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n");
1430 return 0;
1431 }
1432
1418 trace_ipc_request("stream alloc", stream->host_id); 1433 trace_ipc_request("stream alloc", stream->host_id);
1419 1434
1420 header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM); 1435 header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
@@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
1519{ 1534{
1520 int ret; 1535 int ret;
1521 1536
1537 if (!stream) {
1538 dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n");
1539 return 0;
1540 }
1541
1522 trace_ipc_request("stream pause", stream->reply.stream_hw_id); 1542 trace_ipc_request("stream pause", stream->reply.stream_hw_id);
1523 1543
1524 ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE, 1544 ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE,
@@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
1535{ 1555{
1536 int ret; 1556 int ret;
1537 1557
1558 if (!stream) {
1559 dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n");
1560 return 0;
1561 }
1562
1538 trace_ipc_request("stream resume", stream->reply.stream_hw_id); 1563 trace_ipc_request("stream resume", stream->reply.stream_hw_id);
1539 1564
1540 ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME, 1565 ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME,
@@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1550{ 1575{
1551 int ret, tries = 10; 1576 int ret, tries = 10;
1552 1577
1578 if (!stream) {
1579 dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n");
1580 return 0;
1581 }
1582
1553 /* don't reset streams that are not committed */ 1583 /* don't reset streams that are not committed */
1554 if (!stream->commited) 1584 if (!stream->commited)
1555 return 0; 1585 return 0;
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 2ac72eb5e75d..b3360139c41a 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -350,7 +350,7 @@ static struct sst_machines sst_acpi_bytcr[] = {
350 350
351/* Cherryview-based platforms: CherryTrail and Braswell */ 351/* Cherryview-based platforms: CherryTrail and Braswell */
352static struct sst_machines sst_acpi_chv[] = { 352static struct sst_machines sst_acpi_chv[] = {
353 {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin", 353 {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin",
354 &chv_platform_data }, 354 &chv_platform_data },
355 {}, 355 {},
356}; 356};
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 8b79cafab1e2..c7eb9dd67f60 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
434 case SND_SOC_DAIFMT_CBM_CFS: 434 case SND_SOC_DAIFMT_CBM_CFS:
435 /* McBSP slave. FS clock as output */ 435 /* McBSP slave. FS clock as output */
436 regs->srgr2 |= FSGM; 436 regs->srgr2 |= FSGM;
437 regs->pcr0 |= FSXM; 437 regs->pcr0 |= FSXM | FSRM;
438 break; 438 break;
439 case SND_SOC_DAIFMT_CBM_CFM: 439 case SND_SOC_DAIFMT_CBM_CFM:
440 /* McBSP slave */ 440 /* McBSP slave */
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 13d8507333b8..dcc26eda0539 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
335 SNDRV_PCM_FMTBIT_S24_LE), 335 SNDRV_PCM_FMTBIT_S24_LE),
336 }, 336 },
337 .ops = &rockchip_i2s_dai_ops, 337 .ops = &rockchip_i2s_dai_ops,
338 .symmetric_rates = 1,
338}; 339};
339 340
340static const struct snd_soc_component_driver rockchip_i2s_component = { 341static const struct snd_soc_component_driver rockchip_i2s_component = {
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 2e10e9a38376..08d7259bbaab 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -48,15 +48,18 @@ static void soc_ac97_device_release(struct device *dev)
48} 48}
49 49
50/** 50/**
51 * snd_soc_new_ac97_codec - initailise AC97 device 51 * snd_soc_alloc_ac97_codec() - Allocate a new AC'97 device
52 * @codec: audio codec 52 * @codec: The CODEC for which to create the AC'97 device
53 * 53 *
54 * Initialises AC97 codec resources for use by ad-hoc devices only. 54 * Allocates a new snd_ac97 device and initializes it, but does not yet register
55 * it. The caller is responsible for either calling device_add(&ac97->dev) to
56 * register the device, or calling put_device(&ac97->dev) to free the device.
57 *
58 * Returns: A snd_ac97 device or a PTR_ERR in case of an error.
55 */ 59 */
56struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) 60struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec)
57{ 61{
58 struct snd_ac97 *ac97; 62 struct snd_ac97 *ac97;
59 int ret;
60 63
61 ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL); 64 ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
62 if (ac97 == NULL) 65 if (ac97 == NULL)
@@ -73,7 +76,28 @@ struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
73 codec->component.card->snd_card->number, 0, 76 codec->component.card->snd_card->number, 0,
74 codec->component.name); 77 codec->component.name);
75 78
76 ret = device_register(&ac97->dev); 79 device_initialize(&ac97->dev);
80
81 return ac97;
82}
83EXPORT_SYMBOL(snd_soc_alloc_ac97_codec);
84
85/**
86 * snd_soc_new_ac97_codec - initialise AC97 device
87 * @codec: audio codec
88 *
89 * Initialises AC97 codec resources for use by ad-hoc devices only.
90 */
91struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
92{
93 struct snd_ac97 *ac97;
94 int ret;
95
96 ac97 = snd_soc_alloc_ac97_codec(codec);
97 if (IS_ERR(ac97))
98 return ac97;
99
100 ret = device_add(&ac97->dev);
77 if (ret) { 101 if (ret) {
78 put_device(&ac97->dev); 102 put_device(&ac97->dev);
79 return ERR_PTR(ret); 103 return ERR_PTR(ret);
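
Splitting snd_soc_new_ac97_codec() into snd_soc_alloc_ac97_codec() plus a
caller-side device_add() is what lets the wm9705/wm9712/wm9713 probes
above reset the chip before registering the device, and unwind with a bare
put_device() on failure. A toy model of the driver-core lifetime rules
involved; the types and bodies below are stand-ins, not the real API:

    #include <stdio.h>
    #include <stdlib.h>

    struct device { int refcnt; };

    static void device_initialize(struct device *d) { d->refcnt = 1; }
    static int device_add(struct device *d) { d->refcnt++; return 0; }
    static void put_device(struct device *d)
    {
        if (--d->refcnt == 0) {
            free(d);
            puts("device freed");
        }
    }

    int main(void)
    {
        struct device *d = calloc(1, sizeof(*d));
        int hw_reset_failed = 1;    /* pretend the codec reset failed */

        device_initialize(d);    /* alloc phase: refcounted, not visible */
        if (hw_reset_failed) {
            put_device(d);       /* sole reference: this frees the device */
            return 1;
        }
        if (device_add(d))       /* registration phase */
            put_device(d);
        return 0;
    }
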
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 590a82f01d0b..025c38fbe3c0 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
659 rtd->dai_link->stream_name); 659 rtd->dai_link->stream_name);
660 660
661 ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num, 661 ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
662 1, 0, &be_pcm); 662 rtd->dai_link->dpcm_playback,
663 rtd->dai_link->dpcm_capture, &be_pcm);
663 if (ret < 0) { 664 if (ret < 0) {
664 dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n", 665 dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
665 rtd->dai_link->name); 666 rtd->dai_link->name);
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
668 669
669 rtd->pcm = be_pcm; 670 rtd->pcm = be_pcm;
670 rtd->fe_compr = 1; 671 rtd->fe_compr = 1;
671 be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; 672 if (rtd->dai_link->dpcm_playback)
672 be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; 673 be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
674 else if (rtd->dai_link->dpcm_capture)
675 be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
673 memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops)); 676 memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
674 } else 677 } else
675 memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops)); 678 memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
index 86ea2d7b8845..d2b18e887071 100644
--- a/tools/lib/api/fs/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
@@ -1,3 +1,4 @@
1#define _GNU_SOURCE
1#include <errno.h> 2#include <errno.h>
2#include <stdio.h> 3#include <stdio.h>
3#include <stdlib.h> 4#include <stdlib.h>
@@ -98,3 +99,45 @@ char *debugfs_mount(const char *mountpoint)
98out: 99out:
99 return debugfs_mountpoint; 100 return debugfs_mountpoint;
100} 101}
102
103int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename)
104{
105 char sbuf[128];
106
107 switch (err) {
108 case ENOENT:
109 if (debugfs_found) {
110 snprintf(buf, size,
111 "Error:\tFile %s/%s not found.\n"
112 "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
113 debugfs_mountpoint, filename);
114 break;
115 }
116 snprintf(buf, size, "%s",
117 "Error:\tUnable to find debugfs\n"
118 "Hint:\tWas your kernel compiled with debugfs support?\n"
119 "Hint:\tIs the debugfs filesystem mounted?\n"
120 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
121 break;
122 case EACCES:
123 snprintf(buf, size,
124 "Error:\tNo permissions to read %s/%s\n"
125 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
126 debugfs_mountpoint, filename, debugfs_mountpoint);
127 break;
128 default:
129 snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
130 break;
131 }
132
133 return 0;
134}
135
136int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
137{
138 char path[PATH_MAX];
139
140 snprintf(path, PATH_MAX, "tracing/events/%s/%s", sys, name ?: "*");
141
142 return debugfs__strerror_open(err, buf, size, path);
143}
diff --git a/tools/lib/api/fs/debugfs.h b/tools/lib/api/fs/debugfs.h
index f19d3df9609d..0739881a9897 100644
--- a/tools/lib/api/fs/debugfs.h
+++ b/tools/lib/api/fs/debugfs.h
@@ -26,4 +26,7 @@ char *debugfs_mount(const char *mountpoint);
26 26
27extern char debugfs_mountpoint[]; 27extern char debugfs_mountpoint[];
28 28
29int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename);
30int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
31
29#endif /* __API_DEBUGFS_H__ */ 32#endif /* __API_DEBUGFS_H__ */
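
The new debugfs__strerror_open() helpers turn an open() errno on a debugfs
path into an actionable hint (debugfs not mounted, missing CONFIG option,
bad permissions). A hypothetical caller, assuming it is linked against
tools/lib/api:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Prototype from tools/lib/api/fs/debugfs.h; link the library in. */
    int debugfs__strerror_open_tp(int err, char *buf, size_t size,
                                  const char *sys, const char *name);

    int main(void)
    {
        char errbuf[512];
        int fd = open("/sys/kernel/debug/tracing/events/"
                      "sched/sched_switch/format", O_RDONLY);

        if (fd < 0) {
            debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf),
                                      "sched", "sched_switch");
            fprintf(stderr, "%s\n", errbuf);
            return 1;
        }
        close(fd);
        return 0;
    }
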
diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore
new file mode 100644
index 000000000000..cc0e7a9f99e3
--- /dev/null
+++ b/tools/lib/lockdep/.gitignore
@@ -0,0 +1 @@
liblockdep.so.*
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index 52f9279c6c13..4b866c54f624 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -104,7 +104,7 @@ N =
104 104
105export Q VERBOSE 105export Q VERBOSE
106 106
107INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES) 107INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
108 108
109# Set compile option CFLAGS if not set elsewhere 109# Set compile option CFLAGS if not set elsewhere
110CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g 110CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index cf3a44bf1ec3..afe20ed9fac8 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -32,6 +32,7 @@
32#include <stdint.h> 32#include <stdint.h>
33#include <limits.h> 33#include <limits.h>
34 34
35#include <netinet/ip6.h>
35#include "event-parse.h" 36#include "event-parse.h"
36#include "event-utils.h" 37#include "event-utils.h"
37 38
@@ -4149,6 +4150,324 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
4149 trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 4150 trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
4150} 4151}
4151 4152
4153static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf)
4154{
4155 const char *fmt;
4156
4157 if (i == 'i')
4158 fmt = "%03d.%03d.%03d.%03d";
4159 else
4160 fmt = "%d.%d.%d.%d";
4161
4162 trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]);
4163}
4164
4165static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
4166{
4167 return ((unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
4168 (unsigned long)(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
4169}
4170
4171static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
4172{
4173 return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
4174}
4175
4176static void print_ip6c_addr(struct trace_seq *s, unsigned char *addr)
4177{
4178 int i, j, range;
4179 unsigned char zerolength[8];
4180 int longest = 1;
4181 int colonpos = -1;
4182 uint16_t word;
4183 uint8_t hi, lo;
4184 bool needcolon = false;
4185 bool useIPv4;
4186 struct in6_addr in6;
4187
4188 memcpy(&in6, addr, sizeof(struct in6_addr));
4189
4190 useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
4191
4192 memset(zerolength, 0, sizeof(zerolength));
4193
4194 if (useIPv4)
4195 range = 6;
4196 else
4197 range = 8;
4198
4199 /* find position of longest 0 run */
4200 for (i = 0; i < range; i++) {
4201 for (j = i; j < range; j++) {
4202 if (in6.s6_addr16[j] != 0)
4203 break;
4204 zerolength[i]++;
4205 }
4206 }
4207 for (i = 0; i < range; i++) {
4208 if (zerolength[i] > longest) {
4209 longest = zerolength[i];
4210 colonpos = i;
4211 }
4212 }
4213 if (longest == 1) /* don't compress a single 0 */
4214 colonpos = -1;
4215
4216 /* emit address */
4217 for (i = 0; i < range; i++) {
4218 if (i == colonpos) {
4219 if (needcolon || i == 0)
4220 trace_seq_printf(s, ":");
4221 trace_seq_printf(s, ":");
4222 needcolon = false;
4223 i += longest - 1;
4224 continue;
4225 }
4226 if (needcolon) {
4227 trace_seq_printf(s, ":");
4228 needcolon = false;
4229 }
4230 /* hex u16 without leading 0s */
4231 word = ntohs(in6.s6_addr16[i]);
4232 hi = word >> 8;
4233 lo = word & 0xff;
4234 if (hi)
4235 trace_seq_printf(s, "%x%02x", hi, lo);
4236 else
4237 trace_seq_printf(s, "%x", lo);
4238
4239 needcolon = true;
4240 }
4241
4242 if (useIPv4) {
4243 if (needcolon)
4244 trace_seq_printf(s, ":");
4245 print_ip4_addr(s, 'I', &in6.s6_addr[12]);
4246 }
4247
4248 return;
4249}
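
The zero-run compression implemented above follows the usual RFC 5952 rules (collapse the longest run of zero groups, never a single one); glibc's inet_ntop() applies the same rules, which makes for an easy cross-check (illustration only):

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
        struct in6_addr in6;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8:0:0:0:0:2:1", &in6);
        /* inet_ntop() also collapses the longest zero run to "::",
         * so this prints "2001:db8::2:1". */
        printf("%s\n", inet_ntop(AF_INET6, &in6, buf, sizeof(buf)));
        return 0;
}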
4250
4251static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
4252{
4253 int j;
4254
4255 for (j = 0; j < 16; j += 2) {
4256 trace_seq_printf(s, "%02x%02x", buf[j], buf[j+1]);
4257 if (i == 'I' && j < 14)
4258 trace_seq_printf(s, ":");
4259 }
4260}
4261
4262/*
4263 * %pi4 print an IPv4 address with leading zeros
4264 * %pI4 print an IPv4 address without leading zeros
4265 * %pi6 print an IPv6 address without colons
4266 * %pI6 print an IPv6 address with colons
4267 * %pI6c print an IPv6 address in compressed form with colons
4268 * %pISpc print an IP address based on sockaddr; p adds port.
4269 */
4270static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
4271 void *data, int size, struct event_format *event,
4272 struct print_arg *arg)
4273{
4274 unsigned char *buf;
4275
4276 if (arg->type == PRINT_FUNC) {
4277 process_defined_func(s, data, size, event, arg);
4278 return 0;
4279 }
4280
4281 if (arg->type != PRINT_FIELD) {
4282 trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
4283 return 0;
4284 }
4285
4286 if (!arg->field.field) {
4287 arg->field.field =
4288 pevent_find_any_field(event, arg->field.name);
4289 if (!arg->field.field) {
4290 do_warning("%s: field %s not found",
4291 __func__, arg->field.name);
4292 return 0;
4293 }
4294 }
4295
4296 buf = data + arg->field.field->offset;
4297
4298 if (arg->field.field->size != 4) {
4299 trace_seq_printf(s, "INVALIDIPv4");
4300 return 0;
4301 }
4302 print_ip4_addr(s, i, buf);
4303
4304 return 0;
4305}
4306
4307static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
4308 void *data, int size, struct event_format *event,
4309 struct print_arg *arg)
4310{
4311 char have_c = 0;
4312 unsigned char *buf;
4313 int rc = 0;
4314
4315 /* pI6c */
4316 if (i == 'I' && *ptr == 'c') {
4317 have_c = 1;
4318 ptr++;
4319 rc++;
4320 }
4321
4322 if (arg->type == PRINT_FUNC) {
4323 process_defined_func(s, data, size, event, arg);
4324 return rc;
4325 }
4326
4327 if (arg->type != PRINT_FIELD) {
4328 trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
4329 return rc;
4330 }
4331
4332 if (!arg->field.field) {
4333 arg->field.field =
4334 pevent_find_any_field(event, arg->field.name);
4335 if (!arg->field.field) {
4336 do_warning("%s: field %s not found",
4337 __func__, arg->field.name);
4338 return rc;
4339 }
4340 }
4341
4342 buf = data + arg->field.field->offset;
4343
4344 if (arg->field.field->size != 16) {
4345 trace_seq_printf(s, "INVALIDIPv6");
4346 return rc;
4347 }
4348
4349 if (have_c)
4350 print_ip6c_addr(s, buf);
4351 else
4352 print_ip6_addr(s, i, buf);
4353
4354 return rc;
4355}
4356
4357static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
4358 void *data, int size, struct event_format *event,
4359 struct print_arg *arg)
4360{
4361 char have_c = 0, have_p = 0;
4362 unsigned char *buf;
4363 struct sockaddr_storage *sa;
4364 int rc = 0;
4365
4366 /* pISpc */
4367 if (i == 'I') {
4368 if (*ptr == 'p') {
4369 have_p = 1;
4370 ptr++;
4371 rc++;
4372 }
4373 if (*ptr == 'c') {
4374 have_c = 1;
4375 ptr++;
4376 rc++;
4377 }
4378 }
4379
4380 if (arg->type == PRINT_FUNC) {
4381 process_defined_func(s, data, size, event, arg);
4382 return rc;
4383 }
4384
4385 if (arg->type != PRINT_FIELD) {
4386 trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
4387 return rc;
4388 }
4389
4390 if (!arg->field.field) {
4391 arg->field.field =
4392 pevent_find_any_field(event, arg->field.name);
4393 if (!arg->field.field) {
4394 do_warning("%s: field %s not found",
4395 __func__, arg->field.name);
4396 return rc;
4397 }
4398 }
4399
4400 sa = (struct sockaddr_storage *) (data + arg->field.field->offset);
4401
4402 if (sa->ss_family == AF_INET) {
4403 struct sockaddr_in *sa4 = (struct sockaddr_in *) sa;
4404
4405 if (arg->field.field->size < sizeof(struct sockaddr_in)) {
4406 trace_seq_printf(s, "INVALIDIPv4");
4407 return rc;
4408 }
4409
4410 print_ip4_addr(s, i, (unsigned char *) &sa4->sin_addr);
4411 if (have_p)
4412 trace_seq_printf(s, ":%d", ntohs(sa4->sin_port));
4413
4414
4415 } else if (sa->ss_family == AF_INET6) {
4416 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa;
4417
4418 if (arg->field.field->size < sizeof(struct sockaddr_in6)) {
4419 trace_seq_printf(s, "INVALIDIPv6");
4420 return rc;
4421 }
4422
4423 if (have_p)
4424 trace_seq_printf(s, "[");
4425
4426 buf = (unsigned char *) &sa6->sin6_addr;
4427 if (have_c)
4428 print_ip6c_addr(s, buf);
4429 else
4430 print_ip6_addr(s, i, buf);
4431
4432 if (have_p)
4433 trace_seq_printf(s, "]:%d", ntohs(sa6->sin6_port));
4434 }
4435
4436 return rc;
4437}
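
The same family dispatch can be sketched with plain libc calls; a standalone illustration (not the patch's code) of the '[addr]:port' convention that %pISpc produces for IPv6:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static void print_sa(const struct sockaddr_storage *ss)
{
        char buf[INET6_ADDRSTRLEN];

        if (ss->ss_family == AF_INET) {
                const struct sockaddr_in *sa4 = (const void *)ss;

                inet_ntop(AF_INET, &sa4->sin_addr, buf, sizeof(buf));
                printf("%s:%d\n", buf, ntohs(sa4->sin_port));
        } else if (ss->ss_family == AF_INET6) {
                const struct sockaddr_in6 *sa6 = (const void *)ss;

                inet_ntop(AF_INET6, &sa6->sin6_addr, buf, sizeof(buf));
                /* brackets disambiguate the port from the colons */
                printf("[%s]:%d\n", buf, ntohs(sa6->sin6_port));
        }
}

int main(void)
{
        struct sockaddr_in6 sa6;

        memset(&sa6, 0, sizeof(sa6));
        sa6.sin6_family = AF_INET6;
        sa6.sin6_port = htons(5201);
        inet_pton(AF_INET6, "2001:db8::1", &sa6.sin6_addr);
        print_sa((struct sockaddr_storage *)&sa6);      /* [2001:db8::1]:5201 */
        return 0;
}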
4438
4439static int print_ip_arg(struct trace_seq *s, const char *ptr,
4440 void *data, int size, struct event_format *event,
4441 struct print_arg *arg)
4442{
4443 char i = *ptr; /* 'i' or 'I' */
4444 char ver;
4445 int rc = 0;
4446
4447 ptr++;
4448 rc++;
4449
4450 ver = *ptr;
4451 ptr++;
4452 rc++;
4453
4454 switch (ver) {
4455 case '4':
4456 rc += print_ipv4_arg(s, ptr, i, data, size, event, arg);
4457 break;
4458 case '6':
4459 rc += print_ipv6_arg(s, ptr, i, data, size, event, arg);
4460 break;
4461 case 'S':
4462 rc += print_ipsa_arg(s, ptr, i, data, size, event, arg);
4463 break;
4464 default:
4465 return 0;
4466 }
4467
4468 return rc;
4469}
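
print_ip_arg() returns the number of specifier characters it consumed, starting at the 'i'/'I', so the caller can advance past e.g. "I4", "i6", "I6c" or "ISpc". A minimal standalone sketch of that contract (hypothetical helper name, not the patch's code):

#include <stdio.h>

/* Toy version of the consumed-count contract: return how many
 * characters of `spec` (the text after "%p") were recognized. */
static int consume_ip_spec(const char *spec)
{
        int rc = 0;

        if (spec[0] != 'i' && spec[0] != 'I')
                return 0;
        rc++;                   /* 'i' or 'I' */

        switch (spec[1]) {
        case '4':
        case '6':
                rc++;           /* version digit */
                if (spec[1] == '6' && spec[2] == 'c')
                        rc++;   /* compressed form */
                return rc;
        case 'S':
                rc++;           /* sockaddr form */
                while (spec[rc] == 'p' || spec[rc] == 'c')
                        rc++;   /* optional port/compressed flags */
                return rc;
        default:
                return 0;       /* unknown: caller falls through */
        }
}

int main(void)
{
        printf("%d %d %d\n",
               consume_ip_spec("I4 "),    /* 2 */
               consume_ip_spec("I6c "),   /* 3 */
               consume_ip_spec("ISpc ")); /* 4 */
        return 0;
}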
4470
4152static int is_printable_array(char *p, unsigned int len) 4471static int is_printable_array(char *p, unsigned int len)
4153{ 4472{
4154 unsigned int i; 4473 unsigned int i;
@@ -4337,6 +4656,15 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
4337 ptr++; 4656 ptr++;
4338 arg = arg->next; 4657 arg = arg->next;
4339 break; 4658 break;
4659 } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
4660 int n;
4661
4662 n = print_ip_arg(s, ptr+1, data, size, event, arg);
4663 if (n > 0) {
4664 ptr += n;
4665 arg = arg->next;
4666 break;
4667 }
4340 } 4668 }
4341 4669
4342 /* fall through */ 4670 /* fall through */
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index fd77d81ea748..0294c57b1f5e 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -38,7 +38,7 @@ OPTIONS
38--remove=:: 38--remove=::
39 Remove specified file from the cache. 39 Remove specified file from the cache.
40-M:: 40-M::
41--missing=:: 41--missing=::
42 List missing build ids in the cache for the specified file. 42 List missing build ids in the cache for the specified file.
43-u:: 43-u::
44--update:: 44--update::
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index cbb4f743d921..3e2aec94f806 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -89,6 +89,19 @@ raw encoding of 0x1A8 can be used:
89You should refer to the processor specific documentation for getting these 89You should refer to the processor specific documentation for getting these
90details. Some of them are referenced in the SEE ALSO section below. 90details. Some of them are referenced in the SEE ALSO section below.
91 91
92PARAMETERIZED EVENTS
93--------------------
94
95Some PMU events listed by 'perf list' will be displayed with '?' in them. For
96example:
97
98 hv_gpci/dtbp_ptitc,phys_processor_idx=?/
99
100This means that when provided as an event, a value for '?' must
101also be supplied. For example:
102
103 perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
104
92OPTIONS 105OPTIONS
93------- 106-------
94 107
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 1d78a4064da4..43310d8661fe 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -12,11 +12,12 @@ SYNOPSIS
12 12
13DESCRIPTION 13DESCRIPTION
14----------- 14-----------
15"perf mem -t <TYPE> record" runs a command and gathers memory operation data 15"perf mem record" runs a command and gathers memory operation data
16from it, into perf.data. Perf record options are accepted and are passed through. 16from it, into perf.data. Perf record options are accepted and are passed through.
17 17
18"perf mem -t <TYPE> report" displays the result. It invokes perf report with the 18"perf mem report" displays the result. It invokes perf report with the
19right set of options to display a memory access profile. 19right set of options to display a memory access profile. By default, loads
20and stores are sampled. Use the -t option to limit to loads or stores.
20 21
21Note that on Intel systems the memory latency reported is the use-latency, 22Note that on Intel systems the memory latency reported is the use-latency,
22not the pure load (or store latency). Use latency includes any pipeline 23not the pure load (or store latency). Use latency includes any pipeline
@@ -29,7 +30,7 @@ OPTIONS
29 30
30-t:: 31-t::
31--type=:: 32--type=::
32 Select the memory operation type: load or store (default: load)
33 Select the memory operation type: load or store (default: load,store)
33 34
34-D:: 35-D::
35--dump-raw-samples=:: 36--dump-raw-samples=::
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index af9a54ece024..31e977459c51 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -33,12 +33,27 @@ OPTIONS
33 - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a 33 - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
34 hexadecimal event descriptor. 34 hexadecimal event descriptor.
35 35
36 - a hardware breakpoint event in the form of '\mem:addr[:access]'
36 - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
37 'param1', 'param2', etc are defined as formats for the PMU in
38 /sys/bus/event_source/devices/<pmu>/format/*.
39
40 - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
41
42 where M, N, K are numbers (in decimal, hex or octal format). Acceptable
43 values for each of 'config', 'config1' and 'config2' are defined by
44 corresponding entries in /sys/bus/event_source/devices/<pmu>/format/*,
45 just as 'param1' and 'param2' above are defined as formats for the PMU
46 in /sys/bus/event_source/devices/<pmu>/format/*
47
48 - a hardware breakpoint event in the form of '\mem:addr[/len][:access]'
37 where addr is the address in memory you want to break in. 49 where addr is the address in memory you want to break in.
38 Access is the memory access type (read, write, execute); it can 50 Access is the memory access type (read, write, execute); it can
39 be passed as follows: '\mem:addr[:[r][w][x]]'.
51 be passed as follows: '\mem:addr[:[r][w][x]]'. len is the range,
52 number of bytes from specified addr, which the breakpoint will cover.
40 If you want to profile read-write accesses in 0x1000, just set 53 If you want to profile read-write accesses in 0x1000, just set
41 'mem:0x1000:rw'. 54 'mem:0x1000:rw'.
55 If you want to profile write accesses in [0x1000~1008), just set
56 'mem:0x1000/8:w'.
42 57
43--filter=<filter>:: 58--filter=<filter>::
44 Event filter. 59 Event filter.
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 21494806c0ab..a21eec05bc42 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -125,46 +125,46 @@ OPTIONS
125 is equivalent to: 125 is equivalent to:
126 126
127 perf script -f trace:<fields> -f sw:<fields> -f hw:<fields> 127 perf script -f trace:<fields> -f sw:<fields> -f hw:<fields>
128 128
129 i.e., the specified fields apply to all event types if the type string 129 i.e., the specified fields apply to all event types if the type string
130 is not given. 130 is not given.
131 131
132 The arguments are processed in the order received. A later usage can 132 The arguments are processed in the order received. A later usage can
133 reset a prior request. e.g.: 133 reset a prior request. e.g.:
134 134
135 -f trace: -f comm,tid,time,ip,sym 135 -f trace: -f comm,tid,time,ip,sym
136 136
137 The first -f suppresses trace events (field list is ""), but then the 137 The first -f suppresses trace events (field list is ""), but then the
138 second invocation sets the fields to comm,tid,time,ip,sym. In this case a 138 second invocation sets the fields to comm,tid,time,ip,sym. In this case a
139 warning is given to the user: 139 warning is given to the user:
140 140
141 "Overriding previous field request for all events." 141 "Overriding previous field request for all events."
142 142
143 Alternatively, consider the order: 143 Alternatively, consider the order:
144 144
145 -f comm,tid,time,ip,sym -f trace: 145 -f comm,tid,time,ip,sym -f trace:
146 146
147 The first -f sets the fields for all events and the second -f 147 The first -f sets the fields for all events and the second -f
148 suppresses trace events. The user is given a warning message about 148 suppresses trace events. The user is given a warning message about
149 the override, and the result of the above is that only S/W and H/W 149 the override, and the result of the above is that only S/W and H/W
150 events are displayed with the given fields. 150 events are displayed with the given fields.
151 151
152 For the 'wildcard' option if a user selected field is invalid for an 152 For the 'wildcard' option if a user selected field is invalid for an
153 event type, a message is displayed to the user that the option is 153 event type, a message is displayed to the user that the option is
154 ignored for that type. For example: 154 ignored for that type. For example:
155 155
156 $ perf script -f comm,tid,trace 156 $ perf script -f comm,tid,trace
157 'trace' not valid for hardware events. Ignoring. 157 'trace' not valid for hardware events. Ignoring.
158 'trace' not valid for software events. Ignoring. 158 'trace' not valid for software events. Ignoring.
159 159
160 Alternatively, if the type is given an invalid field is specified it 160 Alternatively, if the type is given an invalid field is specified it
161 is an error. For example: 161 is an error. For example:
162 162
163 perf script -v -f sw:comm,tid,trace 163 perf script -v -f sw:comm,tid,trace
164 'trace' not valid for software events. 164 'trace' not valid for software events.
165 165
166 At this point usage is displayed, and perf-script exits. 166 At this point usage is displayed, and perf-script exits.
167 167
168 Finally, a user may not set fields to none for all event types. 168 Finally, a user may not set fields to none for all event types.
169 i.e., -f "" is not allowed. 169 i.e., -f "" is not allowed.
170 170
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 29ee857c09c6..04e150d83e7d 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -25,10 +25,22 @@ OPTIONS
25 25
26-e:: 26-e::
27--event=:: 27--event=::
28 Select the PMU event. Selection can be a symbolic event name
29 (use 'perf list' to list all events) or a raw PMU
30 event (eventsel+umask) in the form of rNNN where NNN is a
31 hexadecimal event descriptor.
28 Select the PMU event. Selection can be:
29
30 - a symbolic event name (use 'perf list' to list all events)
31
32 - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
33 hexadecimal event descriptor.
34
35 - a symbolically formed event like 'pmu/param1=0x3,param2/' where
36 param1 and param2 are defined as formats for the PMU in
37 /sys/bus/event_source/devices/<pmu>/format/*
38
39 - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
40 where M, N, K are numbers (in decimal, hex, octal format).
41 Acceptable values for each of 'config', 'config1' and 'config2'
42 parameters are defined by corresponding entries in
43 /sys/bus/event_source/devices/<pmu>/format/*
32 44
33-i:: 45-i::
34--no-inherit:: 46--no-inherit::
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index 71f2844cf97f..7ed22ff1e1ac 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -68,4 +68,17 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
68 val, opflags); 68 val, opflags);
69} 69}
70 70
71#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
72#include <pthread.h>
73static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr,
74 size_t cpusetsize,
75 cpu_set_t *cpuset)
76{
77 attr = attr;
78 cpusetsize = cpusetsize;
79 cpuset = cpuset;
80 return 0;
81}
82#endif
83
71#endif /* _FUTEX_H */ 84#endif /* _FUTEX_H */
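
The stub above only keeps the futex benchmarks building on C libraries that lack this GNU extension; it deliberately pins nothing. Where the real call is available, typical usage looks like this (a sketch, not taken from the patch):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
        (void)arg;
        printf("running on CPU %d\n", sched_getcpu());
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_attr_t attr;
        cpu_set_t cpuset;

        CPU_ZERO(&cpuset);
        CPU_SET(0, &cpuset);            /* pin the thread to CPU 0 */

        pthread_attr_init(&attr);
        pthread_attr_setaffinity_np(&attr, sizeof(cpuset), &cpuset);
        pthread_create(&t, &attr, worker, NULL);
        pthread_join(t, NULL);
        pthread_attr_destroy(&attr);
        return 0;
}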
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 77d5cae54c6a..50e6b66aea1f 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -236,10 +236,10 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
236 if (errno == ENOENT) 236 if (errno == ENOENT)
237 return false; 237 return false;
238 238
239 pr_warning("Problems with %s file, consider removing it from the cache\n", 239 pr_warning("Problems with %s file, consider removing it from the cache\n",
240 filename); 240 filename);
241 } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) { 241 } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
242 pr_warning("Problems with %s file, consider removing it from the cache\n", 242 pr_warning("Problems with %s file, consider removing it from the cache\n",
243 filename); 243 filename);
244 } 244 }
245 245
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1fd96c13f199..74aada554b12 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -390,6 +390,15 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
390 } 390 }
391} 391}
392 392
393static struct data__file *fmt_to_data_file(struct perf_hpp_fmt *fmt)
394{
395 struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt);
396 void *ptr = dfmt - dfmt->idx;
397 struct data__file *d = container_of(ptr, struct data__file, fmt);
398
399 return d;
400}
401
393static struct hist_entry* 402static struct hist_entry*
394get_pair_data(struct hist_entry *he, struct data__file *d) 403get_pair_data(struct hist_entry *he, struct data__file *d)
395{ 404{
@@ -407,8 +416,7 @@ get_pair_data(struct hist_entry *he, struct data__file *d)
407static struct hist_entry* 416static struct hist_entry*
408get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt) 417get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
409{ 418{
410 void *ptr = dfmt - dfmt->idx;
411 struct data__file *d = container_of(ptr, struct data__file, fmt);
419 struct data__file *d = fmt_to_data_file(&dfmt->fmt);
412 420
413 return get_pair_data(he, d); 421 return get_pair_data(he, d);
414} 422}
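
fmt_to_data_file() relies on 'fmt' living in an array embedded in struct data__file: stepping the element pointer back by its own index lands on element zero, which container_of() maps to the outer struct. The same trick in a standalone sketch (hypothetical types, not the patch's code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fmt { int idx; };

struct outer {
        const char *name;
        struct fmt fmt[4];      /* embedded array, fmt[i].idx == i */
};

int main(void)
{
        struct outer o = { .name = "demo" };
        struct fmt *f;
        int i;

        for (i = 0; i < 4; i++)
                o.fmt[i].idx = i;

        f = &o.fmt[2];          /* pretend this is all a callback receives */
        /* f - f->idx == &o.fmt[0], which container_of maps back to &o */
        printf("%s\n", container_of(f - f->idx, struct outer, fmt)->name);
        return 0;
}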
@@ -430,7 +438,7 @@ static void hists__baseline_only(struct hists *hists)
430 next = rb_next(&he->rb_node_in); 438 next = rb_next(&he->rb_node_in);
431 if (!hist_entry__next_pair(he)) { 439 if (!hist_entry__next_pair(he)) {
432 rb_erase(&he->rb_node_in, root); 440 rb_erase(&he->rb_node_in, root);
433 hist_entry__free(he);
441 hist_entry__delete(he);
434 } 442 }
435 } 443 }
436} 444}
@@ -448,26 +456,30 @@ static void hists__precompute(struct hists *hists)
448 next = rb_first(root); 456 next = rb_first(root);
449 while (next != NULL) { 457 while (next != NULL) {
450 struct hist_entry *he, *pair; 458 struct hist_entry *he, *pair;
459 struct data__file *d;
460 int i;
451 461
452 he = rb_entry(next, struct hist_entry, rb_node_in); 462 he = rb_entry(next, struct hist_entry, rb_node_in);
453 next = rb_next(&he->rb_node_in); 463 next = rb_next(&he->rb_node_in);
454 464
455 pair = get_pair_data(he, &data__files[sort_compute]);
456 if (!pair)
457 continue;
465 data__for_each_file_new(i, d) {
466 pair = get_pair_data(he, d);
467 if (!pair)
468 continue;
458 469
459 switch (compute) { 470 switch (compute) {
460 case COMPUTE_DELTA: 471 case COMPUTE_DELTA:
461 compute_delta(he, pair); 472 compute_delta(he, pair);
462 break; 473 break;
463 case COMPUTE_RATIO: 474 case COMPUTE_RATIO:
464 compute_ratio(he, pair); 475 compute_ratio(he, pair);
465 break; 476 break;
466 case COMPUTE_WEIGHTED_DIFF: 477 case COMPUTE_WEIGHTED_DIFF:
467 compute_wdiff(he, pair); 478 compute_wdiff(he, pair);
468 break; 479 break;
469 default: 480 default:
470 BUG_ON(1); 481 BUG_ON(1);
482 }
471 } 483 }
472 } 484 }
473} 485}
@@ -517,7 +529,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
517 529
518static int64_t 530static int64_t
519hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, 531hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
520 int c)
532 int c, int sort_idx)
521{ 533{
522 bool pairs_left = hist_entry__has_pairs(left); 534 bool pairs_left = hist_entry__has_pairs(left);
523 bool pairs_right = hist_entry__has_pairs(right); 535 bool pairs_right = hist_entry__has_pairs(right);
@@ -529,8 +541,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
529 if (!pairs_left || !pairs_right) 541 if (!pairs_left || !pairs_right)
530 return pairs_left ? -1 : 1; 542 return pairs_left ? -1 : 1;
531 543
532 p_left = get_pair_data(left, &data__files[sort_compute]);
533 p_right = get_pair_data(right, &data__files[sort_compute]);
544 p_left = get_pair_data(left, &data__files[sort_idx]);
545 p_right = get_pair_data(right, &data__files[sort_idx]);
534 546
535 if (!p_left && !p_right) 547 if (!p_left && !p_right)
536 return 0; 548 return 0;
@@ -546,90 +558,102 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
546} 558}
547 559
548static int64_t 560static int64_t
549hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
561hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
562 int c, int sort_idx)
563{
564 struct hist_entry *p_right, *p_left;
565
566 p_left = get_pair_data(left, &data__files[sort_idx]);
567 p_right = get_pair_data(right, &data__files[sort_idx]);
568
569 if (!p_left && !p_right)
570 return 0;
571
572 if (!p_left || !p_right)
573 return p_left ? -1 : 1;
574
575 if (c != COMPUTE_DELTA) {
576 /*
577 * The delta can be computed without the baseline, but
578 * others are not. Put those entries which have no
579 * values below.
580 */
581 if (left->dummy && right->dummy)
582 return 0;
583
584 if (left->dummy || right->dummy)
585 return left->dummy ? 1 : -1;
586 }
587
588 return __hist_entry__cmp_compute(p_left, p_right, c);
589}
590
591static int64_t
592hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused,
593 struct hist_entry *left __maybe_unused,
550 struct hist_entry *right __maybe_unused) 594 struct hist_entry *right __maybe_unused)
551{ 595{
552 return 0; 596 return 0;
553} 597}
554 598
555static int64_t 599static int64_t
556hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
600hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused,
601 struct hist_entry *left, struct hist_entry *right)
557{ 602{
558 if (sort_compute)
559 return 0;
560
561 if (left->stat.period == right->stat.period) 603 if (left->stat.period == right->stat.period)
562 return 0; 604 return 0;
563 return left->stat.period > right->stat.period ? 1 : -1; 605 return left->stat.period > right->stat.period ? 1 : -1;
564} 606}
565 607
566static int64_t 608static int64_t
567hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
609hist_entry__cmp_delta(struct perf_hpp_fmt *fmt,
610 struct hist_entry *left, struct hist_entry *right)
568{ 611{
569 return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
612 struct data__file *d = fmt_to_data_file(fmt);
613
614 return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx);
570} 615}
571 616
572static int64_t 617static int64_t
573hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
618hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt,
619 struct hist_entry *left, struct hist_entry *right)
574{ 620{
575 return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
621 struct data__file *d = fmt_to_data_file(fmt);
622
623 return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, d->idx);
576} 624}
577 625
578static int64_t 626static int64_t
579hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
627hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt,
628 struct hist_entry *left, struct hist_entry *right)
580{ 629{
581 return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
630 struct data__file *d = fmt_to_data_file(fmt);
631
632 return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx);
582} 633}
583 634
584static void insert_hist_entry_by_compute(struct rb_root *root,
585 struct hist_entry *he,
586 int c)
587{
588 struct rb_node **p = &root->rb_node;
589 struct rb_node *parent = NULL;
590 struct hist_entry *iter;
591
592 while (*p != NULL) {
593 parent = *p;
594 iter = rb_entry(parent, struct hist_entry, rb_node);
595 if (hist_entry__cmp_compute(he, iter, c) < 0)
596 p = &(*p)->rb_left;
597 else
598 p = &(*p)->rb_right;
599 }
600
601 rb_link_node(&he->rb_node, parent, p);
602 rb_insert_color(&he->rb_node, root);
603}
604
605static void hists__compute_resort(struct hists *hists)
606{
607 struct rb_root *root;
608 struct rb_node *next;
609
610 if (sort__need_collapse)
611 root = &hists->entries_collapsed;
612 else
613 root = hists->entries_in;
614
615 hists->entries = RB_ROOT;
616 next = rb_first(root);
617
618 hists__reset_stats(hists);
619 hists__reset_col_len(hists);
620
621 while (next != NULL) {
622 struct hist_entry *he;
623
624 he = rb_entry(next, struct hist_entry, rb_node_in);
625 next = rb_next(&he->rb_node_in);
626
627 insert_hist_entry_by_compute(&hists->entries, he, compute);
628 hists__inc_stats(hists, he);
629
630 if (!he->filtered)
631 hists__calc_col_len(hists, he);
632 }
633}
635static int64_t
636hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused,
637 struct hist_entry *left, struct hist_entry *right)
638{
639 return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA,
640 sort_compute);
641}
642
643static int64_t
644hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused,
645 struct hist_entry *left, struct hist_entry *right)
646{
647 return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO,
648 sort_compute);
649}
650
651static int64_t
652hist_entry__cmp_wdiff_idx(struct perf_hpp_fmt *fmt __maybe_unused,
653 struct hist_entry *left, struct hist_entry *right)
654{
655 return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF,
656 sort_compute);
657}
634 658
635static void hists__process(struct hists *hists) 659static void hists__process(struct hists *hists)
@@ -637,12 +661,8 @@ static void hists__process(struct hists *hists)
637 if (show_baseline_only) 661 if (show_baseline_only)
638 hists__baseline_only(hists); 662 hists__baseline_only(hists);
639 663
640 if (sort_compute) {
641 hists__precompute(hists);
642 hists__compute_resort(hists);
643 } else {
644 hists__output_resort(hists, NULL);
645 }
664 hists__precompute(hists);
665 hists__output_resort(hists, NULL);
646 666
647 hists__fprintf(hists, true, 0, 0, 0, stdout); 667 hists__fprintf(hists, true, 0, 0, 0, stdout);
648} 668}
@@ -841,7 +861,7 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
841 char pfmt[20] = " "; 861 char pfmt[20] = " ";
842 862
843 if (!pair) 863 if (!pair)
844 goto dummy_print;
864 goto no_print;
845 865
846 switch (comparison_method) { 866 switch (comparison_method) {
847 case COMPUTE_DELTA: 867 case COMPUTE_DELTA:
@@ -850,8 +870,6 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
850 else 870 else
851 diff = compute_delta(he, pair); 871 diff = compute_delta(he, pair);
852 872
853 if (fabs(diff) < 0.01)
854 goto dummy_print;
855 scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1); 873 scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
856 return percent_color_snprintf(hpp->buf, hpp->size, 874 return percent_color_snprintf(hpp->buf, hpp->size,
857 pfmt, diff); 875 pfmt, diff);
@@ -883,6 +901,9 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
883 } 901 }
884dummy_print: 902dummy_print:
885 return scnprintf(hpp->buf, hpp->size, "%*s", 903 return scnprintf(hpp->buf, hpp->size, "%*s",
904 dfmt->header_width, "N/A");
905no_print:
906 return scnprintf(hpp->buf, hpp->size, "%*s",
886 dfmt->header_width, pfmt); 907 dfmt->header_width, pfmt);
887} 908}
888 909
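
Both fallback labels are right-aligned to the column width via the dynamic-width '%*s' specifier; for reference:

#include <stdio.h>

int main(void)
{
        int header_width = 9;

        /* "%*s" right-aligns the string in a field of header_width chars */
        printf("[%*s]\n", header_width, "N/A");   /* [      N/A] */
        return 0;
}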
@@ -932,14 +953,15 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
932 else 953 else
933 diff = compute_delta(he, pair); 954 diff = compute_delta(he, pair);
934 955
935 if (fabs(diff) >= 0.01)
936 scnprintf(buf, size, "%+4.2F%%", diff);
956 scnprintf(buf, size, "%+4.2F%%", diff);
937 break; 957 break;
938 958
939 case PERF_HPP_DIFF__RATIO: 959 case PERF_HPP_DIFF__RATIO:
940 /* No point for ratio number if we are dummy.. */ 960 /* No point for ratio number if we are dummy.. */
941 if (he->dummy)
961 if (he->dummy) {
962 scnprintf(buf, size, "N/A");
942 break; 963 break;
964 }
943 965
944 if (pair->diff.computed) 966 if (pair->diff.computed)
945 ratio = pair->diff.period_ratio; 967 ratio = pair->diff.period_ratio;
@@ -952,8 +974,10 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
952 974
953 case PERF_HPP_DIFF__WEIGHTED_DIFF: 975 case PERF_HPP_DIFF__WEIGHTED_DIFF:
954 /* No point for wdiff number if we are dummy.. */ 976 /* No point for wdiff number if we are dummy.. */
955 if (he->dummy)
977 if (he->dummy) {
978 scnprintf(buf, size, "N/A");
956 break; 979 break;
980 }
957 981
958 if (pair->diff.computed) 982 if (pair->diff.computed)
959 wdiff = pair->diff.wdiff; 983 wdiff = pair->diff.wdiff;
@@ -1105,9 +1129,10 @@ static void data__hpp_register(struct data__file *d, int idx)
1105 perf_hpp__register_sort_field(fmt); 1129 perf_hpp__register_sort_field(fmt);
1106} 1130}
1107 1131
1108static void ui_init(void)
1132static int ui_init(void)
1109{ 1133{
1110 struct data__file *d; 1134 struct data__file *d;
1135 struct perf_hpp_fmt *fmt;
1111 int i; 1136 int i;
1112 1137
1113 data__for_each_file(i, d) { 1138 data__for_each_file(i, d) {
@@ -1137,6 +1162,46 @@ static void ui_init(void)
1137 data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD : 1162 data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
1138 PERF_HPP_DIFF__PERIOD_BASELINE); 1163 PERF_HPP_DIFF__PERIOD_BASELINE);
1139 } 1164 }
1165
1166 if (!sort_compute)
1167 return 0;
1168
1169 /*
1170 * Prepend an fmt to sort on columns at 'sort_compute' first.
1171 * This fmt is added only to the sort list but not to the
1172 * output fields list.
1173 *
1174 * Note that this column (data) can be compared twice - one
1175 * for this 'sort_compute' fmt and another for the normal
1176 * diff_hpp_fmt. But it shouldn't be a problem as most entries
1177 * will be sorted out by first try or baseline and comparing
1178 * is not a costly operation.
1179 */
1180 fmt = zalloc(sizeof(*fmt));
1181 if (fmt == NULL) {
1182 pr_err("Memory allocation failed\n");
1183 return -1;
1184 }
1185
1186 fmt->cmp = hist_entry__cmp_nop;
1187 fmt->collapse = hist_entry__cmp_nop;
1188
1189 switch (compute) {
1190 case COMPUTE_DELTA:
1191 fmt->sort = hist_entry__cmp_delta_idx;
1192 break;
1193 case COMPUTE_RATIO:
1194 fmt->sort = hist_entry__cmp_ratio_idx;
1195 break;
1196 case COMPUTE_WEIGHTED_DIFF:
1197 fmt->sort = hist_entry__cmp_wdiff_idx;
1198 break;
1199 default:
1200 BUG_ON(1);
1201 }
1202
1203 list_add(&fmt->sort_list, &perf_hpp__sort_list);
1204 return 0;
1140} 1205}
1141 1206
1142static int data_init(int argc, const char **argv) 1207static int data_init(int argc, const char **argv)
@@ -1202,7 +1267,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
1202 if (data_init(argc, argv) < 0) 1267 if (data_init(argc, argv) < 0)
1203 return -1; 1268 return -1;
1204 1269
1205 ui_init();
1270 if (ui_init() < 0)
1271 return -1;
1206 1272
1207 sort__mode = SORT_MODE__DIFF; 1273 sort__mode = SORT_MODE__DIFF;
1208 1274
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 84df2deed988..a13641e066f5 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -343,6 +343,7 @@ static int __cmd_inject(struct perf_inject *inject)
343 int ret = -EINVAL; 343 int ret = -EINVAL;
344 struct perf_session *session = inject->session; 344 struct perf_session *session = inject->session;
345 struct perf_data_file *file_out = &inject->output; 345 struct perf_data_file *file_out = &inject->output;
346 int fd = perf_data_file__fd(file_out);
346 347
347 signal(SIGINT, sig_handler); 348 signal(SIGINT, sig_handler);
348 349
@@ -376,7 +377,7 @@ static int __cmd_inject(struct perf_inject *inject)
376 } 377 }
377 378
378 if (!file_out->is_pipe) 379 if (!file_out->is_pipe)
379 lseek(file_out->fd, session->header.data_offset, SEEK_SET);
380 lseek(fd, session->header.data_offset, SEEK_SET);
380 381
381 ret = perf_session__process_events(session, &inject->tool); 382 ret = perf_session__process_events(session, &inject->tool);
382 383
@@ -385,7 +386,7 @@ static int __cmd_inject(struct perf_inject *inject)
385 perf_header__set_feat(&session->header, 386 perf_header__set_feat(&session->header,
386 HEADER_BUILD_ID); 387 HEADER_BUILD_ID);
387 session->header.data_size = inject->bytes_written; 388 session->header.data_size = inject->bytes_written;
388 perf_session__write_header(session, session->evlist, file_out->fd, true); 389 perf_session__write_header(session, session->evlist, fd, true);
389 } 390 }
390 391
391 return ret; 392 return ret;
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 24db6ffe2957..9b5663950a4d 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -7,44 +7,47 @@
7#include "util/session.h" 7#include "util/session.h"
8#include "util/data.h" 8#include "util/data.h"
9 9
10#define MEM_OPERATION_LOAD "load"
11#define MEM_OPERATION_STORE "store"
10#define MEM_OPERATION_LOAD 0x1
11#define MEM_OPERATION_STORE 0x2
12
13static const char *mem_operation = MEM_OPERATION_LOAD;
14 12
15struct perf_mem { 13struct perf_mem {
16 struct perf_tool tool; 14 struct perf_tool tool;
17 char const *input_name; 15 char const *input_name;
18 bool hide_unresolved; 16 bool hide_unresolved;
19 bool dump_raw; 17 bool dump_raw;
18 int operation;
20 const char *cpu_list; 19 const char *cpu_list;
21 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 20 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
22}; 21};
23 22
24static int __cmd_record(int argc, const char **argv)
23static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
25{ 24{
26 int rec_argc, i = 0, j; 25 int rec_argc, i = 0, j;
27 const char **rec_argv; 26 const char **rec_argv;
28 char event[64];
29 int ret; 27 int ret;
30 28
31 rec_argc = argc + 4;
29 rec_argc = argc + 7; /* max number of arguments */
32 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 30 rec_argv = calloc(rec_argc + 1, sizeof(char *));
33 if (!rec_argv) 31 if (!rec_argv)
34 return -1; 32 return -1;
35 33
36 rec_argv[i++] = strdup("record");
34 rec_argv[i++] = "record";
37 if (!strcmp(mem_operation, MEM_OPERATION_LOAD))
38 rec_argv[i++] = strdup("-W");
39 rec_argv[i++] = strdup("-d");
40 rec_argv[i++] = strdup("-e");
41 35
42 if (strcmp(mem_operation, MEM_OPERATION_LOAD))
43 sprintf(event, "cpu/mem-stores/pp");
44 else
45 sprintf(event, "cpu/mem-loads/pp");
36 if (mem->operation & MEM_OPERATION_LOAD)
37 rec_argv[i++] = "-W";
38
39 rec_argv[i++] = "-d";
40
41 if (mem->operation & MEM_OPERATION_LOAD) {
42 rec_argv[i++] = "-e";
43 rec_argv[i++] = "cpu/mem-loads/pp";
44 }
45
46 if (mem->operation & MEM_OPERATION_STORE) {
47 rec_argv[i++] = "-e";
48 rec_argv[i++] = "cpu/mem-stores/pp";
49 }
46 50
47 rec_argv[i++] = strdup(event);
48 for (j = 1; j < argc; j++, i++) 51 for (j = 1; j < argc; j++, i++)
49 rec_argv[i] = argv[j]; 52 rec_argv[i] = argv[j];
50 53
@@ -162,17 +165,17 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
162 if (!rep_argv) 165 if (!rep_argv)
163 return -1; 166 return -1;
164 167
165 rep_argv[i++] = strdup("report");
166 rep_argv[i++] = strdup("--mem-mode");
167 rep_argv[i++] = strdup("-n"); /* display number of samples */
168 rep_argv[i++] = "report";
169 rep_argv[i++] = "--mem-mode";
170 rep_argv[i++] = "-n"; /* display number of samples */
168 171
169 /* 172 /*
170 * there is no weight (cost) associated with stores, so don't print 173 * there is no weight (cost) associated with stores, so don't print
171 * the column 174 * the column
172 */ 175 */
173 if (strcmp(mem_operation, MEM_OPERATION_LOAD))
174 rep_argv[i++] = strdup("--sort=mem,sym,dso,symbol_daddr,"
175 "dso_daddr,tlb,locked");
176 if (!(mem->operation & MEM_OPERATION_LOAD))
177 rep_argv[i++] = "--sort=mem,sym,dso,symbol_daddr,"
178 "dso_daddr,tlb,locked";
176 179
177 for (j = 1; j < argc; j++, i++) 180 for (j = 1; j < argc; j++, i++)
178 rep_argv[i] = argv[j]; 181 rep_argv[i] = argv[j];
@@ -182,6 +185,75 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
182 return ret; 185 return ret;
183} 186}
184 187
188struct mem_mode {
189 const char *name;
190 int mode;
191};
192
193#define MEM_OPT(n, m) \
194 { .name = n, .mode = (m) }
195
196#define MEM_END { .name = NULL }
197
198static const struct mem_mode mem_modes[] = {
199 MEM_OPT("load", MEM_OPERATION_LOAD),
200 MEM_OPT("store", MEM_OPERATION_STORE),
201 MEM_END
202};
203
204static int
205parse_mem_ops(const struct option *opt, const char *str, int unset)
206{
207 int *mode = (int *)opt->value;
208 const struct mem_mode *m;
209 char *s, *os = NULL, *p;
210 int ret = -1;
211
212 if (unset)
213 return 0;
214
215 /* str may be NULL in case no arg is passed to -t */
216 if (str) {
217 /* because str is read-only */
218 s = os = strdup(str);
219 if (!s)
220 return -1;
221
222 /* reset mode */
223 *mode = 0;
224
225 for (;;) {
226 p = strchr(s, ',');
227 if (p)
228 *p = '\0';
229
230 for (m = mem_modes; m->name; m++) {
231 if (!strcasecmp(s, m->name))
232 break;
233 }
234 if (!m->name) {
235 fprintf(stderr, "unknown sampling op %s,"
236 " check man page\n", s);
237 goto error;
238 }
239
240 *mode |= m->mode;
241
242 if (!p)
243 break;
244
245 s = p + 1;
246 }
247 }
248 ret = 0;
249
250 if (*mode == 0)
251 *mode = MEM_OPERATION_LOAD;
252error:
253 free(os);
254 return ret;
255}
256
185int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused) 257int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
186{ 258{
187 struct stat st; 259 struct stat st;
@@ -197,10 +269,15 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
197 .ordered_events = true, 269 .ordered_events = true,
198 }, 270 },
199 .input_name = "perf.data", 271 .input_name = "perf.data",
272 /*
273 * default to both load and store sampling
274 */
275 .operation = MEM_OPERATION_LOAD | MEM_OPERATION_STORE,
200 }; 276 };
201 const struct option mem_options[] = { 277 const struct option mem_options[] = {
202 OPT_STRING('t', "type", &mem_operation,
203 "type", "memory operations(load/store)"),
278 OPT_CALLBACK('t', "type", &mem.operation,
279 "type", "memory operations(load,store) Default load,store",
280 parse_mem_ops),
204 OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw, 281 OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw,
205 "dump raw samples in ASCII"), 282 "dump raw samples in ASCII"),
206 OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved, 283 OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved,
@@ -225,7 +302,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
225 argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, 302 argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
226 mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); 303 mem_usage, PARSE_OPT_STOP_AT_NON_OPTION);
227 304
228 if (!argc || !(strncmp(argv[0], "rec", 3) || mem_operation))
305 if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
229 usage_with_options(mem_usage, mem_options); 306 usage_with_options(mem_usage, mem_options);
230 307
231 if (!mem.input_name || !strlen(mem.input_name)) { 308 if (!mem.input_name || !strlen(mem.input_name)) {
@@ -236,7 +313,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
236 } 313 }
237 314
238 if (!strncmp(argv[0], "rec", 3)) 315 if (!strncmp(argv[0], "rec", 3))
239 return __cmd_record(argc, argv);
316 return __cmd_record(argc, argv, &mem);
240 else if (!strncmp(argv[0], "rep", 3)) 317 else if (!strncmp(argv[0], "rep", 3))
241 return report_events(argc, argv, &mem); 318 return report_events(argc, argv, &mem);
242 else 319 else
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 8648c6d3003d..404ab3434052 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -190,16 +190,30 @@ out:
190 return rc; 190 return rc;
191} 191}
192 192
193static int process_sample_event(struct perf_tool *tool,
194 union perf_event *event,
195 struct perf_sample *sample,
196 struct perf_evsel *evsel,
197 struct machine *machine)
198{
199 struct record *rec = container_of(tool, struct record, tool);
200
201 rec->samples++;
202
203 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
204}
205
193static int process_buildids(struct record *rec) 206static int process_buildids(struct record *rec)
194{ 207{
195 struct perf_data_file *file = &rec->file; 208 struct perf_data_file *file = &rec->file;
196 struct perf_session *session = rec->session; 209 struct perf_session *session = rec->session;
197 u64 start = session->header.data_offset;
198 210
199 u64 size = lseek(file->fd, 0, SEEK_CUR);
211 u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
200 if (size == 0) 212 if (size == 0)
201 return 0; 213 return 0;
202 214
215 file->size = size;
216
203 /* 217 /*
204 * During this process, it'll load kernel map and replace the 218 * During this process, it'll load kernel map and replace the
205 * dso->long_name to a real pathname it found. In this case 219 * dso->long_name to a real pathname it found. In this case
@@ -211,9 +225,7 @@ static int process_buildids(struct record *rec)
211 */ 225 */
212 symbol_conf.ignore_vmlinux_buildid = true; 226 symbol_conf.ignore_vmlinux_buildid = true;
213 227
214 return __perf_session__process_events(session, start,
228 return perf_session__process_events(session, &rec->tool);
215 size - start,
216 size, &build_id__mark_dso_hit_ops);
217} 229}
218 230
219static void perf_event__synthesize_guest_os(struct machine *machine, void *data) 231static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
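
lseek(fd, 0, SEEK_CUR) moves nothing and simply reports the current offset, which after sequential writes equals the amount of data written; for reference:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/demo.dat", O_CREAT | O_WRONLY | O_TRUNC, 0644);
        off_t pos;

        if (write(fd, "perf", 4) != 4)
                return 1;
        pos = lseek(fd, 0, SEEK_CUR);   /* offset after the write: 4 */
        printf("%lld bytes written\n", (long long)pos);
        close(fd);
        return 0;
}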
@@ -322,6 +334,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
322 struct perf_data_file *file = &rec->file; 334 struct perf_data_file *file = &rec->file;
323 struct perf_session *session; 335 struct perf_session *session;
324 bool disabled = false, draining = false; 336 bool disabled = false, draining = false;
337 int fd;
325 338
326 rec->progname = argv[0]; 339 rec->progname = argv[0];
327 340
@@ -336,6 +349,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
336 return -1; 349 return -1;
337 } 350 }
338 351
352 fd = perf_data_file__fd(file);
339 rec->session = session; 353 rec->session = session;
340 354
341 record__init_features(rec); 355 record__init_features(rec);
@@ -360,12 +374,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
360 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); 374 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
361 375
362 if (file->is_pipe) { 376 if (file->is_pipe) {
363 err = perf_header__write_pipe(file->fd);
377 err = perf_header__write_pipe(fd);
364 if (err < 0) 378 if (err < 0)
365 goto out_child; 379 goto out_child;
366 } else { 380 } else {
367 err = perf_session__write_header(session, rec->evlist,
381 err = perf_session__write_header(session, rec->evlist, fd, false);
368 file->fd, false);
369 if (err < 0) 382 if (err < 0)
370 goto out_child; 383 goto out_child;
371 } 384 }
@@ -397,7 +410,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
397 * return this more properly and also 410 * return this more properly and also
398 * propagate errors that now are calling die() 411 * propagate errors that now are calling die()
399 */ 412 */
400 err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
413 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
401 process_synthesized_event); 414 process_synthesized_event);
402 if (err <= 0) { 415 if (err <= 0) {
403 pr_err("Couldn't record tracing data.\n"); 416 pr_err("Couldn't record tracing data.\n");
@@ -504,19 +517,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
504 goto out_child; 517 goto out_child;
505 } 518 }
506 519
507 if (!quiet) {
520 if (!quiet)
508 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking); 521 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
509 522
510 /*
511 * Approximate RIP event size: 24 bytes.
512 */
513 fprintf(stderr,
514 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
515 (double)rec->bytes_written / 1024.0 / 1024.0,
516 file->path,
517 rec->bytes_written / 24);
518 }
519
520out_child: 523out_child:
521 if (forks) { 524 if (forks) {
522 int exit_status; 525 int exit_status;
@@ -535,13 +538,29 @@ out_child:
535 } else 538 } else
536 status = err; 539 status = err;
537 540
541 /* this will be recalculated during process_buildids() */
542 rec->samples = 0;
543
538 if (!err && !file->is_pipe) { 544 if (!err && !file->is_pipe) {
539 rec->session->header.data_size += rec->bytes_written; 545 rec->session->header.data_size += rec->bytes_written;
540 546
541 if (!rec->no_buildid) 547 if (!rec->no_buildid)
542 process_buildids(rec); 548 process_buildids(rec);
543 perf_session__write_header(rec->session, rec->evlist,
544 file->fd, true);
549 perf_session__write_header(rec->session, rec->evlist, fd, true);
550 }
551
552 if (!err && !quiet) {
553 char samples[128];
554
555 if (rec->samples)
556 scnprintf(samples, sizeof(samples),
557 " (%" PRIu64 " samples)", rec->samples);
558 else
559 samples[0] = '\0';
560
561 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
562 perf_data_file__size(file) / 1024.0 / 1024.0,
563 file->path, samples);
545 } 564 }
546 565
547out_delete_session: 566out_delete_session:
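
The new summary line builds the optional " (N samples)" suffix into a buffer first, so it vanishes cleanly when no samples were counted; the same pattern with plain snprintf():

#include <stdio.h>

static void print_summary(double mb, const char *path, unsigned long nr)
{
        char samples[128];

        if (nr)
                snprintf(samples, sizeof(samples), " (%lu samples)", nr);
        else
                samples[0] = '\0';      /* suffix disappears entirely */

        printf("[ perf record: Captured and wrote %.3f MB %s%s ]\n",
               mb, path, samples);
}

int main(void)
{
        print_summary(1.234, "perf.data", 42);
        print_summary(0.010, "perf.data", 0);
        return 0;
}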
@@ -720,6 +739,13 @@ static struct record record = {
720 .default_per_cpu = true, 739 .default_per_cpu = true,
721 }, 740 },
722 }, 741 },
742 .tool = {
743 .sample = process_sample_event,
744 .fork = perf_event__process_fork,
745 .comm = perf_event__process_comm,
746 .mmap = perf_event__process_mmap,
747 .mmap2 = perf_event__process_mmap2,
748 },
723}; 749};
724 750
725#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: " 751#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 072ae8ad67fc..2f91094e228b 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -86,17 +86,6 @@ static int report__config(const char *var, const char *value, void *cb)
86 return perf_default_config(var, value, cb); 86 return perf_default_config(var, value, cb);
87} 87}
88 88
89static void report__inc_stats(struct report *rep, struct hist_entry *he)
90{
91 /*
92 * The @he is either of a newly created one or an existing one
93 * merging current sample. We only want to count a new one so
94 * checking ->nr_events being 1.
95 */
96 if (he->stat.nr_events == 1)
97 rep->nr_entries++;
98}
99
100static int hist_iter__report_callback(struct hist_entry_iter *iter, 89static int hist_iter__report_callback(struct hist_entry_iter *iter,
101 struct addr_location *al, bool single, 90 struct addr_location *al, bool single,
102 void *arg) 91 void *arg)
@@ -108,8 +97,6 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
108 struct mem_info *mi; 97 struct mem_info *mi;
109 struct branch_info *bi; 98 struct branch_info *bi;
110 99
111 report__inc_stats(rep, he);
112
113 if (!ui__has_annotation()) 100 if (!ui__has_annotation())
114 return 0; 101 return 0;
115 102
@@ -499,6 +486,9 @@ static int __cmd_report(struct report *rep)
499 486
500 report__warn_kptr_restrict(rep); 487 report__warn_kptr_restrict(rep);
501 488
489 evlist__for_each(session->evlist, pos)
490 rep->nr_entries += evsel__hists(pos)->nr_entries;
491
502 if (use_browser == 0) { 492 if (use_browser == 0) {
503 if (verbose > 3) 493 if (verbose > 3)
504 perf_session__fprintf(session, stdout); 494 perf_session__fprintf(session, stdout);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 891086376381..e598e4e98170 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1730,7 +1730,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1730 "detailed run - start a lot of events"), 1730 "detailed run - start a lot of events"),
1731 OPT_BOOLEAN('S', "sync", &sync_run, 1731 OPT_BOOLEAN('S', "sync", &sync_run,
1732 "call sync() before starting a run"), 1732 "call sync() before starting a run"),
1733 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1733 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1734 "print large numbers with thousands\' separators", 1734 "print large numbers with thousands\' separators",
1735 stat__set_big_num), 1735 stat__set_big_num),
1736 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1736 OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 616f0fcb4701..c4c7eac69de4 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -165,7 +165,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
165 err ? "[unknown]" : uts.release, perf_version_string); 165 err ? "[unknown]" : uts.release, perf_version_string);
166 if (use_browser <= 0) 166 if (use_browser <= 0)
167 sleep(5); 167 sleep(5);
168 168
169 map->erange_warned = true; 169 map->erange_warned = true;
170} 170}
171 171
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index badfabc6a01f..7e935f1083ec 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -929,66 +929,66 @@ static struct syscall_fmt {
929 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, }, 929 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
930 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), }, 930 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
931 { .name = "close", .errmsg = true, 931 { .name = "close", .errmsg = true,
932 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, }, 932 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
933 { .name = "connect", .errmsg = true, }, 933 { .name = "connect", .errmsg = true, },
934 { .name = "dup", .errmsg = true, 934 { .name = "dup", .errmsg = true,
935 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 935 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
936 { .name = "dup2", .errmsg = true, 936 { .name = "dup2", .errmsg = true,
937 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 937 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
938 { .name = "dup3", .errmsg = true, 938 { .name = "dup3", .errmsg = true,
939 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 939 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
940 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), }, 940 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
941 { .name = "eventfd2", .errmsg = true, 941 { .name = "eventfd2", .errmsg = true,
942 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, }, 942 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
943 { .name = "faccessat", .errmsg = true, 943 { .name = "faccessat", .errmsg = true,
944 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 944 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
945 { .name = "fadvise64", .errmsg = true, 945 { .name = "fadvise64", .errmsg = true,
946 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 946 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
947 { .name = "fallocate", .errmsg = true, 947 { .name = "fallocate", .errmsg = true,
948 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 948 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
949 { .name = "fchdir", .errmsg = true, 949 { .name = "fchdir", .errmsg = true,
950 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 950 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
951 { .name = "fchmod", .errmsg = true, 951 { .name = "fchmod", .errmsg = true,
952 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 952 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
953 { .name = "fchmodat", .errmsg = true, 953 { .name = "fchmodat", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  { .name = "fchown", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "fchownat", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  { .name = "fcntl", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */
                       [1] = SCA_STRARRAY, /* cmd */ },
    .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
  { .name = "fdatasync", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "flock", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */
                       [1] = SCA_FLOCK, /* cmd */ }, },
  { .name = "fsetxattr", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "fstat", .errmsg = true, .alias = "newfstat",
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "fstatat", .errmsg = true, .alias = "newfstatat",
    .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  { .name = "fstatfs", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "fsync", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "ftruncate", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "futex", .errmsg = true,
    .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
  { .name = "futimesat", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  { .name = "getdents", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "getdents64", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
  { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
  { .name = "ioctl", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FD, /* fd */
  #if defined(__i386__) || defined(__x86_64__)
  /*
   * FIXME: Make this available to all arches.
@@ -1002,7 +1002,7 @@ static struct syscall_fmt {
1002 { .name = "kill", .errmsg = true, 1002 { .name = "kill", .errmsg = true,
1003 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, 1003 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
1004 { .name = "linkat", .errmsg = true, 1004 { .name = "linkat", .errmsg = true,
1005 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 1005 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
1006 { .name = "lseek", .errmsg = true, 1006 { .name = "lseek", .errmsg = true,
1007 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 1007 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1008 [2] = SCA_STRARRAY, /* whence */ }, 1008 [2] = SCA_STRARRAY, /* whence */ },
@@ -1012,9 +1012,9 @@ static struct syscall_fmt {
    .arg_scnprintf = { [0] = SCA_HEX, /* start */
                       [2] = SCA_MADV_BHV, /* behavior */ }, },
  { .name = "mkdirat", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  { .name = "mknodat", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  { .name = "mlock", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  { .name = "mlockall", .errmsg = true,
@@ -1036,9 +1036,9 @@ static struct syscall_fmt {
1036 { .name = "munmap", .errmsg = true, 1036 { .name = "munmap", .errmsg = true,
1037 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 1037 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1038 { .name = "name_to_handle_at", .errmsg = true, 1038 { .name = "name_to_handle_at", .errmsg = true,
1039 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1039 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1040 { .name = "newfstatat", .errmsg = true, 1040 { .name = "newfstatat", .errmsg = true,
1041 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1041 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1042 { .name = "open", .errmsg = true, 1042 { .name = "open", .errmsg = true,
1043 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, }, 1043 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
1044 { .name = "open_by_handle_at", .errmsg = true, 1044 { .name = "open_by_handle_at", .errmsg = true,
@@ -1052,20 +1052,20 @@ static struct syscall_fmt {
1052 { .name = "poll", .errmsg = true, .timeout = true, }, 1052 { .name = "poll", .errmsg = true, .timeout = true, },
1053 { .name = "ppoll", .errmsg = true, .timeout = true, }, 1053 { .name = "ppoll", .errmsg = true, .timeout = true, },
1054 { .name = "pread", .errmsg = true, .alias = "pread64", 1054 { .name = "pread", .errmsg = true, .alias = "pread64",
1055 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1055 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1056 { .name = "preadv", .errmsg = true, .alias = "pread", 1056 { .name = "preadv", .errmsg = true, .alias = "pread",
1057 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1057 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1058 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), }, 1058 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
1059 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", 1059 { .name = "pwrite", .errmsg = true, .alias = "pwrite64",
1060 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1060 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1061 { .name = "pwritev", .errmsg = true, 1061 { .name = "pwritev", .errmsg = true,
1062 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1062 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1063 { .name = "read", .errmsg = true, 1063 { .name = "read", .errmsg = true,
1064 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1064 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1065 { .name = "readlinkat", .errmsg = true, 1065 { .name = "readlinkat", .errmsg = true,
1066 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1066 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1067 { .name = "readv", .errmsg = true, 1067 { .name = "readv", .errmsg = true,
1068 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1068 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1069 { .name = "recvfrom", .errmsg = true, 1069 { .name = "recvfrom", .errmsg = true,
1070 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, 1070 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
1071 { .name = "recvmmsg", .errmsg = true, 1071 { .name = "recvmmsg", .errmsg = true,
@@ -1073,7 +1073,7 @@ static struct syscall_fmt {
1073 { .name = "recvmsg", .errmsg = true, 1073 { .name = "recvmsg", .errmsg = true,
1074 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, }, 1074 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
1075 { .name = "renameat", .errmsg = true, 1075 { .name = "renameat", .errmsg = true,
1076 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1076 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1077 { .name = "rt_sigaction", .errmsg = true, 1077 { .name = "rt_sigaction", .errmsg = true,
1078 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, }, 1078 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
1079 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), }, 1079 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
@@ -1091,7 +1091,7 @@ static struct syscall_fmt {
1091 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), }, 1091 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
1092 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 1092 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
1093 { .name = "shutdown", .errmsg = true, 1093 { .name = "shutdown", .errmsg = true,
1094 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1094 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1095 { .name = "socket", .errmsg = true, 1095 { .name = "socket", .errmsg = true,
1096 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ 1096 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
1097 [1] = SCA_SK_TYPE, /* type */ }, 1097 [1] = SCA_SK_TYPE, /* type */ },
@@ -1102,7 +1102,7 @@ static struct syscall_fmt {
    .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
  { .name = "stat", .errmsg = true, .alias = "newstat", },
  { .name = "symlinkat", .errmsg = true,
    .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  { .name = "tgkill", .errmsg = true,
    .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
  { .name = "tkill", .errmsg = true,
@@ -1113,9 +1113,9 @@ static struct syscall_fmt {
1113 { .name = "utimensat", .errmsg = true, 1113 { .name = "utimensat", .errmsg = true,
1114 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, }, 1114 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
1115 { .name = "write", .errmsg = true, 1115 { .name = "write", .errmsg = true,
1116 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1116 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1117 { .name = "writev", .errmsg = true, 1117 { .name = "writev", .errmsg = true,
1118 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1118 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1119}; 1119};
1120 1120
1121static int syscall_fmt__cmp(const void *name, const void *fmtp) 1121static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1191,7 +1191,7 @@ static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)

  if (thread__priv(thread) == NULL)
  thread__set_priv(thread, thread_trace__new());

  if (thread__priv(thread) == NULL)
  goto fail;

@@ -2056,23 +2056,24 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
  if (trace->trace_syscalls &&
      perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
                                     trace__sys_exit))
- goto out_error_tp;
+ goto out_error_raw_syscalls;

  if (trace->trace_syscalls)
      perf_evlist__add_vfs_getname(evlist);

  if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
-     perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ))
- goto out_error_tp;
+     perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
+ goto out_error_mem;
+ }

  if ((trace->trace_pgfaults & TRACE_PFMIN) &&
      perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
- goto out_error_tp;
+ goto out_error_mem;

  if (trace->sched &&
      perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
                             trace__sched_stat_runtime))
- goto out_error_tp;
+ goto out_error_sched_stat_runtime;

  err = perf_evlist__create_maps(evlist, &trace->opts.target);
  if (err < 0) {
@@ -2202,8 +2203,12 @@ out:
  {
  char errbuf[BUFSIZ];

- out_error_tp:
- perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf));
+ out_error_sched_stat_runtime:
+ debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
+ goto out_error;
+
+ out_error_raw_syscalls:
+ debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
  goto out_error;

  out_error_mmap:
@@ -2217,6 +2222,9 @@ out_error:
  fprintf(trace->output, "%s\n", errbuf);
  goto out_delete_evlist;
  }
+ out_error_mem:
+ fprintf(trace->output, "Not enough memory to run!\n");
+ goto out_delete_evlist;
  }

  static int trace__replay(struct trace *trace)
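Editor's note: the hunks above split the catch-all out_error_tp label into per-source labels (out_error_raw_syscalls, out_error_sched_stat_runtime, out_error_mem), so each failing setup step can name the exact tracepoint or resource in its diagnostic. A minimal, self-contained sketch of this goto-label error-path pattern follows; the setup_*() helpers and messages are hypothetical stand-ins, not perf APIs:

    #include <stdio.h>

    static int setup_syscalls(void) { return -1; } /* pretend this fails */
    static int setup_sched(void)    { return 0; }

    static int run(void)
    {
            if (setup_syscalls())
                    goto out_error_syscalls;
            if (setup_sched())
                    goto out_error_sched;
            return 0;

    out_error_syscalls:
            fprintf(stderr, "cannot open raw_syscalls tracepoints\n");
            goto out_error;
    out_error_sched:
            fprintf(stderr, "cannot open sched_stat_runtime tracepoint\n");
            goto out_error;
    out_error:
            return -1;
    }

    int main(void) { return run() ? 1 : 0; }

Each label emits a specific diagnostic before funnelling into the shared cleanup path, which is the shape the patch gives trace__run().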
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 648e31ff4021..cc224080b525 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -198,6 +198,7 @@ CORE_FEATURE_TESTS = \
  libpython-version \
  libslang \
  libunwind \
+ pthread-attr-setaffinity-np \
  stackprotector-all \
  timerfd \
  libdw-dwarf-unwind \
@@ -226,6 +227,7 @@ VF_FEATURE_TESTS = \
  libelf-getphdrnum \
  libelf-mmap \
  libpython-version \
+ pthread-attr-setaffinity-np \
  stackprotector-all \
  timerfd \
  libunwind-debug-frame \
@@ -301,6 +303,10 @@ ifeq ($(feature-sync-compare-and-swap), 1)
  CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
  endif

+ ifeq ($(feature-pthread-attr-setaffinity-np), 1)
+   CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
+ endif
+
  ifndef NO_BIONIC
  $(call feature_check,bionic)
  ifeq ($(feature-bionic), 1)
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 53f19b5dbc37..42ac05aaf8ac 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -25,6 +25,7 @@ FILES= \
  test-libslang.bin \
  test-libunwind.bin \
  test-libunwind-debug-frame.bin \
+ test-pthread-attr-setaffinity-np.bin \
  test-stackprotector-all.bin \
  test-timerfd.bin \
  test-libdw-dwarf-unwind.bin \
@@ -47,6 +48,9 @@ test-all.bin:
  test-hello.bin:
  $(BUILD)

+ test-pthread-attr-setaffinity-np.bin:
+ $(BUILD) -Werror -lpthread
+
  test-stackprotector-all.bin:
  $(BUILD) -Werror -fstack-protector-all

diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c
index 652e0098eba6..6d4d09323922 100644
--- a/tools/perf/config/feature-checks/test-all.c
+++ b/tools/perf/config/feature-checks/test-all.c
@@ -97,6 +97,10 @@
  # include "test-zlib.c"
  #undef main

+ #define main main_test_pthread_attr_setaffinity_np
+ # include "test-pthread_attr_setaffinity_np.c"
+ #undef main
+
  int main(int argc, char *argv[])
  {
  main_test_libpython();
@@ -121,6 +125,7 @@ int main(int argc, char *argv[])
  main_test_libdw_dwarf_unwind();
  main_test_sync_compare_and_swap(argc, argv);
  main_test_zlib();
+ main_test_pthread_attr_setaffinity_np();

  return 0;
  }
diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
new file mode 100644
index 000000000000..0a0d3ecb4e8a
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
@@ -0,0 +1,14 @@
+ #include <stdint.h>
+ #include <pthread.h>
+
+ int main(void)
+ {
+ int ret = 0;
+ pthread_attr_t thread_attr;
+
+ pthread_attr_init(&thread_attr);
+ /* don't care abt exact args, just the API itself in libpthread */
+ ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+
+ return ret;
+ }
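Editor's note: the probe above only needs to compile and link (with -Werror and -lpthread) for the feature test to pass, at which point the config Makefile defines HAVE_PTHREAD_ATTR_SETAFFINITY_NP. A hedged sketch of how dependent code can guard on that macro; the pin_thread_attr() wrapper and its fallback branch are illustrative, not part of the patch:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>

    static int pin_thread_attr(pthread_attr_t *attr, cpu_set_t *set)
    {
    #ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
            /* glibc extension: threads created with this attr run on 'set' */
            return pthread_attr_setaffinity_np(attr, sizeof(*set), set);
    #else
            (void)attr;
            (void)set;
            return 0; /* quietly skip pinning on libcs without the API */
    #endif
    }

Building the probe with -Werror matters: where the prototype is missing, the implicit-declaration warning becomes a hard error and the feature is correctly reported as absent.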
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
index 790ceba6ad3f..28431d1bbcf5 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -5,7 +5,10 @@
   * ANY CHANGES MADE HERE WILL BE LOST!
   *
   */
-
+ #include <stdbool.h>
+ #ifndef HAS_BOOL
+ # define HAS_BOOL 1
+ #endif
  #line 1 "Context.xs"
  /*
   * Context.xs. XS interfaces for perf script.
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index c9b4b6269b51..1091bd47adfd 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -104,7 +104,6 @@ class Event(dict):
  continue
  if not self.compare_data(self[t], other[t]):
  log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
-

  # Test file description needs to have following sections:
  # [config]
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 8d110dec393e..18619966454c 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -140,7 +140,7 @@ static void del_hist_entries(struct hists *hists)
  he = rb_entry(node, struct hist_entry, rb_node);
  rb_erase(node, root_out);
  rb_erase(&he->rb_node_in, root_in);
- hist_entry__free(he);
+ hist_entry__delete(he);
  }
  }

diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index f5547610da02..b52c9faea224 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -106,7 +106,7 @@ static void del_hist_entries(struct hists *hists)
  he = rb_entry(node, struct hist_entry, rb_node);
  rb_erase(node, root_out);
  rb_erase(&he->rb_node_in, root_in);
- hist_entry__free(he);
+ hist_entry__delete(he);
  }
  }

diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 69a71ff84e01..75709d2b17b4 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -222,7 +222,6 @@ tarpkg:
222 @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \ 222 @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
223 echo "- $@: $$cmd" && echo $$cmd > $@ && \ 223 echo "- $@: $$cmd" && echo $$cmd > $@ && \
224 ( eval $$cmd ) >> $@ 2>&1 224 ( eval $$cmd ) >> $@ 2>&1
225
226 225
227all: $(run) $(run_O) tarpkg 226all: $(run) $(run_O) tarpkg
228 @echo OK 227 @echo OK
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 7f2f51f93619..1cdab0ce00e2 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1145,6 +1145,49 @@ static int test__pinned_group(struct perf_evlist *evlist)
  return 0;
  }

+ static int test__checkevent_breakpoint_len(struct perf_evlist *evlist)
+ {
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
+                                  evsel->attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
+                                 evsel->attr.bp_len);
+
+ return 0;
+ }
+
+ static int test__checkevent_breakpoint_len_w(struct perf_evlist *evlist)
+ {
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==
+                                  evsel->attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
+                                 evsel->attr.bp_len);
+
+ return 0;
+ }
+
+ static int
+ test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist)
+ {
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_breakpoint_rw(evlist);
+ }
+
  static int count_tracepoints(void)
  {
  char events_path[PATH_MAX];
@@ -1420,6 +1463,21 @@ static struct evlist_test test__events[] = {
  .check = test__pinned_group,
  .id = 41,
  },
+ {
+ .name  = "mem:0/1",
+ .check = test__checkevent_breakpoint_len,
+ .id    = 42,
+ },
+ {
+ .name  = "mem:0/2:w",
+ .check = test__checkevent_breakpoint_len_w,
+ .id    = 43,
+ },
+ {
+ .name  = "mem:0/4:rw:u",
+ .check = test__checkevent_breakpoint_len_rw_modifier,
+ .id    = 44
+ },
  #if defined(__s390x__)
  {
  .name = "kvm-s390:kvm_s390_create_vm",
@@ -1471,7 +1529,7 @@ static int test_event(struct evlist_test *e)
  } else {
  ret = e->check(evlist);
  }

  perf_evlist__delete(evlist);

  return ret;
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 4908c648a597..30c02181e78b 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -110,7 +110,7 @@ static bool samples_same(const struct perf_sample *s1,

  if (type & PERF_SAMPLE_STACK_USER) {
  COMP(user_stack.size);
- if (memcmp(s1->user_stack.data, s1->user_stack.data,
+ if (memcmp(s1->user_stack.data, s2->user_stack.data,
  s1->user_stack.size)) {
  pr_debug("Samples differ at 'user_stack'\n");
  return false;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 1e0a2fd80115..9d32e3c0cfee 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -517,7 +517,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
  }

  annotate_browser__set_top(browser, dl, idx);

  return true;
  }

@@ -867,7 +867,6 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser

  ++browser->nr_jumps;
  }
-
  }

  static inline int width_jumps(int n)
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 482adae3cc44..25d608394d74 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -285,7 +285,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
  }

  #define __HPP_SORT_FN(_type, _field) \
- static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
+ static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+                                  struct hist_entry *a, struct hist_entry *b) \
  { \
  return __hpp__sort(a, b, he_get_##_field); \
  }
@@ -312,7 +313,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
  }

  #define __HPP_SORT_ACC_FN(_type, _field) \
- static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
+ static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+                                  struct hist_entry *a, struct hist_entry *b) \
  { \
  return __hpp__sort_acc(a, b, he_get_acc_##_field); \
  }
@@ -331,7 +333,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
  }

  #define __HPP_SORT_RAW_FN(_type, _field) \
- static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
+ static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+                                  struct hist_entry *a, struct hist_entry *b) \
  { \
  return __hpp__sort(a, b, he_get_raw_##_field); \
  }
@@ -361,7 +364,8 @@ HPP_PERCENT_ACC_FNS(overhead_acc, period)
  HPP_RAW_FNS(samples, nr_events)
  HPP_RAW_FNS(period, period)

- static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
+ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+                             struct hist_entry *a __maybe_unused,
  struct hist_entry *b __maybe_unused)
  {
  return 0;
diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h
index f34f89eb607c..717d39d3052b 100644
--- a/tools/perf/ui/progress.h
+++ b/tools/perf/ui/progress.h
@@ -4,12 +4,12 @@
  #include <linux/types.h>

  void ui_progress__finish(void);

  struct ui_progress {
  const char *title;
  u64 curr, next, step, total;
  };

  void ui_progress__init(struct ui_progress *p, u64 total, const char *title);
  void ui_progress__update(struct ui_progress *p, u64 adv);

diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index 1c8b9afd5d6e..88f5143a5981 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -9,6 +9,7 @@
  #include "../libslang.h"

  char ui_helpline__last_msg[1024];
+ bool tui_helpline__set;

  static void tui_helpline__pop(void)
  {
@@ -35,6 +36,8 @@ static int tui_helpline__show(const char *format, va_list ap)
  sizeof(ui_helpline__last_msg) - backlog, format, ap);
  backlog += ret;

+ tui_helpline__set = true;
+
  if (ui_helpline__last_msg[backlog - 1] == '\n') {
  ui_helpline__puts(ui_helpline__last_msg);
  SLsmg_refresh();
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 3c38f25b1695..b77e1d771363 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -17,6 +17,7 @@
  static volatile int ui__need_resize;

  extern struct perf_error_ops perf_tui_eops;
+ extern bool tui_helpline__set;

  extern void hist_browser__init_hpp(void);

@@ -159,7 +160,7 @@ out:

  void ui__exit(bool wait_for_ok)
  {
- if (wait_for_ok)
+ if (wait_for_ok && tui_helpline__set)
  ui__question_window("Fatal Error",
  ui_helpline__last_msg,
  "Press any key...", 0);
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 79999ceaf2be..61bf9128e1f2 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops)
  goto out_free_ops;

  ops->locked.ins = ins__find(name);
+ free(name);
+
  if (ops->locked.ins == NULL)
  goto out_free_ops;

  if (!ops->locked.ins->ops)
  return 0;

- if (ops->locked.ins->ops->parse)
- ops->locked.ins->ops->parse(ops->locked.ops);
+ if (ops->locked.ins->ops->parse &&
+     ops->locked.ins->ops->parse(ops->locked.ops) < 0)
+ goto out_free_ops;

  return 0;

@@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,

  static void lock__delete(struct ins_operands *ops)
  {
+ struct ins *ins = ops->locked.ins;
+
+ if (ins && ins->ops->free)
+ ins->ops->free(ops->locked.ops);
+ else
+ ins__delete(ops->locked.ops);
+
  zfree(&ops->locked.ops);
  zfree(&ops->target.raw);
  zfree(&ops->target.name);
@@ -229,7 +239,7 @@ static int mov__parse(struct ins_operands *ops)
  *s = '\0';
  ops->source.raw = strdup(ops->raw);
  *s = ',';

  if (ops->source.raw == NULL)
  return -1;

@@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl)
  if (!dl->ins->ops)
  return;

- if (dl->ins->ops->parse)
- dl->ins->ops->parse(&dl->ops);
+ if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0)
+ dl->ins = NULL;
  }

  static int disasm_line__parse(char *line, char **namep, char **rawp)
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index f4654183d391..55355b3d4f85 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -5,132 +5,6 @@

  int perf_use_color_default = -1;

- static int parse_color(const char *name, int len)
- {
- static const char * const color_names[] = {
- "normal", "black", "red", "green", "yellow",
- "blue", "magenta", "cyan", "white"
- };
- char *end;
- int i;
-
- for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
- const char *str = color_names[i];
- if (!strncasecmp(name, str, len) && !str[len])
- return i - 1;
- }
- i = strtol(name, &end, 10);
- if (end - name == len && i >= -1 && i <= 255)
- return i;
- return -2;
- }
-
- static int parse_attr(const char *name, int len)
- {
- static const int attr_values[] = { 1, 2, 4, 5, 7 };
- static const char * const attr_names[] = {
- "bold", "dim", "ul", "blink", "reverse"
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
- const char *str = attr_names[i];
- if (!strncasecmp(name, str, len) && !str[len])
- return attr_values[i];
- }
- return -1;
- }
-
- void color_parse(const char *value, const char *var, char *dst)
- {
- color_parse_mem(value, strlen(value), var, dst);
- }
-
- void color_parse_mem(const char *value, int value_len, const char *var,
- char *dst)
- {
- const char *ptr = value;
- int len = value_len;
- int attr = -1;
- int fg = -2;
- int bg = -2;
-
- if (!strncasecmp(value, "reset", len)) {
- strcpy(dst, PERF_COLOR_RESET);
- return;
- }
-
- /* [fg [bg]] [attr] */
- while (len > 0) {
- const char *word = ptr;
- int val, wordlen = 0;
-
- while (len > 0 && !isspace(word[wordlen])) {
- wordlen++;
- len--;
- }
-
- ptr = word + wordlen;
- while (len > 0 && isspace(*ptr)) {
- ptr++;
- len--;
- }
-
- val = parse_color(word, wordlen);
- if (val >= -1) {
- if (fg == -2) {
- fg = val;
- continue;
- }
- if (bg == -2) {
- bg = val;
- continue;
- }
- goto bad;
- }
- val = parse_attr(word, wordlen);
- if (val < 0 || attr != -1)
- goto bad;
- attr = val;
- }
-
- if (attr >= 0 || fg >= 0 || bg >= 0) {
- int sep = 0;
-
- *dst++ = '\033';
- *dst++ = '[';
- if (attr >= 0) {
- *dst++ = '0' + attr;
- sep++;
- }
- if (fg >= 0) {
- if (sep++)
- *dst++ = ';';
- if (fg < 8) {
- *dst++ = '3';
- *dst++ = '0' + fg;
- } else {
- dst += sprintf(dst, "38;5;%d", fg);
- }
- }
- if (bg >= 0) {
- if (sep++)
- *dst++ = ';';
- if (bg < 8) {
- *dst++ = '4';
- *dst++ = '0' + bg;
- } else {
- dst += sprintf(dst, "48;5;%d", bg);
- }
- }
- *dst++ = 'm';
- }
- *dst = 0;
- return;
- bad:
- die("bad color value '%.*s' for variable '%s'", value_len, value, var);
- }
-
  int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
  {
  if (value) {
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 0a594b8a0c26..38146f922c54 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -30,8 +30,6 @@ extern int perf_use_color_default;
  int perf_color_default_config(const char *var, const char *value, void *cb);

  int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
- void color_parse(const char *value, const char *var, char *dst);
- void color_parse_mem(const char *value, int len, const char *var, char *dst);
  int color_vsnprintf(char *bf, size_t size, const char *color,
  const char *fmt, va_list args);
  int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 45be944d450a..c2f7d3b90966 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -532,12 +532,8 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
  break;

  cache_offset = offset & DSO__DATA_CACHE_MASK;
- ret = -EINVAL;

- if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET))
- break;
-
- ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
+ ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
  if (ret <= 0)
  break;

diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 3782c82c6e44..ced92841ff97 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -139,6 +139,7 @@ struct dso {
  u32 status_seen;
  size_t file_size;
  struct list_head open_entry;
+ u64 frame_offset;
  } data;

  union { /* Tool specific area */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index cbab1fb77b1d..28b8ce86bf12 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1436,33 +1436,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
  return printed + fprintf(fp, "\n");
  }

- int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
- int err, char *buf, size_t size)
- {
- char sbuf[128];
-
- switch (err) {
- case ENOENT:
- scnprintf(buf, size, "%s",
- "Error:\tUnable to find debugfs\n"
- "Hint:\tWas your kernel was compiled with debugfs support?\n"
- "Hint:\tIs the debugfs filesystem mounted?\n"
- "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
- break;
- case EACCES:
- scnprintf(buf, size,
- "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
- "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
- debugfs_mountpoint, debugfs_mountpoint);
- break;
- default:
- scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
- break;
- }
-
- return 0;
- }
-
  int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
  int err, char *buf, size_t size)
  {
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0ba93f67ab94..c94a9e03ecf1 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -183,7 +183,6 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)

  size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);

- int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
  int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
  int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);

diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1e90c8557ede..ea51a90e20a0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -709,6 +709,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
  if (opts->sample_weight)
  perf_evsel__set_sample_bit(evsel, WEIGHT);

+ attr->task = track;
  attr->mmap = track;
  attr->mmap2 = track && !perf_missing_features.mmap2;
  attr->comm = track;
@@ -797,6 +798,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)

  int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
  {
+ if (ncpus == 0 || nthreads == 0)
+ return 0;
+
  if (evsel->system_wide)
  nthreads = 1;

diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b20e40c74468..1f407f7352a7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2237,6 +2237,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
   * - unique number to identify actual perf.data files
   * - encode endianness of file
   */
+ ph->version = PERF_HEADER_VERSION_2;

  /* check magic number with one endianness */
  if (magic == __perf_magic2)
@@ -2247,7 +2248,6 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
  return -1;

  ph->needs_swap = true;
- ph->version = PERF_HEADER_VERSION_2;

  return 0;
  }
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 182395546ddc..70b48a65064c 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -241,6 +241,20 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
  return he->stat.period == 0;
  }

+ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
+ {
+ rb_erase(&he->rb_node, &hists->entries);
+
+ if (sort__need_collapse)
+ rb_erase(&he->rb_node_in, &hists->entries_collapsed);
+
+ --hists->nr_entries;
+ if (!he->filtered)
+ --hists->nr_non_filtered_entries;
+
+ hist_entry__delete(he);
+ }
+
  void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
  {
  struct rb_node *next = rb_first(&hists->entries);
@@ -258,16 +272,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
  (zap_kernel && n->level != '.') ||
  hists__decay_entry(hists, n)) &&
  !n->used) {
- rb_erase(&n->rb_node, &hists->entries);
-
- if (sort__need_collapse)
- rb_erase(&n->rb_node_in, &hists->entries_collapsed);
-
- --hists->nr_entries;
- if (!n->filtered)
- --hists->nr_non_filtered_entries;
-
- hist_entry__free(n);
+ hists__delete_entry(hists, n);
  }
  }
  }
@@ -281,16 +286,7 @@ void hists__delete_entries(struct hists *hists)
  n = rb_entry(next, struct hist_entry, rb_node);
  next = rb_next(&n->rb_node);

- rb_erase(&n->rb_node, &hists->entries);
-
- if (sort__need_collapse)
- rb_erase(&n->rb_node_in, &hists->entries_collapsed);
-
- --hists->nr_entries;
- if (!n->filtered)
- --hists->nr_non_filtered_entries;
-
- hist_entry__free(n);
+ hists__delete_entry(hists, n);
  }
  }

@@ -433,6 +429,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
  if (!he)
  return NULL;

+ hists->nr_entries++;
+
  rb_link_node(&he->rb_node_in, parent, p);
  rb_insert_color(&he->rb_node_in, hists->entries_in);
  out:
@@ -915,7 +913,7 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  if (perf_hpp__should_skip(fmt))
  continue;

- cmp = fmt->cmp(left, right);
+ cmp = fmt->cmp(fmt, left, right);
  if (cmp)
  break;
  }
@@ -933,7 +931,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  if (perf_hpp__should_skip(fmt))
  continue;

- cmp = fmt->collapse(left, right);
+ cmp = fmt->collapse(fmt, left, right);
  if (cmp)
  break;
  }
@@ -941,7 +939,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  return cmp;
  }

- void hist_entry__free(struct hist_entry *he)
+ void hist_entry__delete(struct hist_entry *he)
  {
  zfree(&he->branch_info);
  zfree(&he->mem_info);
@@ -981,7 +979,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
  iter->callchain,
  he->callchain);
  }
- hist_entry__free(he);
+ hist_entry__delete(he);
  return false;
  }

@@ -1063,7 +1061,7 @@ static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
  if (perf_hpp__should_skip(fmt))
  continue;

- cmp = fmt->sort(a, b);
+ cmp = fmt->sort(fmt, a, b);
  if (cmp)
  break;
  }
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 46bd50344f85..2b690d028907 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -119,7 +119,7 @@ int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
  int hist_entry__transaction_len(void);
  int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
  struct hists *hists);
- void hist_entry__free(struct hist_entry *);
+ void hist_entry__delete(struct hist_entry *he);

  void hists__output_resort(struct hists *hists, struct ui_progress *prog);
  void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
@@ -195,9 +195,12 @@ struct perf_hpp_fmt {
  struct hist_entry *he);
  int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  struct hist_entry *he);
- int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b);
- int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b);
- int64_t (*sort)(struct hist_entry *a, struct hist_entry *b);
+ int64_t (*cmp)(struct perf_hpp_fmt *fmt,
+                struct hist_entry *a, struct hist_entry *b);
+ int64_t (*collapse)(struct perf_hpp_fmt *fmt,
+                     struct hist_entry *a, struct hist_entry *b);
+ int64_t (*sort)(struct perf_hpp_fmt *fmt,
+                 struct hist_entry *a, struct hist_entry *b);

  struct list_head list;
  struct list_head sort_list;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 6951a9d42339..0e42438b1e59 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -116,6 +116,22 @@ struct thread;
  #define map__for_each_symbol(map, pos, n) \
  dso__for_each_symbol(map->dso, pos, n, map->type)

+ /* map__for_each_symbol_with_name - iterate over the symbols in the given map
+  * that have the given name
+  *
+  * @map: the 'struct map *' in which symbols itereated
+  * @sym_name: the symbol name
+  * @pos: the 'struct symbol *' to use as a loop cursor
+  * @filter: to use when loading the DSO
+  */
+ #define __map__for_each_symbol_by_name(map, sym_name, pos, filter) \
+ for (pos = map__find_symbol_by_name(map, sym_name, filter); \
+      pos && strcmp(pos->name, sym_name) == 0; \
+      pos = symbol__next_by_name(pos))
+
+ #define map__for_each_symbol_by_name(map, sym_name, pos) \
+ __map__for_each_symbol_by_name(map, sym_name, (pos), NULL)
+
  typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);

  void map__init(struct map *map, enum map_type type,
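Editor's note: the new iterator leans on the name-sorted symbol tree: map__find_symbol_by_name() is expected to return the first symbol of a run sharing the name, and symbol__next_by_name() walks forward until the name changes. A hedged usage sketch; the counting helper below is illustrative, not from the patch:

    /* Count how many symbols in 'map' share the exact name 'name'. */
    static int count_symbol_instances(struct map *map, const char *name)
    {
            struct symbol *sym;
            int n = 0;

            map__for_each_symbol_by_name(map, name, sym)
                    n++; /* visits each same-named symbol exactly once */

            return n;
    }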
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 77b43fe43d55..7f8ec6ce2823 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -526,7 +526,7 @@ do { \
  }

  int parse_events_add_breakpoint(struct list_head *list, int *idx,
- void *ptr, char *type)
+ void *ptr, char *type, u64 len)
  {
  struct perf_event_attr attr;

@@ -536,14 +536,15 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
  if (parse_breakpoint_type(type, &attr))
  return -EINVAL;

- /*
-  * We should find a nice way to override the access length
-  * Provide some defaults for now
-  */
- if (attr.bp_type == HW_BREAKPOINT_X)
- attr.bp_len = sizeof(long);
- else
- attr.bp_len = HW_BREAKPOINT_LEN_4;
+ /* Provide some defaults if len is not specified */
+ if (!len) {
+ if (attr.bp_type == HW_BREAKPOINT_X)
+ len = sizeof(long);
+ else
+ len = HW_BREAKPOINT_LEN_4;
+ }
+
+ attr.bp_len = len;

  attr.type = PERF_TYPE_BREAKPOINT;
  attr.sample_period = 1;
@@ -1121,7 +1122,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
  return;

  for_each_subsystem(sys_dir, sys_dirent, sys_next) {
  if (subsys_glob != NULL &&
  !strglobmatch(sys_dirent.d_name, subsys_glob))
  continue;

@@ -1132,7 +1133,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
  continue;

  for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
  if (event_glob != NULL &&
  !strglobmatch(evt_dirent.d_name, event_glob))
  continue;

@@ -1305,7 +1306,7 @@ static void print_symbol_events(const char *event_glob, unsigned type,

  for (i = 0; i < max; i++, syms++) {

  if (event_glob != NULL &&
  !(strglobmatch(syms->symbol, event_glob) ||
  (syms->alias && strglobmatch(syms->alias, event_glob))))
  continue;
@@ -1366,7 +1367,7 @@ void print_events(const char *event_glob, bool name_only)
1366 printf("\n"); 1367 printf("\n");
1367 1368
1368 printf(" %-50s [%s]\n", 1369 printf(" %-50s [%s]\n",
1369 "mem:<addr>[:access]", 1370 "mem:<addr>[/len][:access]",
1370 event_type_descriptors[PERF_TYPE_BREAKPOINT]); 1371 event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1371 printf("\n"); 1372 printf("\n");
1372 } 1373 }
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index db2cf78ff0f3..ff6e1fa4111e 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -71,6 +71,7 @@ struct parse_events_term {
  int type_val;
  int type_term;
  struct list_head list;
+ bool used;
  };

  struct parse_events_evlist {
@@ -104,7 +105,7 @@ int parse_events_add_numeric(struct list_head *list, int *idx,
  int parse_events_add_cache(struct list_head *list, int *idx,
  char *type, char *op_result1, char *op_result2);
  int parse_events_add_breakpoint(struct list_head *list, int *idx,
- void *ptr, char *type);
+ void *ptr, char *type, u64 len);
  int parse_events_add_pmu(struct list_head *list, int *idx,
  char *pmu , struct list_head *head_config);
  enum perf_pmu_event_symbol_type
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 906630bbf8eb..94eacb6c1ef7 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -159,6 +159,7 @@ branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
  <mem>{
  {modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
  :             { return ':'; }
+ "/"           { return '/'; }
  {num_dec}     { return value(yyscanner, 10); }
  {num_hex}     { return value(yyscanner, 16); }
  /*
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 93c4c9fbc922..72def077dbbf 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -326,6 +326,28 @@ PE_NAME_CACHE_TYPE
  }

  event_legacy_mem:
+ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
+ {
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
+                                      (void *) $2, $6, $4));
+ $$ = list;
+ }
+ |
+ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
+ {
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
+                                      (void *) $2, NULL, $4));
+ $$ = list;
+ }
+ |
  PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
  {
  struct parse_events_evlist *data = _data;
@@ -333,7 +355,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc

  ALLOC_LIST(list);
  ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
- (void *) $2, $4));
+ (void *) $2, $4, 0));
  $$ = list;
  }
  |
@@ -344,7 +366,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc

  ALLOC_LIST(list);
  ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
- (void *) $2, NULL));
+ (void *) $2, NULL, 0));
  $$ = list;
  }

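Editor's note: together with the new '/' token in the lexer, these two grammar rules extend the legacy breakpoint syntax to mem:<addr>[/len][:access], forwarding the length as the new u64 len argument (0, meaning "use the default", for the old forms). As a hedged sketch, the attribute an event spec like mem:0x1000/8:w should roughly resolve to; the helper below is illustrative, not perf source:

    #include <string.h>
    #include <linux/perf_event.h>
    #include <linux/hw_breakpoint.h>

    static void mem_0x1000_len8_w(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->type          = PERF_TYPE_BREAKPOINT;
            attr->bp_addr       = 0x1000;              /* <addr> */
            attr->bp_len        = HW_BREAKPOINT_LEN_8; /* /8, previously hardcoded */
            attr->bp_type       = HW_BREAKPOINT_W;     /* :w */
            attr->sample_period = 1;
    }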
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index f62dee7bd924..4a015f77e2b5 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -46,7 +46,7 @@ static int get_value(struct parse_opt_ctx_t *p,
  return opterror(opt, "is not usable", flags);

  if (opt->flags & PARSE_OPT_EXCLUSIVE) {
- if (p->excl_opt) {
+ if (p->excl_opt && p->excl_opt != opt) {
  char msg[128];

  if (((flags & OPT_SHORT) && p->excl_opt->short_name) ||
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 5c9c4947cfb4..48411674da0f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -551,31 +551,68 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
  }

  /*
+  * Term is a string term, and might be a param-term. Try to look up it's value
+  * in the remaining terms.
+  * - We have a term like "base-or-format-term=param-term",
+  * - We need to find the value supplied for "param-term" (with param-term named
+  *   in a config string) later on in the term list.
+  */
+ static int pmu_resolve_param_term(struct parse_events_term *term,
+                                   struct list_head *head_terms,
+                                   __u64 *value)
+ {
+ struct parse_events_term *t;
+
+ list_for_each_entry(t, head_terms, list) {
+ if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+ if (!strcmp(t->config, term->config)) {
+ t->used = true;
+ *value = t->val.num;
+ return 0;
+ }
+ }
+ }
+
+ if (verbose)
+ printf("Required parameter '%s' not specified\n", term->config);
+
+ return -1;
+ }
+
+ /*
   * Setup one of config[12] attr members based on the
   * user input data - term parameter.
   */
  static int pmu_config_term(struct list_head *formats,
                             struct perf_event_attr *attr,
                             struct parse_events_term *term,
+                            struct list_head *head_terms,
                             bool zero)
  {
  struct perf_pmu_format *format;
  __u64 *vp;
+ __u64 val;
+
+ /*
+  * If this is a parameter we've already used for parameterized-eval,
+  * skip it in normal eval.
+  */
+ if (term->used)
+ return 0;

  /*
-  * Support only for hardcoded and numnerial terms.
   * Hardcoded terms should be already in, so nothing
   * to be done for them.
   */
  if (parse_events__is_hardcoded_term(term))
  return 0;

- if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
- return -EINVAL;
-
  format = pmu_find_format(formats, term->config);
- if (!format)
+ if (!format) {
+ if (verbose)
+ printf("Invalid event/parameter '%s'\n", term->config);
  return -EINVAL;
+ }

  switch (format->value) {
  case PERF_PMU_FORMAT_VALUE_CONFIG:
@@ -592,11 +629,25 @@ static int pmu_config_term(struct list_head *formats,
592 } 629 }
593 630
594 /* 631 /*
595 * XXX If we ever decide to go with string values for 632 * Either directly use a numeric term, or try to translate string terms
596 * non-hardcoded terms, here's the place to translate 633 * using event parameters.
597 * them into value.
598 */ 634 */
599 pmu_format_value(format->bits, term->val.num, vp, zero); 635 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
636 val = term->val.num;
637 else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
638 if (strcmp(term->val.str, "?")) {
639 if (verbose)
640 pr_info("Invalid sysfs entry %s=%s\n",
641 term->config, term->val.str);
642 return -EINVAL;
643 }
644
645 if (pmu_resolve_param_term(term, head_terms, &val))
646 return -EINVAL;
647 } else
648 return -EINVAL;
649
650 pmu_format_value(format->bits, val, vp, zero);
600 return 0; 651 return 0;
601} 652}
602 653
@@ -607,9 +658,10 @@ int perf_pmu__config_terms(struct list_head *formats,
607{ 658{
608 struct parse_events_term *term; 659 struct parse_events_term *term;
609 660
610 list_for_each_entry(term, head_terms, list) 661 list_for_each_entry(term, head_terms, list) {
611 if (pmu_config_term(formats, attr, term, zero)) 662 if (pmu_config_term(formats, attr, term, head_terms, zero))
612 return -EINVAL; 663 return -EINVAL;
664 }
613 665
614 return 0; 666 return 0;
615} 667}
@@ -767,10 +819,36 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
767 set_bit(b, bits); 819 set_bit(b, bits);
768} 820}
769 821
822static int sub_non_neg(int a, int b)
823{
824 if (b > a)
825 return 0;
826 return a - b;
827}
828
770static char *format_alias(char *buf, int len, struct perf_pmu *pmu, 829static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
771 struct perf_pmu_alias *alias) 830 struct perf_pmu_alias *alias)
772{ 831{
773 snprintf(buf, len, "%s/%s/", pmu->name, alias->name); 832 struct parse_events_term *term;
833 int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);
834
835 list_for_each_entry(term, &alias->terms, list) {
836 if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
837 used += snprintf(buf + used, sub_non_neg(len, used),
838 ",%s=%s", term->config,
839 term->val.str);
840 }
841
842 if (sub_non_neg(len, used) > 0) {
843 buf[used] = '/';
844 used++;
845 }
846 if (sub_non_neg(len, used) > 0) {
847 buf[used] = '\0';
848 used++;
849 } else
850 buf[len - 1] = '\0';
851
774 return buf; 852 return buf;
775} 853}
776 854
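
The pmu.c hunks above add parameterized events: a sysfs alias may carry a term whose value is the literal "?", and the real value is resolved by name from the numeric terms the user supplies (pmu_resolve_param_term()), with already-used terms skipped during normal config evaluation. A minimal sketch of that resolution step, with assumed names and a flat array standing in for perf's term lists:

/*
 * Sketch of "?" parameter resolution: a string term whose value is
 * "?" is looked up by name among the numeric terms in the same list.
 */
#include <stdio.h>
#include <string.h>

struct term {
	const char *config;		/* term name              */
	const char *sval;		/* string value, or NULL  */
	unsigned long long nval;	/* numeric value          */
};

static int resolve_param(const struct term *t, const struct term *all,
			 int n, unsigned long long *val)
{
	for (int i = 0; i < n; i++) {
		if (!all[i].sval && !strcmp(all[i].config, t->config)) {
			*val = all[i].nval;
			return 0;
		}
	}
	fprintf(stderr, "Required parameter '%s' not specified\n", t->config);
	return -1;
}

int main(void)
{
	/* e.g. an alias term "chan=?" combined with "chan=3" from the user */
	struct term terms[] = {
		{ "chan", "?",  0 },	/* placeholder from the alias */
		{ "chan", NULL, 3 },	/* value supplied by the user */
	};
	unsigned long long v;

	if (!resolve_param(&terms[0], terms, 2, &v))
		printf("chan resolved to %llu\n", v);	/* -> 3 */
	return 0;
}
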
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 94a717bf007d..919937eb0be2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
446 } 446 }
447 447
448 for (i = 0; i < ntevs; i++) { 448 for (i = 0; i < ntevs; i++) {
449 if (tevs[i].point.address) { 449 if (tevs[i].point.address && !tevs[i].point.retprobe) {
450 tmp = strdup(reloc_sym->name); 450 tmp = strdup(reloc_sym->name);
451 if (!tmp) 451 if (!tmp)
452 return -ENOMEM; 452 return -ENOMEM;
@@ -2193,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
2193 return ret; 2193 return ret;
2194} 2194}
2195 2195
2196static char *looking_function_name; 2196static int find_probe_functions(struct map *map, char *name)
2197static int num_matched_functions;
2198
2199static int probe_function_filter(struct map *map __maybe_unused,
2200 struct symbol *sym)
2201{ 2197{
2202 if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) && 2198 int found = 0;
2203 strcmp(looking_function_name, sym->name) == 0) { 2199 struct symbol *sym;
2204 num_matched_functions++; 2200
2205 return 0; 2201 map__for_each_symbol_by_name(map, name, sym) {
2202 if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
2203 found++;
2206 } 2204 }
2207 return 1; 2205
2206 return found;
2208} 2207}
2209 2208
2210#define strdup_or_goto(str, label) \ 2209#define strdup_or_goto(str, label) \
@@ -2222,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
2222 struct kmap *kmap = NULL; 2221 struct kmap *kmap = NULL;
2223 struct ref_reloc_sym *reloc_sym = NULL; 2222 struct ref_reloc_sym *reloc_sym = NULL;
2224 struct symbol *sym; 2223 struct symbol *sym;
2225 struct rb_node *nd;
2226 struct probe_trace_event *tev; 2224 struct probe_trace_event *tev;
2227 struct perf_probe_point *pp = &pev->point; 2225 struct perf_probe_point *pp = &pev->point;
2228 struct probe_trace_point *tp; 2226 struct probe_trace_point *tp;
2227 int num_matched_functions;
2229 int ret, i; 2228 int ret, i;
2230 2229
2231 /* Init maps of given executable or kernel */ 2230 /* Init maps of given executable or kernel */
@@ -2242,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
2242 * Load matched symbols: Since the different local symbols may have 2241 * Load matched symbols: Since the different local symbols may have
2243 * same name but different addresses, this lists all the symbols. 2242 * same name but different addresses, this lists all the symbols.
2244 */ 2243 */
2245 num_matched_functions = 0; 2244 num_matched_functions = find_probe_functions(map, pp->function);
2246 looking_function_name = pp->function; 2245 if (num_matched_functions == 0) {
2247 ret = map__load(map, probe_function_filter);
2248 if (ret || num_matched_functions == 0) {
2249 pr_err("Failed to find symbol %s in %s\n", pp->function, 2246 pr_err("Failed to find symbol %s in %s\n", pp->function,
2250 target ? : "kernel"); 2247 target ? : "kernel");
2251 ret = -ENOENT; 2248 ret = -ENOENT;
@@ -2257,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
2257 goto out; 2254 goto out;
2258 } 2255 }
2259 2256
2260 if (!pev->uprobes) { 2257 if (!pev->uprobes && !pp->retprobe) {
2261 kmap = map__kmap(map); 2258 kmap = map__kmap(map);
2262 reloc_sym = kmap->ref_reloc_sym; 2259 reloc_sym = kmap->ref_reloc_sym;
2263 if (!reloc_sym) { 2260 if (!reloc_sym) {
@@ -2275,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
2275 } 2272 }
2276 2273
2277 ret = 0; 2274 ret = 0;
2278 map__for_each_symbol(map, sym, nd) { 2275
2276 map__for_each_symbol_by_name(map, pp->function, sym) {
2279 tev = (*tevs) + ret; 2277 tev = (*tevs) + ret;
2280 tp = &tev->point; 2278 tp = &tev->point;
2281 if (ret == num_matched_functions) { 2279 if (ret == num_matched_functions) {
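
The probe-event.c change above drops the global looking_function_name/num_matched_functions state in favour of find_probe_functions(), which walks only the symbols sharing the requested name and counts those with global or local binding. A rough equivalent of that counting loop, using an array as a stand-in for perf's map/symbol API:

/*
 * Sketch of find_probe_functions(): count same-named symbols whose
 * binding is global or local; weak symbols are skipped.
 */
#include <stdio.h>
#include <string.h>

enum binding { B_GLOBAL, B_LOCAL, B_WEAK };

struct sym { const char *name; enum binding bind; };

static int count_probe_functions(const struct sym *syms, int n,
				 const char *name)
{
	int found = 0;

	for (int i = 0; i < n; i++) {
		if (strcmp(syms[i].name, name))
			continue;
		if (syms[i].bind == B_GLOBAL || syms[i].bind == B_LOCAL)
			found++;
	}
	return found;
}

int main(void)
{
	struct sym tab[] = {
		{ "vfs_read", B_GLOBAL },
		{ "t_show",   B_LOCAL  },	/* locals may share a name */
		{ "t_show",   B_LOCAL  },
		{ "t_show",   B_WEAK   },
	};

	printf("%d\n", count_probe_functions(tab, 4, "t_show"));	/* 2 */
	return 0;
}
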
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 3dda85ca50c1..d906d0ad5d40 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -768,7 +768,7 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
768 Py_DECREF(file); 768 Py_DECREF(file);
769 goto free_list; 769 goto free_list;
770 } 770 }
771 771
772 Py_DECREF(file); 772 Py_DECREF(file);
773 } 773 }
774 774
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index d808a328f4dc..0c815a40a6e8 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -89,7 +89,7 @@ static void handler_call_die(const char *handler_name)
89 89
90/* 90/*
91 * Insert val into the dictionary and decrement the reference counter. 91 * Insert val into the dictionary and decrement the reference counter.
92 * This is necessary for dictionaries since PyDict_SetItemString() does not 92 * This is necessary for dictionaries since PyDict_SetItemString() does not
93 * steal a reference, as opposed to PyTuple_SetItem(). 93 * steal a reference, as opposed to PyTuple_SetItem().
94 */ 94 */
95static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val) 95static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5f0e05a76c05..0baf75f12b7c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -274,7 +274,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
274 if (tool->id_index == NULL) 274 if (tool->id_index == NULL)
275 tool->id_index = process_id_index_stub; 275 tool->id_index = process_id_index_stub;
276} 276}
277 277
278static void swap_sample_id_all(union perf_event *event, void *data) 278static void swap_sample_id_all(union perf_event *event, void *data)
279{ 279{
280 void *end = (void *) event + event->header.size; 280 void *end = (void *) event + event->header.size;
@@ -1251,9 +1251,9 @@ fetch_mmaped_event(struct perf_session *session,
1251#define NUM_MMAPS 128 1251#define NUM_MMAPS 128
1252#endif 1252#endif
1253 1253
1254int __perf_session__process_events(struct perf_session *session, 1254static int __perf_session__process_events(struct perf_session *session,
1255 u64 data_offset, u64 data_size, 1255 u64 data_offset, u64 data_size,
1256 u64 file_size, struct perf_tool *tool) 1256 u64 file_size, struct perf_tool *tool)
1257{ 1257{
1258 int fd = perf_data_file__fd(session->file); 1258 int fd = perf_data_file__fd(session->file);
1259 u64 head, page_offset, file_offset, file_pos, size; 1259 u64 head, page_offset, file_offset, file_pos, size;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index dc26ebf60fe4..6d663dc76404 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -49,9 +49,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
49 union perf_event **event_ptr, 49 union perf_event **event_ptr,
50 struct perf_sample *sample); 50 struct perf_sample *sample);
51 51
52int __perf_session__process_events(struct perf_session *session,
53 u64 data_offset, u64 data_size, u64 size,
54 struct perf_tool *tool);
55int perf_session__process_events(struct perf_session *session, 52int perf_session__process_events(struct perf_session *session,
56 struct perf_tool *tool); 53 struct perf_tool *tool);
57 54
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 9139dda9f9a3..7a39c1ed8d37 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1304,6 +1304,37 @@ static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1304 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1304 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1305} 1305}
1306 1306
1307static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1308 struct hist_entry *a, struct hist_entry *b)
1309{
1310 struct hpp_sort_entry *hse;
1311
1312 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1313 return hse->se->se_cmp(a, b);
1314}
1315
1316static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1317 struct hist_entry *a, struct hist_entry *b)
1318{
1319 struct hpp_sort_entry *hse;
1320 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1321
1322 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1323 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1324 return collapse_fn(a, b);
1325}
1326
1327static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1328 struct hist_entry *a, struct hist_entry *b)
1329{
1330 struct hpp_sort_entry *hse;
1331 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1332
1333 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1334 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1335 return sort_fn(a, b);
1336}
1337
1307static struct hpp_sort_entry * 1338static struct hpp_sort_entry *
1308__sort_dimension__alloc_hpp(struct sort_dimension *sd) 1339__sort_dimension__alloc_hpp(struct sort_dimension *sd)
1309{ 1340{
@@ -1322,9 +1353,9 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1322 hse->hpp.entry = __sort__hpp_entry; 1353 hse->hpp.entry = __sort__hpp_entry;
1323 hse->hpp.color = NULL; 1354 hse->hpp.color = NULL;
1324 1355
1325 hse->hpp.cmp = sd->entry->se_cmp; 1356 hse->hpp.cmp = __sort__hpp_cmp;
1326 hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp; 1357 hse->hpp.collapse = __sort__hpp_collapse;
1327 hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse; 1358 hse->hpp.sort = __sort__hpp_sort;
1328 1359
1329 INIT_LIST_HEAD(&hse->hpp.list); 1360 INIT_LIST_HEAD(&hse->hpp.list);
1330 INIT_LIST_HEAD(&hse->hpp.sort_list); 1361 INIT_LIST_HEAD(&hse->hpp.sort_list);
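
The sort.c hunks above stop copying se_cmp/se_collapse/se_sort directly into the hpp callbacks and instead install wrappers that recover the hpp_sort_entry with container_of() and dispatch, applying the ?: fallbacks at call time. A compilable sketch of that wrapper pattern (stand-in types, not perf's):

/*
 * Sketch: a generic callback recovers its containing object via
 * container_of() and forwards to a per-entry function, falling back
 * when the optional one is NULL.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fmt { long (*cmp)(struct fmt *, int, int); };

struct sort_entry {
	struct fmt hpp;			/* embedded generic part   */
	long (*se_cmp)(int, int);
	long (*se_collapse)(int, int);	/* optional, may be NULL   */
};

static long wrap_collapse(struct fmt *f, int a, int b)
{
	struct sort_entry *se = container_of(f, struct sort_entry, hpp);
	long (*fn)(int, int) = se->se_collapse ? se->se_collapse : se->se_cmp;

	return fn(a, b);
}

static long cmp_ints(int a, int b) { return (long)a - b; }

int main(void)
{
	struct sort_entry se = { { wrap_collapse }, cmp_ints, NULL };

	/* falls back to se_cmp because se_collapse is NULL */
	printf("%ld\n", se.hpp.cmp(&se.hpp, 7, 3));	/* -> 4 */
	return 0;
}
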
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 06fcd1bf98b6..b24f9d8727a8 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -574,13 +574,16 @@ static int decompress_kmodule(struct dso *dso, const char *name,
574 const char *ext = strrchr(name, '.'); 574 const char *ext = strrchr(name, '.');
575 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX"; 575 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
576 576
577 if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP && 577 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
578 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) || 578 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
579 type != dso->symtab_type) 579 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
580 return -1; 580 return -1;
581 581
582 if (!ext || !is_supported_compression(ext + 1)) 582 if (!ext || !is_supported_compression(ext + 1)) {
583 return -1; 583 ext = strrchr(dso->name, '.');
584 if (!ext || !is_supported_compression(ext + 1))
585 return -1;
586 }
584 587
585 fd = mkstemp(tmpbuf); 588 fd = mkstemp(tmpbuf);
586 if (fd < 0) 589 if (fd < 0)
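
The decompress_kmodule() fix above also accepts build-id cache entries, whose file names are hashes without a compression suffix, by retrying the extension check against dso->name. A sketch of that fallback, with is_supported_compression() reimplemented here as a stand-in:

/*
 * Sketch of the suffix fallback: if the file name carries no
 * recognisable compression extension, try the dso's original name
 * before giving up.
 */
#include <stdio.h>
#include <string.h>

static int is_supported_compression(const char *ext)
{
	return !strcmp(ext, "gz") || !strcmp(ext, "xz");
}

static const char *comp_ext(const char *name, const char *dso_name)
{
	const char *ext = strrchr(name, '.');

	if (!ext || !is_supported_compression(ext + 1)) {
		ext = strrchr(dso_name, '.');	/* fall back to dso->name */
		if (!ext || !is_supported_compression(ext + 1))
			return NULL;
	}
	return ext + 1;
}

int main(void)
{
	/* build-id cache files are named after the hash, not the module */
	const char *e = comp_ext("4da1b72e54afa", "nf_conntrack.ko.gz");

	printf("%s\n", e ? e : "none");		/* -> gz */
	return 0;
}
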
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c24c5b83156c..a69066865a55 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
396 const char *name) 396 const char *name)
397{ 397{
398 struct rb_node *n; 398 struct rb_node *n;
399 struct symbol_name_rb_node *s;
399 400
400 if (symbols == NULL) 401 if (symbols == NULL)
401 return NULL; 402 return NULL;
@@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
403 n = symbols->rb_node; 404 n = symbols->rb_node;
404 405
405 while (n) { 406 while (n) {
406 struct symbol_name_rb_node *s;
407 int cmp; 407 int cmp;
408 408
409 s = rb_entry(n, struct symbol_name_rb_node, rb_node); 409 s = rb_entry(n, struct symbol_name_rb_node, rb_node);
@@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
414 else if (cmp > 0) 414 else if (cmp > 0)
415 n = n->rb_right; 415 n = n->rb_right;
416 else 416 else
417 return &s->sym; 417 break;
418 } 418 }
419 419
420 return NULL; 420 if (n == NULL)
421 return NULL;
422
423 /* return first symbol that has same name (if any) */
424 for (n = rb_prev(n); n; n = rb_prev(n)) {
425 struct symbol_name_rb_node *tmp;
426
427 tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
428 if (strcmp(tmp->sym.name, s->sym.name))
429 break;
430
431 s = tmp;
432 }
433
434 return &s->sym;
421} 435}
422 436
423struct symbol *dso__find_symbol(struct dso *dso, 437struct symbol *dso__find_symbol(struct dso *dso,
@@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym)
436 return symbols__next(sym); 450 return symbols__next(sym);
437} 451}
438 452
453struct symbol *symbol__next_by_name(struct symbol *sym)
454{
455 struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
456 struct rb_node *n = rb_next(&s->rb_node);
457
458 return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
459}
460
461 /*
462 * Returns the first symbol that matches @name.
463 */
439struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, 464struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
440 const char *name) 465 const char *name)
441{ 466{
@@ -660,7 +685,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
660 struct machine *machine = kmaps->machine; 685 struct machine *machine = kmaps->machine;
661 struct map *curr_map = map; 686 struct map *curr_map = map;
662 struct symbol *pos; 687 struct symbol *pos;
663 int count = 0, moved = 0; 688 int count = 0, moved = 0;
664 struct rb_root *root = &dso->symbols[map->type]; 689 struct rb_root *root = &dso->symbols[map->type];
665 struct rb_node *next = rb_first(root); 690 struct rb_node *next = rb_first(root);
666 int kernel_range = 0; 691 int kernel_range = 0;
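
symbols__find_by_name() above no longer returns whichever matching rbtree node the search happens to land on; it walks rb_prev() back to the first node carrying the same name, so duplicate symbols can then be enumerated from the start with symbol__next_by_name(). The same idea on a sorted array (binary search, then step back over equal keys):

/*
 * Sketch: after any match is found, step backwards while the previous
 * entry still compares equal, returning the leftmost duplicate.
 */
#include <stdio.h>
#include <string.h>

static int find_first_by_name(const char **tab, int n, const char *name)
{
	int lo = 0, hi = n - 1, hit = -1;

	while (lo <= hi) {		/* ordinary binary search ... */
		int mid = (lo + hi) / 2;
		int cmp = strcmp(name, tab[mid]);

		if (cmp < 0)
			hi = mid - 1;
		else if (cmp > 0)
			lo = mid + 1;
		else {
			hit = mid;
			break;
		}
	}
	if (hit < 0)
		return -1;
	while (hit > 0 && !strcmp(tab[hit - 1], name))
		hit--;			/* ... then walk to the first dup */
	return hit;
}

int main(void)
{
	const char *syms[] = { "a", "t_show", "t_show", "t_show", "z" };

	printf("%d\n", find_first_by_name(syms, 5, "t_show"));	/* -> 1 */
	return 0;
}
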
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 9d602e9c6f59..1650dcb3a67b 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -231,6 +231,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
231 u64 addr); 231 u64 addr);
232struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, 232struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
233 const char *name); 233 const char *name);
234struct symbol *symbol__next_by_name(struct symbol *sym);
234 235
235struct symbol *dso__first_symbol(struct dso *dso, enum map_type type); 236struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
236struct symbol *dso__next_symbol(struct symbol *sym); 237struct symbol *dso__next_symbol(struct symbol *sym);
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 6edf535f65c2..e3c40a520a25 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -266,14 +266,17 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
266 u64 *fde_count) 266 u64 *fde_count)
267{ 267{
268 int ret = -EINVAL, fd; 268 int ret = -EINVAL, fd;
269 u64 offset; 269 u64 offset = dso->data.frame_offset;
270 270
271 fd = dso__data_fd(dso, machine); 271 if (offset == 0) {
272 if (fd < 0) 272 fd = dso__data_fd(dso, machine);
273 return -EINVAL; 273 if (fd < 0)
274 return -EINVAL;
274 275
275 /* Check the .eh_frame section for unwinding info */ 276 /* Check the .eh_frame section for unwinding info */
276 offset = elf_section_offset(fd, ".eh_frame_hdr"); 277 offset = elf_section_offset(fd, ".eh_frame_hdr");
278 dso->data.frame_offset = offset;
279 }
277 280
278 if (offset) 281 if (offset)
279 ret = unwind_spec_ehframe(dso, machine, offset, 282 ret = unwind_spec_ehframe(dso, machine, offset,
@@ -287,14 +290,20 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
287static int read_unwind_spec_debug_frame(struct dso *dso, 290static int read_unwind_spec_debug_frame(struct dso *dso,
288 struct machine *machine, u64 *offset) 291 struct machine *machine, u64 *offset)
289{ 292{
290 int fd = dso__data_fd(dso, machine); 293 int fd;
294 u64 ofs = dso->data.frame_offset;
291 295
292 if (fd < 0) 296 if (ofs == 0) {
293 return -EINVAL; 297 fd = dso__data_fd(dso, machine);
298 if (fd < 0)
299 return -EINVAL;
294 300
295 /* Check the .debug_frame section for unwinding info */ 301 /* Check the .debug_frame section for unwinding info */
296 *offset = elf_section_offset(fd, ".debug_frame"); 302 ofs = elf_section_offset(fd, ".debug_frame");
303 dso->data.frame_offset = ofs;
304 }
297 305
306 *offset = ofs;
298 if (*offset) 307 if (*offset)
299 return 0; 308 return 0;
300 309
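
The unwind-libunwind.c hunks above cache the .eh_frame_hdr/.debug_frame section offset in dso->data.frame_offset, so the ELF headers are parsed at most once per dso rather than on every unwound sample. A sketch of that memoization (field and helper names assumed); note that, as in the patch, an offset of exactly zero means "no unwind info" and would be probed again:

/*
 * Sketch: compute the section offset once, store it on the object,
 * and skip the expensive lookup on every later call.
 */
#include <stdio.h>

struct dso_data { unsigned long long frame_offset; };	/* 0 = not cached */

/* stand-in for elf_section_offset(fd, ".eh_frame_hdr") */
static unsigned long long slow_section_lookup(void)
{
	puts("parsing ELF headers ...");	/* happens only once */
	return 0x2428;
}

static unsigned long long frame_offset(struct dso_data *d)
{
	if (d->frame_offset == 0)
		d->frame_offset = slow_section_lookup();
	return d->frame_offset;
}

int main(void)
{
	struct dso_data d = { 0 };

	printf("%#llx\n", frame_offset(&d));	/* parses, then caches */
	printf("%#llx\n", frame_offset(&d));	/* served from cache   */
	return 0;
}
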
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
index abe14b7f36e9..bb99cde3f5f9 100755
--- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -24,7 +24,7 @@
24 24
25ncpus=`grep '^processor' /proc/cpuinfo | wc -l` 25ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
26idlecpus=`mpstat | tail -1 | \ 26idlecpus=`mpstat | tail -1 | \
27 awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'` 27 awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
28awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null ' 28awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
29BEGIN { 29BEGIN {
30 cpus2use = idlecpus; 30 cpus2use = idlecpus;
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index d6cc07fc137f..559e01ac86be 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -30,6 +30,7 @@ else
30 echo Unreadable results directory: $i 30 echo Unreadable results directory: $i
31 exit 1 31 exit 1
32fi 32fi
33. tools/testing/selftests/rcutorture/bin/functions.sh
33 34
34configfile=`echo $i | sed -e 's/^.*\///'` 35configfile=`echo $i | sed -e 's/^.*\///'`
35ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'` 36ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
@@ -48,4 +49,21 @@ else
48 title="$title ($ngpsps per second)" 49 title="$title ($ngpsps per second)"
49 fi 50 fi
50 echo $title 51 echo $title
52 nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
53 if test -z "$nclosecalls"
54 then
55 exit 0
56 fi
57 if test "$nclosecalls" -eq 0
58 then
59 exit 0
60 fi
61 # Compute number of close calls per tenth of an hour
62 nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null`
63 if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1
64 then
65 print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
66 else
67 print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
68 fi
51fi 69fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 8ca9f21f2efc..5236e073919d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -8,9 +8,9 @@
8# 8#
9# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args 9# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
10# 10#
11# qemu-args defaults to "-nographic", along with arguments specifying the 11# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
12# number of CPUs and other options generated from 12# arguments specifying the number of CPUs and other
13# the underlying CPU architecture. 13# options generated from the underlying CPU architecture.
14# boot_args defaults to value returned by the per_version_boot_params 14# boot_args defaults to value returned by the per_version_boot_params
15# shell function. 15# shell function.
16# 16#
@@ -138,7 +138,7 @@ then
138fi 138fi
139 139
140# Generate -smp qemu argument. 140# Generate -smp qemu argument.
141qemu_args="-nographic $qemu_args" 141qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
142cpu_count=`configNR_CPUS.sh $config_template` 142cpu_count=`configNR_CPUS.sh $config_template`
143cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"` 143cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
144vcpus=`identify_qemu_vcpus` 144vcpus=`identify_qemu_vcpus`
@@ -168,6 +168,7 @@ then
168 touch $resdir/buildonly 168 touch $resdir/buildonly
169 exit 0 169 exit 0
170fi 170fi
171echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
171echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd 172echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
172( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) & 173( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
173qemu_pid=$! 174qemu_pid=$!
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index 499d1e598e42..a6b57622c2e5 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -26,12 +26,15 @@
26# 26#
27# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 27# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
28 28
29T=$1 29F=$1
30title=$2 30title=$2
31T=/tmp/parse-build.sh.$$
32trap 'rm -rf $T' 0
33mkdir $T
31 34
32. functions.sh 35. functions.sh
33 36
34if grep -q CC < $T 37if grep -q CC < $F
35then 38then
36 : 39 :
37else 40else
@@ -39,18 +42,21 @@ else
39 exit 1 42 exit 1
40fi 43fi
41 44
42if grep -q "error:" < $T 45if grep -q "error:" < $F
43then 46then
44 print_bug $title build errors: 47 print_bug $title build errors:
45 grep "error:" < $T 48 grep "error:" < $F
46 exit 2 49 exit 2
47fi 50fi
48exit 0
49 51
50if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T 52grep warning: < $F > $T/warnings
53grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings
54grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings
55cat $T/hwarnings $T/cwarnings > $T/rcuwarnings
56if test -s $T/rcuwarnings
51then 57then
52 print_warning $title build errors: 58 print_warning $title build errors:
53 egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T 59 cat $T/rcuwarnings
54 exit 2 60 exit 2
55fi 61fi
56exit 0 62exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index f962ba4cf68b..d8f35cf116be 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file
36then 36then
37 print_warning Console output contains nul bytes, old qemu still running? 37 print_warning Console output contains nul bytes, old qemu still running?
38fi 38fi
39egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T 39egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
40if test -s $T 40if test -s $T
41then 41then
42 print_warning Assertion failure in $file $title 42 print_warning Assertion failure in $file $title