-rw-r--r--Documentation/CodeOfConflict27
-rw-r--r--Documentation/devicetree/bindings/arm/cci.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/exynos/power_domain.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/sti.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx.txt1
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe-phy.txt4
-rw-r--r--Documentation/devicetree/bindings/net/apm-xgene-enet.txt5
-rw-r--r--Documentation/devicetree/bindings/power/power_domain.txt29
-rw-r--r--Documentation/devicetree/bindings/serial/8250.txt (renamed from Documentation/devicetree/bindings/serial/of-serial.txt)0
-rw-r--r--Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt19
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt16
-rw-r--r--Documentation/devicetree/bindings/submitting-patches.txt3
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/power/suspend-and-interrupts.txt22
-rw-r--r--MAINTAINERS27
-rw-r--r--Makefile2
-rw-r--r--arch/arc/include/asm/processor.h14
-rw-r--r--arch/arc/include/asm/stacktrace.h37
-rw-r--r--arch/arc/kernel/process.c23
-rw-r--r--arch/arc/kernel/stacktrace.c21
-rw-r--r--arch/arc/kernel/unaligned.c2
-rw-r--r--arch/arc/mm/fault.c12
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi8
-rw-r--r--arch/arm/boot/dts/am335x-bone.dts8
-rw-r--r--arch/arm/boot/dts/am335x-lxm.dts4
-rw-r--r--arch/arm/boot/dts/am33xx-clocks.dtsi6
-rw-r--r--arch/arm/boot/dts/am43xx-clocks.dtsi12
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts10
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts10
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi90
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos4-cpu-thermal.dtsi52
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi45
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts19
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts57
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi38
-rw-r--r--arch/arm/boot/dts/exynos4212.dtsi5
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi64
-rw-r--r--arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi24
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts15
-rw-r--r--arch/arm/boot/dts/exynos4412.dtsi5
-rw-r--r--arch/arm/boot/dts/exynos4x12.dtsi12
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi44
-rw-r--r--arch/arm/boot/dts/exynos5420-trip-points.dtsi35
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi33
-rw-r--r--arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi24
-rw-r--r--arch/arm/boot/dts/exynos5440-trip-points.dtsi25
-rw-r--r--arch/arm/boot/dts/exynos5440.dtsi18
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sl-evk.dts2
-rw-r--r--arch/arm/boot/dts/omap5-core-thermal.dtsi2
-rw-r--r--arch/arm/boot/dts/omap5-gpu-thermal.dtsi2
-rw-r--r--arch/arm/boot/dts/omap5.dtsi4
-rw-r--r--arch/arm/boot/dts/omap54xx-clocks.dtsi41
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi6
-rw-r--r--arch/arm/configs/multi_v7_defconfig2
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/configs/sunxi_defconfig1
-rw-r--r--arch/arm/configs/vexpress_defconfig2
-rw-r--r--arch/arm/include/asm/arm-cci.h42
-rw-r--r--arch/arm/include/asm/kvm_mmu.h2
-rw-r--r--arch/arm/kvm/arm.c2
-rw-r--r--arch/arm/kvm/trace.h10
-rw-r--r--arch/arm/mach-exynos/Kconfig2
-rw-r--r--arch/arm/mach-exynos/platsmp.c3
-rw-r--r--arch/arm/mach-exynos/pm_domains.c28
-rw-r--r--arch/arm/mach-exynos/suspend.c4
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c5
-rw-r--r--arch/arm/mach-msm/board-halibut.c8
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c10
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c103
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c1
-rw-r--r--arch/arm/mach-omap2/prm44xx.c4
-rw-r--r--arch/arm/mach-pxa/idp.c6
-rw-r--r--arch/arm/mach-pxa/lpd270.c8
-rw-r--r--arch/arm/mach-realview/core.c7
-rw-r--r--arch/arm/mach-realview/realview_eb.c2
-rw-r--r--arch/arm/mach-sa1100/neponset.c6
-rw-r--r--arch/arm/mach-sa1100/pleb.c7
-rw-r--r--arch/arm/mach-socfpga/core.h2
-rw-r--r--arch/arm/mach-socfpga/socfpga.c5
-rw-r--r--arch/arm/mach-sti/board-dt.c1
-rw-r--r--arch/arm/mach-vexpress/Kconfig4
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi4
-rw-r--r--arch/arm64/include/asm/arm-cci.h27
-rw-r--r--arch/arm64/include/asm/tlb.h3
-rw-r--r--arch/arm64/include/asm/tlbflush.h13
-rw-r--r--arch/arm64/kernel/efi.c9
-rw-r--r--arch/arm64/kernel/head.S2
-rw-r--r--arch/arm64/kernel/process.c8
-rw-r--r--arch/arm64/mm/pageattr.c5
-rw-r--r--arch/c6x/include/asm/pgtable.h5
-rw-r--r--arch/microblaze/kernel/entry.S7
-rw-r--r--arch/mips/kvm/tlb.c1
-rw-r--r--arch/mips/kvm/trace.h6
-rw-r--r--arch/nios2/include/asm/ptrace.h47
-rw-r--r--arch/nios2/include/asm/ucontext.h32
-rw-r--r--arch/nios2/include/uapi/asm/Kbuild2
-rw-r--r--arch/nios2/include/uapi/asm/elf.h4
-rw-r--r--arch/nios2/include/uapi/asm/ptrace.h50
-rw-r--r--arch/nios2/include/uapi/asm/sigcontext.h12
-rw-r--r--arch/nios2/kernel/signal.c4
-rw-r--r--arch/powerpc/include/asm/iommu.h6
-rw-r--r--arch/powerpc/include/asm/irq_work.h9
-rw-r--r--arch/powerpc/kernel/iommu.c26
-rw-r--r--arch/powerpc/kernel/smp.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci.c26
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c2
-rw-r--r--arch/s390/include/asm/kvm_host.h12
-rw-r--r--arch/s390/include/asm/mmu_context.h2
-rw-r--r--arch/s390/include/asm/page.h11
-rw-r--r--arch/s390/kernel/jump_label.c12
-rw-r--r--arch/s390/kernel/module.c1
-rw-r--r--arch/s390/kernel/processor.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c68
-rw-r--r--arch/s390/kvm/kvm-s390.h3
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/pci/pci.c28
-rw-r--r--arch/s390/pci/pci_mmio.c17
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/xsave.h28
-rw-r--r--arch/x86/kernel/entry_64.S13
-rw-r--r--arch/x86/kvm/emulate.c3
-rw-r--r--arch/x86/kvm/lapic.c4
-rw-r--r--arch/x86/kvm/svm.c6
-rw-r--r--arch/x86/kvm/vmx.c23
-rw-r--r--arch/x86/pci/acpi.c11
-rw-r--r--arch/x86/xen/p2m.c2
-rw-r--r--drivers/acpi/acpi_lpss.c5
-rw-r--r--drivers/acpi/resource.c4
-rw-r--r--drivers/acpi/video.c20
-rw-r--r--drivers/android/binder.c10
-rw-r--r--drivers/ata/sata_fsl.c2
-rw-r--r--drivers/base/power/domain.c24
-rw-r--r--drivers/base/power/wakeup.c1
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/bus/Kconfig26
-rw-r--r--drivers/bus/arm-cci.c517
-rw-r--r--drivers/char/tpm/tpm-chip.c34
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c10
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.h6
-rw-r--r--drivers/clk/at91/pmc.c20
-rw-r--r--drivers/clk/at91/pmc.h1
-rw-r--r--drivers/clk/clk-divider.c29
-rw-r--r--drivers/clk/clk.c27
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c13
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c7
-rw-r--r--drivers/clk/ti/fapll.c6
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c21
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle.c61
-rw-r--r--drivers/dma-buf/fence.c3
-rw-r--r--drivers/dma-buf/reservation.c5
-rw-r--r--drivers/dma/at_xdmac.c7
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/ioat/dma_v3.c4
-rw-r--r--drivers/dma/mmp_pdma.c10
-rw-r--r--drivers/dma/mmp_tdma.c31
-rw-r--r--drivers/dma/qcom_bam_dma.c10
-rw-r--r--drivers/dma/sh/shdmac.c15
-rw-r--r--drivers/firmware/dmi_scan.c17
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c8
-rw-r--r--drivers/gpu/drm/drm_crtc.c35
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c11
-rw-r--r--drivers/gpu/drm/drm_mm.c154
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c30
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c18
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c8
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c36
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c28
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h15
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c99
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c6
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c5
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c30
-rw-r--r--drivers/gpu/drm/radeon/cik.c3
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c68
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c59
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c68
-rw-r--r--drivers/gpu/drm/radeon/rs600.c4
-rw-r--r--drivers/gpu/drm/radeon/si.c9
-rw-r--r--drivers/gpu/drm/radeon/sid.h4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c78
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c14
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c40
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/iio/adc/mcp3422.c17
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c3
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c2
-rw-r--r--drivers/iio/dac/ad5686.c2
-rw-r--r--drivers/iio/humidity/dht11.c69
-rw-r--r--drivers/iio/humidity/si7020.c6
-rw-r--r--drivers/iio/imu/adis16400_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c6
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/magnetometer/Kconfig2
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c6
-rw-r--r--drivers/input/misc/mma8450.c1
-rw-r--r--drivers/input/mouse/alps.c4
-rw-r--r--drivers/input/mouse/cyapa_gen3.c2
-rw-r--r--drivers/input/mouse/cyapa_gen5.c4
-rw-r--r--drivers/input/mouse/focaltech.c50
-rw-r--r--drivers/input/mouse/psmouse-base.c14
-rw-r--r--drivers/input/mouse/psmouse.h6
-rw-r--r--drivers/input/touchscreen/Kconfig1
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/exynos-iommu.c7
-rw-r--r--drivers/iommu/io-pgtable-arm.c5
-rw-r--r--drivers/iommu/omap-iommu.c7
-rw-r--r--drivers/iommu/rockchip-iommu.c7
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c21
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c157
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-gic.c20
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/misc/mei/init.c2
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c50
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/appletalk/Kconfig2
-rw-r--r--drivers/net/can/dev.c8
-rw-r--r--drivers/net/can/usb/kvaser_usb.c48
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c4
-rw-r--r--drivers/net/dsa/bcm_sf2.h2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c7
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c7
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c47
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c175
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c122
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c6
-rw-r--r--drivers/net/ethernet/cadence/macb.c8
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c54
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c3
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c23
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c246
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c119
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c143
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c9
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c32
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c7
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c10
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h114
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c65
-rw-r--r--drivers/net/ethernet/sun/niu.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw.c9
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c5
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/macvtap.c7
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c82
-rw-r--r--drivers/net/phy/phy.c23
-rw-r--r--drivers/net/team/team.c10
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix_devices.c4
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/plusb.c5
-rw-r--r--drivers/net/wan/cosa.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/xen-netback/interface.c3
-rw-r--r--drivers/net/xen-netback/netback.c51
-rw-r--r--drivers/of/Kconfig3
-rw-r--r--drivers/of/base.c27
-rw-r--r--drivers/of/overlay.c3
-rw-r--r--drivers/of/unittest.c28
-rw-r--r--drivers/pci/host/pci-versatile.c2
-rw-r--r--drivers/pci/host/pci-xgene.c4
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/regulator/core.c7
-rw-r--r--drivers/regulator/da9210-regulator.c9
-rw-r--r--drivers/regulator/rk808-regulator.c8
-rw-r--r--drivers/rtc/rtc-at91rm9200.c62
-rw-r--r--drivers/rtc/rtc-at91sam9.c73
-rw-r--r--drivers/rtc/rtc-s3c.c1
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/scm_blk_cluster.c2
-rw-r--r--drivers/scsi/libsas/sas_discover.c6
-rw-r--r--drivers/spi/spi-atmel.c12
-rw-r--r--drivers/spi/spi-dw-mid.c6
-rw-r--r--drivers/spi/spi-dw-pci.c4
-rw-r--r--drivers/spi/spi-dw.c4
-rw-r--r--drivers/spi/spi-img-spfi.c7
-rw-r--r--drivers/spi/spi-pl022.c2
-rw-r--r--drivers/spi/spi-ti-qspi.c22
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c3
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.c5
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c71
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c207
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c3
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.c10
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c3
-rw-r--r--drivers/thermal/thermal_core.c37
-rw-r--r--drivers/tty/bfin_jtag_comm.c13
-rw-r--r--drivers/tty/serial/8250/8250_core.c11
-rw-r--r--drivers/tty/serial/8250/8250_dw.c32
-rw-r--r--drivers/tty/serial/8250/8250_pci.c20
-rw-r--r--drivers/tty/serial/atmel_serial.c49
-rw-r--r--drivers/tty/serial/of_serial.c4
-rw-r--r--drivers/tty/serial/sprd_serial.c4
-rw-r--r--drivers/tty/tty_io.c4
-rw-r--r--drivers/tty/tty_ioctl.c16
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c30
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c204
-rw-r--r--drivers/usb/gadget/function/f_hid.c2
-rw-r--r--drivers/usb/gadget/function/f_phonet.c5
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c4
-rw-r--r--drivers/usb/gadget/function/f_uac2.c34
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c1
-rw-r--r--drivers/usb/gadget/function/uvc_video.c1
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c6
-rw-r--r--drivers/usb/gadget/legacy/inode.c466
-rw-r--r--drivers/usb/host/xhci-pci.c30
-rw-r--r--drivers/usb/host/xhci-plat.c19
-rw-r--r--drivers/usb/host/xhci-ring.c12
-rw-r--r--drivers/usb/host/xhci.c100
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c6
-rw-r--r--drivers/usb/musb/musb_core.c10
-rw-r--r--drivers/usb/musb/musb_dsps.c32
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/omap2430.c7
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig1
-rw-r--r--drivers/usb/serial/bus.c45
-rw-r--r--drivers/usb/serial/ch341.c15
-rw-r--r--drivers/usb/serial/console.c2
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c19
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h23
-rw-r--r--drivers/usb/serial/generic.c5
-rw-r--r--drivers/usb/serial/mxuport.c3
-rw-r--r--drivers/usb/serial/pl2303.c18
-rw-r--r--drivers/usb/serial/usb-serial.c21
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c6
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/vhost/net.c25
-rw-r--r--drivers/video/fbdev/amba-clcd.c3
-rw-r--r--drivers/video/fbdev/core/fbmon.c6
-rw-r--r--drivers/video/fbdev/omap2/dss/display-sysfs.c179
-rw-r--r--drivers/watchdog/at91sam9_wdt.c3
-rw-r--r--drivers/xen/events/events_base.c18
-rw-r--r--drivers/xen/xen-pciback/conf_space.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.h2
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c61
-rw-r--r--fs/btrfs/ctree.c8
-rw-r--r--fs/btrfs/extent-tree.c16
-rw-r--r--fs/btrfs/file.c87
-rw-r--r--fs/btrfs/inode.c1
-rw-r--r--fs/btrfs/ordered-data.c7
-rw-r--r--fs/btrfs/send.c171
-rw-r--r--fs/btrfs/transaction.c3
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/xattr.c8
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h4
-rw-r--r--fs/ecryptfs/file.c34
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ecryptfs/main.c2
-rw-r--r--fs/locks.c5
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/delegation.c45
-rw-r--r--fs/nfs/dir.c22
-rw-r--r--fs/nfs/file.c11
-rw-r--r--fs/nfs/inode.c111
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs3proc.c4
-rw-r--r--fs/nfs/nfs3xdr.c5
-rw-r--r--fs/nfs/nfs4client.c9
-rw-r--r--fs/nfs/nfs4proc.c31
-rw-r--r--fs/nfs/nfs4session.h1
-rw-r--r--fs/nfs/nfs4state.c18
-rw-r--r--fs/nfs/proc.c6
-rw-r--r--fs/nfs/write.c30
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/nilfs2/segment.c7
-rw-r--r--fs/notify/fanotify/fanotify.c3
-rw-r--r--fs/ocfs2/ocfs2.h2
-rw-r--r--fs/ocfs2/ocfs2_fs.h15
-rw-r--r--include/drm/drm_mm.h52
-rw-r--r--include/drm/ttm/ttm_bo_api.h2
-rw-r--r--include/drm/ttm/ttm_bo_driver.h2
-rw-r--r--include/dt-bindings/pinctrl/am33xx.h3
-rw-r--r--include/dt-bindings/pinctrl/am43xx.h3
-rw-r--r--include/linux/arm-cci.h9
-rw-r--r--include/linux/clk.h18
-rw-r--r--include/linux/cpuidle.h17
-rw-r--r--include/linux/interrupt.h9
-rw-r--r--include/linux/irqchip/arm-gic-v3.h5
-rw-r--r--include/linux/irqdesc.h1
-rw-r--r--include/linux/kasan.h9
-rw-r--r--include/linux/mlx4/qp.h2
-rw-r--r--include/linux/moduleloader.h8
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/nfs_fs.h5
-rw-r--r--include/linux/of_platform.h2
-rw-r--r--include/linux/rhashtable.h22
-rw-r--r--include/linux/serial_core.h14
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/usb/serial.h3
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/linux/workqueue.h3
-rw-r--r--include/net/caif/cfpkt.h2
-rw-r--r--include/net/netfilter/nf_tables.h22
-rw-r--r--include/uapi/linux/serial.h4
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/video/omapdss.h1
-rw-r--r--include/xen/xenbus.h4
-rw-r--r--kernel/cpuset.c9
-rw-r--r--kernel/irq/manage.c7
-rw-r--r--kernel/irq/pm.c7
-rw-r--r--kernel/livepatch/core.c3
-rw-r--r--kernel/module.c4
-rw-r--r--kernel/printk/console_cmdline.h2
-rw-r--r--kernel/printk/printk.c1
-rw-r--r--kernel/sched/idle.c54
-rw-r--r--kernel/trace/ftrace.c40
-rw-r--r--kernel/workqueue.c56
-rw-r--r--lib/Makefile2
-rw-r--r--lib/iov_iter.c (renamed from mm/iov_iter.c)15
-rw-r--r--lib/rhashtable.c62
-rw-r--r--lib/seq_buf.c4
-rw-r--r--lib/test_rhashtable.c11
-rw-r--r--mm/Makefile2
-rw-r--r--mm/cma.c12
-rw-r--r--mm/huge_memory.c11
-rw-r--r--mm/hugetlb.c4
-rw-r--r--mm/kasan/kasan.c14
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/memory.c7
-rw-r--r--mm/mlock.c4
-rw-r--r--mm/nommu.c1
-rw-r--r--mm/page_alloc.c3
-rw-r--r--mm/vmalloc.c1
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/caif/cffrml.c2
-rw-r--r--net/caif/cfpkt_skbuff.c6
-rw-r--r--net/can/af_can.c3
-rw-r--r--net/compat.c9
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/gen_stats.c15
-rw-r--r--net/core/pktgen.c3
-rw-r--r--net/core/rtnetlink.c15
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/hsr/hsr_device.c3
-rw-r--r--net/hsr/hsr_main.c4
-rw-r--r--net/hsr/hsr_slave.c10
-rw-r--r--net/ipv4/ip_fragment.c11
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_sockglue.c33
-rw-r--r--net/ipv4/ping.c12
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv6/addrconf.c17
-rw-r--r--net/ipv6/datagram.c39
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/ping.c5
-rw-r--r--net/irda/ircomm/ircomm_tty.c6
-rw-r--r--net/irda/irnet/irnet_ppp.c4
-rw-r--r--net/mac80211/chan.c5
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/tx.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c3
-rw-r--r--net/netfilter/nf_tables_api.c61
-rw-r--r--net/netfilter/nft_compat.c26
-rw-r--r--net/netfilter/nft_hash.c2
-rw-r--r--net/netfilter/xt_recent.c11
-rw-r--r--net/netfilter/xt_socket.c21
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/openvswitch/datapath.c45
-rw-r--r--net/openvswitch/flow_netlink.c8
-rw-r--r--net/openvswitch/vport.h2
-rw-r--r--net/packet/af_packet.c42
-rw-r--r--net/rxrpc/ar-ack.c9
-rw-r--r--net/rxrpc/ar-error.c4
-rw-r--r--net/sched/ematch.c1
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c3
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h2
-rw-r--r--net/tipc/link.c7
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/wireless/core.c1
-rw-r--r--net/wireless/nl80211.c12
-rw-r--r--net/wireless/reg.c2
-rw-r--r--sound/core/control.c4
-rw-r--r--sound/drivers/opl3/opl3_midi.c2
-rw-r--r--sound/firewire/iso-resources.c3
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c5
-rw-r--r--sound/isa/msnd/msnd_pinnacle_mixer.c3
-rw-r--r--sound/pci/hda/hda_controller.c2
-rw-r--r--sound/pci/hda/hda_generic.c30
-rw-r--r--sound/pci/hda/patch_cirrus.c2
-rw-r--r--sound/pci/hda/patch_conexant.c11
-rw-r--r--sound/pci/hda/patch_realtek.c7
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c68
-rw-r--r--sound/soc/cirrus/Kconfig2
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/max98357a.c12
-rw-r--r--sound/soc/codecs/rt5670.c7
-rw-r--r--sound/soc/codecs/rt5677.c32
-rw-r--r--sound/soc/codecs/sta32x.c6
-rw-r--r--sound/soc/fsl/fsl_spdif.c4
-rw-r--r--sound/soc/fsl/fsl_ssi.c11
-rw-r--r--sound/soc/generic/simple-card.c5
-rw-r--r--sound/soc/intel/sst-atom-controls.h2
-rw-r--r--sound/soc/intel/sst/sst.c10
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c2
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c3
-rw-r--r--sound/soc/omap/omap-mcbsp.c11
-rw-r--r--sound/soc/omap/omap-pcm.c2
-rw-r--r--sound/soc/samsung/Kconfig10
-rw-r--r--sound/soc/sh/rcar/core.c4
-rw-r--r--sound/usb/line6/playback.c6
-rw-r--r--sound/usb/quirks-table.h30
-rw-r--r--tools/power/cpupower/Makefile2
-rw-r--r--tools/testing/selftests/exec/execveat.c10
572 files changed, 6740 insertions, 3616 deletions
diff --git a/Documentation/CodeOfConflict b/Documentation/CodeOfConflict
new file mode 100644
index 000000000000..1684d0b4efa6
--- /dev/null
+++ b/Documentation/CodeOfConflict
@@ -0,0 +1,27 @@
+Code of Conflict
+----------------
+
+The Linux kernel development effort is a very personal process compared
+to "traditional" ways of developing software. Your code and ideas
+behind it will be carefully reviewed, often resulting in critique and
+criticism. The review will almost always require improvements to the
+code before it can be included in the kernel. Know that this happens
+because everyone involved wants to see the best possible solution for
+the overall success of Linux. This development process has been proven
+to create the most robust operating system kernel ever, and we do not
+want to do anything to cause the quality of submission and eventual
+result to ever decrease.
+
+If however, anyone feels personally abused, threatened, or otherwise
+uncomfortable due to this process, that is not acceptable. If so,
+please contact the Linux Foundation's Technical Advisory Board at
+<tab@lists.linux-foundation.org>, or the individual members, and they
+will work to resolve the issue to the best of their ability. For more
+information on who is on the Technical Advisory Board and what their
+role is, please see:
+	http://www.linuxfoundation.org/programs/advisory-councils/tab
+
+As a reviewer of code, please strive to keep things civil and focused on
+the technical issues involved. We are all humans, and frustrations can
+be high on both sides of the process. Try to keep in mind the immortal
+words of Bill and Ted, "Be excellent to each other."
diff --git a/Documentation/devicetree/bindings/arm/cci.txt b/Documentation/devicetree/bindings/arm/cci.txt
index f28d82bbbc56..3c5c631328d3 100644
--- a/Documentation/devicetree/bindings/arm/cci.txt
+++ b/Documentation/devicetree/bindings/arm/cci.txt
@@ -94,8 +94,11 @@ specific to ARM.
 	- compatible
 		Usage: required
 		Value type: <string>
-		Definition: must be "arm,cci-400-pmu"
-
+		Definition: Must contain one of:
+			 "arm,cci-400-pmu,r0"
+			 "arm,cci-400-pmu,r1"
+			 "arm,cci-400-pmu"  - DEPRECATED, permitted only where OS has
+					      secure acces to CCI registers
 	- reg:
 		Usage: required
 		Value type: Integer cells. A register entry, expressed
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index f4445e5a2bbb..1e097037349c 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -22,6 +22,8 @@ Optional Properties:
 - pclkN, clkN: Pairs of parent of input clock and input clock to the
 	devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
 	are supported currently.
+- power-domains: phandle pointing to the parent power domain, for more details
+	see Documentation/devicetree/bindings/power/power_domain.txt
 
 Node of a device using power domains must have a power-domains property
 defined with a phandle to respective power domain.
diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt
index d70ec358736c..8d27f6b084c7 100644
--- a/Documentation/devicetree/bindings/arm/sti.txt
+++ b/Documentation/devicetree/bindings/arm/sti.txt
@@ -13,6 +13,10 @@ Boards with the ST STiH407 SoC shall have the following properties:
 Required root node property:
 compatible = "st,stih407";
 
+Boards with the ST STiH410 SoC shall have the following properties:
+Required root node property:
+compatible = "st,stih410";
+
 Boards with the ST STiH418 SoC shall have the following properties:
 Required root node property:
 compatible = "st,stih418";
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
index 52d37fd8d3e5..ce4311d726ae 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
@@ -7,6 +7,7 @@ Required properties:
   - "fsl,vf610-i2c" for I2C compatible with the one integrated on Vybrid vf610 SoC
 - reg : Should contain I2C/HS-I2C registers location and length
 - interrupts : Should contain I2C/HS-I2C interrupt
+- clocks : Should contain the I2C/HS-I2C clock specifier
 
 Optional properties:
 - clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz.
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
index 33df3932168e..8db32384a486 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
@@ -27,6 +27,8 @@ property is used.
 - amd,serdes-cdr-rate: CDR rate speed selection
 - amd,serdes-pq-skew: PQ (data sampling) skew
 - amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
 	xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@ Example:
 		amd,serdes-cdr-rate = <2>, <2>, <7>;
 		amd,serdes-pq-skew = <10>, <10>, <30>;
 		amd,serdes-tx-amp = <15>, <15>, <10>;
+		amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+		amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
 	};
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index cfcc52705ed8..6151999c5dca 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -4,7 +4,10 @@ Ethernet nodes are defined to describe on-chip ethernet interfaces in
 APM X-Gene SoC.
 
 Required properties for all the ethernet interfaces:
-- compatible: Should be "apm,xgene-enet"
+- compatible: Should state binding information from the following list,
+  - "apm,xgene-enet": RGMII based 1G interface
+  - "apm,xgene1-sgenet": SGMII based 1G interface
+  - "apm,xgene1-xgenet": XFI based 10G interface
 - reg: Address and length of the register set for the device. It contains the
   information of registers in the same order as described by reg-names
 - reg-names: Should contain the register set names
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 98c16672ab5f..0f8ed3710c66 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -19,6 +19,16 @@ Required properties:
   providing multiple PM domains (e.g. power controllers), but can be any value
   as specified by device tree binding documentation of particular provider.
 
+Optional properties:
+ - power-domains : A phandle and PM domain specifier as defined by bindings of
+     the power controller specified by phandle.
+     Some power domains might be powered from another power domain (or have
+     other hardware specific dependencies). For representing such dependency
+     a standard PM domain consumer binding is used. When provided, all domains
+     created by the given provider should be subdomains of the domain
+     specified by this binding. More details about power domain specifier are
+     available in the next section.
+
 Example:
 
 	power: power-controller@12340000 {
@@ -30,6 +40,25 @@ Example:
 The node above defines a power controller that is a PM domain provider and
 expects one cell as its phandle argument.
 
+Example 2:
+
+	parent: power-controller@12340000 {
+		compatible = "foo,power-controller";
+		reg = <0x12340000 0x1000>;
+		#power-domain-cells = <1>;
+	};
+
+	child: power-controller@12340000 {
+		compatible = "foo,power-controller";
+		reg = <0x12341000 0x1000>;
+		power-domains = <&parent 0>;
+		#power-domain-cells = <1>;
+	};
+
+The nodes above define two power controllers: 'parent' and 'child'.
+Domains created by the 'child' power controller are subdomains of '0' power
+domain provided by the 'parent' power controller.
+
 ==PM domain consumers==
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/8250.txt
index 91d5ab0e60fc..91d5ab0e60fc 100644
--- a/Documentation/devicetree/bindings/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
new file mode 100644
index 000000000000..ebcbb62c0a76
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
@@ -0,0 +1,19 @@
+ETRAX FS UART
+
+Required properties:
+- compatible : "axis,etraxfs-uart"
+- reg: offset and length of the register set for the device.
+- interrupts: device interrupt
+
+Optional properties:
+- {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD
+  line respectively.
+
+Example:
+
+serial@b00260000 {
+	compatible = "axis,etraxfs-uart";
+	reg = <0xb0026000 0x1000>;
+	interrupts = <68>;
+	status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
index 7f76214f728a..289c40ed7470 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
@@ -21,6 +21,18 @@ Optional properties:
 - reg-io-width : the size (in bytes) of the IO accesses that should be
   performed on the device. If this property is not present then single byte
   accesses are used.
+- dcd-override : Override the DCD modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- dsr-override : Override the DTS modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- cts-override : Override the CTS modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- ri-override : Override the RI modem status signal. This signal will always be
+  reported as inactive instead of being obtained from the modem status register.
+  Define this if your serial port does not use this pin.
 
 Example:
 
@@ -31,6 +43,10 @@ Example:
 		interrupts = <10>;
 		reg-shift = <2>;
 		reg-io-width = <4>;
+		dcd-override;
+		dsr-override;
+		cts-override;
+		ri-override;
 	};
 
 Example with one clock:
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 56742bc70218..7d44eae7ab0b 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -12,6 +12,9 @@ I. For patch submitters
 
        devicetree@vger.kernel.org
 
+     and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify
+     all of the DT maintainers.
+
   3) The Documentation/ portion of the patch should come in the series before
      the code implementing the binding.
 
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 389ca1347a77..fae26d014aaf 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -20,6 +20,7 @@ amlogic Amlogic, Inc.
 ams	AMS AG
 amstaos	AMS-Taos Inc.
 apm	Applied Micro Circuits Corporation (APM)
+arasan	Arasan Chip Systems
 arm	ARM Ltd.
 armadeus	ARMadeus Systems SARL
 asahi-kasei	Asahi Kasei Corp.
@@ -27,6 +28,7 @@ atmel Atmel Corporation
 auo	AU Optronics Corporation
 avago	Avago Technologies
 avic	Shanghai AVIC Optoelectronics Co., Ltd.
+axis	Axis Communications AB
 bosch	Bosch Sensortec GmbH
 brcm	Broadcom Corporation
 buffalo	Buffalo, Inc.
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt
index 2f9c5a5fcb25..8afb29a8604a 100644
--- a/Documentation/power/suspend-and-interrupts.txt
+++ b/Documentation/power/suspend-and-interrupts.txt
@@ -40,8 +40,10 @@ but also to IPIs and to some other special-purpose interrupts.
 
 The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
 requesting a special-purpose interrupt. It causes suspend_device_irqs() to
-leave the corresponding IRQ enabled so as to allow the interrupt to work all
-the time as expected.
+leave the corresponding IRQ enabled so as to allow the interrupt to work as
+expected during the suspend-resume cycle, but does not guarantee that the
+interrupt will wake the system from a suspended state -- for such cases it is
+necessary to use enable_irq_wake().
 
 Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
 user of it. Thus, if the IRQ is shared, all of the interrupt handlers installed
@@ -110,8 +112,9 @@ any special interrupt handling logic for it to work.
 IRQF_NO_SUSPEND and enable_irq_wake()
 -------------------------------------
 
-There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
-flag on the same IRQ.
+There are very few valid reasons to use both enable_irq_wake() and the
+IRQF_NO_SUSPEND flag on the same IRQ, and it is never valid to use both for the
+same device.
 
 First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
 interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
@@ -120,4 +123,13 @@ handlers are not invoked after suspend_device_irqs()).
 
 Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
 to individual interrupt handlers, so sharing an IRQ between a system wakeup
-interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
+interrupt source and an IRQF_NO_SUSPEND interrupt source does not generally
+make sense.
+
+In rare cases an IRQ can be shared between a wakeup device driver and an
+IRQF_NO_SUSPEND user. In order for this to be safe, the wakeup device driver
+must be able to discern spurious IRQs from genuine wakeup events (signalling
+the latter to the core with pm_system_wakeup()), must use enable_irq_wake() to
+ensure that the IRQ will function as a wakeup source, and must request the IRQ
+with IRQF_COND_SUSPEND to tell the core that it meets these requirements. If
+these requirements are not met, it is not valid to use IRQF_COND_SUSPEND.
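
As a rough, hypothetical illustration of the shared wakeup IRQ rules in the hunk above (not part of this patch; the foo_* names and helpers are invented for the sketch), a driver meeting the IRQF_COND_SUSPEND requirements might look roughly like this:

/*
 * Hypothetical sketch only: a wakeup-capable driver sharing its IRQ line
 * with an IRQF_NO_SUSPEND user, following the rules documented above.
 */
static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_event_pending(foo))	/* spurious: the line is shared */
		return IRQ_NONE;

	if (foo_is_wakeup_event(foo))
		pm_system_wakeup();	/* report a genuine wakeup event */

	foo_ack_event(foo);
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo = foo_create(&pdev->dev);
	int irq = platform_get_irq(pdev, 0);
	int ret;

	/* IRQF_COND_SUSPEND: tell the core the handler copes with sharing */
	ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler,
			       IRQF_SHARED | IRQF_COND_SUSPEND, "foo", foo);
	if (ret)
		return ret;

	device_init_wakeup(&pdev->dev, true);
	return enable_irq_wake(irq);	/* arm the IRQ as a wakeup source */
}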
diff --git a/MAINTAINERS b/MAINTAINERS
index ad18cd135a87..7b4f760e4414 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1030,6 +1030,16 @@ F: arch/arm/mach-mxs/
 F:	arch/arm/boot/dts/imx*
 F:	arch/arm/configs/imx*_defconfig
 
+ARM/FREESCALE VYBRID ARM ARCHITECTURE
+M:	Shawn Guo <shawn.guo@linaro.org>
+M:	Sascha Hauer <kernel@pengutronix.de>
+R:	Stefan Agner <stefan@agner.ch>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+F:	arch/arm/mach-imx/*vf610*
+F:	arch/arm/boot/dts/vf*
+
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1188,6 +1198,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
 M:	Jason Cooper <jason@lakedaemon.net>
 M:	Andrew Lunn <andrew@lunn.ch>
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+M:	Gregory Clement <gregory.clement@free-electrons.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-dove/
@@ -2066,7 +2077,7 @@ F: include/net/bluetooth/
 BONDING DRIVER
 M:	Jay Vosburgh <j.vosburgh@gmail.com>
 M:	Veaceslav Falico <vfalico@gmail.com>
-M:	Andy Gospodarek <andy@greyhouse.net>
+M:	Andy Gospodarek <gospo@cumulusnetworks.com>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -2108,7 +2119,6 @@ F: drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:	Christian Daudt <bcm@fixthebug.org>
-M:	Matt Porter <mporter@linaro.org>
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	bcm-kernel-feedback-list@broadcom.com
 T:	git git://github.com/broadcom/mach-bcm
@@ -2370,8 +2380,9 @@ F: arch/x86/include/asm/tce.h
 
 CAN NETWORK LAYER
 M:	Oliver Hartkopp <socketcan@hartkopp.net>
+M:	Marc Kleine-Budde <mkl@pengutronix.de>
 L:	linux-can@vger.kernel.org
-W:	http://gitorious.org/linux-can
+W:	https://github.com/linux-can
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:	Maintained
@@ -2387,7 +2398,7 @@ CAN NETWORK DRIVERS
 M:	Wolfgang Grandegger <wg@grandegger.com>
 M:	Marc Kleine-Budde <mkl@pengutronix.de>
 L:	linux-can@vger.kernel.org
-W:	http://gitorious.org/linux-can
+W:	https://github.com/linux-can
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:	Maintained
@@ -8481,6 +8492,14 @@ S: Supported
 L:	netdev@vger.kernel.org
 F:	drivers/net/ethernet/samsung/sxgbe/
 
+SAMSUNG THERMAL DRIVER
+M:	Lukasz Majewski <l.majewski@samsung.com>
+L:	linux-pm@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org
+S:	Supported
+T:	https://github.com/lmajewski/linux-samsung-thermal.git
+F:	drivers/thermal/samsung/
+
 SAMSUNG USB2 PHY DRIVER
 M:	Kamil Debski <k.debski@samsung.com>
 L:	linux-kernel@vger.kernel.org
diff --git a/Makefile b/Makefile
index e6a9b1b94656..e734965b1604 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 4e547296831d..52312cb5dbe2 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -47,9 +47,6 @@ struct thread_struct {
 /* Forward declaration, a strange C thing */
 struct task_struct;
 
-/* Return saved PC of a blocked thread */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
 
@@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t);
 #define release_segments(mm)		do { } while (0)
 
 #define KSTK_EIP(tsk)	(task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->sp)
 
 /*
  * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
-#define KSTK_ESP(tsk)	(tsk->thread.ksp)
+#define TSK_K_ESP(tsk)		(tsk->thread.ksp)
 
-#define KSTK_REG(tsk, off)	(*((unsigned int *)(KSTK_ESP(tsk) + \
-					sizeof(struct callee_regs) + off)))
+#define TSK_K_REG(tsk, off)	(*((unsigned int *)(TSK_K_ESP(tsk) + \
+					sizeof(struct callee_regs) + off)))
 
-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
-#define KSTK_FP(tsk)    KSTK_REG(tsk, 0)
+#define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
+
+#define thread_saved_pc(tsk)	TSK_K_BLINK(tsk)
 
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
 			 unsigned long usp);
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644
index 000000000000..b29b6064ea14
--- /dev/null
+++ b/arch/arc/include/asm/stacktrace.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk:		NULL for current task, specific task otherwise
+ * @regs:		pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ *			If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ *			use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn:	Callback invoked for each frame unwound
+ *			Returns 0 to continue unwinding, -1 to stop
+ * @arg:		Arg to callback
+ *
+ * Returns the address of first function in stack
+ *
+ * Semantics:
+ *  - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL
+ *  - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL
+ *  - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+	struct task_struct *tsk, struct pt_regs *regs,
+	int (*consumer_fn) (unsigned int, void *),
+	void *arg);
+
+#endif /* __ASM_STACKTRACE_H */
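
The consumer callback contract documented in the new header above could be exercised with a small, hypothetical sketch like the following (print_frame and its caller are illustration only, not part of this patch; assumes <linux/printk.h> and <asm/stacktrace.h> are included):

/* Hypothetical consumer: print each unwound return address and keep going */
static int print_frame(unsigned int address, void *arg)
{
	unsigned int *depth = arg;

	pr_info("  #%u: %pS\n", (*depth)++, (void *)address);
	return 0;		/* return -1 to stop unwinding early */
}

static void dump_current_stack_sketch(void)
{
	unsigned int depth = 0;

	/* tsk == NULL, regs == NULL: synchronous unwind of the current context */
	arc_unwind_core(NULL, NULL, print_frame, &depth);
}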
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index fdd89715d2d3..98c00a2d4dd9 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -192,29 +192,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 	return 0;
 }
 
-/*
- * API: expected by schedular Code: If thread is sleeping where is that.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- * So we hard code that anyways.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	struct pt_regs *regs = task_pt_regs(t);
-	unsigned long blink = 0;
-
-	/*
-	 * If the thread being queried for in not itself calling this, then it
-	 * implies it is not executing, which in turn implies it is sleeping,
-	 * which in turn implies it got switched OUT by the schedular.
-	 * In that case, it's kernel mode blink can reliably retrieved as per
-	 * the picture above (right above pt_regs).
-	 */
-	if (t != current && t->state != TASK_RUNNING)
-		blink = *((unsigned int *)regs - 1);
-
-	return blink;
-}
-
 int elf_check_arch(const struct elf32_hdr *x)
 {
 	unsigned int eflags;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 9ce47cfe2303..92320d6f737c 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -43,6 +43,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
 				       struct pt_regs *regs,
 				       struct unwind_frame_info *frame_info)
 {
+	/*
+	 * synchronous unwinding (e.g. dump_stack)
+	 * - uses current values of SP and friends
+	 */
 	if (tsk == NULL && regs == NULL) {
 		unsigned long fp, sp, blink, ret;
 		frame_info->task = current;
@@ -61,12 +65,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
 		frame_info->regs.r63 = ret;
 		frame_info->call_frame = 0;
 	} else if (regs == NULL) {
+		/*
+		 * Asynchronous unwinding of sleeping task
+		 * - Gets SP etc from task's pt_regs (saved bottom of kernel
+		 *   mode stack of task)
+		 */
 
 		frame_info->task = tsk;
 
-		frame_info->regs.r27 = KSTK_FP(tsk);
-		frame_info->regs.r28 = KSTK_ESP(tsk);
-		frame_info->regs.r31 = KSTK_BLINK(tsk);
+		frame_info->regs.r27 = TSK_K_FP(tsk);
+		frame_info->regs.r28 = TSK_K_ESP(tsk);
+		frame_info->regs.r31 = TSK_K_BLINK(tsk);
 		frame_info->regs.r63 = (unsigned int)__switch_to;
 
 		/* In the prologue of __switch_to, first FP is saved on stack
@@ -83,6 +92,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
 		frame_info->call_frame = 0;
 
 	} else {
+		/*
+		 * Asynchronous unwinding of intr/exception
+		 * - Just uses the pt_regs passed
+		 */
 		frame_info->task = tsk;
 
 		frame_info->regs.r27 = regs->fp;
@@ -95,7 +108,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
 
 #endif
 
-static noinline unsigned int
+notrace noinline unsigned int
 arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
 		int (*consumer_fn) (unsigned int, void *), void *arg)
 {
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 7ff5b5c183bb..74db59b6f392 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <asm/disasm.h>
@@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 		}
 	}
 
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 	return 0;
 
 fault:
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 563cb27e37f5..6a2e006cbcce 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -14,6 +14,7 @@
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/perf_event.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
@@ -139,13 +140,20 @@ good_area:
 		return;
 	}
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 	if (likely(!(fault & VM_FAULT_ERROR))) {
 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
 			/* To avoid updating stats twice for retry case */
-			if (fault & VM_FAULT_MAJOR)
+			if (fault & VM_FAULT_MAJOR) {
 				tsk->maj_flt++;
-			else
+				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+					      regs, address);
+			} else {
 				tsk->min_flt++;
+				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+					      regs, address);
+			}
 
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7f99cd652203..eb7bb511f853 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -150,6 +150,7 @@ machine-$(CONFIG_ARCH_BERLIN) += berlin
150machine-$(CONFIG_ARCH_CLPS711X) += clps711x 150machine-$(CONFIG_ARCH_CLPS711X) += clps711x
151machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx 151machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx
152machine-$(CONFIG_ARCH_DAVINCI) += davinci 152machine-$(CONFIG_ARCH_DAVINCI) += davinci
153machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor
153machine-$(CONFIG_ARCH_DOVE) += dove 154machine-$(CONFIG_ARCH_DOVE) += dove
154machine-$(CONFIG_ARCH_EBSA110) += ebsa110 155machine-$(CONFIG_ARCH_EBSA110) += ebsa110
155machine-$(CONFIG_ARCH_EFM32) += efm32 156machine-$(CONFIG_ARCH_EFM32) += efm32
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 2c6248d9a9ef..c3255e0c90aa 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -301,3 +301,11 @@
301 cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; 301 cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
302 cd-inverted; 302 cd-inverted;
303}; 303};
304
305&aes {
306 status = "okay";
307};
308
309&sham {
310 status = "okay";
311};
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 83d40f7655e5..6b8493720424 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -24,11 +24,3 @@
24&mmc1 { 24&mmc1 {
25 vmmc-supply = <&ldo3_reg>; 25 vmmc-supply = <&ldo3_reg>;
26}; 26};
27
28&sham {
29 status = "okay";
30};
31
32&aes {
33 status = "okay";
34};
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts
index 7266a00aab2e..5c5667a3624d 100644
--- a/arch/arm/boot/dts/am335x-lxm.dts
+++ b/arch/arm/boot/dts/am335x-lxm.dts
@@ -328,6 +328,10 @@
328 dual_emac_res_vlan = <3>; 328 dual_emac_res_vlan = <3>;
329}; 329};
330 330
331&phy_sel {
332 rmii-clock-ext;
333};
334
331&mac { 335&mac {
332 pinctrl-names = "default", "sleep"; 336 pinctrl-names = "default", "sleep";
333 pinctrl-0 = <&cpsw_default>; 337 pinctrl-0 = <&cpsw_default>;
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
index 712edce7d6fb..071b56aa0c7e 100644
--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
@@ -99,7 +99,7 @@
99 ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { 99 ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 {
100 #clock-cells = <0>; 100 #clock-cells = <0>;
101 compatible = "ti,gate-clock"; 101 compatible = "ti,gate-clock";
102 clocks = <&dpll_per_m2_ck>; 102 clocks = <&l4ls_gclk>;
103 ti,bit-shift = <0>; 103 ti,bit-shift = <0>;
104 reg = <0x0664>; 104 reg = <0x0664>;
105 }; 105 };
@@ -107,7 +107,7 @@
107 ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { 107 ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 {
108 #clock-cells = <0>; 108 #clock-cells = <0>;
109 compatible = "ti,gate-clock"; 109 compatible = "ti,gate-clock";
110 clocks = <&dpll_per_m2_ck>; 110 clocks = <&l4ls_gclk>;
111 ti,bit-shift = <1>; 111 ti,bit-shift = <1>;
112 reg = <0x0664>; 112 reg = <0x0664>;
113 }; 113 };
@@ -115,7 +115,7 @@
115 ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { 115 ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 {
116 #clock-cells = <0>; 116 #clock-cells = <0>;
117 compatible = "ti,gate-clock"; 117 compatible = "ti,gate-clock";
118 clocks = <&dpll_per_m2_ck>; 118 clocks = <&l4ls_gclk>;
119 ti,bit-shift = <2>; 119 ti,bit-shift = <2>;
120 reg = <0x0664>; 120 reg = <0x0664>;
121 }; 121 };
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index c7dc9dab93a4..cfb49686ab6a 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -107,7 +107,7 @@
107 ehrpwm0_tbclk: ehrpwm0_tbclk { 107 ehrpwm0_tbclk: ehrpwm0_tbclk {
108 #clock-cells = <0>; 108 #clock-cells = <0>;
109 compatible = "ti,gate-clock"; 109 compatible = "ti,gate-clock";
110 clocks = <&dpll_per_m2_ck>; 110 clocks = <&l4ls_gclk>;
111 ti,bit-shift = <0>; 111 ti,bit-shift = <0>;
112 reg = <0x0664>; 112 reg = <0x0664>;
113 }; 113 };
@@ -115,7 +115,7 @@
115 ehrpwm1_tbclk: ehrpwm1_tbclk { 115 ehrpwm1_tbclk: ehrpwm1_tbclk {
116 #clock-cells = <0>; 116 #clock-cells = <0>;
117 compatible = "ti,gate-clock"; 117 compatible = "ti,gate-clock";
118 clocks = <&dpll_per_m2_ck>; 118 clocks = <&l4ls_gclk>;
119 ti,bit-shift = <1>; 119 ti,bit-shift = <1>;
120 reg = <0x0664>; 120 reg = <0x0664>;
121 }; 121 };
@@ -123,7 +123,7 @@
123 ehrpwm2_tbclk: ehrpwm2_tbclk { 123 ehrpwm2_tbclk: ehrpwm2_tbclk {
124 #clock-cells = <0>; 124 #clock-cells = <0>;
125 compatible = "ti,gate-clock"; 125 compatible = "ti,gate-clock";
126 clocks = <&dpll_per_m2_ck>; 126 clocks = <&l4ls_gclk>;
127 ti,bit-shift = <2>; 127 ti,bit-shift = <2>;
128 reg = <0x0664>; 128 reg = <0x0664>;
129 }; 129 };
@@ -131,7 +131,7 @@
131 ehrpwm3_tbclk: ehrpwm3_tbclk { 131 ehrpwm3_tbclk: ehrpwm3_tbclk {
132 #clock-cells = <0>; 132 #clock-cells = <0>;
133 compatible = "ti,gate-clock"; 133 compatible = "ti,gate-clock";
134 clocks = <&dpll_per_m2_ck>; 134 clocks = <&l4ls_gclk>;
135 ti,bit-shift = <4>; 135 ti,bit-shift = <4>;
136 reg = <0x0664>; 136 reg = <0x0664>;
137 }; 137 };
@@ -139,7 +139,7 @@
139 ehrpwm4_tbclk: ehrpwm4_tbclk { 139 ehrpwm4_tbclk: ehrpwm4_tbclk {
140 #clock-cells = <0>; 140 #clock-cells = <0>;
141 compatible = "ti,gate-clock"; 141 compatible = "ti,gate-clock";
142 clocks = <&dpll_per_m2_ck>; 142 clocks = <&l4ls_gclk>;
143 ti,bit-shift = <5>; 143 ti,bit-shift = <5>;
144 reg = <0x0664>; 144 reg = <0x0664>;
145 }; 145 };
@@ -147,7 +147,7 @@
147 ehrpwm5_tbclk: ehrpwm5_tbclk { 147 ehrpwm5_tbclk: ehrpwm5_tbclk {
148 #clock-cells = <0>; 148 #clock-cells = <0>;
149 compatible = "ti,gate-clock"; 149 compatible = "ti,gate-clock";
150 clocks = <&dpll_per_m2_ck>; 150 clocks = <&l4ls_gclk>;
151 ti,bit-shift = <6>; 151 ti,bit-shift = <6>;
152 reg = <0x0664>; 152 reg = <0x0664>;
153 }; 153 };
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 3290a96ba586..7563d7ce01bb 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -263,17 +263,15 @@
263 263
264 dcan1_pins_default: dcan1_pins_default { 264 dcan1_pins_default: dcan1_pins_default {
265 pinctrl-single,pins = < 265 pinctrl-single,pins = <
266 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ 266 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
267 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ 267 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */
268 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */
269 >; 268 >;
270 }; 269 };
271 270
272 dcan1_pins_sleep: dcan1_pins_sleep { 271 dcan1_pins_sleep: dcan1_pins_sleep {
273 pinctrl-single,pins = < 272 pinctrl-single,pins = <
274 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ 273 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */
275 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ 274 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */
276 0x418 (MUX_MODE15) /* wakeup0.off */
277 >; 275 >;
278 }; 276 };
279}; 277};
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index e0264d0bf7b9..40ed539ce474 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -119,17 +119,15 @@
119 119
120 dcan1_pins_default: dcan1_pins_default { 120 dcan1_pins_default: dcan1_pins_default {
121 pinctrl-single,pins = < 121 pinctrl-single,pins = <
122 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ 122 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
123 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ 123 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */
124 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */
125 >; 124 >;
126 }; 125 };
127 126
128 dcan1_pins_sleep: dcan1_pins_sleep { 127 dcan1_pins_sleep: dcan1_pins_sleep {
129 pinctrl-single,pins = < 128 pinctrl-single,pins = <
130 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ 129 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */
131 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ 130 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */
132 0x418 (MUX_MODE15) /* wakeup0.off */
133 >; 131 >;
134 }; 132 };
135 133
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 4bdcbd61ce47..99b09a44e269 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -243,10 +243,18 @@
243 ti,invert-autoidle-bit; 243 ti,invert-autoidle-bit;
244 }; 244 };
245 245
246 dpll_core_byp_mux: dpll_core_byp_mux {
247 #clock-cells = <0>;
248 compatible = "ti,mux-clock";
249 clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
250 ti,bit-shift = <23>;
251 reg = <0x012c>;
252 };
253
246 dpll_core_ck: dpll_core_ck { 254 dpll_core_ck: dpll_core_ck {
247 #clock-cells = <0>; 255 #clock-cells = <0>;
248 compatible = "ti,omap4-dpll-core-clock"; 256 compatible = "ti,omap4-dpll-core-clock";
249 clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; 257 clocks = <&sys_clkin1>, <&dpll_core_byp_mux>;
250 reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; 258 reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
251 }; 259 };
252 260
@@ -309,10 +317,18 @@
309 clock-div = <1>; 317 clock-div = <1>;
310 }; 318 };
311 319
320 dpll_dsp_byp_mux: dpll_dsp_byp_mux {
321 #clock-cells = <0>;
322 compatible = "ti,mux-clock";
323 clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
324 ti,bit-shift = <23>;
325 reg = <0x0240>;
326 };
327
312 dpll_dsp_ck: dpll_dsp_ck { 328 dpll_dsp_ck: dpll_dsp_ck {
313 #clock-cells = <0>; 329 #clock-cells = <0>;
314 compatible = "ti,omap4-dpll-clock"; 330 compatible = "ti,omap4-dpll-clock";
315 clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; 331 clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>;
316 reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; 332 reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
317 }; 333 };
318 334
@@ -335,10 +351,18 @@
335 clock-div = <1>; 351 clock-div = <1>;
336 }; 352 };
337 353
354 dpll_iva_byp_mux: dpll_iva_byp_mux {
355 #clock-cells = <0>;
356 compatible = "ti,mux-clock";
357 clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
358 ti,bit-shift = <23>;
359 reg = <0x01ac>;
360 };
361
338 dpll_iva_ck: dpll_iva_ck { 362 dpll_iva_ck: dpll_iva_ck {
339 #clock-cells = <0>; 363 #clock-cells = <0>;
340 compatible = "ti,omap4-dpll-clock"; 364 compatible = "ti,omap4-dpll-clock";
341 clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; 365 clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>;
342 reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; 366 reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
343 }; 367 };
344 368
@@ -361,10 +385,18 @@
361 clock-div = <1>; 385 clock-div = <1>;
362 }; 386 };
363 387
388 dpll_gpu_byp_mux: dpll_gpu_byp_mux {
389 #clock-cells = <0>;
390 compatible = "ti,mux-clock";
391 clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
392 ti,bit-shift = <23>;
393 reg = <0x02e4>;
394 };
395
364 dpll_gpu_ck: dpll_gpu_ck { 396 dpll_gpu_ck: dpll_gpu_ck {
365 #clock-cells = <0>; 397 #clock-cells = <0>;
366 compatible = "ti,omap4-dpll-clock"; 398 compatible = "ti,omap4-dpll-clock";
367 clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; 399 clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>;
368 reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; 400 reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
369 }; 401 };
370 402
@@ -398,10 +430,18 @@
398 clock-div = <1>; 430 clock-div = <1>;
399 }; 431 };
400 432
433 dpll_ddr_byp_mux: dpll_ddr_byp_mux {
434 #clock-cells = <0>;
435 compatible = "ti,mux-clock";
436 clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
437 ti,bit-shift = <23>;
438 reg = <0x021c>;
439 };
440
401 dpll_ddr_ck: dpll_ddr_ck { 441 dpll_ddr_ck: dpll_ddr_ck {
402 #clock-cells = <0>; 442 #clock-cells = <0>;
403 compatible = "ti,omap4-dpll-clock"; 443 compatible = "ti,omap4-dpll-clock";
404 clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; 444 clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>;
405 reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; 445 reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>;
406 }; 446 };
407 447
@@ -416,10 +456,18 @@
416 ti,invert-autoidle-bit; 456 ti,invert-autoidle-bit;
417 }; 457 };
418 458
459 dpll_gmac_byp_mux: dpll_gmac_byp_mux {
460 #clock-cells = <0>;
461 compatible = "ti,mux-clock";
462 clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
463 ti,bit-shift = <23>;
464 reg = <0x02b4>;
465 };
466
419 dpll_gmac_ck: dpll_gmac_ck { 467 dpll_gmac_ck: dpll_gmac_ck {
420 #clock-cells = <0>; 468 #clock-cells = <0>;
421 compatible = "ti,omap4-dpll-clock"; 469 compatible = "ti,omap4-dpll-clock";
422 clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; 470 clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>;
423 reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; 471 reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>;
424 }; 472 };
425 473
@@ -482,10 +530,18 @@
482 clock-div = <1>; 530 clock-div = <1>;
483 }; 531 };
484 532
533 dpll_eve_byp_mux: dpll_eve_byp_mux {
534 #clock-cells = <0>;
535 compatible = "ti,mux-clock";
536 clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
537 ti,bit-shift = <23>;
538 reg = <0x0290>;
539 };
540
485 dpll_eve_ck: dpll_eve_ck { 541 dpll_eve_ck: dpll_eve_ck {
486 #clock-cells = <0>; 542 #clock-cells = <0>;
487 compatible = "ti,omap4-dpll-clock"; 543 compatible = "ti,omap4-dpll-clock";
488 clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; 544 clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>;
489 reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; 545 reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>;
490 }; 546 };
491 547
@@ -1249,10 +1305,18 @@
1249 clock-div = <1>; 1305 clock-div = <1>;
1250 }; 1306 };
1251 1307
1308 dpll_per_byp_mux: dpll_per_byp_mux {
1309 #clock-cells = <0>;
1310 compatible = "ti,mux-clock";
1311 clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
1312 ti,bit-shift = <23>;
1313 reg = <0x014c>;
1314 };
1315
1252 dpll_per_ck: dpll_per_ck { 1316 dpll_per_ck: dpll_per_ck {
1253 #clock-cells = <0>; 1317 #clock-cells = <0>;
1254 compatible = "ti,omap4-dpll-clock"; 1318 compatible = "ti,omap4-dpll-clock";
1255 clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; 1319 clocks = <&sys_clkin1>, <&dpll_per_byp_mux>;
1256 reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; 1320 reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
1257 }; 1321 };
1258 1322
@@ -1275,10 +1339,18 @@
1275 clock-div = <1>; 1339 clock-div = <1>;
1276 }; 1340 };
1277 1341
1342 dpll_usb_byp_mux: dpll_usb_byp_mux {
1343 #clock-cells = <0>;
1344 compatible = "ti,mux-clock";
1345 clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
1346 ti,bit-shift = <23>;
1347 reg = <0x018c>;
1348 };
1349
1278 dpll_usb_ck: dpll_usb_ck { 1350 dpll_usb_ck: dpll_usb_ck {
1279 #clock-cells = <0>; 1351 #clock-cells = <0>;
1280 compatible = "ti,omap4-dpll-j-type-clock"; 1352 compatible = "ti,omap4-dpll-j-type-clock";
1281 clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; 1353 clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>;
1282 reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; 1354 reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
1283 }; 1355 };
1284 1356
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 277b48b0b6f9..ac6b0ae42caf 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include "skeleton.dtsi" 20#include "skeleton.dtsi"
21#include "exynos4-cpu-thermal.dtsi"
21#include <dt-bindings/clock/exynos3250.h> 22#include <dt-bindings/clock/exynos3250.h>
22 23
23/ { 24/ {
@@ -193,6 +194,7 @@
193 interrupts = <0 216 0>; 194 interrupts = <0 216 0>;
194 clocks = <&cmu CLK_TMU_APBIF>; 195 clocks = <&cmu CLK_TMU_APBIF>;
195 clock-names = "tmu_apbif"; 196 clock-names = "tmu_apbif";
197 #include "exynos4412-tmu-sensor-conf.dtsi"
196 status = "disabled"; 198 status = "disabled";
197 }; 199 };
198 200
diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
new file mode 100644
index 000000000000..735cb2f10817
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
@@ -0,0 +1,52 @@
1/*
2 * Device tree sources for Exynos4 thermal zone
3 *
4 * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <dt-bindings/thermal/thermal.h>
13
14/ {
15thermal-zones {
16 cpu_thermal: cpu-thermal {
17 thermal-sensors = <&tmu 0>;
18 polling-delay-passive = <0>;
19 polling-delay = <0>;
20 trips {
21 cpu_alert0: cpu-alert-0 {
22 temperature = <70000>; /* millicelsius */
23 hysteresis = <10000>; /* millicelsius */
24 type = "active";
25 };
26 cpu_alert1: cpu-alert-1 {
27 temperature = <95000>; /* millicelsius */
28 hysteresis = <10000>; /* millicelsius */
29 type = "active";
30 };
31 cpu_alert2: cpu-alert-2 {
32 temperature = <110000>; /* millicelsius */
33 hysteresis = <10000>; /* millicelsius */
34 type = "active";
35 };
36 cpu_crit0: cpu-crit-0 {
37 temperature = <120000>; /* millicelsius */
38 hysteresis = <0>; /* millicelsius */
39 type = "critical";
40 };
41 };
42 cooling-maps {
43 map0 {
44 trip = <&cpu_alert0>;
45 };
46 map1 {
47 trip = <&cpu_alert1>;
48 };
49 };
50 };
51};
52};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 76173cacd450..77ea547768f4 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -38,6 +38,7 @@
38 i2c5 = &i2c_5; 38 i2c5 = &i2c_5;
39 i2c6 = &i2c_6; 39 i2c6 = &i2c_6;
40 i2c7 = &i2c_7; 40 i2c7 = &i2c_7;
41 i2c8 = &i2c_8;
41 csis0 = &csis_0; 42 csis0 = &csis_0;
42 csis1 = &csis_1; 43 csis1 = &csis_1;
43 fimc0 = &fimc_0; 44 fimc0 = &fimc_0;
@@ -104,6 +105,7 @@
104 compatible = "samsung,exynos4210-pd"; 105 compatible = "samsung,exynos4210-pd";
105 reg = <0x10023C20 0x20>; 106 reg = <0x10023C20 0x20>;
106 #power-domain-cells = <0>; 107 #power-domain-cells = <0>;
108 power-domains = <&pd_lcd0>;
107 }; 109 };
108 110
109 pd_cam: cam-power-domain@10023C00 { 111 pd_cam: cam-power-domain@10023C00 {
@@ -554,6 +556,22 @@
554 status = "disabled"; 556 status = "disabled";
555 }; 557 };
556 558
559 i2c_8: i2c@138E0000 {
560 #address-cells = <1>;
561 #size-cells = <0>;
562 compatible = "samsung,s3c2440-hdmiphy-i2c";
563 reg = <0x138E0000 0x100>;
564 interrupts = <0 93 0>;
565 clocks = <&clock CLK_I2C_HDMI>;
566 clock-names = "i2c";
567 status = "disabled";
568
569 hdmi_i2c_phy: hdmiphy@38 {
570 compatible = "exynos4210-hdmiphy";
571 reg = <0x38>;
572 };
573 };
574
557 spi_0: spi@13920000 { 575 spi_0: spi@13920000 {
558 compatible = "samsung,exynos4210-spi"; 576 compatible = "samsung,exynos4210-spi";
559 reg = <0x13920000 0x100>; 577 reg = <0x13920000 0x100>;
@@ -663,6 +681,33 @@
663 status = "disabled"; 681 status = "disabled";
664 }; 682 };
665 683
684 tmu: tmu@100C0000 {
685 #include "exynos4412-tmu-sensor-conf.dtsi"
686 };
687
688 hdmi: hdmi@12D00000 {
689 compatible = "samsung,exynos4210-hdmi";
690 reg = <0x12D00000 0x70000>;
691 interrupts = <0 92 0>;
692 clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy",
693 "mout_hdmi";
694 clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
695 <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
696 <&clock CLK_MOUT_HDMI>;
697 phy = <&hdmi_i2c_phy>;
698 power-domains = <&pd_tv>;
699 samsung,syscon-phandle = <&pmu_system_controller>;
700 status = "disabled";
701 };
702
703 mixer: mixer@12C10000 {
704 compatible = "samsung,exynos4210-mixer";
705 interrupts = <0 91 0>;
706 reg = <0x12C10000 0x2100>, <0x12c00000 0x300>;
707 power-domains = <&pd_tv>;
708 status = "disabled";
709 };
710
666 ppmu_dmc0: ppmu_dmc0@106a0000 { 711 ppmu_dmc0: ppmu_dmc0@106a0000 {
667 compatible = "samsung,exynos-ppmu"; 712 compatible = "samsung,exynos-ppmu";
668 reg = <0x106a0000 0x2000>; 713 reg = <0x106a0000 0x2000>;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 3d6652a4b6cb..32c5fd8f6269 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -426,6 +426,25 @@
426 status = "okay"; 426 status = "okay";
427 }; 427 };
428 428
429 tmu@100C0000 {
430 status = "okay";
431 };
432
433 thermal-zones {
434 cpu_thermal: cpu-thermal {
435 cooling-maps {
436 map0 {
437 /* Corresponds to 800MHz at freq_table */
438 cooling-device = <&cpu0 2 2>;
439 };
440 map1 {
441 /* Corresponds to 200MHz at freq_table */
442 cooling-device = <&cpu0 4 4>;
443 };
444 };
445 };
446 };
447
429 camera { 448 camera {
430 pinctrl-names = "default"; 449 pinctrl-names = "default";
431 pinctrl-0 = <>; 450 pinctrl-0 = <>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index b57e6b82ea20..d4f2b11319dd 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -505,6 +505,63 @@
505 assigned-clock-rates = <0>, <160000000>; 505 assigned-clock-rates = <0>, <160000000>;
506 }; 506 };
507 }; 507 };
508
509 hdmi_en: voltage-regulator-hdmi-5v {
510 compatible = "regulator-fixed";
511 regulator-name = "HDMI_5V";
512 regulator-min-microvolt = <5000000>;
513 regulator-max-microvolt = <5000000>;
514 gpio = <&gpe0 1 0>;
515 enable-active-high;
516 };
517
518 hdmi_ddc: i2c-ddc {
519 compatible = "i2c-gpio";
520 gpios = <&gpe4 2 0 &gpe4 3 0>;
521 i2c-gpio,delay-us = <100>;
522 #address-cells = <1>;
523 #size-cells = <0>;
524
525 pinctrl-0 = <&i2c_ddc_bus>;
526 pinctrl-names = "default";
527 status = "okay";
528 };
529
530 mixer@12C10000 {
531 status = "okay";
532 };
533
534 hdmi@12D00000 {
535 hpd-gpio = <&gpx3 7 0>;
536 pinctrl-names = "default";
537 pinctrl-0 = <&hdmi_hpd>;
538 hdmi-en-supply = <&hdmi_en>;
539 vdd-supply = <&ldo3_reg>;
540 vdd_osc-supply = <&ldo4_reg>;
541 vdd_pll-supply = <&ldo3_reg>;
542 ddc = <&hdmi_ddc>;
543 status = "okay";
544 };
545
546 i2c@138E0000 {
547 status = "okay";
548 };
549};
550
551&pinctrl_1 {
552 hdmi_hpd: hdmi-hpd {
553 samsung,pins = "gpx3-7";
554 samsung,pin-pud = <0>;
555 };
556};
557
558&pinctrl_0 {
559 i2c_ddc_bus: i2c-ddc-bus {
560 samsung,pins = "gpe4-2", "gpe4-3";
561 samsung,pin-function = <2>;
562 samsung,pin-pud = <3>;
563 samsung,pin-drv = <0>;
564 };
508}; 565};
509 566
510&mdma1 { 567&mdma1 {
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 67c832c9dcf1..be89f83f70e7 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -21,6 +21,7 @@
21 21
22#include "exynos4.dtsi" 22#include "exynos4.dtsi"
23#include "exynos4210-pinctrl.dtsi" 23#include "exynos4210-pinctrl.dtsi"
24#include "exynos4-cpu-thermal.dtsi"
24 25
25/ { 26/ {
26 compatible = "samsung,exynos4210", "samsung,exynos4"; 27 compatible = "samsung,exynos4210", "samsung,exynos4";
@@ -35,10 +36,13 @@
35 #address-cells = <1>; 36 #address-cells = <1>;
36 #size-cells = <0>; 37 #size-cells = <0>;
37 38
38 cpu@900 { 39 cpu0: cpu@900 {
39 device_type = "cpu"; 40 device_type = "cpu";
40 compatible = "arm,cortex-a9"; 41 compatible = "arm,cortex-a9";
41 reg = <0x900>; 42 reg = <0x900>;
43 cooling-min-level = <4>;
44 cooling-max-level = <2>;
45 #cooling-cells = <2>; /* min followed by max */
42 }; 46 };
43 47
44 cpu@901 { 48 cpu@901 {
@@ -153,16 +157,38 @@
153 reg = <0x03860000 0x1000>; 157 reg = <0x03860000 0x1000>;
154 }; 158 };
155 159
156 tmu@100C0000 { 160 tmu: tmu@100C0000 {
157 compatible = "samsung,exynos4210-tmu"; 161 compatible = "samsung,exynos4210-tmu";
158 interrupt-parent = <&combiner>; 162 interrupt-parent = <&combiner>;
159 reg = <0x100C0000 0x100>; 163 reg = <0x100C0000 0x100>;
160 interrupts = <2 4>; 164 interrupts = <2 4>;
161 clocks = <&clock CLK_TMU_APBIF>; 165 clocks = <&clock CLK_TMU_APBIF>;
162 clock-names = "tmu_apbif"; 166 clock-names = "tmu_apbif";
167 samsung,tmu_gain = <15>;
168 samsung,tmu_reference_voltage = <7>;
163 status = "disabled"; 169 status = "disabled";
164 }; 170 };
165 171
172 thermal-zones {
173 cpu_thermal: cpu-thermal {
174 polling-delay-passive = <0>;
175 polling-delay = <0>;
176 thermal-sensors = <&tmu 0>;
177
178 trips {
179 cpu_alert0: cpu-alert-0 {
180 temperature = <85000>; /* millicelsius */
181 };
182 cpu_alert1: cpu-alert-1 {
183 temperature = <100000>; /* millicelsius */
184 };
185 cpu_alert2: cpu-alert-2 {
186 temperature = <110000>; /* millicelsius */
187 };
188 };
189 };
190 };
191
166 g2d@12800000 { 192 g2d@12800000 {
167 compatible = "samsung,s5pv210-g2d"; 193 compatible = "samsung,s5pv210-g2d";
168 reg = <0x12800000 0x1000>; 194 reg = <0x12800000 0x1000>;
@@ -203,6 +229,14 @@
203 }; 229 };
204 }; 230 };
205 231
232 mixer: mixer@12C10000 {
233 clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer",
234 "sclk_mixer";
235 clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
236 <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>,
237 <&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>;
238 };
239
206 ppmu_lcd1: ppmu_lcd1@12240000 { 240 ppmu_lcd1: ppmu_lcd1@12240000 {
207 compatible = "samsung,exynos-ppmu"; 241 compatible = "samsung,exynos-ppmu";
208 reg = <0x12240000 0x2000>; 242 reg = <0x12240000 0x2000>;
diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi
index dd0a43ec56da..5be03288f1ee 100644
--- a/arch/arm/boot/dts/exynos4212.dtsi
+++ b/arch/arm/boot/dts/exynos4212.dtsi
@@ -26,10 +26,13 @@
26 #address-cells = <1>; 26 #address-cells = <1>;
27 #size-cells = <0>; 27 #size-cells = <0>;
28 28
29 cpu@A00 { 29 cpu0: cpu@A00 {
30 device_type = "cpu"; 30 device_type = "cpu";
31 compatible = "arm,cortex-a9"; 31 compatible = "arm,cortex-a9";
32 reg = <0xA00>; 32 reg = <0xA00>;
33 cooling-min-level = <13>;
34 cooling-max-level = <7>;
35 #cooling-cells = <2>; /* min followed by max */
33 }; 36 };
34 37
35 cpu@A01 { 38 cpu@A01 {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index de80b5bba204..adb4f6a97a1d 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -249,6 +249,20 @@
249 regulator-always-on; 249 regulator-always-on;
250 }; 250 };
251 251
252 ldo8_reg: ldo@8 {
253 regulator-compatible = "LDO8";
254 regulator-name = "VDD10_HDMI_1.0V";
255 regulator-min-microvolt = <1000000>;
256 regulator-max-microvolt = <1000000>;
257 };
258
259 ldo10_reg: ldo@10 {
260 regulator-compatible = "LDO10";
261 regulator-name = "VDDQ_MIPIHSI_1.8V";
262 regulator-min-microvolt = <1800000>;
263 regulator-max-microvolt = <1800000>;
264 };
265
252 ldo11_reg: LDO11 { 266 ldo11_reg: LDO11 {
253 regulator-name = "VDD18_ABB1_1.8V"; 267 regulator-name = "VDD18_ABB1_1.8V";
254 regulator-min-microvolt = <1800000>; 268 regulator-min-microvolt = <1800000>;
@@ -411,6 +425,51 @@
411 ehci: ehci@12580000 { 425 ehci: ehci@12580000 {
412 status = "okay"; 426 status = "okay";
413 }; 427 };
428
429 tmu@100C0000 {
430 vtmu-supply = <&ldo10_reg>;
431 status = "okay";
432 };
433
434 thermal-zones {
435 cpu_thermal: cpu-thermal {
436 cooling-maps {
437 map0 {
438 /* Corresponds to 800MHz at freq_table */
439 cooling-device = <&cpu0 7 7>;
440 };
441 map1 {
442 /* Corresponds to 200MHz at freq_table */
443 cooling-device = <&cpu0 13 13>;
444 };
445 };
446 };
447 };
448
449 mixer: mixer@12C10000 {
450 status = "okay";
451 };
452
453 hdmi@12D00000 {
454 hpd-gpio = <&gpx3 7 0>;
455 pinctrl-names = "default";
456 pinctrl-0 = <&hdmi_hpd>;
457 vdd-supply = <&ldo8_reg>;
458 vdd_osc-supply = <&ldo10_reg>;
459 vdd_pll-supply = <&ldo8_reg>;
460 ddc = <&hdmi_ddc>;
461 status = "okay";
462 };
463
464 hdmi_ddc: i2c@13880000 {
465 status = "okay";
466 pinctrl-names = "default";
467 pinctrl-0 = <&i2c2_bus>;
468 };
469
470 i2c@138E0000 {
471 status = "okay";
472 };
414}; 473};
415 474
416&pinctrl_1 { 475&pinctrl_1 {
@@ -425,4 +484,9 @@
425 samsung,pin-pud = <0>; 484 samsung,pin-pud = <0>;
426 samsung,pin-drv = <0>; 485 samsung,pin-drv = <0>;
427 }; 486 };
487
488 hdmi_hpd: hdmi-hpd {
489 samsung,pins = "gpx3-7";
490 samsung,pin-pud = <1>;
491 };
428}; 492};
diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi
new file mode 100644
index 000000000000..e3f7934d19d0
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi
@@ -0,0 +1,24 @@
1/*
2 * Device tree sources for Exynos4412 TMU sensor configuration
3 *
4 * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <dt-bindings/thermal/thermal_exynos.h>
13
14#thermal-sensor-cells = <0>;
15samsung,tmu_gain = <8>;
16samsung,tmu_reference_voltage = <16>;
17samsung,tmu_noise_cancel_mode = <4>;
18samsung,tmu_efuse_value = <55>;
19samsung,tmu_min_efuse_value = <40>;
20samsung,tmu_max_efuse_value = <100>;
21samsung,tmu_first_point_trim = <25>;
22samsung,tmu_second_point_trim = <85>;
23samsung,tmu_default_temp_offset = <50>;
24samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 21f748083586..173ffa479ad3 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -927,6 +927,21 @@
927 pulldown-ohm = <100000>; /* 100K */ 927 pulldown-ohm = <100000>; /* 100K */
928 io-channels = <&adc 2>; /* Battery temperature */ 928 io-channels = <&adc 2>; /* Battery temperature */
929 }; 929 };
930
931 thermal-zones {
932 cpu_thermal: cpu-thermal {
933 cooling-maps {
934 map0 {
935 /* Corresponds to 800MHz at freq_table */
936 cooling-device = <&cpu0 7 7>;
937 };
938 map1 {
939 /* Corresponds to 200MHz at freq_table */
940 cooling-device = <&cpu0 13 13>;
941 };
942 };
943 };
944 };
930}; 945};
931 946
932&pmu_system_controller { 947&pmu_system_controller {
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index 0f6ec93bb1d8..68ad43b391ae 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -26,10 +26,13 @@
26 #address-cells = <1>; 26 #address-cells = <1>;
27 #size-cells = <0>; 27 #size-cells = <0>;
28 28
29 cpu@A00 { 29 cpu0: cpu@A00 {
30 device_type = "cpu"; 30 device_type = "cpu";
31 compatible = "arm,cortex-a9"; 31 compatible = "arm,cortex-a9";
32 reg = <0xA00>; 32 reg = <0xA00>;
33 cooling-min-level = <13>;
34 cooling-max-level = <7>;
35 #cooling-cells = <2>; /* min followed by max */
33 }; 36 };
34 37
35 cpu@A01 { 38 cpu@A01 {
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index f5e0ae780d6c..6a6abe14fd9b 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -19,6 +19,7 @@
19 19
20#include "exynos4.dtsi" 20#include "exynos4.dtsi"
21#include "exynos4x12-pinctrl.dtsi" 21#include "exynos4x12-pinctrl.dtsi"
22#include "exynos4-cpu-thermal.dtsi"
22 23
23/ { 24/ {
24 aliases { 25 aliases {
@@ -297,4 +298,15 @@
297 clock-names = "tmu_apbif"; 298 clock-names = "tmu_apbif";
298 status = "disabled"; 299 status = "disabled";
299 }; 300 };
301
302 hdmi: hdmi@12D00000 {
303 compatible = "samsung,exynos4212-hdmi";
304 };
305
306 mixer: mixer@12C10000 {
307 compatible = "samsung,exynos4212-mixer";
308 clock-names = "mixer", "hdmi", "sclk_hdmi", "vp";
309 clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
310 <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>;
311 };
300}; 312};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 9bb1b0b738f5..adbde1adad95 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -20,7 +20,7 @@
20#include <dt-bindings/clock/exynos5250.h> 20#include <dt-bindings/clock/exynos5250.h>
21#include "exynos5.dtsi" 21#include "exynos5.dtsi"
22#include "exynos5250-pinctrl.dtsi" 22#include "exynos5250-pinctrl.dtsi"
23 23#include "exynos4-cpu-thermal.dtsi"
24#include <dt-bindings/clock/exynos-audss-clk.h> 24#include <dt-bindings/clock/exynos-audss-clk.h>
25 25
26/ { 26/ {
@@ -58,11 +58,14 @@
58 #address-cells = <1>; 58 #address-cells = <1>;
59 #size-cells = <0>; 59 #size-cells = <0>;
60 60
61 cpu@0 { 61 cpu0: cpu@0 {
62 device_type = "cpu"; 62 device_type = "cpu";
63 compatible = "arm,cortex-a15"; 63 compatible = "arm,cortex-a15";
64 reg = <0>; 64 reg = <0>;
65 clock-frequency = <1700000000>; 65 clock-frequency = <1700000000>;
66 cooling-min-level = <15>;
67 cooling-max-level = <9>;
68 #cooling-cells = <2>; /* min followed by max */
66 }; 69 };
67 cpu@1 { 70 cpu@1 {
68 device_type = "cpu"; 71 device_type = "cpu";
@@ -102,6 +105,12 @@
102 #power-domain-cells = <0>; 105 #power-domain-cells = <0>;
103 }; 106 };
104 107
108 pd_disp1: disp1-power-domain@100440A0 {
109 compatible = "samsung,exynos4210-pd";
110 reg = <0x100440A0 0x20>;
111 #power-domain-cells = <0>;
112 };
113
105 clock: clock-controller@10010000 { 114 clock: clock-controller@10010000 {
106 compatible = "samsung,exynos5250-clock"; 115 compatible = "samsung,exynos5250-clock";
107 reg = <0x10010000 0x30000>; 116 reg = <0x10010000 0x30000>;
@@ -235,12 +244,32 @@
235 status = "disabled"; 244 status = "disabled";
236 }; 245 };
237 246
238 tmu@10060000 { 247 tmu: tmu@10060000 {
239 compatible = "samsung,exynos5250-tmu"; 248 compatible = "samsung,exynos5250-tmu";
240 reg = <0x10060000 0x100>; 249 reg = <0x10060000 0x100>;
241 interrupts = <0 65 0>; 250 interrupts = <0 65 0>;
242 clocks = <&clock CLK_TMU>; 251 clocks = <&clock CLK_TMU>;
243 clock-names = "tmu_apbif"; 252 clock-names = "tmu_apbif";
253 #include "exynos4412-tmu-sensor-conf.dtsi"
254 };
255
256 thermal-zones {
257 cpu_thermal: cpu-thermal {
258 polling-delay-passive = <0>;
259 polling-delay = <0>;
260 thermal-sensors = <&tmu 0>;
261
262 cooling-maps {
263 map0 {
264 /* Corresponds to 800MHz at freq_table */
265 cooling-device = <&cpu0 9 9>;
266 };
267 map1 {
268 /* Corresponds to 200MHz at freq_table */
269 cooling-device = <&cpu0 15 15>;
270 };
271 };
272 };
244 }; 273 };
245 274
246 serial@12C00000 { 275 serial@12C00000 {
@@ -719,6 +748,7 @@
719 hdmi: hdmi { 748 hdmi: hdmi {
720 compatible = "samsung,exynos4212-hdmi"; 749 compatible = "samsung,exynos4212-hdmi";
721 reg = <0x14530000 0x70000>; 750 reg = <0x14530000 0x70000>;
751 power-domains = <&pd_disp1>;
722 interrupts = <0 95 0>; 752 interrupts = <0 95 0>;
723 clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, 753 clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
724 <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, 754 <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
@@ -731,9 +761,11 @@
731 mixer { 761 mixer {
732 compatible = "samsung,exynos5250-mixer"; 762 compatible = "samsung,exynos5250-mixer";
733 reg = <0x14450000 0x10000>; 763 reg = <0x14450000 0x10000>;
764 power-domains = <&pd_disp1>;
734 interrupts = <0 94 0>; 765 interrupts = <0 94 0>;
735 clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; 766 clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
736 clock-names = "mixer", "sclk_hdmi"; 767 <&clock CLK_SCLK_HDMI>;
768 clock-names = "mixer", "hdmi", "sclk_hdmi";
737 }; 769 };
738 770
739 dp_phy: video-phy@10040720 { 771 dp_phy: video-phy@10040720 {
@@ -743,6 +775,7 @@
743 }; 775 };
744 776
745 dp: dp-controller@145B0000 { 777 dp: dp-controller@145B0000 {
778 power-domains = <&pd_disp1>;
746 clocks = <&clock CLK_DP>; 779 clocks = <&clock CLK_DP>;
747 clock-names = "dp"; 780 clock-names = "dp";
748 phys = <&dp_phy>; 781 phys = <&dp_phy>;
@@ -750,6 +783,7 @@
750 }; 783 };
751 784
752 fimd: fimd@14400000 { 785 fimd: fimd@14400000 {
786 power-domains = <&pd_disp1>;
753 clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; 787 clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
754 clock-names = "sclk_fimd", "fimd"; 788 clock-names = "sclk_fimd", "fimd";
755 }; 789 };
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
new file mode 100644
index 000000000000..5d31fc140823
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
@@ -0,0 +1,35 @@
1/*
2 * Device tree sources for default Exynos5420 thermal zone definition
3 *
4 * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12polling-delay-passive = <0>;
13polling-delay = <0>;
14trips {
15 cpu-alert-0 {
16 temperature = <85000>; /* millicelsius */
17 hysteresis = <10000>; /* millicelsius */
18 type = "active";
19 };
20 cpu-alert-1 {
21 temperature = <103000>; /* millicelsius */
22 hysteresis = <10000>; /* millicelsius */
23 type = "active";
24 };
25 cpu-alert-2 {
26 temperature = <110000>; /* millicelsius */
27 hysteresis = <10000>; /* millicelsius */
28 type = "active";
29 };
30 cpu-crit-0 {
31	temperature = <120000>; /* millicelsius */
32 hysteresis = <0>; /* millicelsius */
33 type = "critical";
34 };
35};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 9dc2e9773b30..c0e98cf3514f 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -740,8 +740,9 @@
740 compatible = "samsung,exynos5420-mixer"; 740 compatible = "samsung,exynos5420-mixer";
741 reg = <0x14450000 0x10000>; 741 reg = <0x14450000 0x10000>;
742 interrupts = <0 94 0>; 742 interrupts = <0 94 0>;
743 clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; 743 clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
744 clock-names = "mixer", "sclk_hdmi"; 744 <&clock CLK_SCLK_HDMI>;
745 clock-names = "mixer", "hdmi", "sclk_hdmi";
745 power-domains = <&disp_pd>; 746 power-domains = <&disp_pd>;
746 }; 747 };
747 748
@@ -782,6 +783,7 @@
782 interrupts = <0 65 0>; 783 interrupts = <0 65 0>;
783 clocks = <&clock CLK_TMU>; 784 clocks = <&clock CLK_TMU>;
784 clock-names = "tmu_apbif"; 785 clock-names = "tmu_apbif";
786 #include "exynos4412-tmu-sensor-conf.dtsi"
785 }; 787 };
786 788
787 tmu_cpu1: tmu@10064000 { 789 tmu_cpu1: tmu@10064000 {
@@ -790,6 +792,7 @@
790 interrupts = <0 183 0>; 792 interrupts = <0 183 0>;
791 clocks = <&clock CLK_TMU>; 793 clocks = <&clock CLK_TMU>;
792 clock-names = "tmu_apbif"; 794 clock-names = "tmu_apbif";
795 #include "exynos4412-tmu-sensor-conf.dtsi"
793 }; 796 };
794 797
795 tmu_cpu2: tmu@10068000 { 798 tmu_cpu2: tmu@10068000 {
@@ -798,6 +801,7 @@
798 interrupts = <0 184 0>; 801 interrupts = <0 184 0>;
799 clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; 802 clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
800 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 803 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
804 #include "exynos4412-tmu-sensor-conf.dtsi"
801 }; 805 };
802 806
803 tmu_cpu3: tmu@1006c000 { 807 tmu_cpu3: tmu@1006c000 {
@@ -806,6 +810,7 @@
806 interrupts = <0 185 0>; 810 interrupts = <0 185 0>;
807 clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; 811 clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
808 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 812 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
813 #include "exynos4412-tmu-sensor-conf.dtsi"
809 }; 814 };
810 815
811 tmu_gpu: tmu@100a0000 { 816 tmu_gpu: tmu@100a0000 {
@@ -814,6 +819,30 @@
814 interrupts = <0 215 0>; 819 interrupts = <0 215 0>;
815 clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; 820 clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
816 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 821 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
822 #include "exynos4412-tmu-sensor-conf.dtsi"
823 };
824
825 thermal-zones {
826 cpu0_thermal: cpu0-thermal {
827 thermal-sensors = <&tmu_cpu0>;
828 #include "exynos5420-trip-points.dtsi"
829 };
830 cpu1_thermal: cpu1-thermal {
831 thermal-sensors = <&tmu_cpu1>;
832 #include "exynos5420-trip-points.dtsi"
833 };
834 cpu2_thermal: cpu2-thermal {
835 thermal-sensors = <&tmu_cpu2>;
836 #include "exynos5420-trip-points.dtsi"
837 };
838 cpu3_thermal: cpu3-thermal {
839 thermal-sensors = <&tmu_cpu3>;
840 #include "exynos5420-trip-points.dtsi"
841 };
842 gpu_thermal: gpu-thermal {
843 thermal-sensors = <&tmu_gpu>;
844 #include "exynos5420-trip-points.dtsi"
845 };
817 }; 846 };
818 847
819 watchdog: watchdog@101D0000 { 848 watchdog: watchdog@101D0000 {
diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi
new file mode 100644
index 000000000000..7b2fba0ae92b
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi
@@ -0,0 +1,24 @@
1/*
2 * Device tree sources for Exynos5440 TMU sensor configuration
3 *
4 * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <dt-bindings/thermal/thermal_exynos.h>
13
14#thermal-sensor-cells = <0>;
15samsung,tmu_gain = <5>;
16samsung,tmu_reference_voltage = <16>;
17samsung,tmu_noise_cancel_mode = <4>;
18samsung,tmu_efuse_value = <0x5d2d>;
19samsung,tmu_min_efuse_value = <16>;
20samsung,tmu_max_efuse_value = <76>;
21samsung,tmu_first_point_trim = <25>;
22samsung,tmu_second_point_trim = <70>;
23samsung,tmu_default_temp_offset = <25>;
24samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
new file mode 100644
index 000000000000..48adfa8f4300
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
@@ -0,0 +1,25 @@
1/*
2 * Device tree sources for default Exynos5440 thermal zone definition
3 *
4 * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12polling-delay-passive = <0>;
13polling-delay = <0>;
14trips {
15 cpu-alert-0 {
16 temperature = <100000>; /* millicelsius */
17 hysteresis = <0>; /* millicelsius */
18 type = "active";
19 };
20 cpu-crit-0 {
21	temperature = <105000>; /* millicelsius */
22 hysteresis = <0>; /* millicelsius */
23 type = "critical";
24 };
25};
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 8f3373cd7b87..59d9416b3b03 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -219,6 +219,7 @@
219 interrupts = <0 58 0>; 219 interrupts = <0 58 0>;
220 clocks = <&clock CLK_B_125>; 220 clocks = <&clock CLK_B_125>;
221 clock-names = "tmu_apbif"; 221 clock-names = "tmu_apbif";
222 #include "exynos5440-tmu-sensor-conf.dtsi"
222 }; 223 };
223 224
224 tmuctrl_1: tmuctrl@16011C { 225 tmuctrl_1: tmuctrl@16011C {
@@ -227,6 +228,7 @@
227 interrupts = <0 58 0>; 228 interrupts = <0 58 0>;
228 clocks = <&clock CLK_B_125>; 229 clocks = <&clock CLK_B_125>;
229 clock-names = "tmu_apbif"; 230 clock-names = "tmu_apbif";
231 #include "exynos5440-tmu-sensor-conf.dtsi"
230 }; 232 };
231 233
232 tmuctrl_2: tmuctrl@160120 { 234 tmuctrl_2: tmuctrl@160120 {
@@ -235,6 +237,22 @@
235 interrupts = <0 58 0>; 237 interrupts = <0 58 0>;
236 clocks = <&clock CLK_B_125>; 238 clocks = <&clock CLK_B_125>;
237 clock-names = "tmu_apbif"; 239 clock-names = "tmu_apbif";
240 #include "exynos5440-tmu-sensor-conf.dtsi"
241 };
242
243 thermal-zones {
244 cpu0_thermal: cpu0-thermal {
245 thermal-sensors = <&tmuctrl_0>;
246 #include "exynos5440-trip-points.dtsi"
247 };
248 cpu1_thermal: cpu1-thermal {
249 thermal-sensors = <&tmuctrl_1>;
250 #include "exynos5440-trip-points.dtsi"
251 };
252 cpu2_thermal: cpu2-thermal {
253 thermal-sensors = <&tmuctrl_2>;
254 #include "exynos5440-trip-points.dtsi"
255 };
238 }; 256 };
239 257
240 sata@210000 { 258 sata@210000 {
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index f1cd2147421d..a626e6dd8022 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -35,6 +35,7 @@
35 regulator-max-microvolt = <5000000>; 35 regulator-max-microvolt = <5000000>;
36 gpio = <&gpio3 22 0>; 36 gpio = <&gpio3 22 0>;
37 enable-active-high; 37 enable-active-high;
38 vin-supply = <&swbst_reg>;
38 }; 39 };
39 40
40 reg_usb_h1_vbus: regulator@1 { 41 reg_usb_h1_vbus: regulator@1 {
@@ -45,6 +46,7 @@
45 regulator-max-microvolt = <5000000>; 46 regulator-max-microvolt = <5000000>;
46 gpio = <&gpio1 29 0>; 47 gpio = <&gpio1 29 0>;
47 enable-active-high; 48 enable-active-high;
49 vin-supply = <&swbst_reg>;
48 }; 50 };
49 51
50 reg_audio: regulator@2 { 52 reg_audio: regulator@2 {
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index fda4932faefd..945887d3fdb3 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -52,6 +52,7 @@
52 regulator-max-microvolt = <5000000>; 52 regulator-max-microvolt = <5000000>;
53 gpio = <&gpio4 0 0>; 53 gpio = <&gpio4 0 0>;
54 enable-active-high; 54 enable-active-high;
55 vin-supply = <&swbst_reg>;
55 }; 56 };
56 57
57 reg_usb_otg2_vbus: regulator@1 { 58 reg_usb_otg2_vbus: regulator@1 {
@@ -62,6 +63,7 @@
62 regulator-max-microvolt = <5000000>; 63 regulator-max-microvolt = <5000000>;
63 gpio = <&gpio4 2 0>; 64 gpio = <&gpio4 2 0>;
64 enable-active-high; 65 enable-active-high;
66 vin-supply = <&swbst_reg>;
65 }; 67 };
66 68
67 reg_aud3v: regulator@2 { 69 reg_aud3v: regulator@2 {
diff --git a/arch/arm/boot/dts/omap5-core-thermal.dtsi b/arch/arm/boot/dts/omap5-core-thermal.dtsi
index 19212ac6eef0..de8a3d456cf7 100644
--- a/arch/arm/boot/dts/omap5-core-thermal.dtsi
+++ b/arch/arm/boot/dts/omap5-core-thermal.dtsi
@@ -13,7 +13,7 @@
13 13
14core_thermal: core_thermal { 14core_thermal: core_thermal {
15 polling-delay-passive = <250>; /* milliseconds */ 15 polling-delay-passive = <250>; /* milliseconds */
16 polling-delay = <1000>; /* milliseconds */ 16 polling-delay = <500>; /* milliseconds */
17 17
18 /* sensor ID */ 18 /* sensor ID */
19 thermal-sensors = <&bandgap 2>; 19 thermal-sensors = <&bandgap 2>;
diff --git a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
index 1b87aca88b77..bc3090f2e84b 100644
--- a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
+++ b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
@@ -13,7 +13,7 @@
13 13
14gpu_thermal: gpu_thermal { 14gpu_thermal: gpu_thermal {
15 polling-delay-passive = <250>; /* milliseconds */ 15 polling-delay-passive = <250>; /* milliseconds */
16 polling-delay = <1000>; /* milliseconds */ 16 polling-delay = <500>; /* milliseconds */
17 17
18 /* sensor ID */ 18 /* sensor ID */
19 thermal-sensors = <&bandgap 1>; 19 thermal-sensors = <&bandgap 1>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index ddff674bd05e..4a485b63a141 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -1079,4 +1079,8 @@
1079 }; 1079 };
1080}; 1080};
1081 1081
1082&cpu_thermal {
1083 polling-delay = <500>; /* milliseconds */
1084};
1085
1082/include/ "omap54xx-clocks.dtsi" 1086/include/ "omap54xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi
index 58c27466f012..83b425fb3ac2 100644
--- a/arch/arm/boot/dts/omap54xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi
@@ -167,10 +167,18 @@
167 ti,index-starts-at-one; 167 ti,index-starts-at-one;
168 }; 168 };
169 169
170 dpll_core_byp_mux: dpll_core_byp_mux {
171 #clock-cells = <0>;
172 compatible = "ti,mux-clock";
173 clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
174 ti,bit-shift = <23>;
175 reg = <0x012c>;
176 };
177
170 dpll_core_ck: dpll_core_ck { 178 dpll_core_ck: dpll_core_ck {
171 #clock-cells = <0>; 179 #clock-cells = <0>;
172 compatible = "ti,omap4-dpll-core-clock"; 180 compatible = "ti,omap4-dpll-core-clock";
173 clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; 181 clocks = <&sys_clkin>, <&dpll_core_byp_mux>;
174 reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; 182 reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
175 }; 183 };
176 184
@@ -294,10 +302,18 @@
294 clock-div = <1>; 302 clock-div = <1>;
295 }; 303 };
296 304
305 dpll_iva_byp_mux: dpll_iva_byp_mux {
306 #clock-cells = <0>;
307 compatible = "ti,mux-clock";
308 clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
309 ti,bit-shift = <23>;
310 reg = <0x01ac>;
311 };
312
297 dpll_iva_ck: dpll_iva_ck { 313 dpll_iva_ck: dpll_iva_ck {
298 #clock-cells = <0>; 314 #clock-cells = <0>;
299 compatible = "ti,omap4-dpll-clock"; 315 compatible = "ti,omap4-dpll-clock";
300 clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; 316 clocks = <&sys_clkin>, <&dpll_iva_byp_mux>;
301 reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; 317 reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
302 }; 318 };
303 319
@@ -599,10 +615,19 @@
599 }; 615 };
600}; 616};
601&cm_core_clocks { 617&cm_core_clocks {
618
619 dpll_per_byp_mux: dpll_per_byp_mux {
620 #clock-cells = <0>;
621 compatible = "ti,mux-clock";
622 clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
623 ti,bit-shift = <23>;
624 reg = <0x014c>;
625 };
626
602 dpll_per_ck: dpll_per_ck { 627 dpll_per_ck: dpll_per_ck {
603 #clock-cells = <0>; 628 #clock-cells = <0>;
604 compatible = "ti,omap4-dpll-clock"; 629 compatible = "ti,omap4-dpll-clock";
605 clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; 630 clocks = <&sys_clkin>, <&dpll_per_byp_mux>;
606 reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; 631 reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
607 }; 632 };
608 633
@@ -714,10 +739,18 @@
714 ti,index-starts-at-one; 739 ti,index-starts-at-one;
715 }; 740 };
716 741
742 dpll_usb_byp_mux: dpll_usb_byp_mux {
743 #clock-cells = <0>;
744 compatible = "ti,mux-clock";
745 clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
746 ti,bit-shift = <23>;
747 reg = <0x018c>;
748 };
749
717 dpll_usb_ck: dpll_usb_ck { 750 dpll_usb_ck: dpll_usb_ck {
718 #clock-cells = <0>; 751 #clock-cells = <0>;
719 compatible = "ti,omap4-dpll-j-type-clock"; 752 compatible = "ti,omap4-dpll-j-type-clock";
720 clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; 753 clocks = <&sys_clkin>, <&dpll_usb_byp_mux>;
721 reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; 754 reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
722 }; 755 };
723 756
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 252c3d1bda50..9d8760956752 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -713,6 +713,9 @@
713 reg-shift = <2>; 713 reg-shift = <2>;
714 reg-io-width = <4>; 714 reg-io-width = <4>;
715 clocks = <&l4_sp_clk>; 715 clocks = <&l4_sp_clk>;
716 dmas = <&pdma 28>,
717 <&pdma 29>;
718 dma-names = "tx", "rx";
716 }; 719 };
717 720
718 uart1: serial1@ffc03000 { 721 uart1: serial1@ffc03000 {
@@ -722,6 +725,9 @@
722 reg-shift = <2>; 725 reg-shift = <2>;
723 reg-io-width = <4>; 726 reg-io-width = <4>;
724 clocks = <&l4_sp_clk>; 727 clocks = <&l4_sp_clk>;
728 dmas = <&pdma 30>,
729 <&pdma 31>;
730 dma-names = "tx", "rx";
725 }; 731 };
726 732
727 rst: rstmgr@ffd05000 { 733 rst: rstmgr@ffd05000 {
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index b7e6b6fba5e0..06075b6d2463 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -99,7 +99,7 @@ CONFIG_PCI_RCAR_GEN2=y
99CONFIG_PCI_RCAR_GEN2_PCIE=y 99CONFIG_PCI_RCAR_GEN2_PCIE=y
100CONFIG_PCIEPORTBUS=y 100CONFIG_PCIEPORTBUS=y
101CONFIG_SMP=y 101CONFIG_SMP=y
102CONFIG_NR_CPUS=8 102CONFIG_NR_CPUS=16
103CONFIG_HIGHPTE=y 103CONFIG_HIGHPTE=y
104CONFIG_CMA=y 104CONFIG_CMA=y
105CONFIG_ARM_APPENDED_DTB=y 105CONFIG_ARM_APPENDED_DTB=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index a097cffa1231..8e108599e1af 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -377,6 +377,7 @@ CONFIG_PWM_TWL=m
377CONFIG_PWM_TWL_LED=m 377CONFIG_PWM_TWL_LED=m
378CONFIG_OMAP_USB2=m 378CONFIG_OMAP_USB2=m
379CONFIG_TI_PIPE3=y 379CONFIG_TI_PIPE3=y
380CONFIG_TWL4030_USB=m
380CONFIG_EXT2_FS=y 381CONFIG_EXT2_FS=y
381CONFIG_EXT3_FS=y 382CONFIG_EXT3_FS=y
382# CONFIG_EXT3_FS_XATTR is not set 383# CONFIG_EXT3_FS_XATTR is not set
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 38840a812924..8f6a5702b696 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -4,6 +4,7 @@ CONFIG_BLK_DEV_INITRD=y
4CONFIG_PERF_EVENTS=y 4CONFIG_PERF_EVENTS=y
5CONFIG_ARCH_SUNXI=y 5CONFIG_ARCH_SUNXI=y
6CONFIG_SMP=y 6CONFIG_SMP=y
7CONFIG_NR_CPUS=8
7CONFIG_AEABI=y 8CONFIG_AEABI=y
8CONFIG_HIGHMEM=y 9CONFIG_HIGHMEM=y
9CONFIG_HIGHPTE=y 10CONFIG_HIGHPTE=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index f489fdaa19b8..37fe607a4ede 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -118,8 +118,8 @@ CONFIG_HID_ZEROPLUS=y
118CONFIG_USB=y 118CONFIG_USB=y
119CONFIG_USB_ANNOUNCE_NEW_DEVICES=y 119CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
120CONFIG_USB_MON=y 120CONFIG_USB_MON=y
121CONFIG_USB_ISP1760_HCD=y
122CONFIG_USB_STORAGE=y 121CONFIG_USB_STORAGE=y
122CONFIG_USB_ISP1760=y
123CONFIG_MMC=y 123CONFIG_MMC=y
124CONFIG_MMC_ARMMMCI=y 124CONFIG_MMC_ARMMMCI=y
125CONFIG_NEW_LEDS=y 125CONFIG_NEW_LEDS=y
diff --git a/arch/arm/include/asm/arm-cci.h b/arch/arm/include/asm/arm-cci.h
new file mode 100644
index 000000000000..fe77f7ab7e6b
--- /dev/null
+++ b/arch/arm/include/asm/arm-cci.h
@@ -0,0 +1,42 @@
1/*
2 * arch/arm/include/asm/arm-cci.h
3 *
4 * Copyright (C) 2015 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __ASM_ARM_CCI_H
20#define __ASM_ARM_CCI_H
21
22#ifdef CONFIG_MCPM
23#include <asm/mcpm.h>
24
25/*
26 * We don't have a reliable way of detecting whether
27 * we have access to secure-only registers, unless
28 * mcpm is registered.
29 */
30static inline bool platform_has_secure_cci_access(void)
31{
32 return mcpm_is_available();
33}
34
35#else
36static inline bool platform_has_secure_cci_access(void)
37{
38 return false;
39}
40#endif
41
42#endif
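
A hedged usage sketch for the new header, not part of the patch: the caller name is invented and only platform_has_secure_cci_access() comes from the file above. The point of the helper is to let bus or MCPM code skip CCI port control when the kernel is not running in the secure world.

#include <asm/arm-cci.h>
#include <linux/errno.h>
#include <linux/init.h>

static int __init example_cci_port_setup(void)
{
	if (!platform_has_secure_cci_access())
		return -EPERM;	/* running non-secure: leave the CCI alone */

	/* secure access available: safe to enable/disable CCI ports here */
	return 0;
}
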
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 37ca2a4c6f09..bf0fe99e8ca9 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -207,7 +207,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
207 207
208 bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; 208 bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
209 209
210 VM_BUG_ON(size & PAGE_MASK); 210 VM_BUG_ON(size & ~PAGE_MASK);
211 211
212 if (!need_flush && !icache_is_pipt()) 212 if (!need_flush && !icache_is_pipt())
213 goto vipt_cache; 213 goto vipt_cache;
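
The one-character change above (PAGE_MASK to ~PAGE_MASK) is easy to misread, so here is a minimal standalone check, with an invented helper name, showing why the new form is the correct alignment test: PAGE_MASK keeps the page-number bits, so "size & PAGE_MASK" is non-zero for any size of at least one page, while "size & ~PAGE_MASK" is the sub-page remainder the assertion actually wants to be zero.

#include <asm/page.h>
#include <linux/bug.h>

static inline void example_assert_page_multiple(unsigned long size)
{
	/*
	 * e.g. size = 2 * PAGE_SIZE: size & PAGE_MASK is non-zero, but the
	 * remainder checked below is 0, so a page-aligned size passes.
	 */
	BUG_ON(size & ~PAGE_MASK);
}
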
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 07e7eb1d7ab6..5560f74f9eee 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -540,7 +540,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
540 540
541 vcpu->mode = OUTSIDE_GUEST_MODE; 541 vcpu->mode = OUTSIDE_GUEST_MODE;
542 kvm_guest_exit(); 542 kvm_guest_exit();
543 trace_kvm_exit(*vcpu_pc(vcpu)); 543 trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
544 /* 544 /*
545 * We may have taken a host interrupt in HYP mode (ie 545 * We may have taken a host interrupt in HYP mode (ie
546 * while executing the guest). This interrupt is still 546 * while executing the guest). This interrupt is still
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 881874b1a036..6817664b46b8 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -25,18 +25,22 @@ TRACE_EVENT(kvm_entry,
25); 25);
26 26
27TRACE_EVENT(kvm_exit, 27TRACE_EVENT(kvm_exit,
28 TP_PROTO(unsigned long vcpu_pc), 28 TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
29 TP_ARGS(vcpu_pc), 29 TP_ARGS(exit_reason, vcpu_pc),
30 30
31 TP_STRUCT__entry( 31 TP_STRUCT__entry(
32 __field( unsigned int, exit_reason )
32 __field( unsigned long, vcpu_pc ) 33 __field( unsigned long, vcpu_pc )
33 ), 34 ),
34 35
35 TP_fast_assign( 36 TP_fast_assign(
37 __entry->exit_reason = exit_reason;
36 __entry->vcpu_pc = vcpu_pc; 38 __entry->vcpu_pc = vcpu_pc;
37 ), 39 ),
38 40
39 TP_printk("PC: 0x%08lx", __entry->vcpu_pc) 41 TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
42 __entry->exit_reason,
43 __entry->vcpu_pc)
40); 44);
41 45
42TRACE_EVENT(kvm_guest_fault, 46TRACE_EVENT(kvm_guest_fault,
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 603820e5aba7..81064cd61a0a 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -123,7 +123,7 @@ config SOC_EXYNOS5800
123config EXYNOS5420_MCPM 123config EXYNOS5420_MCPM
124 bool "Exynos5420 Multi-Cluster PM support" 124 bool "Exynos5420 Multi-Cluster PM support"
125 depends on MCPM && SOC_EXYNOS5420 125 depends on MCPM && SOC_EXYNOS5420
126 select ARM_CCI 126 select ARM_CCI400_PORT_CTRL
127 select ARM_CPU_SUSPEND 127 select ARM_CPU_SUSPEND
128 help 128 help
129 This is needed to provide CPU and cluster power management 129 This is needed to provide CPU and cluster power management
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 3f32c47a6d74..d2e9f12d12f1 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
126 */ 126 */
127void exynos_cpu_power_down(int cpu) 127void exynos_cpu_power_down(int cpu)
128{ 128{
129 if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") || 129 if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
130 of_machine_is_compatible("samsung,exynos5800"))) {
131 /* 130 /*
132 * Bypass power down for CPU0 during suspend. Check for 131 * Bypass power down for CPU0 during suspend. Check for
133 * the SYS_PWR_REG value to decide if we are suspending 132 * the SYS_PWR_REG value to decide if we are suspending
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 20f267121b3e..37266a826437 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -161,6 +161,34 @@ no_clk:
161 of_genpd_add_provider_simple(np, &pd->pd); 161 of_genpd_add_provider_simple(np, &pd->pd);
162 } 162 }
163 163
164 /* Assign the child power domains to their parents */
165 for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
166 struct generic_pm_domain *child_domain, *parent_domain;
167 struct of_phandle_args args;
168
169 args.np = np;
170 args.args_count = 0;
171 child_domain = of_genpd_get_from_provider(&args);
172 if (!child_domain)
173 continue;
174
175 if (of_parse_phandle_with_args(np, "power-domains",
176 "#power-domain-cells", 0, &args) != 0)
177 continue;
178
179 parent_domain = of_genpd_get_from_provider(&args);
180 if (!parent_domain)
181 continue;
182
183 if (pm_genpd_add_subdomain(parent_domain, child_domain))
184 pr_warn("%s failed to add subdomain: %s\n",
185 parent_domain->name, child_domain->name);
186 else
187 pr_info("%s has as child subdomain: %s.\n",
188 parent_domain->name, child_domain->name);
189 of_node_put(np);
190 }
191
164 return 0; 192 return 0;
165} 193}
166arch_initcall(exynos4_pm_init_power_domain); 194arch_initcall(exynos4_pm_init_power_domain);
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 52e2b1a2fddb..318d127df147 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3;
87static u32 exynos_irqwake_intmask = 0xffffffff; 87static u32 exynos_irqwake_intmask = 0xffffffff;
88 88
89static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { 89static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
90 { 73, BIT(1) }, /* RTC alarm */ 90 { 105, BIT(1) }, /* RTC alarm */
91 { 74, BIT(2) }, /* RTC tick */ 91 { 106, BIT(2) }, /* RTC tick */
92 { /* sentinel */ }, 92 { /* sentinel */ },
93}; 93};
94 94
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 4ad6e473cf83..9de3412af406 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -211,8 +211,9 @@ static void __init imx6q_1588_init(void)
211 * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad 211 * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad
212 * (external OSC), and we need to clear the bit. 212 * (external OSC), and we need to clear the bit.
213 */ 213 */
214 clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : 214 clksel = clk_is_match(ptp_clk, enet_ref) ?
215 IMX6Q_GPR1_ENET_CLK_SEL_PAD; 215 IMX6Q_GPR1_ENET_CLK_SEL_ANATOP :
216 IMX6Q_GPR1_ENET_CLK_SEL_PAD;
216 gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); 217 gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
217 if (!IS_ERR(gpr)) 218 if (!IS_ERR(gpr))
218 regmap_update_bits(gpr, IOMUXC_GPR1, 219 regmap_update_bits(gpr, IOMUXC_GPR1,
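
clk_is_match() is used here because, with per-user clk handles, two clk_get() calls can return distinct struct clk pointers for the same hardware clock, so a raw pointer compare is no longer meaningful. A rough sketch of the same idea outside this board file (device and clock names are invented):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static bool example_clocks_are_same(struct device *dev)
{
	struct clk *a = devm_clk_get(dev, "ptp");
	struct clk *b = devm_clk_get(dev, "enet_ref");

	if (IS_ERR(a) || IS_ERR(b))
		return false;

	/* a == b may be false even when both wrap the same hardware clock */
	return clk_is_match(a, b);
}
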
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 61bfe584a9d7..fc832040c6e9 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -20,6 +20,7 @@
20#include <linux/input.h> 20#include <linux/input.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/smc91x.h>
23 24
24#include <mach/hardware.h> 25#include <mach/hardware.h>
25#include <asm/mach-types.h> 26#include <asm/mach-types.h>
@@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = {
46 [1] = { 47 [1] = {
47 .start = MSM_GPIO_TO_INT(49), 48 .start = MSM_GPIO_TO_INT(49),
48 .end = MSM_GPIO_TO_INT(49), 49 .end = MSM_GPIO_TO_INT(49),
49 .flags = IORESOURCE_IRQ, 50 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
50 }, 51 },
51}; 52};
52 53
54static struct smc91x_platdata smc91x_platdata = {
55 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
56};
57
53static struct platform_device smc91x_device = { 58static struct platform_device smc91x_device = {
54 .name = "smc91x", 59 .name = "smc91x",
55 .id = 0, 60 .id = 0,
56 .num_resources = ARRAY_SIZE(smc91x_resources), 61 .num_resources = ARRAY_SIZE(smc91x_resources),
57 .resource = smc91x_resources, 62 .resource = smc91x_resources,
63 .dev.platform_data = &smc91x_platdata,
58}; 64};
59 65
60static struct platform_device *devices[] __initdata = { 66static struct platform_device *devices[] __initdata = {
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 4c748616ef47..10016a3bc698 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -22,6 +22,7 @@
22#include <linux/usb/msm_hsusb.h> 22#include <linux/usb/msm_hsusb.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/clkdev.h> 24#include <linux/clkdev.h>
25#include <linux/smc91x.h>
25 26
26#include <asm/mach-types.h> 27#include <asm/mach-types.h>
27#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
@@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = {
49 .flags = IORESOURCE_MEM, 50 .flags = IORESOURCE_MEM,
50 }, 51 },
51 [1] = { 52 [1] = {
52 .flags = IORESOURCE_IRQ, 53 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
53 }, 54 },
54}; 55};
55 56
57static struct smc91x_platdata smc91x_platdata = {
58 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
59};
60
56static struct platform_device smc91x_device = { 61static struct platform_device smc91x_device = {
57 .name = "smc91x", 62 .name = "smc91x",
58 .id = 0, 63 .id = 0,
59 .num_resources = ARRAY_SIZE(smc91x_resources), 64 .num_resources = ARRAY_SIZE(smc91x_resources),
60 .resource = smc91x_resources, 65 .resource = smc91x_resources,
66 .dev.platform_data = &smc91x_platdata,
61}; 67};
62 68
63static int __init msm_init_smc91x(void) 69static int __init msm_init_smc91x(void)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 92afb723dcfc..355b08936871 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1692,16 +1692,15 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
1692 if (ret == -EBUSY) 1692 if (ret == -EBUSY)
1693 pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); 1693 pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name);
1694 1694
1695 if (!ret) { 1695 if (oh->clkdm) {
1696 /* 1696 /*
1697 * Set the clockdomain to HW_AUTO, assuming that the 1697 * Set the clockdomain to HW_AUTO, assuming that the
1698 * previous state was HW_AUTO. 1698 * previous state was HW_AUTO.
1699 */ 1699 */
1700 if (oh->clkdm && hwsup) 1700 if (hwsup)
1701 clkdm_allow_idle(oh->clkdm); 1701 clkdm_allow_idle(oh->clkdm);
1702 } else { 1702
1703 if (oh->clkdm) 1703 clkdm_hwmod_disable(oh->clkdm, oh);
1704 clkdm_hwmod_disable(oh->clkdm, oh);
1705 } 1704 }
1706 1705
1707 return ret; 1706 return ret;
@@ -2698,6 +2697,7 @@ static int __init _register(struct omap_hwmod *oh)
2698 INIT_LIST_HEAD(&oh->master_ports); 2697 INIT_LIST_HEAD(&oh->master_ports);
2699 INIT_LIST_HEAD(&oh->slave_ports); 2698 INIT_LIST_HEAD(&oh->slave_ports);
2700 spin_lock_init(&oh->_lock); 2699 spin_lock_init(&oh->_lock);
2700 lockdep_set_class(&oh->_lock, &oh->hwmod_key);
2701 2701
2702 oh->_state = _HWMOD_STATE_REGISTERED; 2702 oh->_state = _HWMOD_STATE_REGISTERED;
2703 2703
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 9d4bec6ee742..9611c91d9b82 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -674,6 +674,7 @@ struct omap_hwmod {
674 u32 _sysc_cache; 674 u32 _sysc_cache;
675 void __iomem *_mpu_rt_va; 675 void __iomem *_mpu_rt_va;
676 spinlock_t _lock; 676 spinlock_t _lock;
677 struct lock_class_key hwmod_key; /* unique lock class */
677 struct list_head node; 678 struct list_head node;
678 struct omap_hwmod_ocp_if *_mpu_port; 679 struct omap_hwmod_ocp_if *_mpu_port;
679 unsigned int (*xlate_irq)(unsigned int); 680 unsigned int (*xlate_irq)(unsigned int);
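
Giving every hwmod its own lock_class_key stops lockdep from treating all _lock spinlocks as a single class, so legitimately nested locking of two different hwmods is not reported as recursive locking. The same pattern in isolation, as a sketch (struct and function names are illustrative; the keys live in statically allocated objects, as the hwmods do):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct example_obj {
	spinlock_t lock;
	struct lock_class_key key;	/* one lock class per object */
};

static void example_obj_init(struct example_obj *obj)
{
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
}
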
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index e8692e7675b8..16fe7a1b7a35 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1466,55 +1466,18 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = {
1466 * 1466 *
1467 */ 1467 */
1468 1468
1469static struct omap_hwmod_class dra7xx_pcie_hwmod_class = { 1469static struct omap_hwmod_class dra7xx_pciess_hwmod_class = {
1470 .name = "pcie", 1470 .name = "pcie",
1471}; 1471};
1472 1472
1473/* pcie1 */ 1473/* pcie1 */
1474static struct omap_hwmod dra7xx_pcie1_hwmod = { 1474static struct omap_hwmod dra7xx_pciess1_hwmod = {
1475 .name = "pcie1", 1475 .name = "pcie1",
1476 .class = &dra7xx_pcie_hwmod_class, 1476 .class = &dra7xx_pciess_hwmod_class,
1477 .clkdm_name = "pcie_clkdm", 1477 .clkdm_name = "pcie_clkdm",
1478 .main_clk = "l4_root_clk_div", 1478 .main_clk = "l4_root_clk_div",
1479 .prcm = { 1479 .prcm = {
1480 .omap4 = { 1480 .omap4 = {
1481 .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET,
1482 .modulemode = MODULEMODE_SWCTRL,
1483 },
1484 },
1485};
1486
1487/* pcie2 */
1488static struct omap_hwmod dra7xx_pcie2_hwmod = {
1489 .name = "pcie2",
1490 .class = &dra7xx_pcie_hwmod_class,
1491 .clkdm_name = "pcie_clkdm",
1492 .main_clk = "l4_root_clk_div",
1493 .prcm = {
1494 .omap4 = {
1495 .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET,
1496 .modulemode = MODULEMODE_SWCTRL,
1497 },
1498 },
1499};
1500
1501/*
1502 * 'PCIE PHY' class
1503 *
1504 */
1505
1506static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = {
1507 .name = "pcie-phy",
1508};
1509
1510/* pcie1 phy */
1511static struct omap_hwmod dra7xx_pcie1_phy_hwmod = {
1512 .name = "pcie1-phy",
1513 .class = &dra7xx_pcie_phy_hwmod_class,
1514 .clkdm_name = "l3init_clkdm",
1515 .main_clk = "l4_root_clk_div",
1516 .prcm = {
1517 .omap4 = {
1518 .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, 1481 .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET,
1519 .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, 1482 .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET,
1520 .modulemode = MODULEMODE_SWCTRL, 1483 .modulemode = MODULEMODE_SWCTRL,
@@ -1522,11 +1485,11 @@ static struct omap_hwmod dra7xx_pcie1_phy_hwmod = {
1522 }, 1485 },
1523}; 1486};
1524 1487
1525/* pcie2 phy */ 1488/* pcie2 */
1526static struct omap_hwmod dra7xx_pcie2_phy_hwmod = { 1489static struct omap_hwmod dra7xx_pciess2_hwmod = {
1527 .name = "pcie2-phy", 1490 .name = "pcie2",
1528 .class = &dra7xx_pcie_phy_hwmod_class, 1491 .class = &dra7xx_pciess_hwmod_class,
1529 .clkdm_name = "l3init_clkdm", 1492 .clkdm_name = "pcie_clkdm",
1530 .main_clk = "l4_root_clk_div", 1493 .main_clk = "l4_root_clk_div",
1531 .prcm = { 1494 .prcm = {
1532 .omap4 = { 1495 .omap4 = {
@@ -2877,50 +2840,34 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = {
2877 .user = OCP_USER_MPU | OCP_USER_SDMA, 2840 .user = OCP_USER_MPU | OCP_USER_SDMA,
2878}; 2841};
2879 2842
2880/* l3_main_1 -> pcie1 */ 2843/* l3_main_1 -> pciess1 */
2881static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = { 2844static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = {
2882 .master = &dra7xx_l3_main_1_hwmod, 2845 .master = &dra7xx_l3_main_1_hwmod,
2883 .slave = &dra7xx_pcie1_hwmod, 2846 .slave = &dra7xx_pciess1_hwmod,
2884 .clk = "l3_iclk_div", 2847 .clk = "l3_iclk_div",
2885 .user = OCP_USER_MPU | OCP_USER_SDMA, 2848 .user = OCP_USER_MPU | OCP_USER_SDMA,
2886}; 2849};
2887 2850
2888/* l4_cfg -> pcie1 */ 2851/* l4_cfg -> pciess1 */
2889static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 = { 2852static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = {
2890 .master = &dra7xx_l4_cfg_hwmod, 2853 .master = &dra7xx_l4_cfg_hwmod,
2891 .slave = &dra7xx_pcie1_hwmod, 2854 .slave = &dra7xx_pciess1_hwmod,
2892 .clk = "l4_root_clk_div", 2855 .clk = "l4_root_clk_div",
2893 .user = OCP_USER_MPU | OCP_USER_SDMA, 2856 .user = OCP_USER_MPU | OCP_USER_SDMA,
2894}; 2857};
2895 2858
2896/* l3_main_1 -> pcie2 */ 2859/* l3_main_1 -> pciess2 */
2897static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = { 2860static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = {
2898 .master = &dra7xx_l3_main_1_hwmod, 2861 .master = &dra7xx_l3_main_1_hwmod,
2899 .slave = &dra7xx_pcie2_hwmod, 2862 .slave = &dra7xx_pciess2_hwmod,
2900 .clk = "l3_iclk_div", 2863 .clk = "l3_iclk_div",
2901 .user = OCP_USER_MPU | OCP_USER_SDMA, 2864 .user = OCP_USER_MPU | OCP_USER_SDMA,
2902}; 2865};
2903 2866
2904/* l4_cfg -> pcie2 */ 2867/* l4_cfg -> pciess2 */
2905static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = { 2868static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = {
2906 .master = &dra7xx_l4_cfg_hwmod,
2907 .slave = &dra7xx_pcie2_hwmod,
2908 .clk = "l4_root_clk_div",
2909 .user = OCP_USER_MPU | OCP_USER_SDMA,
2910};
2911
2912/* l4_cfg -> pcie1 phy */
2913static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = {
2914 .master = &dra7xx_l4_cfg_hwmod,
2915 .slave = &dra7xx_pcie1_phy_hwmod,
2916 .clk = "l4_root_clk_div",
2917 .user = OCP_USER_MPU | OCP_USER_SDMA,
2918};
2919
2920/* l4_cfg -> pcie2 phy */
2921static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = {
2922 .master = &dra7xx_l4_cfg_hwmod, 2869 .master = &dra7xx_l4_cfg_hwmod,
2923 .slave = &dra7xx_pcie2_phy_hwmod, 2870 .slave = &dra7xx_pciess2_hwmod,
2924 .clk = "l4_root_clk_div", 2871 .clk = "l4_root_clk_div",
2925 .user = OCP_USER_MPU | OCP_USER_SDMA, 2872 .user = OCP_USER_MPU | OCP_USER_SDMA,
2926}; 2873};
@@ -3327,12 +3274,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
3327 &dra7xx_l4_cfg__mpu, 3274 &dra7xx_l4_cfg__mpu,
3328 &dra7xx_l4_cfg__ocp2scp1, 3275 &dra7xx_l4_cfg__ocp2scp1,
3329 &dra7xx_l4_cfg__ocp2scp3, 3276 &dra7xx_l4_cfg__ocp2scp3,
3330 &dra7xx_l3_main_1__pcie1, 3277 &dra7xx_l3_main_1__pciess1,
3331 &dra7xx_l4_cfg__pcie1, 3278 &dra7xx_l4_cfg__pciess1,
3332 &dra7xx_l3_main_1__pcie2, 3279 &dra7xx_l3_main_1__pciess2,
3333 &dra7xx_l4_cfg__pcie2, 3280 &dra7xx_l4_cfg__pciess2,
3334 &dra7xx_l4_cfg__pcie1_phy,
3335 &dra7xx_l4_cfg__pcie2_phy,
3336 &dra7xx_l3_main_1__qspi, 3281 &dra7xx_l3_main_1__qspi,
3337 &dra7xx_l4_per3__rtcss, 3282 &dra7xx_l4_per3__rtcss,
3338 &dra7xx_l4_cfg__sata, 3283 &dra7xx_l4_cfg__sata,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 190fa43e7479..e642b079e9f3 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -173,6 +173,7 @@ static void __init omap3_igep0030_rev_g_legacy_init(void)
173 173
174static void __init omap3_evm_legacy_init(void) 174static void __init omap3_evm_legacy_init(void)
175{ 175{
176 hsmmc2_internal_input_clk();
176 legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); 177 legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149);
177} 178}
178 179
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index a08a617a6c11..d6d6bc39e05c 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -252,10 +252,10 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask)
252{ 252{
253 saved_mask[0] = 253 saved_mask[0] =
254 omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, 254 omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
255 OMAP4_PRM_IRQSTATUS_MPU_OFFSET); 255 OMAP4_PRM_IRQENABLE_MPU_OFFSET);
256 saved_mask[1] = 256 saved_mask[1] =
257 omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, 257 omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
258 OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); 258 OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
259 259
260 omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, 260 omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
261 OMAP4_PRM_IRQENABLE_MPU_OFFSET); 261 OMAP4_PRM_IRQENABLE_MPU_OFFSET);
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 343c4e3a7c5d..f6d02e4cbcda 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -36,6 +36,7 @@
36#include <linux/platform_data/video-pxafb.h> 36#include <linux/platform_data/video-pxafb.h>
37#include <mach/bitfield.h> 37#include <mach/bitfield.h>
38#include <linux/platform_data/mmc-pxamci.h> 38#include <linux/platform_data/mmc-pxamci.h>
39#include <linux/smc91x.h>
39 40
40#include "generic.h" 41#include "generic.h"
41#include "devices.h" 42#include "devices.h"
@@ -81,11 +82,16 @@ static struct resource smc91x_resources[] = {
81 } 82 }
82}; 83};
83 84
85static struct smc91x_platdata smc91x_platdata = {
86 .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
87};
88
84static struct platform_device smc91x_device = { 89static struct platform_device smc91x_device = {
85 .name = "smc91x", 90 .name = "smc91x",
86 .id = 0, 91 .id = 0,
87 .num_resources = ARRAY_SIZE(smc91x_resources), 92 .num_resources = ARRAY_SIZE(smc91x_resources),
88 .resource = smc91x_resources, 93 .resource = smc91x_resources,
94 .dev.platform_data = &smc91x_platdata,
89}; 95};
90 96
91static void idp_backlight_power(int on) 97static void idp_backlight_power(int on)
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index ad777b353bd5..eaee2c20b189 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -24,6 +24,7 @@
24#include <linux/mtd/mtd.h> 24#include <linux/mtd/mtd.h>
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <linux/pwm_backlight.h> 26#include <linux/pwm_backlight.h>
27#include <linux/smc91x.h>
27 28
28#include <asm/types.h> 29#include <asm/types.h>
29#include <asm/setup.h> 30#include <asm/setup.h>
@@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = {
189 [1] = { 190 [1] = {
190 .start = LPD270_ETHERNET_IRQ, 191 .start = LPD270_ETHERNET_IRQ,
191 .end = LPD270_ETHERNET_IRQ, 192 .end = LPD270_ETHERNET_IRQ,
192 .flags = IORESOURCE_IRQ, 193 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
193 }, 194 },
194}; 195};
195 196
197struct smc91x_platdata smc91x_platdata = {
198 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
199};
200
196static struct platform_device smc91x_device = { 201static struct platform_device smc91x_device = {
197 .name = "smc91x", 202 .name = "smc91x",
198 .id = 0, 203 .id = 0,
199 .num_resources = ARRAY_SIZE(smc91x_resources), 204 .num_resources = ARRAY_SIZE(smc91x_resources),
200 .resource = smc91x_resources, 205 .resource = smc91x_resources,
206 .dev.platform_data = &smc91x_platdata,
201}; 207};
202 208
203static struct resource lpd270_flash_resources[] = { 209static struct resource lpd270_flash_resources[] = {
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 850e506926df..c309593abdb2 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -28,6 +28,7 @@
28#include <linux/platform_data/video-clcd-versatile.h> 28#include <linux/platform_data/video-clcd-versatile.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/smsc911x.h> 30#include <linux/smsc911x.h>
31#include <linux/smc91x.h>
31#include <linux/ata_platform.h> 32#include <linux/ata_platform.h>
32#include <linux/amba/mmci.h> 33#include <linux/amba/mmci.h>
33#include <linux/gfp.h> 34#include <linux/gfp.h>
@@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = {
94 .phy_interface = PHY_INTERFACE_MODE_MII, 95 .phy_interface = PHY_INTERFACE_MODE_MII,
95}; 96};
96 97
98static struct smc91x_platdata smc91x_platdata = {
99 .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
100};
101
97static struct platform_device realview_eth_device = { 102static struct platform_device realview_eth_device = {
98 .name = "smsc911x", 103 .name = "smsc911x",
99 .id = 0, 104 .id = 0,
@@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res)
107 realview_eth_device.resource = res; 112 realview_eth_device.resource = res;
108 if (strcmp(realview_eth_device.name, "smsc911x") == 0) 113 if (strcmp(realview_eth_device.name, "smsc911x") == 0)
109 realview_eth_device.dev.platform_data = &smsc911x_config; 114 realview_eth_device.dev.platform_data = &smsc911x_config;
115 else
116 realview_eth_device.dev.platform_data = &smc91x_platdata;
110 117
111 return platform_device_register(&realview_eth_device); 118 return platform_device_register(&realview_eth_device);
112} 119}
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 64c88d657f9e..b3869cbbcc68 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = {
234 [1] = { 234 [1] = {
235 .start = IRQ_EB_ETH, 235 .start = IRQ_EB_ETH,
236 .end = IRQ_EB_ETH, 236 .end = IRQ_EB_ETH,
237 .flags = IORESOURCE_IRQ, 237 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
238 }, 238 },
239}; 239};
240 240
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 169262e3040d..af868d258e66 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -12,6 +12,7 @@
12#include <linux/pm.h> 12#include <linux/pm.h>
13#include <linux/serial_core.h> 13#include <linux/serial_core.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/smc91x.h>
15 16
16#include <asm/mach-types.h> 17#include <asm/mach-types.h>
17#include <asm/mach/map.h> 18#include <asm/mach/map.h>
@@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev)
258 0x02000000, "smc91x-attrib"), 259 0x02000000, "smc91x-attrib"),
259 { .flags = IORESOURCE_IRQ }, 260 { .flags = IORESOURCE_IRQ },
260 }; 261 };
262 struct smc91x_platdata smc91x_platdata = {
263 .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
264 };
261 struct platform_device_info smc91x_devinfo = { 265 struct platform_device_info smc91x_devinfo = {
262 .parent = &dev->dev, 266 .parent = &dev->dev,
263 .name = "smc91x", 267 .name = "smc91x",
264 .id = 0, 268 .id = 0,
265 .res = smc91x_resources, 269 .res = smc91x_resources,
266 .num_res = ARRAY_SIZE(smc91x_resources), 270 .num_res = ARRAY_SIZE(smc91x_resources),
271 .data = &smc91x_platdata,
272 .size_data = sizeof(smc91x_platdata),
267 }; 273 };
268 int ret, irq; 274 int ret, irq;
269 275
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 091261878eff..1525d7b5f1b7 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -11,6 +11,7 @@
11#include <linux/irq.h> 11#include <linux/irq.h>
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/mtd/partitions.h> 13#include <linux/mtd/partitions.h>
14#include <linux/smc91x.h>
14 15
15#include <mach/hardware.h> 16#include <mach/hardware.h>
16#include <asm/setup.h> 17#include <asm/setup.h>
@@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = {
43#endif 44#endif
44}; 45};
45 46
47static struct smc91x_platdata smc91x_platdata = {
48 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
49};
46 50
47static struct platform_device smc91x_device = { 51static struct platform_device smc91x_device = {
48 .name = "smc91x", 52 .name = "smc91x",
49 .id = 0, 53 .id = 0,
50 .num_resources = ARRAY_SIZE(smc91x_resources), 54 .num_resources = ARRAY_SIZE(smc91x_resources),
51 .resource = smc91x_resources, 55 .resource = smc91x_resources,
56 .dev = {
57 .platform_data = &smc91x_platdata,
58 },
52}; 59};
53 60
54static struct platform_device *devices[] __initdata = { 61static struct platform_device *devices[] __initdata = {
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index 483cb467bf65..a0f3b1cd497c 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -45,6 +45,6 @@ extern char secondary_trampoline, secondary_trampoline_end;
45 45
46extern unsigned long socfpga_cpu1start_addr; 46extern unsigned long socfpga_cpu1start_addr;
47 47
48#define SOCFPGA_SCU_VIRT_BASE 0xfffec000 48#define SOCFPGA_SCU_VIRT_BASE 0xfee00000
49 49
50#endif 50#endif
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index 383d61e138af..f5e597c207b9 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -23,6 +23,7 @@
23#include <asm/hardware/cache-l2x0.h> 23#include <asm/hardware/cache-l2x0.h>
24#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
25#include <asm/mach/map.h> 25#include <asm/mach/map.h>
26#include <asm/cacheflush.h>
26 27
27#include "core.h" 28#include "core.h"
28 29
@@ -73,6 +74,10 @@ void __init socfpga_sysmgr_init(void)
73 (u32 *) &socfpga_cpu1start_addr)) 74 (u32 *) &socfpga_cpu1start_addr))
74 pr_err("SMP: Need cpu1-start-addr in device tree.\n"); 75 pr_err("SMP: Need cpu1-start-addr in device tree.\n");
75 76
77 /* Ensure that socfpga_cpu1start_addr is visible to other CPUs */
78 smp_wmb();
79 sync_cache_w(&socfpga_cpu1start_addr);
80
76 sys_manager_base_addr = of_iomap(np, 0); 81 sys_manager_base_addr = of_iomap(np, 0);
77 82
78 np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); 83 np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr");
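
The added smp_wmb()/sync_cache_w() pair publishes socfpga_cpu1start_addr so that CPU1, which starts with its MMU and caches off, reads the value from main memory rather than finding stale data. A sketch of the same publish pattern with an invented variable (sync_cache_w() is the existing ARM cache-maintenance helper):

#include <asm/cacheflush.h>
#include <linux/smp.h>

static unsigned long example_boot_addr;

static void example_publish_boot_addr(unsigned long addr)
{
	example_boot_addr = addr;
	/* order the store, then clean it out to memory so a CPU that
	 * comes up with caches disabled sees the new value */
	smp_wmb();
	sync_cache_w(&example_boot_addr);
}
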
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c
index b067390cef4e..b373acade338 100644
--- a/arch/arm/mach-sti/board-dt.c
+++ b/arch/arm/mach-sti/board-dt.c
@@ -18,6 +18,7 @@ static const char *stih41x_dt_match[] __initdata = {
18 "st,stih415", 18 "st,stih415",
19 "st,stih416", 19 "st,stih416",
20 "st,stih407", 20 "st,stih407",
21 "st,stih410",
21 "st,stih418", 22 "st,stih418",
22 NULL 23 NULL
23}; 24};
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 3c2509b4b694..daa7ab6cb909 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -53,7 +53,7 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
53config ARCH_VEXPRESS_DCSCB 53config ARCH_VEXPRESS_DCSCB
54 bool "Dual Cluster System Control Block (DCSCB) support" 54 bool "Dual Cluster System Control Block (DCSCB) support"
55 depends on MCPM 55 depends on MCPM
56 select ARM_CCI 56 select ARM_CCI400_PORT_CTRL
57 help 57 help
58 Support for the Dual Cluster System Configuration Block (DCSCB). 58 Support for the Dual Cluster System Configuration Block (DCSCB).
59 This is needed to provide CPU and cluster power management 59 This is needed to provide CPU and cluster power management
@@ -71,7 +71,7 @@ config ARCH_VEXPRESS_SPC
71config ARCH_VEXPRESS_TC2_PM 71config ARCH_VEXPRESS_TC2_PM
72 bool "Versatile Express TC2 power management" 72 bool "Versatile Express TC2 power management"
73 depends on MCPM 73 depends on MCPM
74 select ARM_CCI 74 select ARM_CCI400_PORT_CTRL
75 select ARCH_VEXPRESS_SPC 75 select ARCH_VEXPRESS_SPC
76 select ARM_CPU_SUSPEND 76 select ARM_CPU_SUSPEND
77 help 77 help
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index f1ad9c2ab2e9..a857794432d6 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -622,7 +622,7 @@
622 }; 622 };
623 623
624 sgenet0: ethernet@1f210000 { 624 sgenet0: ethernet@1f210000 {
625 compatible = "apm,xgene-enet"; 625 compatible = "apm,xgene1-sgenet";
626 status = "disabled"; 626 status = "disabled";
627 reg = <0x0 0x1f210000 0x0 0xd100>, 627 reg = <0x0 0x1f210000 0x0 0xd100>,
628 <0x0 0x1f200000 0x0 0Xc300>, 628 <0x0 0x1f200000 0x0 0Xc300>,
@@ -636,7 +636,7 @@
636 }; 636 };
637 637
638 xgenet: ethernet@1f610000 { 638 xgenet: ethernet@1f610000 {
639 compatible = "apm,xgene-enet"; 639 compatible = "apm,xgene1-xgenet";
640 status = "disabled"; 640 status = "disabled";
641 reg = <0x0 0x1f610000 0x0 0xd100>, 641 reg = <0x0 0x1f610000 0x0 0xd100>,
642 <0x0 0x1f600000 0x0 0Xc300>, 642 <0x0 0x1f600000 0x0 0Xc300>,
diff --git a/arch/arm64/include/asm/arm-cci.h b/arch/arm64/include/asm/arm-cci.h
new file mode 100644
index 000000000000..f0b63712e10e
--- /dev/null
+++ b/arch/arm64/include/asm/arm-cci.h
@@ -0,0 +1,27 @@
1/*
2 * arch/arm64/include/asm/arm-cci.h
3 *
4 * Copyright (C) 2015 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __ASM_ARM_CCI_H
20#define __ASM_ARM_CCI_H
21
22static inline bool platform_has_secure_cci_access(void)
23{
24 return false;
25}
26
27#endif
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c028fe37456f..53d9c354219f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
48static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 48static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
49 unsigned long addr) 49 unsigned long addr)
50{ 50{
51 __flush_tlb_pgtable(tlb->mm, addr);
51 pgtable_page_dtor(pte); 52 pgtable_page_dtor(pte);
52 tlb_remove_entry(tlb, pte); 53 tlb_remove_entry(tlb, pte);
53} 54}
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
56static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, 57static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
57 unsigned long addr) 58 unsigned long addr)
58{ 59{
60 __flush_tlb_pgtable(tlb->mm, addr);
59 tlb_remove_entry(tlb, virt_to_page(pmdp)); 61 tlb_remove_entry(tlb, virt_to_page(pmdp));
60} 62}
61#endif 63#endif
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
64static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, 66static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
65 unsigned long addr) 67 unsigned long addr)
66{ 68{
69 __flush_tlb_pgtable(tlb->mm, addr);
67 tlb_remove_entry(tlb, virt_to_page(pudp)); 70 tlb_remove_entry(tlb, virt_to_page(pudp));
68} 71}
69#endif 72#endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 4abe9b945f77..c3bb05b98616 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -144,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
144} 144}
145 145
146/* 146/*
147 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
148 * table levels (pgd/pud/pmd).
149 */
150static inline void __flush_tlb_pgtable(struct mm_struct *mm,
151 unsigned long uaddr)
152{
153 unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
154
155 dsb(ishst);
156 asm("tlbi vae1is, %0" : : "r" (addr));
157 dsb(ish);
158}
159/*
147 * On AArch64, the cache coherency is handled via the set_pte_at() function. 160 * On AArch64, the cache coherency is handled via the set_pte_at() function.
148 */ 161 */
149static inline void update_mmu_cache(struct vm_area_struct *vma, 162static inline void update_mmu_cache(struct vm_area_struct *vma,
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index b42c7b480e1e..2b8d70164428 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -354,3 +354,12 @@ void efi_virtmap_unload(void)
354 efi_set_pgd(current->active_mm); 354 efi_set_pgd(current->active_mm);
355 preempt_enable(); 355 preempt_enable();
356} 356}
357
358/*
359 * UpdateCapsule() depends on the system being shut down via
360 * ResetSystem().
361 */
362bool efi_poweroff_required(void)
363{
364 return efi_enabled(EFI_RUNTIME_SERVICES);
365}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 8ce88e08c030..07f930540f4a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag)
585 * zeroing of .bss would clobber it. 585 * zeroing of .bss would clobber it.
586 */ 586 */
587 .pushsection .data..cacheline_aligned 587 .pushsection .data..cacheline_aligned
588ENTRY(__boot_cpu_mode)
589 .align L1_CACHE_SHIFT 588 .align L1_CACHE_SHIFT
589ENTRY(__boot_cpu_mode)
590 .long BOOT_CPU_MODE_EL2 590 .long BOOT_CPU_MODE_EL2
591 .long 0 591 .long 0
592 .popsection 592 .popsection
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fde9923af859..c6b1f3b96f45 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
21#include <stdarg.h> 21#include <stdarg.h>
22 22
23#include <linux/compat.h> 23#include <linux/compat.h>
24#include <linux/efi.h>
24#include <linux/export.h> 25#include <linux/export.h>
25#include <linux/sched.h> 26#include <linux/sched.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
@@ -150,6 +151,13 @@ void machine_restart(char *cmd)
150 local_irq_disable(); 151 local_irq_disable();
151 smp_send_stop(); 152 smp_send_stop();
152 153
154 /*
155 * UpdateCapsule() depends on the system being reset via
156 * ResetSystem().
157 */
158 if (efi_enabled(EFI_RUNTIME_SERVICES))
159 efi_reboot(reboot_mode, NULL);
160
153 /* Now call the architecture specific reboot code. */ 161 /* Now call the architecture specific reboot code. */
154 if (arm_pm_restart) 162 if (arm_pm_restart)
155 arm_pm_restart(reboot_mode, cmd); 163 arm_pm_restart(reboot_mode, cmd);
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index bb0ea94c4ba1..1d3ec3ddd84b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -51,7 +51,10 @@ static int change_memory_common(unsigned long addr, int numpages,
51 WARN_ON_ONCE(1); 51 WARN_ON_ONCE(1);
52 } 52 }
53 53
54 if (!is_module_address(start) || !is_module_address(end - 1)) 54 if (start < MODULES_VADDR || start >= MODULES_END)
55 return -EINVAL;
56
57 if (end < MODULES_VADDR || end >= MODULES_END)
55 return -EINVAL; 58 return -EINVAL;
56 59
57 data.set_mask = set_mask; 60 data.set_mask = set_mask;
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index 78d4483ba40c..ec4db6df5e0d 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -67,6 +67,11 @@ extern unsigned long empty_zero_page;
67 */ 67 */
68#define pgtable_cache_init() do { } while (0) 68#define pgtable_cache_init() do { } while (0)
69 69
70/*
71 * c6x is !MMU, so define the simplest implementation
72 */
73#define pgprot_writecombine pgprot_noncached
74
70#include <asm-generic/pgtable.h> 75#include <asm-generic/pgtable.h>
71 76
72#endif /* _ASM_C6X_PGTABLE_H */ 77#endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 0536bc021cc6..ef548510b951 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -348,8 +348,9 @@ C_ENTRY(_user_exception):
348 * The LP register should point to the location where the called function 348 * The LP register should point to the location where the called function
349 * should return. [note that MAKE_SYS_CALL uses label 1] */ 349 * should return. [note that MAKE_SYS_CALL uses label 1] */
350 /* See if the system call number is valid */ 350 /* See if the system call number is valid */
351 blti r12, 5f
351 addi r11, r12, -__NR_syscalls; 352 addi r11, r12, -__NR_syscalls;
352 bgei r11,5f; 353 bgei r11, 5f;
353 /* Figure out which function to use for this system call. */ 354 /* Figure out which function to use for this system call. */
354 /* Note Microblaze barrel shift is optional, so don't rely on it */ 355 /* Note Microblaze barrel shift is optional, so don't rely on it */
355 add r12, r12, r12; /* convert num -> ptr */ 356 add r12, r12, r12; /* convert num -> ptr */
@@ -375,7 +376,7 @@ C_ENTRY(_user_exception):
375 376
376 /* The syscall number is invalid, return an error. */ 377 /* The syscall number is invalid, return an error. */
3775: 3785:
378 rtsd r15, 8; /* looks like a normal subroutine return */ 379 braid ret_from_trap
379 addi r3, r0, -ENOSYS; 380 addi r3, r0, -ENOSYS;
380 381
381/* Entry point used to return from a syscall/trap */ 382/* Entry point used to return from a syscall/trap */
@@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap):
411 bri 1b 412 bri 1b
412 413
413 /* Maybe handle a signal */ 414 /* Maybe handle a signal */
4145: 4155:
415 andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; 416 andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
416 beqi r11, 4f; /* Signals to handle, handle them */ 417 beqi r11, 4f; /* Signals to handle, handle them */
417 418
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index bbcd82242059..b6beb0e07b1b 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
216 if (idx > current_cpu_data.tlbsize) { 216 if (idx > current_cpu_data.tlbsize) {
217 kvm_err("%s: Invalid Index: %d\n", __func__, idx); 217 kvm_err("%s: Invalid Index: %d\n", __func__, idx);
218 kvm_mips_dump_host_tlbs(); 218 kvm_mips_dump_host_tlbs();
219 local_irq_restore(flags);
219 return -1; 220 return -1;
220 } 221 }
221 222
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index c1388d40663b..bd6437f67dc0 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
24 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), 24 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
25 TP_ARGS(vcpu, reason), 25 TP_ARGS(vcpu, reason),
26 TP_STRUCT__entry( 26 TP_STRUCT__entry(
27 __field(struct kvm_vcpu *, vcpu) 27 __field(unsigned long, pc)
28 __field(unsigned int, reason) 28 __field(unsigned int, reason)
29 ), 29 ),
30 30
31 TP_fast_assign( 31 TP_fast_assign(
32 __entry->vcpu = vcpu; 32 __entry->pc = vcpu->arch.pc;
33 __entry->reason = reason; 33 __entry->reason = reason;
34 ), 34 ),
35 35
36 TP_printk("[%s]PC: 0x%08lx", 36 TP_printk("[%s]PC: 0x%08lx",
37 kvm_mips_exit_types_str[__entry->reason], 37 kvm_mips_exit_types_str[__entry->reason],
38 __entry->vcpu->arch.pc) 38 __entry->pc)
39); 39);
40 40
41#endif /* _TRACE_KVM_H */ 41#endif /* _TRACE_KVM_H */
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h
index 20fb1cf2dab6..642462144872 100644
--- a/arch/nios2/include/asm/ptrace.h
+++ b/arch/nios2/include/asm/ptrace.h
@@ -15,7 +15,54 @@
15 15
16#include <uapi/asm/ptrace.h> 16#include <uapi/asm/ptrace.h>
17 17
18/* This struct defines the way the registers are stored on the
19 stack during a system call. */
20
18#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22struct pt_regs {
23 unsigned long r8; /* r8-r15 Caller-saved GP registers */
24 unsigned long r9;
25 unsigned long r10;
26 unsigned long r11;
27 unsigned long r12;
28 unsigned long r13;
29 unsigned long r14;
30 unsigned long r15;
31 unsigned long r1; /* Assembler temporary */
32 unsigned long r2; /* Retval LS 32bits */
33 unsigned long r3; /* Retval MS 32bits */
34 unsigned long r4; /* r4-r7 Register arguments */
35 unsigned long r5;
36 unsigned long r6;
37 unsigned long r7;
38 unsigned long orig_r2; /* Copy of r2 ?? */
39 unsigned long ra; /* Return address */
40 unsigned long fp; /* Frame pointer */
41 unsigned long sp; /* Stack pointer */
42 unsigned long gp; /* Global pointer */
43 unsigned long estatus;
44 unsigned long ea; /* Exception return address (pc) */
45 unsigned long orig_r7;
46};
47
48/*
49 * This is the extended stack used by signal handlers and the context
50 * switcher: it's pushed after the normal "struct pt_regs".
51 */
52struct switch_stack {
53 unsigned long r16; /* r16-r23 Callee-saved GP registers */
54 unsigned long r17;
55 unsigned long r18;
56 unsigned long r19;
57 unsigned long r20;
58 unsigned long r21;
59 unsigned long r22;
60 unsigned long r23;
61 unsigned long fp;
62 unsigned long gp;
63 unsigned long ra;
64};
65
19#define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) 66#define user_mode(regs) (((regs)->estatus & ESTATUS_EU))
20 67
21#define instruction_pointer(regs) ((regs)->ra) 68#define instruction_pointer(regs) ((regs)->ra)
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h
deleted file mode 100644
index 2c87614b0f6e..000000000000
--- a/arch/nios2/include/asm/ucontext.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
3 * Copyright (C) 2004 Microtronix Datacom Ltd
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 */
9
10#ifndef _ASM_NIOS2_UCONTEXT_H
11#define _ASM_NIOS2_UCONTEXT_H
12
13typedef int greg_t;
14#define NGREG 32
15typedef greg_t gregset_t[NGREG];
16
17struct mcontext {
18 int version;
19 gregset_t gregs;
20};
21
22#define MCONTEXT_VERSION 2
23
24struct ucontext {
25 unsigned long uc_flags;
26 struct ucontext *uc_link;
27 stack_t uc_stack;
28 struct mcontext uc_mcontext;
29 sigset_t uc_sigmask; /* mask last for extensibility */
30};
31
32#endif
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 4f07ca3f8d10..376131194cc3 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -2,3 +2,5 @@ include include/uapi/asm-generic/Kbuild.asm
2 2
3header-y += elf.h 3header-y += elf.h
4header-y += ucontext.h 4header-y += ucontext.h
5
6generic-y += ucontext.h
diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h
index a5b91ae5cf56..6f06d3b2949e 100644
--- a/arch/nios2/include/uapi/asm/elf.h
+++ b/arch/nios2/include/uapi/asm/elf.h
@@ -50,9 +50,7 @@
50 50
51typedef unsigned long elf_greg_t; 51typedef unsigned long elf_greg_t;
52 52
53#define ELF_NGREG \ 53#define ELF_NGREG 49
54 ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) / \
55 sizeof(elf_greg_t))
56typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 54typedef elf_greg_t elf_gregset_t[ELF_NGREG];
57 55
58typedef unsigned long elf_fpregset_t; 56typedef unsigned long elf_fpregset_t;
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h
index e83a7c9d1c36..71a330597adf 100644
--- a/arch/nios2/include/uapi/asm/ptrace.h
+++ b/arch/nios2/include/uapi/asm/ptrace.h
@@ -67,53 +67,9 @@
67 67
68#define NUM_PTRACE_REG (PTR_TLBMISC + 1) 68#define NUM_PTRACE_REG (PTR_TLBMISC + 1)
69 69
70/* this struct defines the way the registers are stored on the 70/* User structures for general purpose registers. */
71 stack during a system call. 71struct user_pt_regs {
72 72 __u32 regs[49];
73 There is a fake_regs in setup.c that has to match pt_regs.*/
74
75struct pt_regs {
76 unsigned long r8; /* r8-r15 Caller-saved GP registers */
77 unsigned long r9;
78 unsigned long r10;
79 unsigned long r11;
80 unsigned long r12;
81 unsigned long r13;
82 unsigned long r14;
83 unsigned long r15;
84 unsigned long r1; /* Assembler temporary */
85 unsigned long r2; /* Retval LS 32bits */
86 unsigned long r3; /* Retval MS 32bits */
87 unsigned long r4; /* r4-r7 Register arguments */
88 unsigned long r5;
89 unsigned long r6;
90 unsigned long r7;
91 unsigned long orig_r2; /* Copy of r2 ?? */
92 unsigned long ra; /* Return address */
93 unsigned long fp; /* Frame pointer */
94 unsigned long sp; /* Stack pointer */
95 unsigned long gp; /* Global pointer */
96 unsigned long estatus;
97 unsigned long ea; /* Exception return address (pc) */
98 unsigned long orig_r7;
99};
100
101/*
102 * This is the extended stack used by signal handlers and the context
103 * switcher: it's pushed after the normal "struct pt_regs".
104 */
105struct switch_stack {
106 unsigned long r16; /* r16-r23 Callee-saved GP registers */
107 unsigned long r17;
108 unsigned long r18;
109 unsigned long r19;
110 unsigned long r20;
111 unsigned long r21;
112 unsigned long r22;
113 unsigned long r23;
114 unsigned long fp;
115 unsigned long gp;
116 unsigned long ra;
117}; 73};
118 74
119#endif /* __ASSEMBLY__ */ 75#endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h
index 7b8bb41867d4..b67944a50927 100644
--- a/arch/nios2/include/uapi/asm/sigcontext.h
+++ b/arch/nios2/include/uapi/asm/sigcontext.h
@@ -15,14 +15,16 @@
15 * details. 15 * details.
16 */ 16 */
17 17
18#ifndef _ASM_NIOS2_SIGCONTEXT_H 18#ifndef _UAPI__ASM_SIGCONTEXT_H
19#define _ASM_NIOS2_SIGCONTEXT_H 19#define _UAPI__ASM_SIGCONTEXT_H
20 20
21#include <asm/ptrace.h> 21#include <linux/types.h>
22
23#define MCONTEXT_VERSION 2
22 24
23struct sigcontext { 25struct sigcontext {
24 struct pt_regs regs; 26 int version;
25 unsigned long sc_mask; /* old sigmask */ 27 unsigned long gregs[32];
26}; 28};
27 29
28#endif 30#endif
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index 2d0ea25be171..dda41e4fe707 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -39,7 +39,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs,
39 struct ucontext *uc, int *pr2) 39 struct ucontext *uc, int *pr2)
40{ 40{
41 int temp; 41 int temp;
42 greg_t *gregs = uc->uc_mcontext.gregs; 42 unsigned long *gregs = uc->uc_mcontext.gregs;
43 int err; 43 int err;
44 44
45 /* Always make any pending restarted system calls return -EINTR */ 45 /* Always make any pending restarted system calls return -EINTR */
@@ -127,7 +127,7 @@ badframe:
127static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) 127static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
128{ 128{
129 struct switch_stack *sw = (struct switch_stack *)regs - 1; 129 struct switch_stack *sw = (struct switch_stack *)regs - 1;
130 greg_t *gregs = uc->uc_mcontext.gregs; 130 unsigned long *gregs = uc->uc_mcontext.gregs;
131 int err = 0; 131 int err = 0;
132 132
133 err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); 133 err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 9cfa3706a1b8..f1ea5972f6ec 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl,
113 int pci_domain_number, unsigned long pe_num); 113 int pci_domain_number, unsigned long pe_num);
114extern int iommu_add_device(struct device *dev); 114extern int iommu_add_device(struct device *dev);
115extern void iommu_del_device(struct device *dev); 115extern void iommu_del_device(struct device *dev);
116extern int __init tce_iommu_bus_notifier_init(void);
116#else 117#else
117static inline void iommu_register_group(struct iommu_table *tbl, 118static inline void iommu_register_group(struct iommu_table *tbl,
118 int pci_domain_number, 119 int pci_domain_number,
@@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev)
128static inline void iommu_del_device(struct device *dev) 129static inline void iommu_del_device(struct device *dev)
129{ 130{
130} 131}
132
133static inline int __init tce_iommu_bus_notifier_init(void)
134{
135 return 0;
136}
131#endif /* !CONFIG_IOMMU_API */ 137#endif /* !CONFIG_IOMMU_API */
132 138
133static inline void set_iommu_table_base_and_group(struct device *dev, 139static inline void set_iommu_table_base_and_group(struct device *dev,
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
new file mode 100644
index 000000000000..744fd54de374
--- /dev/null
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -0,0 +1,9 @@
1#ifndef _ASM_POWERPC_IRQ_WORK_H
2#define _ASM_POWERPC_IRQ_WORK_H
3
4static inline bool arch_irq_work_has_interrupt(void)
5{
6 return true;
7}
8
9#endif /* _ASM_POWERPC_IRQ_WORK_H */
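
Returning true from arch_irq_work_has_interrupt() tells the core that powerpc can raise a dedicated interrupt for irq_work, so queued work runs almost immediately instead of waiting for the next timer tick. A generic usage sketch (function and variable names are invented):

#include <linux/irq_work.h>
#include <linux/printk.h>

static struct irq_work example_work;

static void example_work_fn(struct irq_work *work)
{
	pr_info("irq_work callback ran\n");
}

static void example_init_and_queue(void)
{
	init_irq_work(&example_work, example_work_fn);
	/* safe from NMI/hard-irq context; runs from the irq_work interrupt */
	irq_work_queue(&example_work);
}
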
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5d3968c4d799..b054f33ab1fb 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev)
1175} 1175}
1176EXPORT_SYMBOL_GPL(iommu_del_device); 1176EXPORT_SYMBOL_GPL(iommu_del_device);
1177 1177
1178static int tce_iommu_bus_notifier(struct notifier_block *nb,
1179 unsigned long action, void *data)
1180{
1181 struct device *dev = data;
1182
1183 switch (action) {
1184 case BUS_NOTIFY_ADD_DEVICE:
1185 return iommu_add_device(dev);
1186 case BUS_NOTIFY_DEL_DEVICE:
1187 if (dev->iommu_group)
1188 iommu_del_device(dev);
1189 return 0;
1190 default:
1191 return 0;
1192 }
1193}
1194
1195static struct notifier_block tce_iommu_bus_nb = {
1196 .notifier_call = tce_iommu_bus_notifier,
1197};
1198
1199int __init tce_iommu_bus_notifier_init(void)
1200{
1201 bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
1202 return 0;
1203}
1178#endif /* CONFIG_IOMMU_API */ 1204#endif /* CONFIG_IOMMU_API */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6e19afa35a15..ec9ec2058d2d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -541,8 +541,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
541 if (smp_ops->give_timebase) 541 if (smp_ops->give_timebase)
542 smp_ops->give_timebase(); 542 smp_ops->give_timebase();
543 543
544 /* Wait until cpu puts itself in the online map */ 544 /* Wait until cpu puts itself in the online & active maps */
545 while (!cpu_online(cpu)) 545 while (!cpu_online(cpu) || !cpu_active(cpu))
546 cpu_relax(); 546 cpu_relax();
547 547
548 return 0; 548 return 0;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index e69142f4af08..54323d6b5166 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -836,30 +836,4 @@ void __init pnv_pci_init(void)
836#endif 836#endif
837} 837}
838 838
839static int tce_iommu_bus_notifier(struct notifier_block *nb,
840 unsigned long action, void *data)
841{
842 struct device *dev = data;
843
844 switch (action) {
845 case BUS_NOTIFY_ADD_DEVICE:
846 return iommu_add_device(dev);
847 case BUS_NOTIFY_DEL_DEVICE:
848 if (dev->iommu_group)
849 iommu_del_device(dev);
850 return 0;
851 default:
852 return 0;
853 }
854}
855
856static struct notifier_block tce_iommu_bus_nb = {
857 .notifier_call = tce_iommu_bus_notifier,
858};
859
860static int __init tce_iommu_bus_notifier_init(void)
861{
862 bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
863 return 0;
864}
865machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); 839machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1d3d52dc3ff3..7803a19adb31 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str)
1340} 1340}
1341 1341
1342__setup("multitce=", disable_multitce); 1342__setup("multitce=", disable_multitce);
1343
1344machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d84559e31f32..f407bbf5ee94 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -515,15 +515,15 @@ struct s390_io_adapter {
515#define S390_ARCH_FAC_MASK_SIZE_U64 \ 515#define S390_ARCH_FAC_MASK_SIZE_U64 \
516 (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) 516 (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
517 517
518struct s390_model_fac { 518struct kvm_s390_fac {
519 /* facilities used in SIE context */ 519 /* facility list requested by guest */
520 __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; 520 __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
521 /* subset enabled by kvm */ 521 /* facility mask supported by kvm & hosting machine */
522 __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; 522 __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
523}; 523};
524 524
525struct kvm_s390_cpu_model { 525struct kvm_s390_cpu_model {
526 struct s390_model_fac *fac; 526 struct kvm_s390_fac *fac;
527 struct cpuid cpu_id; 527 struct cpuid cpu_id;
528 unsigned short ibc; 528 unsigned short ibc;
529}; 529};
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b71954654..8fb3802f8fad 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
62{ 62{
63 int cpu = smp_processor_id(); 63 int cpu = smp_processor_id();
64 64
65 S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
65 if (prev == next) 66 if (prev == next)
66 return; 67 return;
67 if (MACHINE_HAS_TLB_LC) 68 if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
73 atomic_dec(&prev->context.attach_count); 74 atomic_dec(&prev->context.attach_count);
74 if (MACHINE_HAS_TLB_LC) 75 if (MACHINE_HAS_TLB_LC)
75 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); 76 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
76 S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
77} 77}
78 78
79#define finish_arch_post_lock_switch finish_arch_post_lock_switch 79#define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e44166..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
37#endif 37#endif
38} 38}
39 39
40static inline void clear_page(void *page) 40#define clear_page(page) memset((page), 0, PAGE_SIZE)
41{
42 register unsigned long reg1 asm ("1") = 0;
43 register void *reg2 asm ("2") = page;
44 register unsigned long reg3 asm ("3") = 4096;
45 asm volatile(
46 " mvcl 2,0"
47 : "+d" (reg2), "+d" (reg3) : "d" (reg1)
48 : "memory", "cc");
49}
50 41
51/* 42/*
52 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to 43 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index cb2d51e779df..830066f936c8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
36 insn->offset = (entry->target - entry->code) >> 1; 36 insn->offset = (entry->target - entry->code) >> 1;
37} 37}
38 38
39static void jump_label_bug(struct jump_entry *entry, struct insn *insn) 39static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
40 struct insn *new)
40{ 41{
41 unsigned char *ipc = (unsigned char *)entry->code; 42 unsigned char *ipc = (unsigned char *)entry->code;
42 unsigned char *ipe = (unsigned char *)insn; 43 unsigned char *ipe = (unsigned char *)expected;
44 unsigned char *ipn = (unsigned char *)new;
43 45
44 pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); 46 pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
45 pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", 47 pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
46 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); 48 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
47 pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", 49 pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
48 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); 50 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
51 pr_emerg("New: %02x %02x %02x %02x %02x %02x\n",
52 ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
49 panic("Corrupted kernel text"); 53 panic("Corrupted kernel text");
50} 54}
51 55
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry,
69 } 73 }
70 if (init) { 74 if (init) {
71 if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) 75 if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
72 jump_label_bug(entry, &old); 76 jump_label_bug(entry, &orignop, &new);
73 } else { 77 } else {
74 if (memcmp((void *)entry->code, &old, sizeof(old))) 78 if (memcmp((void *)entry->code, &old, sizeof(old)))
75 jump_label_bug(entry, &old); 79 jump_label_bug(entry, &old, &new);
76 } 80 }
77 probe_kernel_write((void *)entry->code, &new, sizeof(new)); 81 probe_kernel_write((void *)entry->code, &new, sizeof(new));
78} 82}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 36154a2f1814..2ca95862e336 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
436 const Elf_Shdr *sechdrs, 436 const Elf_Shdr *sechdrs,
437 struct module *me) 437 struct module *me)
438{ 438{
439 jump_label_apply_nops(me);
439 vfree(me->arch.syminfo); 440 vfree(me->arch.syminfo);
440 me->arch.syminfo = NULL; 441 me->arch.syminfo = NULL;
441 return 0; 442 return 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 26108232fcaa..dc488e13b7e3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,7 +18,7 @@
18 18
19static DEFINE_PER_CPU(struct cpuid, cpu_id); 19static DEFINE_PER_CPU(struct cpuid, cpu_id);
20 20
21void cpu_relax(void) 21void notrace cpu_relax(void)
22{ 22{
23 if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) 23 if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
24 asm volatile("diag 0,0,0x44"); 24 asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0c3623927563..f6579cfde2df 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
522 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, 522 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
523 sizeof(struct cpuid)); 523 sizeof(struct cpuid));
524 kvm->arch.model.ibc = proc->ibc; 524 kvm->arch.model.ibc = proc->ibc;
525 memcpy(kvm->arch.model.fac->kvm, proc->fac_list, 525 memcpy(kvm->arch.model.fac->list, proc->fac_list,
526 S390_ARCH_FAC_LIST_SIZE_BYTE); 526 S390_ARCH_FAC_LIST_SIZE_BYTE);
527 } else 527 } else
528 ret = -EFAULT; 528 ret = -EFAULT;
@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
556 } 556 }
557 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); 557 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
558 proc->ibc = kvm->arch.model.ibc; 558 proc->ibc = kvm->arch.model.ibc;
559 memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); 559 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
560 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) 560 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
561 ret = -EFAULT; 561 ret = -EFAULT;
562 kfree(proc); 562 kfree(proc);
@@ -576,10 +576,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
576 } 576 }
577 get_cpu_id((struct cpuid *) &mach->cpuid); 577 get_cpu_id((struct cpuid *) &mach->cpuid);
578 mach->ibc = sclp_get_ibc(); 578 mach->ibc = sclp_get_ibc();
579 memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, 579 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
580 kvm_s390_fac_list_mask_size() * sizeof(u64)); 580 S390_ARCH_FAC_LIST_SIZE_BYTE);
581 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, 581 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
582 S390_ARCH_FAC_LIST_SIZE_U64); 582 S390_ARCH_FAC_LIST_SIZE_BYTE);
583 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) 583 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
584 ret = -EFAULT; 584 ret = -EFAULT;
585 kfree(mach); 585 kfree(mach);
@@ -778,15 +778,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
778static int kvm_s390_query_ap_config(u8 *config) 778static int kvm_s390_query_ap_config(u8 *config)
779{ 779{
780 u32 fcn_code = 0x04000000UL; 780 u32 fcn_code = 0x04000000UL;
781 u32 cc; 781 u32 cc = 0;
782 782
783 memset(config, 0, 128);
783 asm volatile( 784 asm volatile(
784 "lgr 0,%1\n" 785 "lgr 0,%1\n"
785 "lgr 2,%2\n" 786 "lgr 2,%2\n"
786 ".long 0xb2af0000\n" /* PQAP(QCI) */ 787 ".long 0xb2af0000\n" /* PQAP(QCI) */
787 "ipm %0\n" 788 "0: ipm %0\n"
788 "srl %0,28\n" 789 "srl %0,28\n"
789 : "=r" (cc) 790 "1:\n"
791 EX_TABLE(0b, 1b)
792 : "+r" (cc)
790 : "r" (fcn_code), "r" (config) 793 : "r" (fcn_code), "r" (config)
791 : "cc", "0", "2", "memory" 794 : "cc", "0", "2", "memory"
792 ); 795 );
@@ -839,9 +842,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
839 842
840 kvm_s390_set_crycb_format(kvm); 843 kvm_s390_set_crycb_format(kvm);
841 844
842 /* Disable AES/DEA protected key functions by default */ 845 /* Enable AES/DEA protected key functions by default */
843 kvm->arch.crypto.aes_kw = 0; 846 kvm->arch.crypto.aes_kw = 1;
844 kvm->arch.crypto.dea_kw = 0; 847 kvm->arch.crypto.dea_kw = 1;
848 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
849 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
850 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
851 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
845 852
846 return 0; 853 return 0;
847} 854}
@@ -886,40 +893,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
886 /* 893 /*
887 * The architectural maximum amount of facilities is 16 kbit. To store 894 * The architectural maximum amount of facilities is 16 kbit. To store
888 * this amount, 2 kbyte of memory is required. Thus we need a full 895 * this amount, 2 kbyte of memory is required. Thus we need a full
889 * page to hold the active copy (arch.model.fac->sie) and the current 896 * page to hold the guest facility list (arch.model.fac->list) and the
890 * facilities set (arch.model.fac->kvm). Its address size has to be 897 * facility mask (arch.model.fac->mask). Its address size has to be
891 * 31 bits and word aligned. 898 * 31 bits and word aligned.
892 */ 899 */
893 kvm->arch.model.fac = 900 kvm->arch.model.fac =
894 (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 901 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
895 if (!kvm->arch.model.fac) 902 if (!kvm->arch.model.fac)
896 goto out_nofac; 903 goto out_nofac;
897 904
898 memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, 905 /* Populate the facility mask initially. */
899 S390_ARCH_FAC_LIST_SIZE_U64); 906 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
900 907 S390_ARCH_FAC_LIST_SIZE_BYTE);
901 /*
902 * If this KVM host runs *not* in a LPAR, relax the facility bits
903 * of the kvm facility mask by all missing facilities. This will allow
904 * to determine the right CPU model by means of the remaining facilities.
905 * Live guest migration must prohibit the migration of KVMs running in
906 * a LPAR to non LPAR hosts.
907 */
908 if (!MACHINE_IS_LPAR)
909 for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
910 kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
911
912 /*
913 * Apply the kvm facility mask to limit the kvm supported/tolerated
914 * facility list.
915 */
916 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { 908 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
917 if (i < kvm_s390_fac_list_mask_size()) 909 if (i < kvm_s390_fac_list_mask_size())
918 kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; 910 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
919 else 911 else
920 kvm->arch.model.fac->kvm[i] = 0UL; 912 kvm->arch.model.fac->mask[i] = 0UL;
921 } 913 }
922 914
915 /* Populate the facility list initially. */
916 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
917 S390_ARCH_FAC_LIST_SIZE_BYTE);
918
923 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); 919 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
924 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; 920 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
925 921
@@ -1165,8 +1161,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1165 1161
1166 mutex_lock(&vcpu->kvm->lock); 1162 mutex_lock(&vcpu->kvm->lock);
1167 vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; 1163 vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
1168 memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
1169 S390_ARCH_FAC_LIST_SIZE_BYTE);
1170 vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; 1164 vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
1171 mutex_unlock(&vcpu->kvm->lock); 1165 mutex_unlock(&vcpu->kvm->lock);
1172 1166
@@ -1212,7 +1206,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1212 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; 1206 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1213 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); 1207 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1214 } 1208 }
1215 vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; 1209 vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
1216 1210
1217 spin_lock_init(&vcpu->arch.local_int.lock); 1211 spin_lock_init(&vcpu->arch.local_int.lock);
1218 vcpu->arch.local_int.float_int = &kvm->arch.float_int; 1212 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
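
The hunks above stop keeping separate fac->kvm/fac->sie copies and instead keep a facility mask (what KVM tolerates) and a facility list (what the guest currently sees) side by side in one zeroed page, with test_kvm_facility() now requiring a bit to be set in both. A minimal user-space sketch of that layout and check, with illustrative names and plain LSB-0 bit indexing (the kernel's __test_facility() uses the s390 MSB-0 facility numbering instead):

#include <stdint.h>

#define FAC_LIST_BYTES	2048			/* 16 kbit of facility bits */
#define FAC_LIST_U64	(FAC_LIST_BYTES / 8)

/*
 * Two 2 KB bitmaps together fill exactly one 4 KB page, as the comment in
 * kvm_arch_init_vm() above points out.
 */
struct fac_page {
	uint64_t mask[FAC_LIST_U64];		/* facilities the host/KVM tolerates */
	uint64_t list[FAC_LIST_U64];		/* facilities the guest currently sees */
};

static int test_bit_u64(unsigned long nr, const uint64_t *bits)
{
	return (bits[nr / 64] >> (nr & 63)) & 1;
}

/* Mirrors the reworked test_kvm_facility(): mask and list must both agree. */
static int fac_available(const struct fac_page *fac, unsigned long nr)
{
	return test_bit_u64(nr, fac->mask) && test_bit_u64(nr, fac->list);
}
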
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 985c2114d7ef..c34109aa552d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
128/* test availability of facility in a kvm instance */ 128/* test availability of facility in a kvm instance */
129static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) 129static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
130{ 130{
131 return __test_facility(nr, kvm->arch.model.fac->kvm); 131 return __test_facility(nr, kvm->arch.model.fac->mask) &&
132 __test_facility(nr, kvm->arch.model.fac->list);
132} 133}
133 134
134/* are cpu states controlled by user space */ 135/* are cpu states controlled by user space */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index bdd9b5b17e03..351116939ea2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
348 * We need to shift the lower 32 facility bits (bit 0-31) from a u64 348 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
349 * into a u32 memory representation. They will remain bits 0-31. 349 * into a u32 memory representation. They will remain bits 0-31.
350 */ 350 */
351 fac = *vcpu->kvm->arch.model.fac->sie >> 32; 351 fac = *vcpu->kvm->arch.model.fac->list >> 32;
352 rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), 352 rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
353 &fac, sizeof(fac)); 353 &fac, sizeof(fac));
354 if (rc) 354 if (rc)
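
The comment in handle_stfl() above relies on the s390 MSB-0 bit numbering: facility bits 0-31 live in the upper half of the first STFLE doubleword, so a right shift by 32 yields the 32-bit STFL word with those bits still numbered 0-31. A small illustrative helper (not the kernel code itself):

#include <stdint.h>

static uint32_t stfl_word_from_stfle(uint64_t fac0)
{
	/*
	 * In LSB-0 terms, architecture facility bit n (0 <= n <= 31) is bit
	 * (63 - n) of the doubleword; after the shift it becomes bit (31 - n)
	 * of the u32, which is again "bit n" in the MSB-0 view used by the
	 * architecture and the guest lowcore.
	 */
	return (uint32_t)(fac0 >> 32);
}
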
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 753a56731951..f0b85443e060 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
287 addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); 287 addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
288 return (void __iomem *) addr + offset; 288 return (void __iomem *) addr + offset;
289} 289}
290EXPORT_SYMBOL_GPL(pci_iomap_range); 290EXPORT_SYMBOL(pci_iomap_range);
291 291
292void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) 292void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
293{ 293{
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
309 } 309 }
310 spin_unlock(&zpci_iomap_lock); 310 spin_unlock(&zpci_iomap_lock);
311} 311}
312EXPORT_SYMBOL_GPL(pci_iounmap); 312EXPORT_SYMBOL(pci_iounmap);
313 313
314static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, 314static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
315 int size, u32 *val) 315 int size, u32 *val)
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
483 airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); 483 airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
484} 484}
485 485
486static void zpci_map_resources(struct zpci_dev *zdev) 486static void zpci_map_resources(struct pci_dev *pdev)
487{ 487{
488 struct pci_dev *pdev = zdev->pdev;
489 resource_size_t len; 488 resource_size_t len;
490 int i; 489 int i;
491 490
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
499 } 498 }
500} 499}
501 500
502static void zpci_unmap_resources(struct zpci_dev *zdev) 501static void zpci_unmap_resources(struct pci_dev *pdev)
503{ 502{
504 struct pci_dev *pdev = zdev->pdev;
505 resource_size_t len; 503 resource_size_t len;
506 int i; 504 int i;
507 505
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)
651 649
652 zdev->pdev = pdev; 650 zdev->pdev = pdev;
653 pdev->dev.groups = zpci_attr_groups; 651 pdev->dev.groups = zpci_attr_groups;
654 zpci_map_resources(zdev); 652 zpci_map_resources(pdev);
655 653
656 for (i = 0; i < PCI_BAR_COUNT; i++) { 654 for (i = 0; i < PCI_BAR_COUNT; i++) {
657 res = &pdev->resource[i]; 655 res = &pdev->resource[i];
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
663 return 0; 661 return 0;
664} 662}
665 663
664void pcibios_release_device(struct pci_dev *pdev)
665{
666 zpci_unmap_resources(pdev);
667}
668
666int pcibios_enable_device(struct pci_dev *pdev, int mask) 669int pcibios_enable_device(struct pci_dev *pdev, int mask)
667{ 670{
668 struct zpci_dev *zdev = get_zdev(pdev); 671 struct zpci_dev *zdev = get_zdev(pdev);
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
670 zdev->pdev = pdev; 673 zdev->pdev = pdev;
671 zpci_debug_init_device(zdev); 674 zpci_debug_init_device(zdev);
672 zpci_fmb_enable_device(zdev); 675 zpci_fmb_enable_device(zdev);
673 zpci_map_resources(zdev);
674 676
675 return pci_enable_resources(pdev, mask); 677 return pci_enable_resources(pdev, mask);
676} 678}
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
679{ 681{
680 struct zpci_dev *zdev = get_zdev(pdev); 682 struct zpci_dev *zdev = get_zdev(pdev);
681 683
682 zpci_unmap_resources(zdev);
683 zpci_fmb_disable_device(zdev); 684 zpci_fmb_disable_device(zdev);
684 zpci_debug_exit_device(zdev); 685 zpci_debug_exit_device(zdev);
685 zdev->pdev = NULL; 686 zdev->pdev = NULL;
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
688#ifdef CONFIG_HIBERNATE_CALLBACKS 689#ifdef CONFIG_HIBERNATE_CALLBACKS
689static int zpci_restore(struct device *dev) 690static int zpci_restore(struct device *dev)
690{ 691{
691 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); 692 struct pci_dev *pdev = to_pci_dev(dev);
693 struct zpci_dev *zdev = get_zdev(pdev);
692 int ret = 0; 694 int ret = 0;
693 695
694 if (zdev->state != ZPCI_FN_STATE_ONLINE) 696 if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev)
698 if (ret) 700 if (ret)
699 goto out; 701 goto out;
700 702
701 zpci_map_resources(zdev); 703 zpci_map_resources(pdev);
702 zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, 704 zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
703 zdev->start_dma + zdev->iommu_size - 1, 705 zdev->start_dma + zdev->iommu_size - 1,
704 (u64) zdev->dma_table); 706 (u64) zdev->dma_table);
@@ -709,12 +711,14 @@ out:
709 711
710static int zpci_freeze(struct device *dev) 712static int zpci_freeze(struct device *dev)
711{ 713{
712 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); 714 struct pci_dev *pdev = to_pci_dev(dev);
715 struct zpci_dev *zdev = get_zdev(pdev);
713 716
714 if (zdev->state != ZPCI_FN_STATE_ONLINE) 717 if (zdev->state != ZPCI_FN_STATE_ONLINE)
715 return 0; 718 return 0;
716 719
717 zpci_unregister_ioat(zdev, 0); 720 zpci_unregister_ioat(zdev, 0);
721 zpci_unmap_resources(pdev);
718 return clp_disable_fh(zdev); 722 return clp_disable_fh(zdev);
719} 723}
720 724
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 8aa271b3d1ad..b1bb2b72302c 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
64 if (copy_from_user(buf, user_buffer, length)) 64 if (copy_from_user(buf, user_buffer, length))
65 goto out; 65 goto out;
66 66
67 memcpy_toio(io_addr, buf, length); 67 ret = zpci_memcpy_toio(io_addr, buf, length);
68 ret = 0;
69out: 68out:
70 if (buf != local_buf) 69 if (buf != local_buf)
71 kfree(buf); 70 kfree(buf);
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
98 goto out; 97 goto out;
99 io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); 98 io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
100 99
101 ret = -EFAULT; 100 if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
102 if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) 101 ret = -EFAULT;
103 goto out; 102 goto out;
104 103 }
105 memcpy_fromio(buf, io_addr, length); 104 ret = zpci_memcpy_fromio(buf, io_addr, length);
106 105 if (ret)
107 if (copy_to_user(user_buffer, buf, length))
108 goto out; 106 goto out;
107 if (copy_to_user(user_buffer, buf, length))
108 ret = -EFAULT;
109 109
110 ret = 0;
111out: 110out:
112 if (buf != local_buf) 111 if (buf != local_buf)
113 kfree(buf); 112 kfree(buf);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c2fb8a87dccb..b7d31ca55187 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -499,6 +499,7 @@ config X86_INTEL_QUARK
499 depends on X86_IO_APIC 499 depends on X86_IO_APIC
500 select IOSF_MBI 500 select IOSF_MBI
501 select INTEL_IMR 501 select INTEL_IMR
502 select COMMON_CLK
502 ---help--- 503 ---help---
503 Select to include support for Quark X1000 SoC. 504 Select to include support for Quark X1000 SoC.
504 Say Y here if you have a Quark based system such as the Arduino 505 Say Y here if you have a Quark based system such as the Arduino
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 5fa9770035dc..c9a6d68b8d62 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
82 if (boot_cpu_has(X86_FEATURE_XSAVES)) 82 if (boot_cpu_has(X86_FEATURE_XSAVES))
83 asm volatile("1:"XSAVES"\n\t" 83 asm volatile("1:"XSAVES"\n\t"
84 "2:\n\t" 84 "2:\n\t"
85 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) 85 xstate_fault
86 : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
86 : "memory"); 87 : "memory");
87 else 88 else
88 asm volatile("1:"XSAVE"\n\t" 89 asm volatile("1:"XSAVE"\n\t"
89 "2:\n\t" 90 "2:\n\t"
90 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) 91 xstate_fault
92 : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
91 : "memory"); 93 : "memory");
92
93 asm volatile(xstate_fault
94 : "0" (0)
95 : "memory");
96
97 return err; 94 return err;
98} 95}
99 96
@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
112 if (boot_cpu_has(X86_FEATURE_XSAVES)) 109 if (boot_cpu_has(X86_FEATURE_XSAVES))
113 asm volatile("1:"XRSTORS"\n\t" 110 asm volatile("1:"XRSTORS"\n\t"
114 "2:\n\t" 111 "2:\n\t"
115 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) 112 xstate_fault
113 : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
116 : "memory"); 114 : "memory");
117 else 115 else
118 asm volatile("1:"XRSTOR"\n\t" 116 asm volatile("1:"XRSTOR"\n\t"
119 "2:\n\t" 117 "2:\n\t"
120 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) 118 xstate_fault
119 : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
121 : "memory"); 120 : "memory");
122
123 asm volatile(xstate_fault
124 : "0" (0)
125 : "memory");
126
127 return err; 121 return err;
128} 122}
129 123
@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
149 */ 143 */
150 alternative_input_2( 144 alternative_input_2(
151 "1:"XSAVE, 145 "1:"XSAVE,
152 "1:"XSAVEOPT, 146 XSAVEOPT,
153 X86_FEATURE_XSAVEOPT, 147 X86_FEATURE_XSAVEOPT,
154 "1:"XSAVES, 148 XSAVES,
155 X86_FEATURE_XSAVES, 149 X86_FEATURE_XSAVES,
156 [fx] "D" (fx), "a" (lmask), "d" (hmask) : 150 [fx] "D" (fx), "a" (lmask), "d" (hmask) :
157 "memory"); 151 "memory");
@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
178 */ 172 */
179 alternative_input( 173 alternative_input(
180 "1: " XRSTOR, 174 "1: " XRSTOR,
181 "1: " XRSTORS, 175 XRSTORS,
182 X86_FEATURE_XSAVES, 176 X86_FEATURE_XSAVES,
183 "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) 177 "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
184 : "memory"); 178 : "memory");
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 10074ad9ebf8..1d74d161687c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -269,11 +269,14 @@ ENTRY(ret_from_fork)
269 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? 269 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
270 jz 1f 270 jz 1f
271 271
272 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET 272 /*
273 jnz int_ret_from_sys_call 273 * By the time we get here, we have no idea whether our pt_regs,
274 274 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
275 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET 275 * the slow path, or one of the ia32entry paths.
276 jmp ret_from_sys_call # go to the SYSRET fastpath 276 * Use int_ret_from_sys_call to return, since it can safely handle
277 * all of the above.
278 */
279 jmp int_ret_from_sys_call
277 280
2781: 2811:
279 subq $REST_SKIP, %rsp # leave space for volatiles 282 subq $REST_SKIP, %rsp # leave space for volatiles
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e0b794a84c35..106c01557f2b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4950 goto done; 4950 goto done;
4951 } 4951 }
4952 } 4952 }
4953 ctxt->dst.orig_val = ctxt->dst.val; 4953 /* Copy full 64-bit value for CMPXCHG8B. */
4954 ctxt->dst.orig_val64 = ctxt->dst.val64;
4954 4955
4955special_insn: 4956special_insn:
4956 4957
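
The one-line fix above saves the destination operand through its 64-bit view before emulation. A self-contained illustration of why a field only as wide as "unsigned long" is not enough for CMPXCHG8B on a 32-bit host; the union below is a stand-in for the emulator's operand value, not its actual definition:

#include <stdint.h>
#include <stdio.h>

union op_value {
	unsigned long val;	/* only 32 bits wide on a 32-bit build */
	uint64_t val64;
};

int main(void)
{
	union op_value dst = { .val64 = 0x1122334455667788ULL };
	uint64_t saved_narrow, saved_wide;

	saved_narrow = (unsigned long)dst.val;	/* drops the high half when long is 32-bit */
	saved_wide   = dst.val64;		/* what the fixed code preserves */

	printf("narrow=%#llx wide=%#llx\n",
	       (unsigned long long)saved_narrow,
	       (unsigned long long)saved_wide);
	return 0;
}
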
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e55b5fc344eb..bd4e34de24c7 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
1572 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); 1572 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
1573 } 1573 }
1574 apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); 1574 apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
1575 apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm); 1575 apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
1576 apic->highest_isr_cache = -1; 1576 apic->highest_isr_cache = -1;
1577 update_divide_count(apic); 1577 update_divide_count(apic);
1578 atomic_set(&apic->lapic_timer.pending, 0); 1578 atomic_set(&apic->lapic_timer.pending, 0);
@@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
1782 update_divide_count(apic); 1782 update_divide_count(apic);
1783 start_apic_timer(apic); 1783 start_apic_timer(apic);
1784 apic->irr_pending = true; 1784 apic->irr_pending = true;
1785 apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ? 1785 apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
1786 1 : count_vectors(apic->regs + APIC_ISR); 1786 1 : count_vectors(apic->regs + APIC_ISR);
1787 apic->highest_isr_cache = -1; 1787 apic->highest_isr_cache = -1;
1788 if (kvm_x86_ops->hwapic_irr_update) 1788 if (kvm_x86_ops->hwapic_irr_update)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d319e0c24758..cc618c882f90 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
3649 return; 3649 return;
3650} 3650}
3651 3651
3652static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
3653{
3654 return;
3655}
3656
3657static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) 3652static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
3658{ 3653{
3659 return; 3654 return;
@@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = {
4403 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, 4398 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
4404 .vm_has_apicv = svm_vm_has_apicv, 4399 .vm_has_apicv = svm_vm_has_apicv,
4405 .load_eoi_exitmap = svm_load_eoi_exitmap, 4400 .load_eoi_exitmap = svm_load_eoi_exitmap,
4406 .hwapic_isr_update = svm_hwapic_isr_update,
4407 .sync_pir_to_irr = svm_sync_pir_to_irr, 4401 .sync_pir_to_irr = svm_sync_pir_to_irr,
4408 4402
4409 .set_tss_addr = svm_set_tss_addr, 4403 .set_tss_addr = svm_set_tss_addr,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 14c1a18d206a..f7b20b417a3a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4367,6 +4367,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
4367 return 0; 4367 return 0;
4368} 4368}
4369 4369
4370static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
4371{
4372#ifdef CONFIG_SMP
4373 if (vcpu->mode == IN_GUEST_MODE) {
4374 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
4375 POSTED_INTR_VECTOR);
4376 return true;
4377 }
4378#endif
4379 return false;
4380}
4381
4370static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, 4382static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4371 int vector) 4383 int vector)
4372{ 4384{
@@ -4375,9 +4387,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4375 if (is_guest_mode(vcpu) && 4387 if (is_guest_mode(vcpu) &&
4376 vector == vmx->nested.posted_intr_nv) { 4388 vector == vmx->nested.posted_intr_nv) {
4377 /* the PIR and ON have been set by L1. */ 4389 /* the PIR and ON have been set by L1. */
4378 if (vcpu->mode == IN_GUEST_MODE) 4390 kvm_vcpu_trigger_posted_interrupt(vcpu);
4379 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
4380 POSTED_INTR_VECTOR);
4381 /* 4391 /*
4382 * If a posted intr is not recognized by hardware, 4392 * If a posted intr is not recognized by hardware,
4383 * we will accomplish it in the next vmentry. 4393 * we will accomplish it in the next vmentry.
@@ -4409,12 +4419,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4409 4419
4410 r = pi_test_and_set_on(&vmx->pi_desc); 4420 r = pi_test_and_set_on(&vmx->pi_desc);
4411 kvm_make_request(KVM_REQ_EVENT, vcpu); 4421 kvm_make_request(KVM_REQ_EVENT, vcpu);
4412#ifdef CONFIG_SMP 4422 if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
4413 if (!r && (vcpu->mode == IN_GUEST_MODE))
4414 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
4415 POSTED_INTR_VECTOR);
4416 else
4417#endif
4418 kvm_vcpu_kick(vcpu); 4423 kvm_vcpu_kick(vcpu);
4419} 4424}
4420 4425
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 6ac273832f28..e4695985f9de 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info,
331 struct list_head *list) 331 struct list_head *list)
332{ 332{
333 int ret; 333 int ret;
334 struct resource_entry *entry; 334 struct resource_entry *entry, *tmp;
335 335
336 sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); 336 sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
337 info->bridge = device; 337 info->bridge = device;
@@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info,
345 dev_dbg(&device->dev, 345 dev_dbg(&device->dev,
346 "no IO and memory resources present in _CRS\n"); 346 "no IO and memory resources present in _CRS\n");
347 else 347 else
348 resource_list_for_each_entry(entry, list) 348 resource_list_for_each_entry_safe(entry, tmp, list) {
349 entry->res->name = info->name; 349 if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
350 (entry->res->flags & IORESOURCE_DISABLED))
351 resource_list_destroy_entry(entry);
352 else
353 entry->res->name = info->name;
354 }
350} 355}
351 356
352struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) 357struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
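
The hunk above switches to the _safe iterator because entries may be destroyed while the list is being walked; a plain iterator would advance through a node that was just freed. The same pattern with the generic kernel list API (sketch only, not the resource_list_* helpers themselves):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	int keep;
};

static void prune(struct list_head *head)
{
	struct item *it, *tmp;

	/* "tmp" caches the next node, so "it" may be freed inside the body. */
	list_for_each_entry_safe(it, tmp, head, node) {
		if (!it->keep) {
			list_del(&it->node);
			kfree(it);
		}
	}
}
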
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 740ae3026a14..9f93af56a5fc 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -563,7 +563,7 @@ static bool alloc_p2m(unsigned long pfn)
563 if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) 563 if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
564 p2m_init(p2m); 564 p2m_init(p2m);
565 else 565 else
566 p2m_init_identity(p2m, pfn); 566 p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
567 567
568 spin_lock_irqsave(&p2m_update_lock, flags); 568 spin_lock_irqsave(&p2m_update_lock, flags);
569 569
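
The added masking rounds the pfn down to the start of its p2m chunk: for a power-of-two N, x & ~(N - 1) is the largest multiple of N not exceeding x. A quick self-contained check, where 512 stands in for P2M_PER_PAGE on a typical x86-64 build (PAGE_SIZE / sizeof(unsigned long)); treat that value as an assumption:

#include <assert.h>

#define ALIGN_DOWN_POW2(x, n)	((x) & ~((n) - 1UL))

int main(void)
{
	assert(ALIGN_DOWN_POW2(1027UL, 512UL) == 1024UL);
	assert(ALIGN_DOWN_POW2(1024UL, 512UL) == 1024UL);
	assert(ALIGN_DOWN_POW2(511UL,  512UL) == 0UL);
	return 0;
}
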
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 657964e8ab7e..37fb19047603 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -65,6 +65,7 @@ struct lpss_private_data;
65 65
66struct lpss_device_desc { 66struct lpss_device_desc {
67 unsigned int flags; 67 unsigned int flags;
68 const char *clk_con_id;
68 unsigned int prv_offset; 69 unsigned int prv_offset;
69 size_t prv_size_override; 70 size_t prv_size_override;
70 void (*setup)(struct lpss_private_data *pdata); 71 void (*setup)(struct lpss_private_data *pdata);
@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = {
140 141
141static struct lpss_device_desc lpt_uart_dev_desc = { 142static struct lpss_device_desc lpt_uart_dev_desc = {
142 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, 143 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
144 .clk_con_id = "baudclk",
143 .prv_offset = 0x800, 145 .prv_offset = 0x800,
144 .setup = lpss_uart_setup, 146 .setup = lpss_uart_setup,
145}; 147};
@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
156 158
157static struct lpss_device_desc byt_uart_dev_desc = { 159static struct lpss_device_desc byt_uart_dev_desc = {
158 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, 160 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
161 .clk_con_id = "baudclk",
159 .prv_offset = 0x800, 162 .prv_offset = 0x800,
160 .setup = lpss_uart_setup, 163 .setup = lpss_uart_setup,
161}; 164};
@@ -313,7 +316,7 @@ out:
313 return PTR_ERR(clk); 316 return PTR_ERR(clk);
314 317
315 pdata->clk = clk; 318 pdata->clk = clk;
316 clk_register_clkdev(clk, NULL, devname); 319 clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
317 return 0; 320 return 0;
318} 321}
319 322
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index c723668e3e27..5589a6e2a023 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -42,8 +42,10 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
42 * CHECKME: len might be required to check versus a minimum 42 * CHECKME: len might be required to check versus a minimum
43 * length as well. 1 for io is fine, but for memory it does 43 * length as well. 1 for io is fine, but for memory it does
44 * not make any sense at all. 44 * not make any sense at all.
45 * Note: some BIOSes report incorrect length for ACPI address space
46 * descriptor, so remove check of 'reslen == len' to avoid regression.
45 */ 47 */
46 if (len && reslen && reslen == len && start <= end) 48 if (len && reslen && start <= end)
47 return true; 49 return true;
48 50
49 pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n", 51 pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index debd30917010..26eb70c8f518 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -2110,7 +2110,8 @@ static int __init intel_opregion_present(void)
2110 2110
2111int acpi_video_register(void) 2111int acpi_video_register(void)
2112{ 2112{
2113 int result = 0; 2113 int ret;
2114
2114 if (register_count) { 2115 if (register_count) {
2115 /* 2116 /*
2116 * if the function of acpi_video_register is already called, 2117 * if the function of acpi_video_register is already called,
@@ -2122,9 +2123,9 @@ int acpi_video_register(void)
2122 mutex_init(&video_list_lock); 2123 mutex_init(&video_list_lock);
2123 INIT_LIST_HEAD(&video_bus_head); 2124 INIT_LIST_HEAD(&video_bus_head);
2124 2125
2125 result = acpi_bus_register_driver(&acpi_video_bus); 2126 ret = acpi_bus_register_driver(&acpi_video_bus);
2126 if (result < 0) 2127 if (ret)
2127 return -ENODEV; 2128 return ret;
2128 2129
2129 /* 2130 /*
2130 * When the acpi_video_bus is loaded successfully, increase 2131 * When the acpi_video_bus is loaded successfully, increase
@@ -2176,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
2176 2177
2177static int __init acpi_video_init(void) 2178static int __init acpi_video_init(void)
2178{ 2179{
2180 /*
2181 * Let the module load even if ACPI is disabled (e.g. due to
2182 * a broken BIOS) so that i915.ko can still be loaded on such
2183 * old systems without an AcpiOpRegion.
2184 *
2185 * acpi_video_register() will report -ENODEV later as well due
2186 * to acpi_disabled when i915.ko tries to register itself afterwards.
2187 */
2188 if (acpi_disabled)
2189 return 0;
2190
2179 dmi_check_system(video_dmi_table); 2191 dmi_check_system(video_dmi_table);
2180 2192
2181 if (intel_opregion_present()) 2193 if (intel_opregion_present())
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 33b09b6568a4..6607f3c6ace1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -551,7 +551,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
551{ 551{
552 void *page_addr; 552 void *page_addr;
553 unsigned long user_page_addr; 553 unsigned long user_page_addr;
554 struct vm_struct tmp_area;
555 struct page **page; 554 struct page **page;
556 struct mm_struct *mm; 555 struct mm_struct *mm;
557 556
@@ -600,10 +599,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
600 proc->pid, page_addr); 599 proc->pid, page_addr);
601 goto err_alloc_page_failed; 600 goto err_alloc_page_failed;
602 } 601 }
603 tmp_area.addr = page_addr; 602 ret = map_kernel_range_noflush((unsigned long)page_addr,
604 tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */; 603 PAGE_SIZE, PAGE_KERNEL, page);
605 ret = map_vm_area(&tmp_area, PAGE_KERNEL, page); 604 flush_cache_vmap((unsigned long)page_addr,
606 if (ret) { 605 (unsigned long)page_addr + PAGE_SIZE);
606 if (ret != 1) {
607 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", 607 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
608 proc->pid, page_addr); 608 proc->pid, page_addr);
609 goto err_map_kernel_failed; 609 goto err_map_kernel_failed;
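
The binder change above assumes map_kernel_range_noflush() returns the number of pages it mapped (or a negative errno) and leaves cache maintenance to the caller, hence the explicit flush_cache_vmap() and the ret != 1 check for a single page. A condensed sketch of that pattern under those assumptions:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

static int map_one_kernel_page(unsigned long addr, struct page **page)
{
	int ret;

	ret = map_kernel_range_noflush(addr, PAGE_SIZE, PAGE_KERNEL, page);
	if (ret != 1)			/* exactly one page must have been mapped */
		return ret < 0 ? ret : -ENOMEM;
	flush_cache_vmap(addr, addr + PAGE_SIZE);
	return 0;
}
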
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index f9054cd36a72..5389579c5120 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -869,6 +869,8 @@ try_offline_again:
869 */ 869 */
870 ata_msleep(ap, 1); 870 ata_msleep(ap, 1);
871 871
872 sata_set_spd(link);
873
872 /* 874 /*
873 * Now, bring the host controller online again, this can take time 875 * Now, bring the host controller online again, this can take time
874 * as PHY reset and communication establishment, 1st D2H FIS and 876 * as PHY reset and communication establishment, 1st D2H FIS and
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba4abbe4693c..45937f88e77c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
2242} 2242}
2243 2243
2244static int pm_genpd_summary_one(struct seq_file *s, 2244static int pm_genpd_summary_one(struct seq_file *s,
2245 struct generic_pm_domain *gpd) 2245 struct generic_pm_domain *genpd)
2246{ 2246{
2247 static const char * const status_lookup[] = { 2247 static const char * const status_lookup[] = {
2248 [GPD_STATE_ACTIVE] = "on", 2248 [GPD_STATE_ACTIVE] = "on",
@@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s,
2256 struct gpd_link *link; 2256 struct gpd_link *link;
2257 int ret; 2257 int ret;
2258 2258
2259 ret = mutex_lock_interruptible(&gpd->lock); 2259 ret = mutex_lock_interruptible(&genpd->lock);
2260 if (ret) 2260 if (ret)
2261 return -ERESTARTSYS; 2261 return -ERESTARTSYS;
2262 2262
2263 if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup))) 2263 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2264 goto exit; 2264 goto exit;
2265 seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]); 2265 seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
2266 2266
2267 /* 2267 /*
2268 * Modifications on the list require holding locks on both 2268 * Modifications on the list require holding locks on both
2269 * master and slave, so we are safe. 2269 * master and slave, so we are safe.
2270 * Also gpd->name is immutable. 2270 * Also genpd->name is immutable.
2271 */ 2271 */
2272 list_for_each_entry(link, &gpd->master_links, master_node) { 2272 list_for_each_entry(link, &genpd->master_links, master_node) {
2273 seq_printf(s, "%s", link->slave->name); 2273 seq_printf(s, "%s", link->slave->name);
2274 if (!list_is_last(&link->master_node, &gpd->master_links)) 2274 if (!list_is_last(&link->master_node, &genpd->master_links))
2275 seq_puts(s, ", "); 2275 seq_puts(s, ", ");
2276 } 2276 }
2277 2277
2278 list_for_each_entry(pm_data, &gpd->dev_list, list_node) { 2278 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2279 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); 2279 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2280 if (kobj_path == NULL) 2280 if (kobj_path == NULL)
2281 continue; 2281 continue;
@@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s,
2287 2287
2288 seq_puts(s, "\n"); 2288 seq_puts(s, "\n");
2289exit: 2289exit:
2290 mutex_unlock(&gpd->lock); 2290 mutex_unlock(&genpd->lock);
2291 2291
2292 return 0; 2292 return 0;
2293} 2293}
2294 2294
2295static int pm_genpd_summary_show(struct seq_file *s, void *data) 2295static int pm_genpd_summary_show(struct seq_file *s, void *data)
2296{ 2296{
2297 struct generic_pm_domain *gpd; 2297 struct generic_pm_domain *genpd;
2298 int ret = 0; 2298 int ret = 0;
2299 2299
2300 seq_puts(s, " domain status slaves\n"); 2300 seq_puts(s, " domain status slaves\n");
@@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
2305 if (ret) 2305 if (ret)
2306 return -ERESTARTSYS; 2306 return -ERESTARTSYS;
2307 2307
2308 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2308 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2309 ret = pm_genpd_summary_one(s, gpd); 2309 ret = pm_genpd_summary_one(s, genpd);
2310 if (ret) 2310 if (ret)
2311 break; 2311 break;
2312 } 2312 }
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index c2744b30d5d9..aab7158d2afe 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -730,6 +730,7 @@ void pm_system_wakeup(void)
730 pm_abort_suspend = true; 730 pm_abort_suspend = true;
731 freeze_wake(); 731 freeze_wake();
732} 732}
733EXPORT_SYMBOL_GPL(pm_system_wakeup);
733 734
734void pm_wakeup_clear(void) 735void pm_wakeup_clear(void)
735{ 736{
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b87688881143..8bfc4c2bba87 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
272 { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, 272 { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
273 273
274 /* Intel Bluetooth devices */ 274 /* Intel Bluetooth devices */
275 { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
275 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, 276 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
276 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, 277 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
277 { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, 278 { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7e9c2674af81..ea816ef23537 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -5,11 +5,31 @@
5menu "Bus devices" 5menu "Bus devices"
6 6
7config ARM_CCI 7config ARM_CCI
8 bool "ARM CCI driver support" 8 bool
9
10config ARM_CCI400_COMMON
11 bool
12 select ARM_CCI
13
14config ARM_CCI400_PMU
15 bool "ARM CCI400 PMU support"
16 default y
17 depends on ARM || ARM64
18 depends on HW_PERF_EVENTS
19 select ARM_CCI400_COMMON
20 help
21 Support for PMU events monitoring on the ARM CCI cache coherent
22 interconnect.
23
24 If unsure, say Y
25
26config ARM_CCI400_PORT_CTRL
27 bool
9 depends on ARM && OF && CPU_V7 28 depends on ARM && OF && CPU_V7
29 select ARM_CCI400_COMMON
10 help 30 help
11 Driver supporting the CCI cache coherent interconnect for ARM 31 Low level power management driver for CCI400 cache coherent
12 platforms. 32 interconnect for ARM platforms.
13 33
14config ARM_CCN 34config ARM_CCN
15 bool "ARM CCN driver support" 35 bool "ARM CCN driver support"
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 84fd66057dad..b854125e4831 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -29,41 +29,36 @@
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30#include <asm/smp_plat.h> 30#include <asm/smp_plat.h>
31 31
32#define DRIVER_NAME "CCI-400" 32static void __iomem *cci_ctrl_base;
33#define DRIVER_NAME_PMU DRIVER_NAME " PMU" 33static unsigned long cci_ctrl_phys;
34
35#define CCI_PORT_CTRL 0x0
36#define CCI_CTRL_STATUS 0xc
37
38#define CCI_ENABLE_SNOOP_REQ 0x1
39#define CCI_ENABLE_DVM_REQ 0x2
40#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
41 34
35#ifdef CONFIG_ARM_CCI400_PORT_CTRL
42struct cci_nb_ports { 36struct cci_nb_ports {
43 unsigned int nb_ace; 37 unsigned int nb_ace;
44 unsigned int nb_ace_lite; 38 unsigned int nb_ace_lite;
45}; 39};
46 40
47enum cci_ace_port_type { 41static const struct cci_nb_ports cci400_ports = {
48 ACE_INVALID_PORT = 0x0, 42 .nb_ace = 2,
49 ACE_PORT, 43 .nb_ace_lite = 3
50 ACE_LITE_PORT,
51}; 44};
52 45
53struct cci_ace_port { 46#define CCI400_PORTS_DATA (&cci400_ports)
54 void __iomem *base; 47#else
55 unsigned long phys; 48#define CCI400_PORTS_DATA (NULL)
56 enum cci_ace_port_type type; 49#endif
57 struct device_node *dn;
58};
59 50
60static struct cci_ace_port *ports; 51static const struct of_device_id arm_cci_matches[] = {
61static unsigned int nb_cci_ports; 52#ifdef CONFIG_ARM_CCI400_COMMON
53 {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
54#endif
55 {},
56};
62 57
63static void __iomem *cci_ctrl_base; 58#ifdef CONFIG_ARM_CCI400_PMU
64static unsigned long cci_ctrl_phys;
65 59
66#ifdef CONFIG_HW_PERF_EVENTS 60#define DRIVER_NAME "CCI-400"
61#define DRIVER_NAME_PMU DRIVER_NAME " PMU"
67 62
68#define CCI_PMCR 0x0100 63#define CCI_PMCR 0x0100
69#define CCI_PID2 0x0fe8 64#define CCI_PID2 0x0fe8
@@ -75,20 +70,6 @@ static unsigned long cci_ctrl_phys;
75#define CCI_PID2_REV_MASK 0xf0 70#define CCI_PID2_REV_MASK 0xf0
76#define CCI_PID2_REV_SHIFT 4 71#define CCI_PID2_REV_SHIFT 4
77 72
78/* Port ids */
79#define CCI_PORT_S0 0
80#define CCI_PORT_S1 1
81#define CCI_PORT_S2 2
82#define CCI_PORT_S3 3
83#define CCI_PORT_S4 4
84#define CCI_PORT_M0 5
85#define CCI_PORT_M1 6
86#define CCI_PORT_M2 7
87
88#define CCI_REV_R0 0
89#define CCI_REV_R1 1
90#define CCI_REV_R1_PX 5
91
92#define CCI_PMU_EVT_SEL 0x000 73#define CCI_PMU_EVT_SEL 0x000
93#define CCI_PMU_CNTR 0x004 74#define CCI_PMU_CNTR 0x004
94#define CCI_PMU_CNTR_CTRL 0x008 75#define CCI_PMU_CNTR_CTRL 0x008
@@ -100,76 +81,22 @@ static unsigned long cci_ctrl_phys;
100 81
101#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) 82#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1)
102 83
103/* 84#define CCI_PMU_EVENT_MASK 0xffUL
104 * Instead of an event id to monitor CCI cycles, a dedicated counter is
105 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
106 * make use of this event in hardware.
107 */
108enum cci400_perf_events {
109 CCI_PMU_CYCLES = 0xff
110};
111
112#define CCI_PMU_EVENT_MASK 0xff
113#define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7) 85#define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7)
114#define CCI_PMU_EVENT_CODE(event) (event & 0x1f) 86#define CCI_PMU_EVENT_CODE(event) (event & 0x1f)
115 87
116#define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */ 88#define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */
117 89
118#define CCI_PMU_CYCLE_CNTR_IDX 0 90/* Types of interfaces that can generate events */
119#define CCI_PMU_CNTR0_IDX 1 91enum {
120#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1) 92 CCI_IF_SLAVE,
121 93 CCI_IF_MASTER,
122/* 94 CCI_IF_MAX,
123 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
124 * ports and bits 4:0 are event codes. There are different event codes
125 * associated with each port type.
126 *
127 * Additionally, the range of events associated with the port types changed
128 * between Rev0 and Rev1.
129 *
130 * The constants below define the range of valid codes for each port type for
131 * the different revisions and are used to validate the event to be monitored.
132 */
133
134#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00
135#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13
136#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14
137#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a
138
139#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00
140#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14
141#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00
142#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11
143
144struct pmu_port_event_ranges {
145 u8 slave_min;
146 u8 slave_max;
147 u8 master_min;
148 u8 master_max;
149};
150
151static struct pmu_port_event_ranges port_event_range[] = {
152 [CCI_REV_R0] = {
153 .slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
154 .slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
155 .master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
156 .master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
157 },
158 [CCI_REV_R1] = {
159 .slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
160 .slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
161 .master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
162 .master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
163 },
164}; 95};
165 96
166/* 97struct event_range {
167 * Export different PMU names for the different revisions so userspace knows 98 u32 min;
168 * because the event ids are different 99 u32 max;
169 */
170static char *const pmu_names[] = {
171 [CCI_REV_R0] = "CCI_400",
172 [CCI_REV_R1] = "CCI_400_r1",
173}; 100};
174 101
175struct cci_pmu_hw_events { 102struct cci_pmu_hw_events {
@@ -178,13 +105,20 @@ struct cci_pmu_hw_events {
178 raw_spinlock_t pmu_lock; 105 raw_spinlock_t pmu_lock;
179}; 106};
180 107
108struct cci_pmu_model {
109 char *name;
110 struct event_range event_ranges[CCI_IF_MAX];
111};
112
113static struct cci_pmu_model cci_pmu_models[];
114
181struct cci_pmu { 115struct cci_pmu {
182 void __iomem *base; 116 void __iomem *base;
183 struct pmu pmu; 117 struct pmu pmu;
184 int nr_irqs; 118 int nr_irqs;
185 int irqs[CCI_PMU_MAX_HW_EVENTS]; 119 int irqs[CCI_PMU_MAX_HW_EVENTS];
186 unsigned long active_irqs; 120 unsigned long active_irqs;
187 struct pmu_port_event_ranges *port_ranges; 121 const struct cci_pmu_model *model;
188 struct cci_pmu_hw_events hw_events; 122 struct cci_pmu_hw_events hw_events;
189 struct platform_device *plat_device; 123 struct platform_device *plat_device;
190 int num_events; 124 int num_events;
@@ -196,52 +130,63 @@ static struct cci_pmu *pmu;
196 130
197#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) 131#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
198 132
199static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) 133/* Port ids */
200{ 134#define CCI_PORT_S0 0
201 int i; 135#define CCI_PORT_S1 1
202 136#define CCI_PORT_S2 2
203 for (i = 0; i < nr_irqs; i++) 137#define CCI_PORT_S3 3
204 if (irq == irqs[i]) 138#define CCI_PORT_S4 4
205 return true; 139#define CCI_PORT_M0 5
206 140#define CCI_PORT_M1 6
207 return false; 141#define CCI_PORT_M2 7
208}
209 142
210static int probe_cci_revision(void) 143#define CCI_REV_R0 0
211{ 144#define CCI_REV_R1 1
212 int rev; 145#define CCI_REV_R1_PX 5
213 rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
214 rev >>= CCI_PID2_REV_SHIFT;
215 146
216 if (rev < CCI_REV_R1_PX) 147/*
217 return CCI_REV_R0; 148 * Instead of an event id to monitor CCI cycles, a dedicated counter is
218 else 149 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
219 return CCI_REV_R1; 150 * make use of this event in hardware.
220} 151 */
152enum cci400_perf_events {
153 CCI_PMU_CYCLES = 0xff
154};
221 155
222static struct pmu_port_event_ranges *port_range_by_rev(void) 156#define CCI_PMU_CYCLE_CNTR_IDX 0
223{ 157#define CCI_PMU_CNTR0_IDX 1
224 int rev = probe_cci_revision(); 158#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
225 159
226 return &port_event_range[rev]; 160/*
227} 161 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
162 * ports and bits 4:0 are event codes. There are different event codes
163 * associated with each port type.
164 *
165 * Additionally, the range of events associated with the port types changed
166 * between Rev0 and Rev1.
167 *
168 * The constants below define the range of valid codes for each port type for
169 * the different revisions and are used to validate the event to be monitored.
170 */
228 171
229static int pmu_is_valid_slave_event(u8 ev_code) 172#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00
230{ 173#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13
231 return pmu->port_ranges->slave_min <= ev_code && 174#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14
232 ev_code <= pmu->port_ranges->slave_max; 175#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a
233}
234 176
235static int pmu_is_valid_master_event(u8 ev_code) 177#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00
236{ 178#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14
237 return pmu->port_ranges->master_min <= ev_code && 179#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00
238 ev_code <= pmu->port_ranges->master_max; 180#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11
239}
240 181
241static int pmu_validate_hw_event(u8 hw_event) 182static int pmu_validate_hw_event(unsigned long hw_event)
242{ 183{
243 u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event); 184 u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
244 u8 ev_code = CCI_PMU_EVENT_CODE(hw_event); 185 u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
186 int if_type;
187
188 if (hw_event & ~CCI_PMU_EVENT_MASK)
189 return -ENOENT;
245 190
246 switch (ev_source) { 191 switch (ev_source) {
247 case CCI_PORT_S0: 192 case CCI_PORT_S0:
@@ -250,21 +195,44 @@ static int pmu_validate_hw_event(u8 hw_event)
250 case CCI_PORT_S3: 195 case CCI_PORT_S3:
251 case CCI_PORT_S4: 196 case CCI_PORT_S4:
252 /* Slave Interface */ 197 /* Slave Interface */
253 if (pmu_is_valid_slave_event(ev_code)) 198 if_type = CCI_IF_SLAVE;
254 return hw_event;
255 break; 199 break;
256 case CCI_PORT_M0: 200 case CCI_PORT_M0:
257 case CCI_PORT_M1: 201 case CCI_PORT_M1:
258 case CCI_PORT_M2: 202 case CCI_PORT_M2:
259 /* Master Interface */ 203 /* Master Interface */
260 if (pmu_is_valid_master_event(ev_code)) 204 if_type = CCI_IF_MASTER;
261 return hw_event;
262 break; 205 break;
206 default:
207 return -ENOENT;
263 } 208 }
264 209
210 if (ev_code >= pmu->model->event_ranges[if_type].min &&
211 ev_code <= pmu->model->event_ranges[if_type].max)
212 return hw_event;
213
265 return -ENOENT; 214 return -ENOENT;
266} 215}
267 216
217static int probe_cci_revision(void)
218{
219 int rev;
220 rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
221 rev >>= CCI_PID2_REV_SHIFT;
222
223 if (rev < CCI_REV_R1_PX)
224 return CCI_REV_R0;
225 else
226 return CCI_REV_R1;
227}
228
229static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
230{
231 if (platform_has_secure_cci_access())
232 return &cci_pmu_models[probe_cci_revision()];
233 return NULL;
234}
235
268static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) 236static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
269{ 237{
270 return CCI_PMU_CYCLE_CNTR_IDX <= idx && 238 return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
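
The block comment moved into this hunk describes the event-id layout that CCI_PMU_EVENT_SOURCE/CODE decode: bits 7:5 select one of the eight ports, bits 4:0 the event code on that interface, and anything above bit 7 is rejected by the new CCI_PMU_EVENT_MASK check. A standalone decode sketch using the same masks:

#include <stdio.h>

#define EVENT_MASK		0xffUL
#define EVENT_SOURCE(e)		(((e) >> 5) & 0x7)
#define EVENT_CODE(e)		((e) & 0x1f)

int main(void)
{
	unsigned long ev = 0x65;	/* hypothetical id: port 3 (slave S3), code 0x05 */

	if (ev & ~EVENT_MASK)
		return 1;		/* wider than 8 bits: not a CCI-400 event */
	printf("port=%lu code=%#lx\n", EVENT_SOURCE(ev), EVENT_CODE(ev));
	return 0;
}
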
@@ -293,7 +261,6 @@ static void pmu_enable_counter(int idx)
293 261
294static void pmu_set_event(int idx, unsigned long event) 262static void pmu_set_event(int idx, unsigned long event)
295{ 263{
296 event &= CCI_PMU_EVENT_MASK;
297 pmu_write_register(event, idx, CCI_PMU_EVT_SEL); 264 pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
298} 265}
299 266
@@ -310,7 +277,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
310{ 277{
311 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); 278 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
312 struct hw_perf_event *hw_event = &event->hw; 279 struct hw_perf_event *hw_event = &event->hw;
313 unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK; 280 unsigned long cci_event = hw_event->config_base;
314 int idx; 281 int idx;
315 282
316 if (cci_event == CCI_PMU_CYCLES) { 283 if (cci_event == CCI_PMU_CYCLES) {
@@ -331,7 +298,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
331static int pmu_map_event(struct perf_event *event) 298static int pmu_map_event(struct perf_event *event)
332{ 299{
333 int mapping; 300 int mapping;
334 u8 config = event->attr.config & CCI_PMU_EVENT_MASK; 301 unsigned long config = event->attr.config;
335 302
336 if (event->attr.type < PERF_TYPE_MAX) 303 if (event->attr.type < PERF_TYPE_MAX)
337 return -ENOENT; 304 return -ENOENT;
@@ -660,12 +627,21 @@ static void cci_pmu_del(struct perf_event *event, int flags)
660} 627}
661 628
662static int 629static int
663validate_event(struct cci_pmu_hw_events *hw_events, 630validate_event(struct pmu *cci_pmu,
664 struct perf_event *event) 631 struct cci_pmu_hw_events *hw_events,
632 struct perf_event *event)
665{ 633{
666 if (is_software_event(event)) 634 if (is_software_event(event))
667 return 1; 635 return 1;
668 636
637 /*
638 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
639 * core perf code won't check that the pmu->ctx == leader->ctx
640 * until after pmu->event_init(event).
641 */
642 if (event->pmu != cci_pmu)
643 return 0;
644
669 if (event->state < PERF_EVENT_STATE_OFF) 645 if (event->state < PERF_EVENT_STATE_OFF)
670 return 1; 646 return 1;
671 647
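
The comment added to validate_event() above explains the new first check: during group validation, any hardware event owned by a different PMU makes the group unschedulable, because the core perf code only compares contexts after event_init(). An abstract sketch of that ordering (stand-in types, not the perf_event structures):

struct fake_event {
	const void *pmu;	/* which PMU instance owns this event */
	int is_software;
};

static int validate_member(const void *this_pmu, const struct fake_event *ev)
{
	if (ev->is_software)
		return 1;	/* software events may be grouped with anything */
	if (ev->pmu != this_pmu)
		return 0;	/* foreign hardware PMU: reject the whole group */
	return 1;		/* same PMU: go on and try to reserve a counter */
}
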
@@ -687,15 +663,15 @@ validate_group(struct perf_event *event)
687 .used_mask = CPU_BITS_NONE, 663 .used_mask = CPU_BITS_NONE,
688 }; 664 };
689 665
690 if (!validate_event(&fake_pmu, leader)) 666 if (!validate_event(event->pmu, &fake_pmu, leader))
691 return -EINVAL; 667 return -EINVAL;
692 668
693 list_for_each_entry(sibling, &leader->sibling_list, group_entry) { 669 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
694 if (!validate_event(&fake_pmu, sibling)) 670 if (!validate_event(event->pmu, &fake_pmu, sibling))
695 return -EINVAL; 671 return -EINVAL;
696 } 672 }
697 673
698 if (!validate_event(&fake_pmu, event)) 674 if (!validate_event(event->pmu, &fake_pmu, event))
699 return -EINVAL; 675 return -EINVAL;
700 676
701 return 0; 677 return 0;
@@ -831,9 +807,9 @@ static const struct attribute_group *pmu_attr_groups[] = {
831 807
832static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) 808static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
833{ 809{
834 char *name = pmu_names[probe_cci_revision()]; 810 char *name = cci_pmu->model->name;
835 cci_pmu->pmu = (struct pmu) { 811 cci_pmu->pmu = (struct pmu) {
836 .name = pmu_names[probe_cci_revision()], 812 .name = cci_pmu->model->name,
837 .task_ctx_nr = perf_invalid_context, 813 .task_ctx_nr = perf_invalid_context,
838 .pmu_enable = cci_pmu_enable, 814 .pmu_enable = cci_pmu_enable,
839 .pmu_disable = cci_pmu_disable, 815 .pmu_disable = cci_pmu_disable,
@@ -886,22 +862,93 @@ static struct notifier_block cci_pmu_cpu_nb = {
886 .priority = CPU_PRI_PERF + 1, 862 .priority = CPU_PRI_PERF + 1,
887}; 863};
888 864
865static struct cci_pmu_model cci_pmu_models[] = {
866 [CCI_REV_R0] = {
867 .name = "CCI_400",
868 .event_ranges = {
869 [CCI_IF_SLAVE] = {
870 CCI_REV_R0_SLAVE_PORT_MIN_EV,
871 CCI_REV_R0_SLAVE_PORT_MAX_EV,
872 },
873 [CCI_IF_MASTER] = {
874 CCI_REV_R0_MASTER_PORT_MIN_EV,
875 CCI_REV_R0_MASTER_PORT_MAX_EV,
876 },
877 },
878 },
879 [CCI_REV_R1] = {
880 .name = "CCI_400_r1",
881 .event_ranges = {
882 [CCI_IF_SLAVE] = {
883 CCI_REV_R1_SLAVE_PORT_MIN_EV,
884 CCI_REV_R1_SLAVE_PORT_MAX_EV,
885 },
886 [CCI_IF_MASTER] = {
887 CCI_REV_R1_MASTER_PORT_MIN_EV,
888 CCI_REV_R1_MASTER_PORT_MAX_EV,
889 },
890 },
891 },
892};
893
889static const struct of_device_id arm_cci_pmu_matches[] = { 894static const struct of_device_id arm_cci_pmu_matches[] = {
890 { 895 {
891 .compatible = "arm,cci-400-pmu", 896 .compatible = "arm,cci-400-pmu",
897 .data = NULL,
898 },
899 {
900 .compatible = "arm,cci-400-pmu,r0",
901 .data = &cci_pmu_models[CCI_REV_R0],
902 },
903 {
904 .compatible = "arm,cci-400-pmu,r1",
905 .data = &cci_pmu_models[CCI_REV_R1],
892 }, 906 },
893 {}, 907 {},
894}; 908};
895 909
910static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
911{
912 const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
913 pdev->dev.of_node);
914 if (!match)
915 return NULL;
916 if (match->data)
917 return match->data;
918
919 dev_warn(&pdev->dev, "DEPRECATED compatible property, "
920 "requires secure access to CCI registers\n");
921 return probe_cci_model(pdev);
922}
923
924static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
925{
926 int i;
927
928 for (i = 0; i < nr_irqs; i++)
929 if (irq == irqs[i])
930 return true;
931
932 return false;
933}
934
896static int cci_pmu_probe(struct platform_device *pdev) 935static int cci_pmu_probe(struct platform_device *pdev)
897{ 936{
898 struct resource *res; 937 struct resource *res;
899 int i, ret, irq; 938 int i, ret, irq;
939 const struct cci_pmu_model *model;
940
941 model = get_cci_model(pdev);
942 if (!model) {
943 dev_warn(&pdev->dev, "CCI PMU version not supported\n");
944 return -ENODEV;
945 }
900 946
901 pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); 947 pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
902 if (!pmu) 948 if (!pmu)
903 return -ENOMEM; 949 return -ENOMEM;
904 950
951 pmu->model = model;
905 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 952 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
906 pmu->base = devm_ioremap_resource(&pdev->dev, res); 953 pmu->base = devm_ioremap_resource(&pdev->dev, res);
907 if (IS_ERR(pmu->base)) 954 if (IS_ERR(pmu->base))
@@ -933,12 +980,6 @@ static int cci_pmu_probe(struct platform_device *pdev)
933 return -EINVAL; 980 return -EINVAL;
934 } 981 }
935 982
936 pmu->port_ranges = port_range_by_rev();
937 if (!pmu->port_ranges) {
938 dev_warn(&pdev->dev, "CCI PMU version not supported\n");
939 return -EINVAL;
940 }
941
942 raw_spin_lock_init(&pmu->hw_events.pmu_lock); 983 raw_spin_lock_init(&pmu->hw_events.pmu_lock);
943 mutex_init(&pmu->reserve_mutex); 984 mutex_init(&pmu->reserve_mutex);
944 atomic_set(&pmu->active_events, 0); 985 atomic_set(&pmu->active_events, 0);
@@ -952,6 +993,7 @@ static int cci_pmu_probe(struct platform_device *pdev)
952 if (ret) 993 if (ret)
953 return ret; 994 return ret;
954 995
996 pr_info("ARM %s PMU driver probed\n", pmu->model->name);
955 return 0; 997 return 0;
956} 998}
957 999
@@ -963,7 +1005,66 @@ static int cci_platform_probe(struct platform_device *pdev)
963 return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); 1005 return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
964} 1006}
965 1007
966#endif /* CONFIG_HW_PERF_EVENTS */ 1008static struct platform_driver cci_pmu_driver = {
1009 .driver = {
1010 .name = DRIVER_NAME_PMU,
1011 .of_match_table = arm_cci_pmu_matches,
1012 },
1013 .probe = cci_pmu_probe,
1014};
1015
1016static struct platform_driver cci_platform_driver = {
1017 .driver = {
1018 .name = DRIVER_NAME,
1019 .of_match_table = arm_cci_matches,
1020 },
1021 .probe = cci_platform_probe,
1022};
1023
1024static int __init cci_platform_init(void)
1025{
1026 int ret;
1027
1028 ret = platform_driver_register(&cci_pmu_driver);
1029 if (ret)
1030 return ret;
1031
1032 return platform_driver_register(&cci_platform_driver);
1033}
1034
1035#else /* !CONFIG_ARM_CCI400_PMU */
1036
1037static int __init cci_platform_init(void)
1038{
1039 return 0;
1040}
1041
1042#endif /* CONFIG_ARM_CCI400_PMU */
1043
1044#ifdef CONFIG_ARM_CCI400_PORT_CTRL
1045
1046#define CCI_PORT_CTRL 0x0
1047#define CCI_CTRL_STATUS 0xc
1048
1049#define CCI_ENABLE_SNOOP_REQ 0x1
1050#define CCI_ENABLE_DVM_REQ 0x2
1051#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
1052
1053enum cci_ace_port_type {
1054 ACE_INVALID_PORT = 0x0,
1055 ACE_PORT,
1056 ACE_LITE_PORT,
1057};
1058
1059struct cci_ace_port {
1060 void __iomem *base;
1061 unsigned long phys;
1062 enum cci_ace_port_type type;
1063 struct device_node *dn;
1064};
1065
1066static struct cci_ace_port *ports;
1067static unsigned int nb_cci_ports;
967 1068
968struct cpu_port { 1069struct cpu_port {
969 u64 mpidr; 1070 u64 mpidr;
@@ -1284,36 +1385,20 @@ int notrace __cci_control_port_by_index(u32 port, bool enable)
1284} 1385}
1285EXPORT_SYMBOL_GPL(__cci_control_port_by_index); 1386EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
1286 1387
1287static const struct cci_nb_ports cci400_ports = {
1288 .nb_ace = 2,
1289 .nb_ace_lite = 3
1290};
1291
1292static const struct of_device_id arm_cci_matches[] = {
1293 {.compatible = "arm,cci-400", .data = &cci400_ports },
1294 {},
1295};
1296
1297static const struct of_device_id arm_cci_ctrl_if_matches[] = { 1388static const struct of_device_id arm_cci_ctrl_if_matches[] = {
1298 {.compatible = "arm,cci-400-ctrl-if", }, 1389 {.compatible = "arm,cci-400-ctrl-if", },
1299 {}, 1390 {},
1300}; 1391};
1301 1392
1302static int cci_probe(void) 1393static int cci_probe_ports(struct device_node *np)
1303{ 1394{
1304 struct cci_nb_ports const *cci_config; 1395 struct cci_nb_ports const *cci_config;
1305 int ret, i, nb_ace = 0, nb_ace_lite = 0; 1396 int ret, i, nb_ace = 0, nb_ace_lite = 0;
1306 struct device_node *np, *cp; 1397 struct device_node *cp;
1307 struct resource res; 1398 struct resource res;
1308 const char *match_str; 1399 const char *match_str;
1309 bool is_ace; 1400 bool is_ace;
1310 1401
1311 np = of_find_matching_node(NULL, arm_cci_matches);
1312 if (!np)
1313 return -ENODEV;
1314
1315 if (!of_device_is_available(np))
1316 return -ENODEV;
1317 1402
1318 cci_config = of_match_node(arm_cci_matches, np)->data; 1403 cci_config = of_match_node(arm_cci_matches, np)->data;
1319 if (!cci_config) 1404 if (!cci_config)
@@ -1325,17 +1410,6 @@ static int cci_probe(void)
1325 if (!ports) 1410 if (!ports)
1326 return -ENOMEM; 1411 return -ENOMEM;
1327 1412
1328 ret = of_address_to_resource(np, 0, &res);
1329 if (!ret) {
1330 cci_ctrl_base = ioremap(res.start, resource_size(&res));
1331 cci_ctrl_phys = res.start;
1332 }
1333 if (ret || !cci_ctrl_base) {
1334 WARN(1, "unable to ioremap CCI ctrl\n");
1335 ret = -ENXIO;
1336 goto memalloc_err;
1337 }
1338
1339 for_each_child_of_node(np, cp) { 1413 for_each_child_of_node(np, cp) {
1340 if (!of_match_node(arm_cci_ctrl_if_matches, cp)) 1414 if (!of_match_node(arm_cci_ctrl_if_matches, cp))
1341 continue; 1415 continue;
@@ -1395,12 +1469,37 @@ static int cci_probe(void)
1395 sync_cache_w(&cpu_port); 1469 sync_cache_w(&cpu_port);
1396 __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports); 1470 __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
1397 pr_info("ARM CCI driver probed\n"); 1471 pr_info("ARM CCI driver probed\n");
1472
1398 return 0; 1473 return 0;
1474}
1475#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
1476static inline int cci_probe_ports(struct device_node *np)
1477{
1478 return 0;
1479}
1480#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
1399 1481
1400memalloc_err: 1482static int cci_probe(void)
1483{
1484 int ret;
1485 struct device_node *np;
1486 struct resource res;
1487
1488 np = of_find_matching_node(NULL, arm_cci_matches);
1489 if (!np || !of_device_is_available(np))
1490 return -ENODEV;
1401 1491
1402 kfree(ports); 1492 ret = of_address_to_resource(np, 0, &res);
1403 return ret; 1493 if (!ret) {
1494 cci_ctrl_base = ioremap(res.start, resource_size(&res));
1495 cci_ctrl_phys = res.start;
1496 }
1497 if (ret || !cci_ctrl_base) {
1498 WARN(1, "unable to ioremap CCI ctrl\n");
1499 return -ENXIO;
1500 }
1501
1502 return cci_probe_ports(np);
1404} 1503}
1405 1504
1406static int cci_init_status = -EAGAIN; 1505static int cci_init_status = -EAGAIN;
@@ -1418,42 +1517,6 @@ static int cci_init(void)
1418 return cci_init_status; 1517 return cci_init_status;
1419} 1518}
1420 1519
1421#ifdef CONFIG_HW_PERF_EVENTS
1422static struct platform_driver cci_pmu_driver = {
1423 .driver = {
1424 .name = DRIVER_NAME_PMU,
1425 .of_match_table = arm_cci_pmu_matches,
1426 },
1427 .probe = cci_pmu_probe,
1428};
1429
1430static struct platform_driver cci_platform_driver = {
1431 .driver = {
1432 .name = DRIVER_NAME,
1433 .of_match_table = arm_cci_matches,
1434 },
1435 .probe = cci_platform_probe,
1436};
1437
1438static int __init cci_platform_init(void)
1439{
1440 int ret;
1441
1442 ret = platform_driver_register(&cci_pmu_driver);
1443 if (ret)
1444 return ret;
1445
1446 return platform_driver_register(&cci_platform_driver);
1447}
1448
1449#else
1450
1451static int __init cci_platform_init(void)
1452{
1453 return 0;
1454}
1455
1456#endif
1457/* 1520/*
1458 * To sort out early init calls ordering a helper function is provided to 1521 * To sort out early init calls ordering a helper function is provided to
1459 * check if the CCI driver has been initialized. The function checks if the driver 1522 * check if the CCI driver has been initialized. The function checks if the driver
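
The arm-cci probe above selects the PMU model through the .data pointer of the matched of_device_id entry, falling back to a runtime probe only for the deprecated generic compatible string. A minimal sketch of that lookup pattern, using hypothetical example_* names rather than the driver's own:

#include <linux/of.h>
#include <linux/platform_device.h>

struct example_model {
	const char *name;
};

static const struct example_model example_models[] = {
	{ .name = "ip_r0" },
	{ .name = "ip_r1" },
};

static const struct of_device_id example_matches[] = {
	{ .compatible = "vendor,ip-r0", .data = &example_models[0] },
	{ .compatible = "vendor,ip-r1", .data = &example_models[1] },
	{},
};

/* Resolve per-revision data from whichever compatible string matched. */
static const struct example_model *example_get_model(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_node(example_matches, pdev->dev.of_node);
	if (!match || !match->data)
		return NULL;	/* unknown device or legacy binding */
	return match->data;
}

The sentinel entry terminates the table, and a NULL .data marks a match that does not by itself identify a model.
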
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 1d278ccd751f..e096e9cddb40 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
140{ 140{
141 int rc; 141 int rc;
142 142
143 rc = device_add(&chip->dev); 143 rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
144 if (rc) { 144 if (rc) {
145 dev_err(&chip->dev, 145 dev_err(&chip->dev,
146 "unable to device_register() %s, major %d, minor %d, err=%d\n", 146 "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
147 chip->devname, MAJOR(chip->dev.devt), 147 chip->devname, MAJOR(chip->dev.devt),
148 MINOR(chip->dev.devt), rc); 148 MINOR(chip->dev.devt), rc);
149 149
150 device_unregister(&chip->dev);
150 return rc; 151 return rc;
151 } 152 }
152 153
153 rc = cdev_add(&chip->cdev, chip->dev.devt, 1); 154 rc = device_add(&chip->dev);
154 if (rc) { 155 if (rc) {
155 dev_err(&chip->dev, 156 dev_err(&chip->dev,
156 "unable to cdev_add() %s, major %d, minor %d, err=%d\n", 157 "unable to device_register() %s, major %d, minor %d, err=%d\n",
157 chip->devname, MAJOR(chip->dev.devt), 158 chip->devname, MAJOR(chip->dev.devt),
158 MINOR(chip->dev.devt), rc); 159 MINOR(chip->dev.devt), rc);
159 160
160 device_unregister(&chip->dev);
161 return rc; 161 return rc;
162 } 162 }
163 163
@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
174 * tpm_chip_register() - create a character device for the TPM chip 174 * tpm_chip_register() - create a character device for the TPM chip
175 * @chip: TPM chip to use. 175 * @chip: TPM chip to use.
176 * 176 *
177 * Creates a character device for the TPM chip and adds sysfs interfaces for 177 * Creates a character device for the TPM chip and adds sysfs attributes for
178 * the device, PPI and TCPA. As the last step this function adds the 178 * the device. As the last step this function adds the chip to the list of TPM
179 * chip to the list of TPM chips available for use. 179 * chips available for in-kernel use.
180 * 180 *
181 * NOTE: This function should be only called after the chip initialization 181 * This function should be only called after the chip initialization is
182 * is complete. 182 * complete.
183 *
184 * Called from tpm_<specific>.c probe function only for devices
185 * the driver has determined it should claim. Prior to calling
186 * this function the specific probe function has called pci_enable_device
187 * upon errant exit from this function specific probe function should call
188 * pci_disable_device
189 */ 183 */
190int tpm_chip_register(struct tpm_chip *chip) 184int tpm_chip_register(struct tpm_chip *chip)
191{ 185{
192 int rc; 186 int rc;
193 187
194 rc = tpm_dev_add_device(chip);
195 if (rc)
196 return rc;
197
198 /* Populate sysfs for TPM1 devices. */ 188 /* Populate sysfs for TPM1 devices. */
199 if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { 189 if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
200 rc = tpm_sysfs_add_device(chip); 190 rc = tpm_sysfs_add_device(chip);
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip)
208 chip->bios_dir = tpm_bios_log_setup(chip->devname); 198 chip->bios_dir = tpm_bios_log_setup(chip->devname);
209 } 199 }
210 200
201 rc = tpm_dev_add_device(chip);
202 if (rc)
203 return rc;
204
211 /* Make the chip available. */ 205 /* Make the chip available. */
212 spin_lock(&driver_lock); 206 spin_lock(&driver_lock);
213 list_add_rcu(&chip->list, &tpm_chip_list); 207 list_add_rcu(&chip->list, &tpm_chip_list);
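
The tpm-chip.c reordering registers the character device before device_add() announces the device to userspace, so an open() triggered by the resulting uevent cannot hit a not-yet-registered cdev. A hedged sketch of the general ordering (hypothetical example_chip, not the TPM code itself):

#include <linux/cdev.h>
#include <linux/device.h>

struct example_chip {
	struct cdev cdev;
	struct device dev;
};

static int example_dev_add(struct example_chip *chip)
{
	int rc;

	/* cdev first: open() must work the moment userspace sees the device. */
	rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
	if (rc)
		return rc;

	rc = device_add(&chip->dev);
	if (rc) {
		cdev_del(&chip->cdev);	/* unwind in reverse order */
		return rc;
	}

	return 0;
}
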
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index b1e53e3aece5..42ffa5e7a1e0 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
124{ 124{
125 struct ibmvtpm_dev *ibmvtpm; 125 struct ibmvtpm_dev *ibmvtpm;
126 struct ibmvtpm_crq crq; 126 struct ibmvtpm_crq crq;
127 u64 *word = (u64 *) &crq; 127 __be64 *word = (__be64 *)&crq;
128 int rc; 128 int rc;
129 129
130 ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); 130 ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
145 memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); 145 memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
146 crq.valid = (u8)IBMVTPM_VALID_CMD; 146 crq.valid = (u8)IBMVTPM_VALID_CMD;
147 crq.msg = (u8)VTPM_TPM_COMMAND; 147 crq.msg = (u8)VTPM_TPM_COMMAND;
148 crq.len = (u16)count; 148 crq.len = cpu_to_be16(count);
149 crq.data = ibmvtpm->rtce_dma_handle; 149 crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
150 150
151 rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), 151 rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
152 cpu_to_be64(word[1])); 152 be64_to_cpu(word[1]));
153 if (rc != H_SUCCESS) { 153 if (rc != H_SUCCESS) {
154 dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); 154 dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
155 rc = 0; 155 rc = 0;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index f595f14426bf..6af92890518f 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -22,9 +22,9 @@
22struct ibmvtpm_crq { 22struct ibmvtpm_crq {
23 u8 valid; 23 u8 valid;
24 u8 msg; 24 u8 msg;
25 u16 len; 25 __be16 len;
26 u32 data; 26 __be32 data;
27 u64 reserved; 27 __be64 reserved;
28} __attribute__((packed, aligned(8))); 28} __attribute__((packed, aligned(8)));
29 29
30struct ibmvtpm_crq_queue { 30struct ibmvtpm_crq_queue {
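
Annotating the CRQ fields as __be16/__be32/__be64 lets sparse flag any direct assignment of CPU-endian values; the conversions become explicit at the point of use. A small sketch of the convention with illustrative names, not the vTPM structures:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_crq {
	u8	valid;
	u8	msg;
	__be16	len;
	__be32	data;
} __attribute__((packed));

static void example_fill(struct example_crq *crq, u16 len, u32 dma_handle)
{
	crq->len  = cpu_to_be16(len);		/* CPU order -> wire order */
	crq->data = cpu_to_be32(dma_handle);
}

static u16 example_len(const struct example_crq *crq)
{
	return be16_to_cpu(crq->len);		/* wire order -> CPU order */
}
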
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index f07c8152e5cc..3f27d21fb729 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type)
89 return 0; 89 return 0;
90} 90}
91 91
92static void pmc_irq_suspend(struct irq_data *d)
93{
94 struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
95
96 pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
97 pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
98}
99
100static void pmc_irq_resume(struct irq_data *d)
101{
102 struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
103
104 pmc_write(pmc, AT91_PMC_IER, pmc->imr);
105}
106
92static struct irq_chip pmc_irq = { 107static struct irq_chip pmc_irq = {
93 .name = "PMC", 108 .name = "PMC",
94 .irq_disable = pmc_irq_mask, 109 .irq_disable = pmc_irq_mask,
95 .irq_mask = pmc_irq_mask, 110 .irq_mask = pmc_irq_mask,
96 .irq_unmask = pmc_irq_unmask, 111 .irq_unmask = pmc_irq_unmask,
97 .irq_set_type = pmc_irq_set_type, 112 .irq_set_type = pmc_irq_set_type,
113 .irq_suspend = pmc_irq_suspend,
114 .irq_resume = pmc_irq_resume,
98}; 115};
99 116
100static struct lock_class_key pmc_lock_class; 117static struct lock_class_key pmc_lock_class;
@@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
224 goto out_free_pmc; 241 goto out_free_pmc;
225 242
226 pmc_write(pmc, AT91_PMC_IDR, 0xffffffff); 243 pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
227 if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc)) 244 if (request_irq(pmc->virq, pmc_irq_handler,
245 IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
228 goto out_remove_irqdomain; 246 goto out_remove_irqdomain;
229 247
230 return pmc; 248 return pmc;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 52d2041fa3f6..69abb08cf146 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -33,6 +33,7 @@ struct at91_pmc {
33 spinlock_t lock; 33 spinlock_t lock;
34 const struct at91_pmc_caps *caps; 34 const struct at91_pmc_caps *caps;
35 struct irq_domain *irqdomain; 35 struct irq_domain *irqdomain;
36 u32 imr;
36}; 37};
37 38
38static inline void pmc_lock(struct at91_pmc *pmc) 39static inline void pmc_lock(struct at91_pmc *pmc)
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index db7f8bce7467..25006a8bb8e6 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
144 divider->flags); 144 divider->flags);
145} 145}
146 146
147/*
148 * The reverse of DIV_ROUND_UP: The maximum number which
149 * divided by m is r
150 */
151#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
152
153static bool _is_valid_table_div(const struct clk_div_table *table, 147static bool _is_valid_table_div(const struct clk_div_table *table,
154 unsigned int div) 148 unsigned int div)
155{ 149{
@@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table,
225 unsigned long parent_rate, unsigned long rate, 219 unsigned long parent_rate, unsigned long rate,
226 unsigned long flags) 220 unsigned long flags)
227{ 221{
228 int up, down, div; 222 int up, down;
223 unsigned long up_rate, down_rate;
229 224
230 up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); 225 up = DIV_ROUND_UP(parent_rate, rate);
226 down = parent_rate / rate;
231 227
232 if (flags & CLK_DIVIDER_POWER_OF_TWO) { 228 if (flags & CLK_DIVIDER_POWER_OF_TWO) {
233 up = __roundup_pow_of_two(div); 229 up = __roundup_pow_of_two(up);
234 down = __rounddown_pow_of_two(div); 230 down = __rounddown_pow_of_two(down);
235 } else if (table) { 231 } else if (table) {
236 up = _round_up_table(table, div); 232 up = _round_up_table(table, up);
237 down = _round_down_table(table, div); 233 down = _round_down_table(table, down);
238 } 234 }
239 235
240 return (up - div) <= (div - down) ? up : down; 236 up_rate = DIV_ROUND_UP(parent_rate, up);
237 down_rate = DIV_ROUND_UP(parent_rate, down);
238
239 return (rate - up_rate) <= (down_rate - rate) ? up : down;
241} 240}
242 241
243static int _div_round(const struct clk_div_table *table, 242static int _div_round(const struct clk_div_table *table,
@@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
313 return i; 312 return i;
314 } 313 }
315 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 314 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
316 MULT_ROUND_UP(rate, i)); 315 rate * i);
317 now = DIV_ROUND_UP(parent_rate, i); 316 now = DIV_ROUND_UP(parent_rate, i);
318 if (_is_best_div(rate, now, best, flags)) { 317 if (_is_best_div(rate, now, best, flags)) {
319 bestdiv = i; 318 bestdiv = i;
@@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
353 bestdiv = readl(divider->reg) >> divider->shift; 352 bestdiv = readl(divider->reg) >> divider->shift;
354 bestdiv &= div_mask(divider->width); 353 bestdiv &= div_mask(divider->width);
355 bestdiv = _get_div(divider->table, bestdiv, divider->flags); 354 bestdiv = _get_div(divider->table, bestdiv, divider->flags);
356 return bestdiv; 355 return DIV_ROUND_UP(*prate, bestdiv);
357 } 356 }
358 357
359 return divider_round_rate(hw, rate, prate, divider->table, 358 return divider_round_rate(hw, rate, prate, divider->table,
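
After this change, _div_round_closest() picks between the two candidate dividers by comparing the rates they actually produce rather than their distance from an idealised divider. A worked illustration with made-up numbers (48 MHz parent, 10 MHz request):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static unsigned int example_closest_div(void)
{
	unsigned long parent = 48000000, rate = 10000000;
	unsigned int up, down;
	unsigned long up_rate, down_rate;

	up   = DIV_ROUND_UP(parent, rate);	/* 5 -> 9.6 MHz */
	down = parent / rate;			/* 4 -> 12 MHz  */

	up_rate   = DIV_ROUND_UP(parent, up);	/* 9600000  */
	down_rate = DIV_ROUND_UP(parent, down);	/* 12000000 */

	/* 10 - 9.6 MHz (0.4 MHz) is closer than 12 - 10 MHz (2 MHz): pick 5. */
	return (rate - up_rate) <= (down_rate - rate) ? up : down;
}
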
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index eb0152961d3c..237f23f68bfc 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk)
1350 1350
1351 return rate; 1351 return rate;
1352} 1352}
1353EXPORT_SYMBOL_GPL(clk_core_get_rate);
1354 1353
1355/** 1354/**
1356 * clk_get_rate - return the rate of clk 1355 * clk_get_rate - return the rate of clk
@@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk)
2171} 2170}
2172 2171
2173/** 2172/**
2173 * clk_is_match - check if two clk's point to the same hardware clock
2174 * @p: clk compared against q
2175 * @q: clk compared against p
2176 *
2177 * Returns true if the two struct clk pointers both point to the same hardware
2178 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2179 * share the same struct clk_core object.
2180 *
2181 * Returns false otherwise. Note that two NULL clks are treated as matching.
2182 */
2183bool clk_is_match(const struct clk *p, const struct clk *q)
2184{
2185 /* trivial case: identical struct clk's or both NULL */
2186 if (p == q)
2187 return true;
2188
2189 /* true if clk->core pointers match. Avoid dereferencing garbage */
2190 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2191 if (p->core == q->core)
2192 return true;
2193
2194 return false;
2195}
2196EXPORT_SYMBOL_GPL(clk_is_match);
2197
2198/**
2174 * __clk_init - initialize the data structures in a struct clk 2199 * __clk_init - initialize the data structures in a struct clk
2175 * @dev: device initializing this clk, placeholder for now 2200 * @dev: device initializing this clk, placeholder for now
2176 * @clk: clk being initialized 2201 * @clk: clk being initialized
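
clk_is_match() is needed because, with per-user clk handles, two clk_get() calls may return distinct struct clk cookies for the same hardware clock, so comparing the pointers directly no longer says anything. A hedged consumer-side sketch with hypothetical clock names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static void example_compare_clocks(struct device *dev)
{
	struct clk *a = clk_get(dev, "core");
	struct clk *b = clk_get(dev, "bus");

	/* clk_is_match() copes with NULL/ERR_PTR cookies and returns false. */
	if (clk_is_match(a, b))
		dev_info(dev, "core and bus are the same hardware clock\n");

	if (!IS_ERR(a))
		clk_put(a);
	if (!IS_ERR(b))
		clk_put(b);
}
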
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index b0b562b9ce0e..e60feffc10a1 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = {
48 }, 48 },
49}; 49};
50 50
51static struct clk_regmap pll4_vote = {
52 .enable_reg = 0x34c0,
53 .enable_mask = BIT(4),
54 .hw.init = &(struct clk_init_data){
55 .name = "pll4_vote",
56 .parent_names = (const char *[]){ "pll4" },
57 .num_parents = 1,
58 .ops = &clk_pll_vote_ops,
59 },
60};
61
51static struct clk_pll pll8 = { 62static struct clk_pll pll8 = {
52 .l_reg = 0x3144, 63 .l_reg = 0x3144,
53 .m_reg = 0x3148, 64 .m_reg = 0x3148,
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = {
3023 3034
3024static struct clk_regmap *gcc_msm8960_clks[] = { 3035static struct clk_regmap *gcc_msm8960_clks[] = {
3025 [PLL3] = &pll3.clkr, 3036 [PLL3] = &pll3.clkr,
3037 [PLL4_VOTE] = &pll4_vote,
3026 [PLL8] = &pll8.clkr, 3038 [PLL8] = &pll8.clkr,
3027 [PLL8_VOTE] = &pll8_vote, 3039 [PLL8_VOTE] = &pll8_vote,
3028 [PLL14] = &pll14.clkr, 3040 [PLL14] = &pll14.clkr,
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = {
3247 3259
3248static struct clk_regmap *gcc_apq8064_clks[] = { 3260static struct clk_regmap *gcc_apq8064_clks[] = {
3249 [PLL3] = &pll3.clkr, 3261 [PLL3] = &pll3.clkr,
3262 [PLL4_VOTE] = &pll4_vote,
3250 [PLL8] = &pll8.clkr, 3263 [PLL8] = &pll8.clkr,
3251 [PLL8_VOTE] = &pll8_vote, 3264 [PLL8_VOTE] = &pll8_vote,
3252 [PLL14] = &pll14.clkr, 3265 [PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 121ffde25dc3..c9ff27b4648b 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = {
462 .remove = lcc_ipq806x_remove, 462 .remove = lcc_ipq806x_remove,
463 .driver = { 463 .driver = {
464 .name = "lcc-ipq806x", 464 .name = "lcc-ipq806x",
465 .owner = THIS_MODULE,
466 .of_match_table = lcc_ipq806x_match_table, 465 .of_match_table = lcc_ipq806x_match_table,
467 }, 466 },
468}; 467};
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index a75a408cfccd..e2c863295f00 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = {
417 .mnctr_en_bit = 8, 417 .mnctr_en_bit = 8,
418 .mnctr_reset_bit = 7, 418 .mnctr_reset_bit = 7,
419 .mnctr_mode_shift = 5, 419 .mnctr_mode_shift = 5,
420 .n_val_shift = 16, 420 .n_val_shift = 24,
421 .m_val_shift = 16, 421 .m_val_shift = 8,
422 .width = 8, 422 .width = 8,
423 }, 423 },
424 .p = { 424 .p = {
@@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev)
547 return PTR_ERR(regmap); 547 return PTR_ERR(regmap);
548 548
549 /* Use the correct frequency plan depending on speed of PLL4 */ 549 /* Use the correct frequency plan depending on speed of PLL4 */
550 val = regmap_read(regmap, 0x4, &val); 550 regmap_read(regmap, 0x4, &val);
551 if (val == 0x12) { 551 if (val == 0x12) {
552 slimbus_src.freq_tbl = clk_tbl_aif_osr_492; 552 slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
553 mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; 553 mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
@@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = {
574 .remove = lcc_msm8960_remove, 574 .remove = lcc_msm8960_remove,
575 .driver = { 575 .driver = {
576 .name = "lcc-msm8960", 576 .name = "lcc-msm8960",
577 .owner = THIS_MODULE,
578 .of_match_table = lcc_msm8960_match_table, 577 .of_match_table = lcc_msm8960_match_table,
579 }, 578 },
580}; 579};
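
The lcc-msm8960 hunk fixes a common regmap_read() misuse: the return value is a status code and the register contents come back through the third argument, so assigning the return value to val clobbered the data just read. The usual pattern looks roughly like this (sketch only, hypothetical helper):

#include <linux/regmap.h>

static int example_check_pll4(struct regmap *regmap)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, 0x4, &val);	/* status in ret, data in val */
	if (ret)
		return ret;

	if (val == 0x12)
		;	/* select the alternate frequency plan here */

	return 0;
}
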
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 6ef89639a9f6..d21640634adf 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw)
84 struct fapll_data *fd = to_fapll(hw); 84 struct fapll_data *fd = to_fapll(hw);
85 u32 v = readl_relaxed(fd->base); 85 u32 v = readl_relaxed(fd->base);
86 86
87 v |= (1 << FAPLL_MAIN_PLLEN); 87 v |= FAPLL_MAIN_PLLEN;
88 writel_relaxed(v, fd->base); 88 writel_relaxed(v, fd->base);
89 89
90 return 0; 90 return 0;
@@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw)
95 struct fapll_data *fd = to_fapll(hw); 95 struct fapll_data *fd = to_fapll(hw);
96 u32 v = readl_relaxed(fd->base); 96 u32 v = readl_relaxed(fd->base);
97 97
98 v &= ~(1 << FAPLL_MAIN_PLLEN); 98 v &= ~FAPLL_MAIN_PLLEN;
99 writel_relaxed(v, fd->base); 99 writel_relaxed(v, fd->base);
100} 100}
101 101
@@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw)
104 struct fapll_data *fd = to_fapll(hw); 104 struct fapll_data *fd = to_fapll(hw);
105 u32 v = readl_relaxed(fd->base); 105 u32 v = readl_relaxed(fd->base);
106 106
107 return v & (1 << FAPLL_MAIN_PLLEN); 107 return v & FAPLL_MAIN_PLLEN;
108} 108}
109 109
110static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, 110static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
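
The fapll fix reflects that FAPLL_MAIN_PLLEN is already defined as a mask, so shifting it again with 1 << selected the wrong bit. The two common conventions, shown with hypothetical register bits; a given symbol is defined as one or the other, never treated as both:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_PLLEN_SHIFT	7		/* a bit number: shift it */
#define EXAMPLE_PLLEN		BIT(7)		/* a mask: use it as-is   */

static u32 example_enable_by_shift(u32 v)
{
	/* Either helper sets bit 7; applying "1 <<" to a mask would not. */
	return v | BIT(EXAMPLE_PLLEN_SHIFT);
}

static u32 example_enable_by_mask(u32 v)
{
	return v | EXAMPLE_PLLEN;
}
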
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 5e98c6b1f284..82d2fbb20f7e 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -159,7 +159,7 @@ static struct cpufreq_driver exynos_driver = {
159 159
160static int exynos_cpufreq_probe(struct platform_device *pdev) 160static int exynos_cpufreq_probe(struct platform_device *pdev)
161{ 161{
162 struct device_node *cpus, *np; 162 struct device_node *cpu0;
163 int ret = -EINVAL; 163 int ret = -EINVAL;
164 164
165 exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); 165 exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -206,28 +206,19 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
206 if (ret) 206 if (ret)
207 goto err_cpufreq_reg; 207 goto err_cpufreq_reg;
208 208
209 cpus = of_find_node_by_path("/cpus"); 209 cpu0 = of_get_cpu_node(0, NULL);
210 if (!cpus) { 210 if (!cpu0) {
211 pr_err("failed to find cpus node\n"); 211 pr_err("failed to find cpu0 node\n");
212 return 0; 212 return 0;
213 } 213 }
214 214
215 np = of_get_next_child(cpus, NULL); 215 if (of_find_property(cpu0, "#cooling-cells", NULL)) {
216 if (!np) { 216 cdev = of_cpufreq_cooling_register(cpu0,
217 pr_err("failed to find cpus child node\n");
218 of_node_put(cpus);
219 return 0;
220 }
221
222 if (of_find_property(np, "#cooling-cells", NULL)) {
223 cdev = of_cpufreq_cooling_register(np,
224 cpu_present_mask); 217 cpu_present_mask);
225 if (IS_ERR(cdev)) 218 if (IS_ERR(cdev))
226 pr_err("running cpufreq without cooling device: %ld\n", 219 pr_err("running cpufreq without cooling device: %ld\n",
227 PTR_ERR(cdev)); 220 PTR_ERR(cdev));
228 } 221 }
229 of_node_put(np);
230 of_node_put(cpus);
231 222
232 return 0; 223 return 0;
233 224
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index bee5df7794d3..7cb4b766cf94 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -22,6 +22,8 @@
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <sysdev/fsl_soc.h> 23#include <sysdev/fsl_soc.h>
24 24
25#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */
26
25/** 27/**
26 * struct cpu_data - per CPU data struct 28 * struct cpu_data - per CPU data struct
27 * @parent: the parent node of cpu clock 29 * @parent: the parent node of cpu clock
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 4d534582514e..080bd2dbde4b 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -44,6 +44,12 @@ void disable_cpuidle(void)
44 off = 1; 44 off = 1;
45} 45}
46 46
47bool cpuidle_not_available(struct cpuidle_driver *drv,
48 struct cpuidle_device *dev)
49{
50 return off || !initialized || !drv || !dev || !dev->enabled;
51}
52
47/** 53/**
48 * cpuidle_play_dead - cpu off-lining 54 * cpuidle_play_dead - cpu off-lining
49 * 55 *
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
66 return -ENODEV; 72 return -ENODEV;
67} 73}
68 74
69/** 75static int find_deepest_state(struct cpuidle_driver *drv,
70 * cpuidle_find_deepest_state - Find deepest state meeting specific conditions. 76 struct cpuidle_device *dev, bool freeze)
71 * @drv: cpuidle driver for the given CPU.
72 * @dev: cpuidle device for the given CPU.
73 * @freeze: Whether or not the state should be suitable for suspend-to-idle.
74 */
75static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
76 struct cpuidle_device *dev, bool freeze)
77{ 77{
78 unsigned int latency_req = 0; 78 unsigned int latency_req = 0;
79 int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1; 79 int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
92 return ret; 92 return ret;
93} 93}
94 94
95/**
96 * cpuidle_find_deepest_state - Find the deepest available idle state.
97 * @drv: cpuidle driver for the given CPU.
98 * @dev: cpuidle device for the given CPU.
99 */
100int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
101 struct cpuidle_device *dev)
102{
103 return find_deepest_state(drv, dev, false);
104}
105
95static void enter_freeze_proper(struct cpuidle_driver *drv, 106static void enter_freeze_proper(struct cpuidle_driver *drv,
96 struct cpuidle_device *dev, int index) 107 struct cpuidle_device *dev, int index)
97{ 108{
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
113 124
114/** 125/**
115 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. 126 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
127 * @drv: cpuidle driver for the given CPU.
128 * @dev: cpuidle device for the given CPU.
116 * 129 *
117 * If there are states with the ->enter_freeze callback, find the deepest of 130 * If there are states with the ->enter_freeze callback, find the deepest of
118 * them and enter it with frozen tick. Otherwise, find the deepest state 131 * them and enter it with frozen tick.
119 * available and enter it normally.
120 */ 132 */
121void cpuidle_enter_freeze(void) 133int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
122{ 134{
123 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
124 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
125 int index; 135 int index;
126 136
127 /* 137 /*
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
129 * that interrupts won't be enabled when it exits and allows the tick to 139 * that interrupts won't be enabled when it exits and allows the tick to
130 * be frozen safely. 140 * be frozen safely.
131 */ 141 */
132 index = cpuidle_find_deepest_state(drv, dev, true); 142 index = find_deepest_state(drv, dev, true);
133 if (index >= 0) {
134 enter_freeze_proper(drv, dev, index);
135 return;
136 }
137
138 /*
139 * It is not safe to freeze the tick, find the deepest state available
140 * at all and try to enter it normally.
141 */
142 index = cpuidle_find_deepest_state(drv, dev, false);
143 if (index >= 0) 143 if (index >= 0)
144 cpuidle_enter(drv, dev, index); 144 enter_freeze_proper(drv, dev, index);
145 else
146 arch_cpu_idle();
147 145
148 /* Interrupts are enabled again here. */ 146 return index;
149 local_irq_disable();
150} 147}
151 148
152/** 149/**
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
205 */ 202 */
206int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) 203int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
207{ 204{
208 if (off || !initialized)
209 return -ENODEV;
210
211 if (!drv || !dev || !dev->enabled)
212 return -EBUSY;
213
214 return cpuidle_curr_governor->select(drv, dev); 205 return cpuidle_curr_governor->select(drv, dev);
215} 206}
216 207
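
The reworked cpuidle_enter_freeze() no longer falls back on its own: it returns the entered state index (or a negative value) and leaves the fallback policy to the caller. A heavily simplified sketch of how a caller might combine the new helpers; this is only an illustration, not the scheduler's actual idle loop:

#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/suspend.h>

static void example_idle_step(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	if (cpuidle_not_available(drv, dev)) {
		arch_cpu_idle();		/* no usable cpuidle state */
		return;
	}

	if (idle_should_freeze()) {
		index = cpuidle_enter_freeze(drv, dev);
		if (index < 0) {
			/* No ->enter_freeze state: use the deepest normal one. */
			index = cpuidle_find_deepest_state(drv, dev);
			if (index >= 0)
				cpuidle_enter(drv, dev, index);
		}
		return;
	}

	index = cpuidle_select(drv, dev);
	if (index >= 0)
		cpuidle_enter(drv, dev, index);
}
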
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index e5541117b3e9..50ef8bd8708b 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -159,6 +159,9 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
159 if (WARN_ON(timeout < 0)) 159 if (WARN_ON(timeout < 0))
160 return -EINVAL; 160 return -EINVAL;
161 161
162 if (timeout == 0)
163 return fence_is_signaled(fence);
164
162 trace_fence_wait_start(fence); 165 trace_fence_wait_start(fence);
163 ret = fence->ops->wait(fence, intr, timeout); 166 ret = fence->ops->wait(fence, intr, timeout);
164 trace_fence_wait_end(fence); 167 trace_fence_wait_end(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 3c97c8fa8d02..39920d77f288 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -327,6 +327,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
327 unsigned seq, shared_count, i = 0; 327 unsigned seq, shared_count, i = 0;
328 long ret = timeout; 328 long ret = timeout;
329 329
330 if (!timeout)
331 return reservation_object_test_signaled_rcu(obj, wait_all);
332
330retry: 333retry:
331 fence = NULL; 334 fence = NULL;
332 shared_count = 0; 335 shared_count = 0;
@@ -402,8 +405,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
402 int ret = 1; 405 int ret = 1;
403 406
404 if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { 407 if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
405 int ret;
406
407 fence = fence_get_rcu(lfence); 408 fence = fence_get_rcu(lfence);
408 if (!fence) 409 if (!fence)
409 return -1; 410 return -1;
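
Both dma-buf hunks give a zero timeout defined poll semantics: instead of blocking, the call reports whether the fence (or reservation object) is already signalled. A hedged usage sketch:

#include <linux/fence.h>

static bool example_fence_done(struct fence *fence)
{
	/* timeout == 0: no waiting, just report the current state (> 0 means signalled). */
	return fence_wait_timeout(fence, false, 0) > 0;
}
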
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 09e2825a547a..d9891d3461f6 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -664,7 +664,6 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
664 struct at_xdmac_desc *first = NULL, *prev = NULL; 664 struct at_xdmac_desc *first = NULL, *prev = NULL;
665 unsigned int periods = buf_len / period_len; 665 unsigned int periods = buf_len / period_len;
666 int i; 666 int i;
667 u32 cfg;
668 667
669 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", 668 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
670 __func__, &buf_addr, buf_len, period_len, 669 __func__, &buf_addr, buf_len, period_len,
@@ -700,17 +699,17 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
700 if (direction == DMA_DEV_TO_MEM) { 699 if (direction == DMA_DEV_TO_MEM) {
701 desc->lld.mbr_sa = atchan->per_src_addr; 700 desc->lld.mbr_sa = atchan->per_src_addr;
702 desc->lld.mbr_da = buf_addr + i * period_len; 701 desc->lld.mbr_da = buf_addr + i * period_len;
703 cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; 702 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
704 } else { 703 } else {
705 desc->lld.mbr_sa = buf_addr + i * period_len; 704 desc->lld.mbr_sa = buf_addr + i * period_len;
706 desc->lld.mbr_da = atchan->per_dst_addr; 705 desc->lld.mbr_da = atchan->per_dst_addr;
707 cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; 706 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
708 } 707 }
709 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 708 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
710 | AT_XDMAC_MBR_UBC_NDEN 709 | AT_XDMAC_MBR_UBC_NDEN
711 | AT_XDMAC_MBR_UBC_NSEN 710 | AT_XDMAC_MBR_UBC_NSEN
712 | AT_XDMAC_MBR_UBC_NDE 711 | AT_XDMAC_MBR_UBC_NDE
713 | period_len >> at_xdmac_get_dwidth(cfg); 712 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
714 713
715 dev_dbg(chan2dev(chan), 714 dev_dbg(chan2dev(chan),
716 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", 715 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 455b7a4f1e87..a8ad05291b27 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -626,7 +626,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
626 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); 626 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
627 627
628 /* Check if we have any interrupt from the DMAC */ 628 /* Check if we have any interrupt from the DMAC */
629 if (!status) 629 if (!status || !dw->in_use)
630 return IRQ_NONE; 630 return IRQ_NONE;
631 631
632 /* 632 /*
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 77a6dcf25b98..194ec20c9408 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -230,6 +230,10 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
230 switch (pdev->device) { 230 switch (pdev->device) {
231 case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 231 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
232 case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 232 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
233 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
234 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
235 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
236 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
233 return true; 237 return true;
234 default: 238 default:
235 return false; 239 return false;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8926f271904e..eb410044e1af 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -219,6 +219,9 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
219 219
220 while (dint) { 220 while (dint) {
221 i = __ffs(dint); 221 i = __ffs(dint);
222 /* only handle interrupts belonging to pdma driver */
223 if (i >= pdev->dma_channels)
224 break;
222 dint &= (dint - 1); 225 dint &= (dint - 1);
223 phy = &pdev->phy[i]; 226 phy = &pdev->phy[i];
224 ret = mmp_pdma_chan_handler(irq, phy); 227 ret = mmp_pdma_chan_handler(irq, phy);
@@ -999,6 +1002,9 @@ static int mmp_pdma_probe(struct platform_device *op)
999 struct resource *iores; 1002 struct resource *iores;
1000 int i, ret, irq = 0; 1003 int i, ret, irq = 0;
1001 int dma_channels = 0, irq_num = 0; 1004 int dma_channels = 0, irq_num = 0;
1005 const enum dma_slave_buswidth widths =
1006 DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
1007 DMA_SLAVE_BUSWIDTH_4_BYTES;
1002 1008
1003 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); 1009 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
1004 if (!pdev) 1010 if (!pdev)
@@ -1066,6 +1072,10 @@ static int mmp_pdma_probe(struct platform_device *op)
1066 pdev->device.device_config = mmp_pdma_config; 1072 pdev->device.device_config = mmp_pdma_config;
1067 pdev->device.device_terminate_all = mmp_pdma_terminate_all; 1073 pdev->device.device_terminate_all = mmp_pdma_terminate_all;
1068 pdev->device.copy_align = PDMA_ALIGNMENT; 1074 pdev->device.copy_align = PDMA_ALIGNMENT;
1075 pdev->device.src_addr_widths = widths;
1076 pdev->device.dst_addr_widths = widths;
1077 pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1078 pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1069 1079
1070 if (pdev->dev->coherent_dma_mask) 1080 if (pdev->dev->coherent_dma_mask)
1071 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); 1081 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 70c2fa9963cd..b6f4e1fc9c78 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -110,7 +110,7 @@ struct mmp_tdma_chan {
110 struct tasklet_struct tasklet; 110 struct tasklet_struct tasklet;
111 111
112 struct mmp_tdma_desc *desc_arr; 112 struct mmp_tdma_desc *desc_arr;
113 phys_addr_t desc_arr_phys; 113 dma_addr_t desc_arr_phys;
114 int desc_num; 114 int desc_num;
115 enum dma_transfer_direction dir; 115 enum dma_transfer_direction dir;
116 dma_addr_t dev_addr; 116 dma_addr_t dev_addr;
@@ -166,9 +166,12 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
166static int mmp_tdma_disable_chan(struct dma_chan *chan) 166static int mmp_tdma_disable_chan(struct dma_chan *chan)
167{ 167{
168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
169 u32 tdcr;
169 170
170 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 171 tdcr = readl(tdmac->reg_base + TDCR);
171 tdmac->reg_base + TDCR); 172 tdcr |= TDCR_ABR;
173 tdcr &= ~TDCR_CHANEN;
174 writel(tdcr, tdmac->reg_base + TDCR);
172 175
173 tdmac->status = DMA_COMPLETE; 176 tdmac->status = DMA_COMPLETE;
174 177
@@ -296,12 +299,27 @@ static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
296 return -EAGAIN; 299 return -EAGAIN;
297} 300}
298 301
302static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
303{
304 size_t reg;
305
306 if (tdmac->idx == 0) {
307 reg = __raw_readl(tdmac->reg_base + TDSAR);
308 reg -= tdmac->desc_arr[0].src_addr;
309 } else if (tdmac->idx == 1) {
310 reg = __raw_readl(tdmac->reg_base + TDDAR);
311 reg -= tdmac->desc_arr[0].dst_addr;
312 } else
313 return -EINVAL;
314
315 return reg;
316}
317
299static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) 318static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
300{ 319{
301 struct mmp_tdma_chan *tdmac = dev_id; 320 struct mmp_tdma_chan *tdmac = dev_id;
302 321
303 if (mmp_tdma_clear_chan_irq(tdmac) == 0) { 322 if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
304 tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
305 tasklet_schedule(&tdmac->tasklet); 323 tasklet_schedule(&tdmac->tasklet);
306 return IRQ_HANDLED; 324 return IRQ_HANDLED;
307 } else 325 } else
@@ -343,7 +361,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
343 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 361 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
344 362
345 gpool = tdmac->pool; 363 gpool = tdmac->pool;
346 if (tdmac->desc_arr) 364 if (gpool && tdmac->desc_arr)
347 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, 365 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
348 size); 366 size);
349 tdmac->desc_arr = NULL; 367 tdmac->desc_arr = NULL;
@@ -499,6 +517,7 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
499{ 517{
500 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 518 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
501 519
520 tdmac->pos = mmp_tdma_get_pos(tdmac);
502 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 521 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
503 tdmac->buf_len - tdmac->pos); 522 tdmac->buf_len - tdmac->pos);
504 523
@@ -610,7 +629,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
610 int i, ret; 629 int i, ret;
611 int irq = 0, irq_num = 0; 630 int irq = 0, irq_num = 0;
612 int chan_num = TDMA_CHANNEL_NUM; 631 int chan_num = TDMA_CHANNEL_NUM;
613 struct gen_pool *pool; 632 struct gen_pool *pool = NULL;
614 633
615 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); 634 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
616 if (of_id) 635 if (of_id)
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index d7a33b3ac466..9c914d625906 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -162,9 +162,9 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = {
162 [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, 162 [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
163 [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, 163 [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
164 [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, 164 [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
165 [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 }, 165 [BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 },
166 [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 }, 166 [BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 },
167 [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 }, 167 [BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 },
168 [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, 168 [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
169 [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, 169 [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
170 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, 170 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
@@ -1143,6 +1143,10 @@ static int bam_dma_probe(struct platform_device *pdev)
1143 dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); 1143 dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
1144 1144
1145 /* initialize dmaengine apis */ 1145 /* initialize dmaengine apis */
1146 bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1147 bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1148 bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
1149 bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
1146 bdev->common.device_alloc_chan_resources = bam_alloc_chan; 1150 bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1147 bdev->common.device_free_chan_resources = bam_free_chan; 1151 bdev->common.device_free_chan_resources = bam_free_chan;
1148 bdev->common.device_prep_slave_sg = bam_prep_slave_sg; 1152 bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index b2431aa30033..9f1d4c7dbab8 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -582,15 +582,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
582 } 582 }
583} 583}
584 584
585static void sh_dmae_shutdown(struct platform_device *pdev)
586{
587 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
588 sh_dmae_ctl_stop(shdev);
589}
590
591#ifdef CONFIG_PM 585#ifdef CONFIG_PM
592static int sh_dmae_runtime_suspend(struct device *dev) 586static int sh_dmae_runtime_suspend(struct device *dev)
593{ 587{
588 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
589
590 sh_dmae_ctl_stop(shdev);
594 return 0; 591 return 0;
595} 592}
596 593
@@ -605,6 +602,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
605#ifdef CONFIG_PM_SLEEP 602#ifdef CONFIG_PM_SLEEP
606static int sh_dmae_suspend(struct device *dev) 603static int sh_dmae_suspend(struct device *dev)
607{ 604{
605 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
606
607 sh_dmae_ctl_stop(shdev);
608 return 0; 608 return 0;
609} 609}
610 610
@@ -929,13 +929,12 @@ static int sh_dmae_remove(struct platform_device *pdev)
929} 929}
930 930
931static struct platform_driver sh_dmae_driver = { 931static struct platform_driver sh_dmae_driver = {
932 .driver = { 932 .driver = {
933 .pm = &sh_dmae_pm, 933 .pm = &sh_dmae_pm,
934 .name = SH_DMAE_DRV_NAME, 934 .name = SH_DMAE_DRV_NAME,
935 .of_match_table = sh_dmae_of_match, 935 .of_match_table = sh_dmae_of_match,
936 }, 936 },
937 .remove = sh_dmae_remove, 937 .remove = sh_dmae_remove,
938 .shutdown = sh_dmae_shutdown,
939}; 938};
940 939
941static int __init sh_dmae_init(void) 940static int __init sh_dmae_init(void)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index c5f7b4e9eb6c..69fac068669f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
78 * We have to be cautious here. We have seen BIOSes with DMI pointers 78 * We have to be cautious here. We have seen BIOSes with DMI pointers
79 * pointing to completely the wrong place for example 79 * pointing to completely the wrong place for example
80 */ 80 */
81static void dmi_table(u8 *buf, int len, int num, 81static void dmi_table(u8 *buf, u32 len, int num,
82 void (*decode)(const struct dmi_header *, void *), 82 void (*decode)(const struct dmi_header *, void *),
83 void *private_data) 83 void *private_data)
84{ 84{
@@ -93,12 +93,6 @@ static void dmi_table(u8 *buf, int len, int num,
93 const struct dmi_header *dm = (const struct dmi_header *)data; 93 const struct dmi_header *dm = (const struct dmi_header *)data;
94 94
95 /* 95 /*
96 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
97 */
98 if (dm->type == DMI_ENTRY_END_OF_TABLE)
99 break;
100
101 /*
102 * We want to know the total length (formatted area and 96 * We want to know the total length (formatted area and
103 * strings) before decoding to make sure we won't run off the 97 * strings) before decoding to make sure we won't run off the
104 * table in dmi_decode or dmi_string 98 * table in dmi_decode or dmi_string
@@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num,
108 data++; 102 data++;
109 if (data - buf < len - 1) 103 if (data - buf < len - 1)
110 decode(dm, private_data); 104 decode(dm, private_data);
105
106 /*
107 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
108 */
109 if (dm->type == DMI_ENTRY_END_OF_TABLE)
110 break;
111
111 data += 2; 112 data += 2;
112 i++; 113 i++;
113 } 114 }
114} 115}
115 116
116static phys_addr_t dmi_base; 117static phys_addr_t dmi_base;
117static u16 dmi_len; 118static u32 dmi_len;
118static u16 dmi_num; 119static u16 dmi_num;
119 120
120static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, 121static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 2fe195002021..f07d4a67fa76 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -179,12 +179,12 @@ again:
179 start = desc->phys_addr; 179 start = desc->phys_addr;
180 end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); 180 end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
181 181
182 if ((start + size) > end || (start + size) > max) 182 if (end > max)
183 continue;
184
185 if (end - size > max)
186 end = max; 183 end = max;
187 184
185 if ((start + size) > end)
186 continue;
187
188 if (round_down(end - size, align) < start) 188 if (round_down(end - size, align) < start)
189 continue; 189 continue;
190 190
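
The EFI stub reordering clamps the region's end to the allocation limit before checking whether the request fits, so a memory descriptor straddling max can no longer produce an address above it. With illustrative numbers (not taken from the patch):

/*
 * size = 0x8000, align = 0x1000, max = 0x10000, descriptor [0x2000, 0x14000).
 *
 * Old order: start + size = 0xA000 is below both end and max, and
 * end - size = 0xC000 is not above max, so end is never clamped; the
 * candidate round_down(end - size, align) = 0xC000 allocates
 * 0xC000..0x14000, past max.
 *
 * New order: end is first clamped to max = 0x10000, the request still
 * fits, and round_down(0x10000 - 0x8000, align) = 0x8000 keeps the whole
 * allocation below the limit.
 */
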
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b6b07ff720b..f6d04c7b5115 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -43,9 +43,10 @@
43#include "drm_crtc_internal.h" 43#include "drm_crtc_internal.h"
44#include "drm_internal.h" 44#include "drm_internal.h"
45 45
46static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, 46static struct drm_framebuffer *
47 struct drm_mode_fb_cmd2 *r, 47internal_framebuffer_create(struct drm_device *dev,
48 struct drm_file *file_priv); 48 struct drm_mode_fb_cmd2 *r,
49 struct drm_file *file_priv);
49 50
50/* Avoid boilerplate. I'm tired of typing. */ 51/* Avoid boilerplate. I'm tired of typing. */
51#define DRM_ENUM_NAME_FN(fnname, list) \ 52#define DRM_ENUM_NAME_FN(fnname, list) \
@@ -2908,13 +2909,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2908 */ 2909 */
2909 if (req->flags & DRM_MODE_CURSOR_BO) { 2910 if (req->flags & DRM_MODE_CURSOR_BO) {
2910 if (req->handle) { 2911 if (req->handle) {
2911 fb = add_framebuffer_internal(dev, &fbreq, file_priv); 2912 fb = internal_framebuffer_create(dev, &fbreq, file_priv);
2912 if (IS_ERR(fb)) { 2913 if (IS_ERR(fb)) {
2913 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); 2914 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
2914 return PTR_ERR(fb); 2915 return PTR_ERR(fb);
2915 } 2916 }
2916
2917 drm_framebuffer_reference(fb);
2918 } else { 2917 } else {
2919 fb = NULL; 2918 fb = NULL;
2920 } 2919 }
@@ -3267,9 +3266,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
3267 return 0; 3266 return 0;
3268} 3267}
3269 3268
3270static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, 3269static struct drm_framebuffer *
3271 struct drm_mode_fb_cmd2 *r, 3270internal_framebuffer_create(struct drm_device *dev,
3272 struct drm_file *file_priv) 3271 struct drm_mode_fb_cmd2 *r,
3272 struct drm_file *file_priv)
3273{ 3273{
3274 struct drm_mode_config *config = &dev->mode_config; 3274 struct drm_mode_config *config = &dev->mode_config;
3275 struct drm_framebuffer *fb; 3275 struct drm_framebuffer *fb;
@@ -3301,12 +3301,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3301 return fb; 3301 return fb;
3302 } 3302 }
3303 3303
3304 mutex_lock(&file_priv->fbs_lock);
3305 r->fb_id = fb->base.id;
3306 list_add(&fb->filp_head, &file_priv->fbs);
3307 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
3308 mutex_unlock(&file_priv->fbs_lock);
3309
3310 return fb; 3304 return fb;
3311} 3305}
3312 3306
@@ -3328,15 +3322,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3328int drm_mode_addfb2(struct drm_device *dev, 3322int drm_mode_addfb2(struct drm_device *dev,
3329 void *data, struct drm_file *file_priv) 3323 void *data, struct drm_file *file_priv)
3330{ 3324{
3325 struct drm_mode_fb_cmd2 *r = data;
3331 struct drm_framebuffer *fb; 3326 struct drm_framebuffer *fb;
3332 3327
3333 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3328 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3334 return -EINVAL; 3329 return -EINVAL;
3335 3330
3336 fb = add_framebuffer_internal(dev, data, file_priv); 3331 fb = internal_framebuffer_create(dev, r, file_priv);
3337 if (IS_ERR(fb)) 3332 if (IS_ERR(fb))
3338 return PTR_ERR(fb); 3333 return PTR_ERR(fb);
3339 3334
3335 /* Transfer ownership to the filp for reaping on close */
3336
3337 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
3338 mutex_lock(&file_priv->fbs_lock);
3339 r->fb_id = fb->base.id;
3340 list_add(&fb->filp_head, &file_priv->fbs);
3341 mutex_unlock(&file_priv->fbs_lock);
3342
3340 return 0; 3343 return 0;
3341} 3344}
3342 3345
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9a5b68717ec8..379ab4555756 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
733 struct drm_dp_sideband_msg_tx *txmsg) 733 struct drm_dp_sideband_msg_tx *txmsg)
734{ 734{
735 bool ret; 735 bool ret;
736 mutex_lock(&mgr->qlock); 736
737 /*
738 * All updates to txmsg->state are protected by mgr->qlock, and the two
739 * cases we check here are terminal states. For those the barriers
740 * provided by the wake_up/wait_event pair are enough.
741 */
737 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || 742 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
738 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); 743 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
739 mutex_unlock(&mgr->qlock);
740 return ret; 744 return ret;
741} 745}
742 746
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1363 return 0; 1367 return 0;
1364} 1368}
1365 1369
1366/* must be called holding qlock */
1367static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 1370static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1368{ 1371{
1369 struct drm_dp_sideband_msg_tx *txmsg; 1372 struct drm_dp_sideband_msg_tx *txmsg;
1370 int ret; 1373 int ret;
1371 1374
1375 WARN_ON(!mutex_is_locked(&mgr->qlock));
1376
1372 /* construct a chunk from the first msg in the tx_msg queue */ 1377 /* construct a chunk from the first msg in the tx_msg queue */
1373 if (list_empty(&mgr->tx_msg_downq)) { 1378 if (list_empty(&mgr->tx_msg_downq)) {
1374 mgr->tx_down_in_progress = false; 1379 mgr->tx_down_in_progress = false;
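The drm_dp_mst_topology.c change drops mgr->qlock around the state test in check_txmsg_state() because the two states compared against are terminal, so the ordering supplied by the wake_up()/wait_event() pair is sufficient, and it adds a WARN_ON to document that process_single_down_tx_qlock() still expects its caller to hold the lock. A minimal sketch of the waiter/waker pairing this relies on (assuming the usual tx_waitq wait queue in the manager):

	/* waker: state updates are still made under qlock */
	mutex_lock(&mgr->qlock);
	txmsg->state = DRM_DP_SIDEBAND_TX_RX;            /* terminal state */
	mutex_unlock(&mgr->qlock);
	wake_up(&mgr->tx_waitq);

	/* waiter: the condition may be evaluated locklessly because it only
	 * tests for terminal states, and wait_event() re-checks it after the
	 * wake-up */
	wait_event(mgr->tx_waitq,
		   txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
		   txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);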
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04a209e2b66d..1134526286c8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -91,29 +91,29 @@
91 */ 91 */
92 92
93static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 93static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
94 unsigned long size, 94 u64 size,
95 unsigned alignment, 95 unsigned alignment,
96 unsigned long color, 96 unsigned long color,
97 enum drm_mm_search_flags flags); 97 enum drm_mm_search_flags flags);
98static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 98static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
99 unsigned long size, 99 u64 size,
100 unsigned alignment, 100 unsigned alignment,
101 unsigned long color, 101 unsigned long color,
102 unsigned long start, 102 u64 start,
103 unsigned long end, 103 u64 end,
104 enum drm_mm_search_flags flags); 104 enum drm_mm_search_flags flags);
105 105
106static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 106static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
107 struct drm_mm_node *node, 107 struct drm_mm_node *node,
108 unsigned long size, unsigned alignment, 108 u64 size, unsigned alignment,
109 unsigned long color, 109 unsigned long color,
110 enum drm_mm_allocator_flags flags) 110 enum drm_mm_allocator_flags flags)
111{ 111{
112 struct drm_mm *mm = hole_node->mm; 112 struct drm_mm *mm = hole_node->mm;
113 unsigned long hole_start = drm_mm_hole_node_start(hole_node); 113 u64 hole_start = drm_mm_hole_node_start(hole_node);
114 unsigned long hole_end = drm_mm_hole_node_end(hole_node); 114 u64 hole_end = drm_mm_hole_node_end(hole_node);
115 unsigned long adj_start = hole_start; 115 u64 adj_start = hole_start;
116 unsigned long adj_end = hole_end; 116 u64 adj_end = hole_end;
117 117
118 BUG_ON(node->allocated); 118 BUG_ON(node->allocated);
119 119
@@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
124 adj_start = adj_end - size; 124 adj_start = adj_end - size;
125 125
126 if (alignment) { 126 if (alignment) {
127 unsigned tmp = adj_start % alignment; 127 u64 tmp = adj_start;
128 if (tmp) { 128 unsigned rem;
129
130 rem = do_div(tmp, alignment);
131 if (rem) {
129 if (flags & DRM_MM_CREATE_TOP) 132 if (flags & DRM_MM_CREATE_TOP)
130 adj_start -= tmp; 133 adj_start -= rem;
131 else 134 else
132 adj_start += alignment - tmp; 135 adj_start += alignment - rem;
133 } 136 }
134 } 137 }
135 138
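The drm_mm.c conversion switches sizes and offsets from unsigned long to u64 so the allocator keeps working for ranges above 4 GiB on 32-bit kernels. A plain 64-bit `%` would pull in slow libgcc division helpers on those builds, so the alignment remainder is computed with do_div(), which divides a u64 in place and returns the 32-bit remainder. A small sketch of those semantics (align_up is a hypothetical helper, not part of drm_mm):

	#include <asm/div64.h>

	static u64 align_up(u64 start, unsigned int alignment)
	{
		u64 tmp = start;
		unsigned int rem;

		rem = do_div(tmp, alignment);   /* tmp becomes start / alignment */
		if (rem)                        /* rem is start % alignment */
			start += alignment - rem;
		return start;
	}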
@@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
176int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) 179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
177{ 180{
178 struct drm_mm_node *hole; 181 struct drm_mm_node *hole;
179 unsigned long end = node->start + node->size; 182 u64 end = node->start + node->size;
180 unsigned long hole_start; 183 u64 hole_start;
181 unsigned long hole_end; 184 u64 hole_end;
182 185
183 BUG_ON(node == NULL); 186 BUG_ON(node == NULL);
184 187
@@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
227 * 0 on success, -ENOSPC if there's no suitable hole. 230 * 0 on success, -ENOSPC if there's no suitable hole.
228 */ 231 */
229int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, 232int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
230 unsigned long size, unsigned alignment, 233 u64 size, unsigned alignment,
231 unsigned long color, 234 unsigned long color,
232 enum drm_mm_search_flags sflags, 235 enum drm_mm_search_flags sflags,
233 enum drm_mm_allocator_flags aflags) 236 enum drm_mm_allocator_flags aflags)
@@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic);
246 249
247static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, 250static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
248 struct drm_mm_node *node, 251 struct drm_mm_node *node,
249 unsigned long size, unsigned alignment, 252 u64 size, unsigned alignment,
250 unsigned long color, 253 unsigned long color,
251 unsigned long start, unsigned long end, 254 u64 start, u64 end,
252 enum drm_mm_allocator_flags flags) 255 enum drm_mm_allocator_flags flags)
253{ 256{
254 struct drm_mm *mm = hole_node->mm; 257 struct drm_mm *mm = hole_node->mm;
255 unsigned long hole_start = drm_mm_hole_node_start(hole_node); 258 u64 hole_start = drm_mm_hole_node_start(hole_node);
256 unsigned long hole_end = drm_mm_hole_node_end(hole_node); 259 u64 hole_end = drm_mm_hole_node_end(hole_node);
257 unsigned long adj_start = hole_start; 260 u64 adj_start = hole_start;
258 unsigned long adj_end = hole_end; 261 u64 adj_end = hole_end;
259 262
260 BUG_ON(!hole_node->hole_follows || node->allocated); 263 BUG_ON(!hole_node->hole_follows || node->allocated);
261 264
@@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
271 mm->color_adjust(hole_node, color, &adj_start, &adj_end); 274 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
272 275
273 if (alignment) { 276 if (alignment) {
274 unsigned tmp = adj_start % alignment; 277 u64 tmp = adj_start;
275 if (tmp) { 278 unsigned rem;
279
280 rem = do_div(tmp, alignment);
281 if (rem) {
276 if (flags & DRM_MM_CREATE_TOP) 282 if (flags & DRM_MM_CREATE_TOP)
277 adj_start -= tmp; 283 adj_start -= rem;
278 else 284 else
279 adj_start += alignment - tmp; 285 adj_start += alignment - rem;
280 } 286 }
281 } 287 }
282 288
@@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
324 * 0 on success, -ENOSPC if there's no suitable hole. 330 * 0 on success, -ENOSPC if there's no suitable hole.
325 */ 331 */
326int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, 332int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
327 unsigned long size, unsigned alignment, 333 u64 size, unsigned alignment,
328 unsigned long color, 334 unsigned long color,
329 unsigned long start, unsigned long end, 335 u64 start, u64 end,
330 enum drm_mm_search_flags sflags, 336 enum drm_mm_search_flags sflags,
331 enum drm_mm_allocator_flags aflags) 337 enum drm_mm_allocator_flags aflags)
332{ 338{
@@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node)
387} 393}
388EXPORT_SYMBOL(drm_mm_remove_node); 394EXPORT_SYMBOL(drm_mm_remove_node);
389 395
390static int check_free_hole(unsigned long start, unsigned long end, 396static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
391 unsigned long size, unsigned alignment)
392{ 397{
393 if (end - start < size) 398 if (end - start < size)
394 return 0; 399 return 0;
395 400
396 if (alignment) { 401 if (alignment) {
397 unsigned tmp = start % alignment; 402 u64 tmp = start;
398 if (tmp) 403 unsigned rem;
399 start += alignment - tmp; 404
405 rem = do_div(tmp, alignment);
406 if (rem)
407 start += alignment - rem;
400 } 408 }
401 409
402 return end >= start + size; 410 return end >= start + size;
403} 411}
404 412
405static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 413static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
406 unsigned long size, 414 u64 size,
407 unsigned alignment, 415 unsigned alignment,
408 unsigned long color, 416 unsigned long color,
409 enum drm_mm_search_flags flags) 417 enum drm_mm_search_flags flags)
410{ 418{
411 struct drm_mm_node *entry; 419 struct drm_mm_node *entry;
412 struct drm_mm_node *best; 420 struct drm_mm_node *best;
413 unsigned long adj_start; 421 u64 adj_start;
414 unsigned long adj_end; 422 u64 adj_end;
415 unsigned long best_size; 423 u64 best_size;
416 424
417 BUG_ON(mm->scanned_blocks); 425 BUG_ON(mm->scanned_blocks);
418 426
@@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
421 429
422 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, 430 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
423 flags & DRM_MM_SEARCH_BELOW) { 431 flags & DRM_MM_SEARCH_BELOW) {
424 unsigned long hole_size = adj_end - adj_start; 432 u64 hole_size = adj_end - adj_start;
425 433
426 if (mm->color_adjust) { 434 if (mm->color_adjust) {
427 mm->color_adjust(entry, color, &adj_start, &adj_end); 435 mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
445} 453}
446 454
447static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 455static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
448 unsigned long size, 456 u64 size,
449 unsigned alignment, 457 unsigned alignment,
450 unsigned long color, 458 unsigned long color,
451 unsigned long start, 459 u64 start,
452 unsigned long end, 460 u64 end,
453 enum drm_mm_search_flags flags) 461 enum drm_mm_search_flags flags)
454{ 462{
455 struct drm_mm_node *entry; 463 struct drm_mm_node *entry;
456 struct drm_mm_node *best; 464 struct drm_mm_node *best;
457 unsigned long adj_start; 465 u64 adj_start;
458 unsigned long adj_end; 466 u64 adj_end;
459 unsigned long best_size; 467 u64 best_size;
460 468
461 BUG_ON(mm->scanned_blocks); 469 BUG_ON(mm->scanned_blocks);
462 470
@@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
465 473
466 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, 474 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
467 flags & DRM_MM_SEARCH_BELOW) { 475 flags & DRM_MM_SEARCH_BELOW) {
468 unsigned long hole_size = adj_end - adj_start; 476 u64 hole_size = adj_end - adj_start;
469 477
470 if (adj_start < start) 478 if (adj_start < start)
471 adj_start = start; 479 adj_start = start;
@@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
561 * adding/removing nodes to/from the scan list are allowed. 569 * adding/removing nodes to/from the scan list are allowed.
562 */ 570 */
563void drm_mm_init_scan(struct drm_mm *mm, 571void drm_mm_init_scan(struct drm_mm *mm,
564 unsigned long size, 572 u64 size,
565 unsigned alignment, 573 unsigned alignment,
566 unsigned long color) 574 unsigned long color)
567{ 575{
@@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan);
594 * adding/removing nodes to/from the scan list are allowed. 602 * adding/removing nodes to/from the scan list are allowed.
595 */ 603 */
596void drm_mm_init_scan_with_range(struct drm_mm *mm, 604void drm_mm_init_scan_with_range(struct drm_mm *mm,
597 unsigned long size, 605 u64 size,
598 unsigned alignment, 606 unsigned alignment,
599 unsigned long color, 607 unsigned long color,
600 unsigned long start, 608 u64 start,
601 unsigned long end) 609 u64 end)
602{ 610{
603 mm->scan_color = color; 611 mm->scan_color = color;
604 mm->scan_alignment = alignment; 612 mm->scan_alignment = alignment;
@@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node)
627{ 635{
628 struct drm_mm *mm = node->mm; 636 struct drm_mm *mm = node->mm;
629 struct drm_mm_node *prev_node; 637 struct drm_mm_node *prev_node;
630 unsigned long hole_start, hole_end; 638 u64 hole_start, hole_end;
631 unsigned long adj_start, adj_end; 639 u64 adj_start, adj_end;
632 640
633 mm->scanned_blocks++; 641 mm->scanned_blocks++;
634 642
@@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean);
731 * 739 *
732 * Note that @mm must be cleared to 0 before calling this function. 740 * Note that @mm must be cleared to 0 before calling this function.
733 */ 741 */
734void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 742void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
735{ 743{
736 INIT_LIST_HEAD(&mm->hole_stack); 744 INIT_LIST_HEAD(&mm->hole_stack);
737 mm->scanned_blocks = 0; 745 mm->scanned_blocks = 0;
@@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm)
766} 774}
767EXPORT_SYMBOL(drm_mm_takedown); 775EXPORT_SYMBOL(drm_mm_takedown);
768 776
769static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, 777static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
770 const char *prefix) 778 const char *prefix)
771{ 779{
772 unsigned long hole_start, hole_end, hole_size; 780 u64 hole_start, hole_end, hole_size;
773 781
774 if (entry->hole_follows) { 782 if (entry->hole_follows) {
775 hole_start = drm_mm_hole_node_start(entry); 783 hole_start = drm_mm_hole_node_start(entry);
776 hole_end = drm_mm_hole_node_end(entry); 784 hole_end = drm_mm_hole_node_end(entry);
777 hole_size = hole_end - hole_start; 785 hole_size = hole_end - hole_start;
778 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", 786 pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
779 prefix, hole_start, hole_end, 787 hole_end, hole_size);
780 hole_size);
781 return hole_size; 788 return hole_size;
782 } 789 }
783 790
@@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
792void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) 799void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
793{ 800{
794 struct drm_mm_node *entry; 801 struct drm_mm_node *entry;
795 unsigned long total_used = 0, total_free = 0, total = 0; 802 u64 total_used = 0, total_free = 0, total = 0;
796 803
797 total_free += drm_mm_debug_hole(&mm->head_node, prefix); 804 total_free += drm_mm_debug_hole(&mm->head_node, prefix);
798 805
799 drm_mm_for_each_node(entry, mm) { 806 drm_mm_for_each_node(entry, mm) {
800 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", 807 pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
801 prefix, entry->start, entry->start + entry->size, 808 entry->start + entry->size, entry->size);
802 entry->size);
803 total_used += entry->size; 809 total_used += entry->size;
804 total_free += drm_mm_debug_hole(entry, prefix); 810 total_free += drm_mm_debug_hole(entry, prefix);
805 } 811 }
806 total = total_free + total_used; 812 total = total_free + total_used;
807 813
808 printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total, 814 pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
809 total_used, total_free); 815 total_used, total_free);
810} 816}
811EXPORT_SYMBOL(drm_mm_debug_table); 817EXPORT_SYMBOL(drm_mm_debug_table);
812 818
813#if defined(CONFIG_DEBUG_FS) 819#if defined(CONFIG_DEBUG_FS)
814static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) 820static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
815{ 821{
816 unsigned long hole_start, hole_end, hole_size; 822 u64 hole_start, hole_end, hole_size;
817 823
818 if (entry->hole_follows) { 824 if (entry->hole_follows) {
819 hole_start = drm_mm_hole_node_start(entry); 825 hole_start = drm_mm_hole_node_start(entry);
820 hole_end = drm_mm_hole_node_end(entry); 826 hole_end = drm_mm_hole_node_end(entry);
821 hole_size = hole_end - hole_start; 827 hole_size = hole_end - hole_start;
822 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", 828 seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start,
823 hole_start, hole_end, hole_size); 829 hole_end, hole_size);
824 return hole_size; 830 return hole_size;
825 } 831 }
826 832
@@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
835int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 841int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
836{ 842{
837 struct drm_mm_node *entry; 843 struct drm_mm_node *entry;
838 unsigned long total_used = 0, total_free = 0, total = 0; 844 u64 total_used = 0, total_free = 0, total = 0;
839 845
840 total_free += drm_mm_dump_hole(m, &mm->head_node); 846 total_free += drm_mm_dump_hole(m, &mm->head_node);
841 847
842 drm_mm_for_each_node(entry, mm) { 848 drm_mm_for_each_node(entry, mm) {
843 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", 849 seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start,
844 entry->start, entry->start + entry->size, 850 entry->start + entry->size, entry->size);
845 entry->size);
846 total_used += entry->size; 851 total_used += entry->size;
847 total_free += drm_mm_dump_hole(m, entry); 852 total_free += drm_mm_dump_hole(m, entry);
848 } 853 }
849 total = total_free + total_used; 854 total = total_free + total_used;
850 855
851 seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free); 856 seq_printf(m, "total: %llu, used %llu free %llu\n", total,
857 total_used, total_free);
852 return 0; 858 return 0;
853} 859}
854EXPORT_SYMBOL(drm_mm_dump_table); 860EXPORT_SYMBOL(drm_mm_dump_table);
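With the drm_mm fields now u64, the debug output above also moves to the %llx/%llu format family (and from printk(KERN_DEBUG ...) to pr_debug()); keeping %lx with a u64 argument would warn on 64-bit builds and print garbage on 32-bit ones. Illustrative use, assuming a struct drm_mm_node *node in scope:

	u64 start = node->start, size = node->size;

	/* u64 always takes the "ll" length modifier, regardless of word size */
	pr_debug("node %#llx-%#llx: %llu bytes\n", start, start + size, size);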
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 96e811fe24ca..e8b18e542da4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
152 seq_puts(m, " (pp"); 152 seq_puts(m, " (pp");
153 else 153 else
154 seq_puts(m, " (g"); 154 seq_puts(m, " (g");
155 seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)", 155 seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
156 vma->node.start, vma->node.size, 156 vma->node.start, vma->node.size,
157 vma->ggtt_view.type); 157 vma->ggtt_view.type);
158 } 158 }
159 if (obj->stolen) 159 if (obj->stolen)
160 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 160 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
161 if (obj->pin_mappable || obj->fault_mappable) { 161 if (obj->pin_mappable || obj->fault_mappable) {
162 char s[3], *t = s; 162 char s[3], *t = s;
163 if (obj->pin_mappable) 163 if (obj->pin_mappable)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8039cec71fc2..cc6ea53d2b81 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev)
622 return 0; 622 return 0;
623} 623}
624 624
625static int i915_drm_suspend_late(struct drm_device *drm_dev) 625static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
626{ 626{
627 struct drm_i915_private *dev_priv = drm_dev->dev_private; 627 struct drm_i915_private *dev_priv = drm_dev->dev_private;
628 int ret; 628 int ret;
@@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev)
636 } 636 }
637 637
638 pci_disable_device(drm_dev->pdev); 638 pci_disable_device(drm_dev->pdev);
639 pci_set_power_state(drm_dev->pdev, PCI_D3hot); 639 /*
640 * During hibernation on some GEN4 platforms the BIOS may try to access
641 * the device even though it's already in D3 and hang the machine. So
642 * leave the device in D0 on those platforms and hope the BIOS will
643 * power down the device properly. Platforms where this was seen:
644 * Lenovo Thinkpad X301, X61s
645 */
646 if (!(hibernation &&
647 drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
648 INTEL_INFO(dev_priv)->gen == 4))
649 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
640 650
641 return 0; 651 return 0;
642} 652}
@@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
662 if (error) 672 if (error)
663 return error; 673 return error;
664 674
665 return i915_drm_suspend_late(dev); 675 return i915_drm_suspend_late(dev, false);
666} 676}
667 677
668static int i915_drm_resume(struct drm_device *dev) 678static int i915_drm_resume(struct drm_device *dev)
@@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev)
950 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 960 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
951 return 0; 961 return 0;
952 962
953 return i915_drm_suspend_late(drm_dev); 963 return i915_drm_suspend_late(drm_dev, false);
964}
965
966static int i915_pm_poweroff_late(struct device *dev)
967{
968 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
969
970 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
971 return 0;
972
973 return i915_drm_suspend_late(drm_dev, true);
954} 974}
955 975
956static int i915_pm_resume_early(struct device *dev) 976static int i915_pm_resume_early(struct device *dev)
@@ -1520,7 +1540,7 @@ static const struct dev_pm_ops i915_pm_ops = {
1520 .thaw_early = i915_pm_resume_early, 1540 .thaw_early = i915_pm_resume_early,
1521 .thaw = i915_pm_resume, 1541 .thaw = i915_pm_resume,
1522 .poweroff = i915_pm_suspend, 1542 .poweroff = i915_pm_suspend,
1523 .poweroff_late = i915_pm_suspend_late, 1543 .poweroff_late = i915_pm_poweroff_late,
1524 .restore_early = i915_pm_resume_early, 1544 .restore_early = i915_pm_resume_early,
1525 .restore = i915_pm_resume, 1545 .restore = i915_pm_resume,
1526 1546
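In the i915_drv.c hunks, i915_drm_suspend_late() grows a hibernation flag so the hibernate-specific poweroff_late callback can be told apart from ordinary suspend; on the Lenovo GEN4 machines named in the comment the device is deliberately left in D0 during hibernation, because the BIOS may otherwise hang accessing a device already in D3hot. A minimal sketch of how the two late callbacks end up wired (field names are from struct dev_pm_ops; the full i915 table sets more hooks):

	static const struct dev_pm_ops example_pm_ops = {
		.suspend_late  = i915_pm_suspend_late,   /* suspend:   D3hot as usual         */
		.poweroff_late = i915_pm_poweroff_late,  /* hibernate: may stay in D0 (quirk) */
	};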
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5daad5f75fb..5b205863b659 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2936,9 +2936,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2936 req = obj->last_read_req; 2936 req = obj->last_read_req;
2937 2937
2938 /* Do this after OLR check to make sure we make forward progress polling 2938 /* Do this after OLR check to make sure we make forward progress polling
2939 * on this IOCTL with a timeout <=0 (like busy ioctl) 2939 * on this IOCTL with a timeout == 0 (like busy ioctl)
2940 */ 2940 */
2941 if (args->timeout_ns <= 0) { 2941 if (args->timeout_ns == 0) {
2942 ret = -ETIME; 2942 ret = -ETIME;
2943 goto out; 2943 goto out;
2944 } 2944 }
@@ -2948,7 +2948,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2948 i915_gem_request_reference(req); 2948 i915_gem_request_reference(req);
2949 mutex_unlock(&dev->struct_mutex); 2949 mutex_unlock(&dev->struct_mutex);
2950 2950
2951 ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, 2951 ret = __i915_wait_request(req, reset_counter, true,
2952 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2952 file->driver_priv); 2953 file->driver_priv);
2953 mutex_lock(&dev->struct_mutex); 2954 mutex_lock(&dev->struct_mutex);
2954 i915_gem_request_unreference(req); 2955 i915_gem_request_unreference(req);
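The i915_gem_wait_ioctl() change tightens the timeout convention: timeout_ns == 0 keeps the busy-poll behaviour of returning -ETIME immediately, while a negative timeout_ns now means an unbounded wait, implemented by passing a NULL timeout pointer to __i915_wait_request() instead of a pointer to the negative value. Summarised as a sketch (error-path cleanup omitted):

	if (args->timeout_ns == 0)
		return -ETIME;          /* pure poll: the request is not done yet */

	/* > 0: bounded wait that updates the remaining time; < 0: wait forever */
	ret = __i915_wait_request(req, reset_counter, true,
				  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
				  file->driver_priv);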
@@ -4792,6 +4793,9 @@ i915_gem_init_hw(struct drm_device *dev)
4792 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4793 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4793 return -EIO; 4794 return -EIO;
4794 4795
4796 /* Double layer security blanket, see i915_gem_init() */
4797 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4798
4795 if (dev_priv->ellc_size) 4799 if (dev_priv->ellc_size)
4796 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4800 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4797 4801
@@ -4824,7 +4828,7 @@ i915_gem_init_hw(struct drm_device *dev)
4824 for_each_ring(ring, dev_priv, i) { 4828 for_each_ring(ring, dev_priv, i) {
4825 ret = ring->init_hw(ring); 4829 ret = ring->init_hw(ring);
4826 if (ret) 4830 if (ret)
4827 return ret; 4831 goto out;
4828 } 4832 }
4829 4833
4830 for (i = 0; i < NUM_L3_SLICES(dev); i++) 4834 for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4841,9 +4845,11 @@ i915_gem_init_hw(struct drm_device *dev)
4841 DRM_ERROR("Context enable failed %d\n", ret); 4845 DRM_ERROR("Context enable failed %d\n", ret);
4842 i915_gem_cleanup_ringbuffer(dev); 4846 i915_gem_cleanup_ringbuffer(dev);
4843 4847
4844 return ret; 4848 goto out;
4845 } 4849 }
4846 4850
4851out:
4852 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4847 return ret; 4853 return ret;
4848} 4854}
4849 4855
@@ -4877,6 +4883,14 @@ int i915_gem_init(struct drm_device *dev)
4877 dev_priv->gt.stop_ring = intel_logical_ring_stop; 4883 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4878 } 4884 }
4879 4885
4886 /* This is just a security blanket to placate dragons.
4887 * On some systems, we very sporadically observe that the first TLBs
4888 * used by the CS may be stale, despite us poking the TLB reset. If
4889 * we hold the forcewake during initialisation these problems
4890 * just magically go away.
4891 */
4892 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4893
4880 ret = i915_gem_init_userptr(dev); 4894 ret = i915_gem_init_userptr(dev);
4881 if (ret) 4895 if (ret)
4882 goto out_unlock; 4896 goto out_unlock;
@@ -4903,6 +4917,7 @@ int i915_gem_init(struct drm_device *dev)
4903 } 4917 }
4904 4918
4905out_unlock: 4919out_unlock:
4920 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4906 mutex_unlock(&dev->struct_mutex); 4921 mutex_unlock(&dev->struct_mutex);
4907 4922
4908 return ret; 4923 return ret;
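The i915_gem.c init hunks wrap both i915_gem_init() and i915_gem_init_hw() in an explicit forcewake reference (the "security blanket" comments) and convert the early returns in init_hw to goto out, so the reference is always dropped on error paths as well. The general shape, as a hedged sketch with placeholder steps:

	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = first_init_step(dev_priv);     /* placeholder */
	if (ret)
		goto out;

	ret = second_init_step(dev_priv);    /* placeholder */

	out:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		return ret;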
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 746f77fb57a3..dccdc8aad2e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1145 1145
1146 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); 1146 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
1147 1147
1148 DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", 1148 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
1149 ppgtt->node.size >> 20, 1149 ppgtt->node.size >> 20,
1150 ppgtt->node.start / PAGE_SIZE); 1150 ppgtt->node.start / PAGE_SIZE);
1151 1151
@@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
1713 1713
1714static void i915_gtt_color_adjust(struct drm_mm_node *node, 1714static void i915_gtt_color_adjust(struct drm_mm_node *node,
1715 unsigned long color, 1715 unsigned long color,
1716 unsigned long *start, 1716 u64 *start,
1717 unsigned long *end) 1717 u64 *end)
1718{ 1718{
1719 if (node->color != color) 1719 if (node->color != color)
1720 *start += 4096; 1720 *start += 4096;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e730789b53b7..9943c20a741d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9716,7 +9716,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
9716 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 9716 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9717 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9717 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9718 9718
9719 WARN_ON(!in_irq()); 9719 WARN_ON(!in_interrupt());
9720 9720
9721 if (crtc == NULL) 9721 if (crtc == NULL)
9722 return; 9722 return;
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04e248dd2259..54daa66c6970 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
282 return ret; 282 return ret;
283} 283}
284 284
285static bool
286__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
287 enum pipe pipe)
288{
289 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
290 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
291
292 return !intel_crtc->cpu_fifo_underrun_disabled;
293}
294
295/** 285/**
296 * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state 286 * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
297 * @dev_priv: i915 device instance 287 * @dev_priv: i915 device instance
@@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
352void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, 342void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
353 enum pipe pipe) 343 enum pipe pipe)
354{ 344{
345 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
346
347 /* We may be called too early in init, thanks BIOS! */
348 if (crtc == NULL)
349 return;
350
355 /* GMCH can't disable fifo underruns, filter them. */ 351 /* GMCH can't disable fifo underruns, filter them. */
356 if (HAS_GMCH_DISPLAY(dev_priv->dev) && 352 if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
357 !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe)) 353 to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
358 return; 354 return;
359 355
360 if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) 356 if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c47a3baa53d5..4e8fb891d4ea 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1048 1048
1049 /* We need to init first for ECOBUS access and then 1049 /* We need to init first for ECOBUS access and then
1050 * determine later if we want to reinit, in case of MT access is 1050 * determine later if we want to reinit, in case of MT access is
1051 * not working 1051 * not working. In this stage we don't know which flavour this
1052 * ivb is, so it is better to reset also the gen6 fw registers
1053 * before the ecobus check.
1052 */ 1054 */
1055
1056 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1057 __raw_posting_read(dev_priv, ECOBUS);
1058
1053 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1059 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1054 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1060 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1055 1061
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 121d30ca2d44..87fe8ed92ebe 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
70 118800000, { 0x091c, 0x091c, 0x06dc }, 70 118800000, { 0x091c, 0x091c, 0x06dc },
71 }, { 71 }, {
72 216000000, { 0x06dc, 0x0b5c, 0x091c }, 72 216000000, { 0x06dc, 0x0b5c, 0x091c },
73 } 73 }, {
74 ~0UL, { 0x0000, 0x0000, 0x0000 },
75 },
74}; 76};
75 77
76static const struct dw_hdmi_sym_term imx_sym_term[] = { 78static const struct dw_hdmi_sym_term imx_sym_term[] = {
@@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
136 .destroy = drm_encoder_cleanup, 138 .destroy = drm_encoder_cleanup,
137}; 139};
138 140
141static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
142 struct drm_display_mode *mode)
143{
144 if (mode->clock < 13500)
145 return MODE_CLOCK_LOW;
146 if (mode->clock > 266000)
147 return MODE_CLOCK_HIGH;
148
149 return MODE_OK;
150}
151
152static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
153 struct drm_display_mode *mode)
154{
155 if (mode->clock < 13500)
156 return MODE_CLOCK_LOW;
157 if (mode->clock > 270000)
158 return MODE_CLOCK_HIGH;
159
160 return MODE_OK;
161}
162
139static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { 163static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
140 .mpll_cfg = imx_mpll_cfg, 164 .mpll_cfg = imx_mpll_cfg,
141 .cur_ctr = imx_cur_ctr, 165 .cur_ctr = imx_cur_ctr,
142 .sym_term = imx_sym_term, 166 .sym_term = imx_sym_term,
143 .dev_type = IMX6Q_HDMI, 167 .dev_type = IMX6Q_HDMI,
168 .mode_valid = imx6q_hdmi_mode_valid,
144}; 169};
145 170
146static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { 171static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
@@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
148 .cur_ctr = imx_cur_ctr, 173 .cur_ctr = imx_cur_ctr,
149 .sym_term = imx_sym_term, 174 .sym_term = imx_sym_term,
150 .dev_type = IMX6DL_HDMI, 175 .dev_type = IMX6DL_HDMI,
176 .mode_valid = imx6dl_hdmi_mode_valid,
151}; 177};
152 178
153static const struct of_device_id dw_hdmi_imx_dt_ids[] = { 179static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
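The dw_hdmi-imx.c change adds per-SoC mode_valid() hooks so modes whose pixel clock the PHY configuration cannot drive are filtered out early (13.5 MHz up to 266 MHz on i.MX6Q, up to 270 MHz on i.MX6DL), and it terminates imx_cur_ctr with a ~0UL sentinel row so a table walk always has a stopping point even above the last real entry. The kind of lookup the sentinel protects, sketched with an assumed field name (mpixelclock) rather than the exact dw-hdmi code:

	/* stop at the first entry that can drive the clock, or at the ~0UL sentinel */
	for (i = 0; cur_ctr[i].mpixelclock != ~0UL; i++)
		if (pixel_clock <= cur_ctr[i].mpixelclock)
			break;
	apply_current_control(&cur_ctr[i]);   /* hypothetical consumer */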
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 1b86aac0b341..2d6dc94e1e64 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
163{ 163{
164 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); 164 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
165 struct imx_ldb *ldb = imx_ldb_ch->ldb; 165 struct imx_ldb *ldb = imx_ldb_ch->ldb;
166 struct drm_display_mode *mode = &encoder->crtc->hwmode;
167 u32 pixel_fmt; 166 u32 pixel_fmt;
168 unsigned long serial_clk;
169 unsigned long di_clk = mode->clock * 1000;
170 int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
171
172 if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
173 /* dual channel LVDS mode */
174 serial_clk = 3500UL * mode->clock;
175 imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
176 imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
177 } else {
178 serial_clk = 7000UL * mode->clock;
179 imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
180 di_clk);
181 }
182 167
183 switch (imx_ldb_ch->chno) { 168 switch (imx_ldb_ch->chno) {
184 case 0: 169 case 0:
@@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
247 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); 232 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
248 struct imx_ldb *ldb = imx_ldb_ch->ldb; 233 struct imx_ldb *ldb = imx_ldb_ch->ldb;
249 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; 234 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
235 unsigned long serial_clk;
236 unsigned long di_clk = mode->clock * 1000;
237 int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
250 238
251 if (mode->clock > 170000) { 239 if (mode->clock > 170000) {
252 dev_warn(ldb->dev, 240 dev_warn(ldb->dev,
@@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
257 "%s: mode exceeds 85 MHz pixel clock\n", __func__); 245 "%s: mode exceeds 85 MHz pixel clock\n", __func__);
258 } 246 }
259 247
248 if (dual) {
249 serial_clk = 3500UL * mode->clock;
250 imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
251 imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
252 } else {
253 serial_clk = 7000UL * mode->clock;
254 imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
255 di_clk);
256 }
257
260 /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ 258 /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
261 if (imx_ldb_ch == &ldb->channel[0]) { 259 if (imx_ldb_ch == &ldb->channel[0]) {
262 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 260 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
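The imx-ldb.c hunks move the LVDS clock setup out of the encoder ->prepare hook and into ->mode_set, where the mode being programmed is actually in hand; the serial (bit) clock is 7x the pixel clock for a single LVDS channel and 3.5x per channel when dual (split) mode carries alternating pixels. A worked example for a 148.5 MHz 1080p60 pixel clock (mode->clock is in kHz, so the products below are in Hz):

	unsigned long di_clk        = 148500 * 1000UL;  /* 148.5 MHz display interface clock */
	unsigned long dual_serial   = 3500UL * 148500;  /* 519.75 MHz per channel            */
	unsigned long single_serial = 7000UL * 148500;  /* 1.0395 GHz, far beyond what one   */
	                                                /* channel can do, hence the 85 MHz  */
	                                                /* single-channel warning above      */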
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 5e83e007080f..900dda6a8e71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
236 } 236 }
237 237
238 panel_node = of_parse_phandle(np, "fsl,panel", 0); 238 panel_node = of_parse_phandle(np, "fsl,panel", 0);
239 if (panel_node) 239 if (panel_node) {
240 imxpd->panel = of_drm_find_panel(panel_node); 240 imxpd->panel = of_drm_find_panel(panel_node);
241 if (!imxpd->panel)
242 return -EPROBE_DEFER;
243 }
241 244
242 imxpd->dev = dev; 245 imxpd->dev = dev;
243 246
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 8edd531cb621..7369ee7f0c55 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
32void mdp4_irq_preinstall(struct msm_kms *kms) 32void mdp4_irq_preinstall(struct msm_kms *kms)
33{ 33{
34 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 34 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
35 mdp4_enable(mdp4_kms);
35 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); 36 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
37 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
38 mdp4_disable(mdp4_kms);
36} 39}
37 40
38int mdp4_irq_postinstall(struct msm_kms *kms) 41int mdp4_irq_postinstall(struct msm_kms *kms)
@@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms)
53void mdp4_irq_uninstall(struct msm_kms *kms) 56void mdp4_irq_uninstall(struct msm_kms *kms)
54{ 57{
55 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 58 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
59 mdp4_enable(mdp4_kms);
56 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); 60 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
61 mdp4_disable(mdp4_kms);
57} 62}
58 63
59irqreturn_t mdp4_irq(struct msm_kms *kms) 64irqreturn_t mdp4_irq(struct msm_kms *kms)
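The mdp4_irq.c change (and the matching mdp5_irq.c hunk further down) brackets the preinstall/uninstall register writes with mdp4_enable()/mdp4_disable(), since those hooks can run while the block's clocks are still gated and MDP registers are only safely accessible with the clocks on; preinstall now also masks all interrupts explicitly. The bracket pattern in isolation:

	mdp4_enable(mdp4_kms);                                   /* ungate clocks       */
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);   /* ack anything stale  */
	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);  /* mask everything     */
	mdp4_disable(mdp4_kms);                                  /* re-balance refcount */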
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 09b4a25eb553..c276624290af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) 11- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) 13- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
22 14
23Copyright (C) 2013-2015 by the following authors: 15Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 16- Rob Clark <robdclark@gmail.com> (robclark)
@@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx)
910 case 2: return (mdp5_cfg->lm.base[2]); 902 case 2: return (mdp5_cfg->lm.base[2]);
911 case 3: return (mdp5_cfg->lm.base[3]); 903 case 3: return (mdp5_cfg->lm.base[3]);
912 case 4: return (mdp5_cfg->lm.base[4]); 904 case 4: return (mdp5_cfg->lm.base[4]);
905 case 5: return (mdp5_cfg->lm.base[5]);
913 default: return INVALID_IDX(idx); 906 default: return INVALID_IDX(idx);
914 } 907 }
915} 908}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 46fac545dc2b..2f2863cf8b45 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -62,8 +62,8 @@ struct mdp5_crtc {
62 62
63 /* current cursor being scanned out: */ 63 /* current cursor being scanned out: */
64 struct drm_gem_object *scanout_bo; 64 struct drm_gem_object *scanout_bo;
65 uint32_t width; 65 uint32_t width, height;
66 uint32_t height; 66 uint32_t x, y;
67 } cursor; 67 } cursor;
68}; 68};
69#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) 69#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
103 struct drm_plane *plane; 103 struct drm_plane *plane;
104 uint32_t flush_mask = 0; 104 uint32_t flush_mask = 0;
105 105
106 /* we could have already released CTL in the disable path: */ 106 /* this should not happen: */
107 if (!mdp5_crtc->ctl) 107 if (WARN_ON(!mdp5_crtc->ctl))
108 return; 108 return;
109 109
110 drm_atomic_crtc_for_each_plane(plane, crtc) { 110 drm_atomic_crtc_for_each_plane(plane, crtc) {
@@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
143 drm_atomic_crtc_for_each_plane(plane, crtc) { 143 drm_atomic_crtc_for_each_plane(plane, crtc) {
144 mdp5_plane_complete_flip(plane); 144 mdp5_plane_complete_flip(plane);
145 } 145 }
146
147 if (mdp5_crtc->ctl && !crtc->state->enable) {
148 mdp5_ctl_release(mdp5_crtc->ctl);
149 mdp5_crtc->ctl = NULL;
150 }
146} 151}
147 152
148static void unref_cursor_worker(struct drm_flip_work *work, void *val) 153static void unref_cursor_worker(struct drm_flip_work *work, void *val)
@@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
386 mdp5_crtc->event = crtc->state->event; 391 mdp5_crtc->event = crtc->state->event;
387 spin_unlock_irqrestore(&dev->event_lock, flags); 392 spin_unlock_irqrestore(&dev->event_lock, flags);
388 393
394 /*
395 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
396 * it means we are trying to flush a CRTC whose state is disabled:
397 * nothing else needs to be done.
398 */
399 if (unlikely(!mdp5_crtc->ctl))
400 return;
401
389 blend_setup(crtc); 402 blend_setup(crtc);
390 crtc_flush_all(crtc); 403 crtc_flush_all(crtc);
391 request_pending(crtc, PENDING_FLIP); 404 request_pending(crtc, PENDING_FLIP);
392
393 if (mdp5_crtc->ctl && !crtc->state->enable) {
394 mdp5_ctl_release(mdp5_crtc->ctl);
395 mdp5_crtc->ctl = NULL;
396 }
397} 405}
398 406
399static int mdp5_crtc_set_property(struct drm_crtc *crtc, 407static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
403 return -EINVAL; 411 return -EINVAL;
404} 412}
405 413
414static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
415{
416 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
417 uint32_t xres = crtc->mode.hdisplay;
418 uint32_t yres = crtc->mode.vdisplay;
419
420 /*
421 * Cursor Region Of Interest (ROI) is a plane read from cursor
422 * buffer to render. The ROI region is determined by the visibility of
423 * the cursor point. In the default Cursor image the cursor point will
424 * be at the top left of the cursor image, unless it is specified
425 * otherwise using hotspot feature.
426 *
427 * If the cursor point reaches the right (xres - x < cursor.width) or
428 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
429 * width and ROI height need to be evaluated to crop the cursor image
430 * accordingly.
431 * (xres-x) will be new cursor width when x > (xres - cursor.width)
432 * (yres-y) will be new cursor height when y > (yres - cursor.height)
433 */
434 *roi_w = min(mdp5_crtc->cursor.width, xres -
435 mdp5_crtc->cursor.x);
436 *roi_h = min(mdp5_crtc->cursor.height, yres -
437 mdp5_crtc->cursor.y);
438}
439
406static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, 440static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
407 struct drm_file *file, uint32_t handle, 441 struct drm_file *file, uint32_t handle,
408 uint32_t width, uint32_t height) 442 uint32_t width, uint32_t height)
@@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
416 unsigned int depth; 450 unsigned int depth;
417 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; 451 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
418 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 452 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
453 uint32_t roi_w, roi_h;
419 unsigned long flags; 454 unsigned long flags;
420 455
421 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { 456 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
446 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 481 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
447 old_bo = mdp5_crtc->cursor.scanout_bo; 482 old_bo = mdp5_crtc->cursor.scanout_bo;
448 483
484 mdp5_crtc->cursor.scanout_bo = cursor_bo;
485 mdp5_crtc->cursor.width = width;
486 mdp5_crtc->cursor.height = height;
487
488 get_roi(crtc, &roi_w, &roi_h);
489
449 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); 490 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
450 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), 491 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
451 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); 492 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
453 MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | 494 MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
454 MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); 495 MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
455 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), 496 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
456 MDP5_LM_CURSOR_SIZE_ROI_H(height) | 497 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
457 MDP5_LM_CURSOR_SIZE_ROI_W(width)); 498 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
458 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); 499 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
459 500
460
461 blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; 501 blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
462 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
463 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); 502 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
464 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); 503 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
465 504
466 mdp5_crtc->cursor.scanout_bo = cursor_bo;
467 mdp5_crtc->cursor.width = width;
468 mdp5_crtc->cursor.height = height;
469 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 505 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
470 506
471 ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); 507 ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
@@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
489 struct mdp5_kms *mdp5_kms = get_kms(crtc); 525 struct mdp5_kms *mdp5_kms = get_kms(crtc);
490 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 526 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
491 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 527 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
492 uint32_t xres = crtc->mode.hdisplay;
493 uint32_t yres = crtc->mode.vdisplay;
494 uint32_t roi_w; 528 uint32_t roi_w;
495 uint32_t roi_h; 529 uint32_t roi_h;
496 unsigned long flags; 530 unsigned long flags;
497 531
498 x = (x > 0) ? x : 0; 532 /* In case the CRTC is disabled, just drop the cursor update */
499 y = (y > 0) ? y : 0; 533 if (unlikely(!crtc->state->enable))
534 return 0;
500 535
501 /* 536 mdp5_crtc->cursor.x = x = max(x, 0);
502 * Cursor Region Of Interest (ROI) is a plane read from cursor 537 mdp5_crtc->cursor.y = y = max(y, 0);
503 * buffer to render. The ROI region is determined by the visiblity of 538
504 * the cursor point. In the default Cursor image the cursor point will 539 get_roi(crtc, &roi_w, &roi_h);
505 * be at the top left of the cursor image, unless it is specified
506 * otherwise using hotspot feature.
507 *
508 * If the cursor point reaches the right (xres - x < cursor.width) or
509 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
510 * width and ROI height need to be evaluated to crop the cursor image
511 * accordingly.
512 * (xres-x) will be new cursor width when x > (xres - cursor.width)
513 * (yres-y) will be new cursor height when y > (yres - cursor.height)
514 */
515 roi_w = min(mdp5_crtc->cursor.width, xres - x);
516 roi_h = min(mdp5_crtc->cursor.height, yres - y);
517 540
518 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 541 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
519 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), 542 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
@@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
544static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { 567static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
545 .mode_fixup = mdp5_crtc_mode_fixup, 568 .mode_fixup = mdp5_crtc_mode_fixup,
546 .mode_set_nofb = mdp5_crtc_mode_set_nofb, 569 .mode_set_nofb = mdp5_crtc_mode_set_nofb,
547 .prepare = mdp5_crtc_disable, 570 .disable = mdp5_crtc_disable,
548 .commit = mdp5_crtc_enable, 571 .enable = mdp5_crtc_enable,
549 .atomic_check = mdp5_crtc_atomic_check, 572 .atomic_check = mdp5_crtc_atomic_check,
550 .atomic_begin = mdp5_crtc_atomic_begin, 573 .atomic_begin = mdp5_crtc_atomic_begin,
551 .atomic_flush = mdp5_crtc_atomic_flush, 574 .atomic_flush = mdp5_crtc_atomic_flush,
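In mdp5_crtc.c the cursor region-of-interest math is factored into get_roi() and the cursor position is cached in the crtc, so cursor_set and cursor_move share the same clamping; the CTL is also no longer released from atomic_flush while a flip may still be pending, and flushes of disabled CRTCs are skipped outright. A quick worked example of the ROI clamp (numbers are illustrative, not from the driver; min() as in <linux/kernel.h>):

	uint32_t xres = 1920, yres = 1080;        /* current mode                */
	uint32_t w = 64, h = 64;                  /* cursor image size           */
	uint32_t x = 1900, y = 1050;              /* cursor position             */
	uint32_t roi_w = min(w, xres - x);        /* = 20, clipped on the right  */
	uint32_t roi_h = min(h, yres - y);        /* = 30, clipped at the bottom */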
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index d6a14bb99988..af0e02fa4f48 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
267 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); 267 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
268 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 268 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
269 269
270 mdp5_encoder->enabled = false; 270 mdp5_encoder->enabled = true;
271} 271}
272 272
273static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { 273static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
274 .mode_fixup = mdp5_encoder_mode_fixup, 274 .mode_fixup = mdp5_encoder_mode_fixup,
275 .mode_set = mdp5_encoder_mode_set, 275 .mode_set = mdp5_encoder_mode_set,
276 .prepare = mdp5_encoder_disable, 276 .disable = mdp5_encoder_disable,
277 .commit = mdp5_encoder_enable, 277 .enable = mdp5_encoder_enable,
278}; 278};
279 279
280/* initialize encoder */ 280/* initialize encoder */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 70ac81edd40f..a9407105b9b7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
34void mdp5_irq_preinstall(struct msm_kms *kms) 34void mdp5_irq_preinstall(struct msm_kms *kms)
35{ 35{
36 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 36 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
37 mdp5_enable(mdp5_kms);
37 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); 38 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
39 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
40 mdp5_disable(mdp5_kms);
38} 41}
39 42
40int mdp5_irq_postinstall(struct msm_kms *kms) 43int mdp5_irq_postinstall(struct msm_kms *kms)
@@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
57void mdp5_irq_uninstall(struct msm_kms *kms) 60void mdp5_irq_uninstall(struct msm_kms *kms)
58{ 61{
59 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 62 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
63 mdp5_enable(mdp5_kms);
60 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); 64 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
65 mdp5_disable(mdp5_kms);
61} 66}
62 67
63static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) 68static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 871aa2108dc6..18fd643b6e69 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev,
219 * mark our set of crtc's as busy: 219 * mark our set of crtc's as busy:
220 */ 220 */
221 ret = start_atomic(dev->dev_private, c->crtc_mask); 221 ret = start_atomic(dev->dev_private, c->crtc_mask);
222 if (ret) 222 if (ret) {
223 kfree(c);
223 return ret; 224 return ret;
225 }
224 226
225 /* 227 /*
226 * This is the point of no return - everything below never fails except 228 * This is the point of no return - everything below never fails except
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 79924e4b1b49..6751553abe4a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
418 nouveau_fbcon_zfill(dev, fbcon); 418 nouveau_fbcon_zfill(dev, fbcon);
419 419
420 /* To allow resizeing without swapping buffers */ 420 /* To allow resizeing without swapping buffers */
421 NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n", 421 NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
422 nouveau_fb->base.width, nouveau_fb->base.height, 422 nouveau_fb->base.width, nouveau_fb->base.height,
423 nvbo->bo.offset, nvbo); 423 nvbo->bo.offset, nvbo);
424 424
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ed644a4f6f57..86807ee91bd1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1405 (x << 16) | y); 1405 (x << 16) | y);
1406 viewport_w = crtc->mode.hdisplay; 1406 viewport_w = crtc->mode.hdisplay;
1407 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 1407 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1408 if ((rdev->family >= CHIP_BONAIRE) &&
1409 (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
1410 viewport_h *= 2;
1408 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1411 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1409 (viewport_w << 16) | viewport_h); 1412 (viewport_w << 16) | viewport_h);
1410 1413
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7fe7b749e182..c39c1d0d9d4e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1626,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1626 struct radeon_connector *radeon_connector = NULL; 1626 struct radeon_connector *radeon_connector = NULL;
1627 struct radeon_connector_atom_dig *radeon_dig_connector = NULL; 1627 struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
1628 bool travis_quirk = false; 1628 bool travis_quirk = false;
1629 int encoder_mode;
1630 1629
1631 if (connector) { 1630 if (connector) {
1632 radeon_connector = to_radeon_connector(connector); 1631 radeon_connector = to_radeon_connector(connector);
@@ -1722,13 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1722 } 1721 }
1723 break; 1722 break;
1724 } 1723 }
1725
1726 encoder_mode = atombios_get_encoder_mode(encoder);
1727 if (connector && (radeon_audio != 0) &&
1728 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
1729 (ENCODER_MODE_IS_DP(encoder_mode) &&
1730 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
1731 radeon_audio_dpms(encoder, mode);
1732} 1724}
1733 1725
1734static void 1726static void
@@ -1737,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1737 struct drm_device *dev = encoder->dev; 1729 struct drm_device *dev = encoder->dev;
1738 struct radeon_device *rdev = dev->dev_private; 1730 struct radeon_device *rdev = dev->dev_private;
1739 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1731 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1732 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1733 int encoder_mode = atombios_get_encoder_mode(encoder);
1740 1734
1741 DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", 1735 DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
1742 radeon_encoder->encoder_id, mode, radeon_encoder->devices, 1736 radeon_encoder->encoder_id, mode, radeon_encoder->devices,
1743 radeon_encoder->active_device); 1737 radeon_encoder->active_device);
1738
1739 if (connector && (radeon_audio != 0) &&
1740 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
1741 (ENCODER_MODE_IS_DP(encoder_mode) &&
1742 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
1743 radeon_audio_dpms(encoder, mode);
1744
1744 switch (radeon_encoder->encoder_id) { 1745 switch (radeon_encoder->encoder_id) {
1745 case ENCODER_OBJECT_ID_INTERNAL_TMDS1: 1746 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1746 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 1747 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
@@ -2170,12 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2170 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2171 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2171 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 2172 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2172 /* handled in dpms */ 2173 /* handled in dpms */
2173 encoder_mode = atombios_get_encoder_mode(encoder);
2174 if (connector && (radeon_audio != 0) &&
2175 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
2176 (ENCODER_MODE_IS_DP(encoder_mode) &&
2177 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
2178 radeon_audio_mode_set(encoder, adjusted_mode);
2179 break; 2174 break;
2180 case ENCODER_OBJECT_ID_INTERNAL_DDI: 2175 case ENCODER_OBJECT_ID_INTERNAL_DDI:
2181 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 2176 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -2197,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2197 } 2192 }
2198 2193
2199 atombios_apply_encoder_quirks(encoder, adjusted_mode); 2194 atombios_apply_encoder_quirks(encoder, adjusted_mode);
2195
2196 encoder_mode = atombios_get_encoder_mode(encoder);
2197 if (connector && (radeon_audio != 0) &&
2198 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
2199 (ENCODER_MODE_IS_DP(encoder_mode) &&
2200 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
2201 radeon_audio_mode_set(encoder, adjusted_mode);
2200} 2202}
2201 2203
2202static bool 2204static bool
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0c993da9c8fb..3e670d344a20 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7555,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev)
7555 WREG32(DC_HPD5_INT_CONTROL, hpd5); 7555 WREG32(DC_HPD5_INT_CONTROL, hpd5);
7556 WREG32(DC_HPD6_INT_CONTROL, hpd6); 7556 WREG32(DC_HPD6_INT_CONTROL, hpd6);
7557 7557
7558 /* posting read */
7559 RREG32(SRBM_STATUS);
7560
7558 return 0; 7561 return 0;
7559} 7562}
7560 7563
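Several of the *_irq_set() hunks in this series (cik, evergreen, r100, r600, rs600, si) append a register read after the final interrupt-control writes. MMIO writes through PCI bridges can be posted (buffered), so reading back from the same device flushes them before the function returns. A standalone sketch of the idea, with a local array standing in for an ioremap()ed BAR and made-up register offsets (not the radeon WREG32/RREG32 macros):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_bar[0x40];	/* stands in for a mapped BAR */

#define GEN_INT_CNTL	0x40	/* illustrative offsets, not the radeon map */
#define STATUS_REG	0x50

static void mmio_write32(unsigned int reg, uint32_t val)
{
	fake_bar[reg / 4] = val;
}

static uint32_t mmio_read32(unsigned int reg)
{
	return fake_bar[reg / 4];
}

static int irq_set_example(uint32_t mask)
{
	mmio_write32(GEN_INT_CNTL, mask);

	/*
	 * Posting read: on real hardware, reading any register from the
	 * same device forces the preceding posted writes to complete.
	 */
	(void)mmio_read32(STATUS_REG);

	return 0;
}

int main(void)
{
	irq_set_example(0x1);
	printf("cntl = 0x%x\n", mmio_read32(GEN_INT_CNTL));
	return 0;
}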
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 192c80389151..3adc2afe32aa 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -26,6 +26,9 @@
26#include "radeon_audio.h" 26#include "radeon_audio.h"
27#include "sid.h" 27#include "sid.h"
28 28
29#define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8
30#define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc
31
29u32 dce6_endpoint_rreg(struct radeon_device *rdev, 32u32 dce6_endpoint_rreg(struct radeon_device *rdev,
30 u32 block_offset, u32 reg) 33 u32 block_offset, u32 reg)
31{ 34{
@@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev,
252void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, 255void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
253 struct radeon_crtc *crtc, unsigned int clock) 256 struct radeon_crtc *crtc, unsigned int clock)
254{ 257{
255 /* Two dtos; generally use dto0 for HDMI */ 258 /* Two dtos; generally use dto0 for HDMI */
256 u32 value = 0; 259 u32 value = 0;
257 260
258 if (crtc) 261 if (crtc)
259 value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); 262 value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
260 263
261 WREG32(DCCG_AUDIO_DTO_SOURCE, value); 264 WREG32(DCCG_AUDIO_DTO_SOURCE, value);
262 265
263 /* Express [24MHz / target pixel clock] as an exact rational 266 /* Express [24MHz / target pixel clock] as an exact rational
264 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 267 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
265 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 268 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
266 */ 269 */
267 WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); 270 WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
268 WREG32(DCCG_AUDIO_DTO0_MODULE, clock); 271 WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
269} 272}
270 273
271void dce6_dp_audio_set_dto(struct radeon_device *rdev, 274void dce6_dp_audio_set_dto(struct radeon_device *rdev,
272 struct radeon_crtc *crtc, unsigned int clock) 275 struct radeon_crtc *crtc, unsigned int clock)
273{ 276{
274 /* Two dtos; generally use dto1 for DP */ 277 /* Two dtos; generally use dto1 for DP */
275 u32 value = 0; 278 u32 value = 0;
276 value |= DCCG_AUDIO_DTO_SEL; 279 value |= DCCG_AUDIO_DTO_SEL;
277 280
278 if (crtc) 281 if (crtc)
279 value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); 282 value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
280 283
281 WREG32(DCCG_AUDIO_DTO_SOURCE, value); 284 WREG32(DCCG_AUDIO_DTO_SOURCE, value);
282 285
283 /* Express [24MHz / target pixel clock] as an exact rational 286 /* Express [24MHz / target pixel clock] as an exact rational
284 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 287 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
285 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 288 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
286 */ 289 */
287 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); 290 if (ASIC_IS_DCE8(rdev)) {
288 WREG32(DCCG_AUDIO_DTO1_MODULE, clock); 291 WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
292 WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
293 } else {
294 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
295 WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
296 }
289} 297}
290 298
291void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) 299void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
292{ 300{
293 struct drm_device *dev = encoder->dev; 301 struct drm_device *dev = encoder->dev;
294 struct radeon_device *rdev = dev->dev_private; 302 struct radeon_device *rdev = dev->dev_private;
295 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 303 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
296 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 304 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
297 uint32_t offset;
298 305
299 if (!dig || !dig->afmt) 306 if (!dig || !dig->afmt)
300 return; 307 return;
301 308
302 offset = dig->afmt->offset;
303
304 if (enable) { 309 if (enable) {
305 if (dig->afmt->enabled) 310 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
306 return; 311 EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
307 312 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
308 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); 313 EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
309 WREG32(EVERGREEN_DP_SEC_CNTL + offset, 314 EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
310 EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ 315 EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
311 EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ 316 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
312 EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
313 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
314 radeon_audio_enable(rdev, dig->afmt->pin, true);
315 } else { 317 } else {
316 if (!dig->afmt->enabled) 318 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
317 return;
318
319 WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
320 radeon_audio_enable(rdev, dig->afmt->pin, false);
321 } 319 }
322 320
323 dig->afmt->enabled = enable; 321 dig->afmt->enabled = enable;
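dce6_dp_audio_set_dto() now picks the DTO1 phase/module register pair per ASIC generation: DCE8 parts keep 0x05b8/0x05bc, while the corrected SI offsets in sid.h (later in this diff) move to 0x05c0/0x05c4. A small sketch of selecting a register pair at runtime; the offsets are copied from the hunks, everything else is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct reg_pair {
	uint32_t phase;
	uint32_t module;
};

/* Offsets as they appear in the patch. */
static const struct reg_pair dce8_dto1 = { 0x05b8, 0x05bc };
static const struct reg_pair si_dto1   = { 0x05c0, 0x05c4 };

static const struct reg_pair *dto1_regs(bool is_dce8)
{
	return is_dce8 ? &dce8_dto1 : &si_dto1;
}

int main(void)
{
	const struct reg_pair *r = dto1_regs(true);

	/* phase = 24000 (24 MHz in kHz), module = target pixel clock */
	printf("write 24000 to 0x%04x, clock to 0x%04x\n", r->phase, r->module);
	return 0;
}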
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4c0e24b3bb90..973df064c14f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4593,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
4593 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); 4593 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
4594 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); 4594 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
4595 4595
4596 /* posting read */
4597 RREG32(SRBM_STATUS);
4598
4596 return 0; 4599 return 0;
4597} 4600}
4598 4601
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 1d9aebc79595..c18d4ecbd95d 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
272} 272}
273 273
274void dce4_dp_audio_set_dto(struct radeon_device *rdev, 274void dce4_dp_audio_set_dto(struct radeon_device *rdev,
275 struct radeon_crtc *crtc, unsigned int clock) 275 struct radeon_crtc *crtc, unsigned int clock)
276{ 276{
277 u32 value; 277 u32 value;
278 278
@@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
294 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 294 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
295 */ 295 */
296 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); 296 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
297 WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10); 297 WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
298} 298}
299 299
300void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) 300void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
@@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
350 struct drm_device *dev = encoder->dev; 350 struct drm_device *dev = encoder->dev;
351 struct radeon_device *rdev = dev->dev_private; 351 struct radeon_device *rdev = dev->dev_private;
352 352
353 WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
354 HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
355 HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
356
357 WREG32(AFMT_INFOFRAME_CONTROL0 + offset, 353 WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
358 AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ 354 AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
359 355
360 WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
361 HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
362
363 WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
364 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
365 HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
366
367 WREG32(AFMT_60958_0 + offset, 356 WREG32(AFMT_60958_0 + offset,
368 AFMT_60958_CS_CHANNEL_NUMBER_L(1)); 357 AFMT_60958_CS_CHANNEL_NUMBER_L(1));
369 358
@@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
408 if (!dig || !dig->afmt) 397 if (!dig || !dig->afmt)
409 return; 398 return;
410 399
411 /* Silent, r600_hdmi_enable will raise WARN for us */ 400 if (enable) {
412 if (enable && dig->afmt->enabled) 401 WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
413 return; 402 HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
414 if (!enable && !dig->afmt->enabled) 403
415 return; 404 WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
405 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
406 HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
416 407
417 if (!enable && dig->afmt->pin) { 408 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
418 radeon_audio_enable(rdev, dig->afmt->pin, 0); 409 HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
419 dig->afmt->pin = NULL; 410 HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
411 } else {
412 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
420 } 413 }
421 414
422 dig->afmt->enabled = enable; 415 dig->afmt->enabled = enable;
@@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
425 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); 418 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
426} 419}
427 420
428void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) 421void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
429{ 422{
430 struct drm_device *dev = encoder->dev; 423 struct drm_device *dev = encoder->dev;
431 struct radeon_device *rdev = dev->dev_private; 424 struct radeon_device *rdev = dev->dev_private;
432 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 425 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
433 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 426 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
434 uint32_t offset;
435 427
436 if (!dig || !dig->afmt) 428 if (!dig || !dig->afmt)
437 return; 429 return;
438 430
439 offset = dig->afmt->offset;
440
441 if (enable) { 431 if (enable) {
442 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 432 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
443 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 433 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
444 struct radeon_connector_atom_dig *dig_connector; 434 struct radeon_connector_atom_dig *dig_connector;
445 uint32_t val; 435 uint32_t val;
446 436
447 if (dig->afmt->enabled) 437 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
448 return; 438 EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
449
450 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
451 439
452 if (radeon_connector->con_priv) { 440 if (radeon_connector->con_priv) {
453 dig_connector = radeon_connector->con_priv; 441 dig_connector = radeon_connector->con_priv;
454 val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset); 442 val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
455 val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); 443 val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
456 444
457 if (dig_connector->dp_clock == 162000) 445 if (dig_connector->dp_clock == 162000)
@@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
459 else 447 else
460 val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); 448 val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
461 449
462 WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val); 450 WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
463 } 451 }
464 452
465 WREG32(EVERGREEN_DP_SEC_CNTL + offset, 453 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
466 EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ 454 EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
467 EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ 455 EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
468 EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ 456 EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
469 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ 457 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
470 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
471 } else { 458 } else {
472 if (!dig->afmt->enabled) 459 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
473 return;
474
475 WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
476 radeon_audio_enable(rdev, dig->afmt->pin, 0);
477 } 460 }
478 461
479 dig->afmt->enabled = enable; 462 dig->afmt->enabled = enable;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 279801ca5110..04f2514f7564 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev)
728 tmp |= RADEON_FP2_DETECT_MASK; 728 tmp |= RADEON_FP2_DETECT_MASK;
729 } 729 }
730 WREG32(RADEON_GEN_INT_CNTL, tmp); 730 WREG32(RADEON_GEN_INT_CNTL, tmp);
731
732 /* read back to post the write */
733 RREG32(RADEON_GEN_INT_CNTL);
734
731 return 0; 735 return 0;
732} 736}
733 737
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 07a71a2488c9..2fcad344492f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev)
3784 WREG32(RV770_CG_THERMAL_INT, thermal_int); 3784 WREG32(RV770_CG_THERMAL_INT, thermal_int);
3785 } 3785 }
3786 3786
3787 /* posting read */
3788 RREG32(R_000E50_SRBM_STATUS);
3789
3787 return 0; 3790 return 0;
3788} 3791}
3789 3792
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 62c91ed669ce..dd6606b8e23c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
476 if (!dig || !dig->afmt) 476 if (!dig || !dig->afmt)
477 return; 477 return;
478 478
479 /* Silent, r600_hdmi_enable will raise WARN for us */
480 if (enable && dig->afmt->enabled)
481 return;
482 if (!enable && !dig->afmt->enabled)
483 return;
484
485 if (!enable && dig->afmt->pin) {
486 radeon_audio_enable(rdev, dig->afmt->pin, 0);
487 dig->afmt->pin = NULL;
488 }
489
490 /* Older chipsets require setting HDMI and routing manually */ 479 /* Older chipsets require setting HDMI and routing manually */
491 if (!ASIC_IS_DCE3(rdev)) { 480 if (!ASIC_IS_DCE3(rdev)) {
492 if (enable) 481 if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index a3ceef6d9632..b21ef69a34ac 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
101 struct drm_display_mode *mode); 101 struct drm_display_mode *mode);
102void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); 102void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
103void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); 103void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
104void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); 104void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
105void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); 105void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
106 106
107static const u32 pin_offsets[7] = 107static const u32 pin_offsets[7] =
108{ 108{
@@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = {
210 .set_avi_packet = evergreen_set_avi_packet, 210 .set_avi_packet = evergreen_set_avi_packet,
211 .set_audio_packet = dce4_set_audio_packet, 211 .set_audio_packet = dce4_set_audio_packet,
212 .mode_set = radeon_audio_dp_mode_set, 212 .mode_set = radeon_audio_dp_mode_set,
213 .dpms = evergreen_enable_dp_audio_packets, 213 .dpms = evergreen_dp_enable,
214}; 214};
215 215
216static struct radeon_audio_funcs dce6_hdmi_funcs = { 216static struct radeon_audio_funcs dce6_hdmi_funcs = {
@@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
240 .set_avi_packet = evergreen_set_avi_packet, 240 .set_avi_packet = evergreen_set_avi_packet,
241 .set_audio_packet = dce4_set_audio_packet, 241 .set_audio_packet = dce4_set_audio_packet,
242 .mode_set = radeon_audio_dp_mode_set, 242 .mode_set = radeon_audio_dp_mode_set,
243 .dpms = dce6_enable_dp_audio_packets, 243 .dpms = dce6_dp_enable,
244}; 244};
245 245
246static void radeon_audio_interface_init(struct radeon_device *rdev) 246static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev,
452} 452}
453 453
454void radeon_audio_detect(struct drm_connector *connector, 454void radeon_audio_detect(struct drm_connector *connector,
455 enum drm_connector_status status) 455 enum drm_connector_status status)
456{ 456{
457 struct radeon_device *rdev; 457 struct radeon_device *rdev;
458 struct radeon_encoder *radeon_encoder; 458 struct radeon_encoder *radeon_encoder;
@@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector,
483 else 483 else
484 radeon_encoder->audio = rdev->audio.hdmi_funcs; 484 radeon_encoder->audio = rdev->audio.hdmi_funcs;
485 485
486 radeon_audio_write_speaker_allocation(connector->encoder); 486 dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
487 radeon_audio_write_sad_regs(connector->encoder);
488 if (connector->encoder->crtc)
489 radeon_audio_write_latency_fields(connector->encoder,
490 &connector->encoder->crtc->mode);
491 radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 487 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
492 } else { 488 } else {
493 radeon_audio_enable(rdev, dig->afmt->pin, 0); 489 radeon_audio_enable(rdev, dig->afmt->pin, 0);
490 dig->afmt->pin = NULL;
494 } 491 }
495} 492}
496 493
@@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute)
694 * update the info frames with the data from the current display mode 691 * update the info frames with the data from the current display mode
695 */ 692 */
696static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, 693static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
697 struct drm_display_mode *mode) 694 struct drm_display_mode *mode)
698{ 695{
699 struct radeon_device *rdev = encoder->dev->dev_private;
700 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 696 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
701 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 697 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
702 698
703 if (!dig || !dig->afmt) 699 if (!dig || !dig->afmt)
704 return; 700 return;
705 701
706 /* disable audio prior to setting up hw */ 702 radeon_audio_set_mute(encoder, true);
707 dig->afmt->pin = radeon_audio_get_pin(encoder);
708 radeon_audio_enable(rdev, dig->afmt->pin, 0);
709 703
704 radeon_audio_write_speaker_allocation(encoder);
705 radeon_audio_write_sad_regs(encoder);
706 radeon_audio_write_latency_fields(encoder, mode);
710 radeon_audio_set_dto(encoder, mode->clock); 707 radeon_audio_set_dto(encoder, mode->clock);
711 radeon_audio_set_vbi_packet(encoder); 708 radeon_audio_set_vbi_packet(encoder);
712 radeon_hdmi_set_color_depth(encoder); 709 radeon_hdmi_set_color_depth(encoder);
713 radeon_audio_set_mute(encoder, false);
714 radeon_audio_update_acr(encoder, mode->clock); 710 radeon_audio_update_acr(encoder, mode->clock);
715 radeon_audio_set_audio_packet(encoder); 711 radeon_audio_set_audio_packet(encoder);
716 radeon_audio_select_pin(encoder); 712 radeon_audio_select_pin(encoder);
@@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
718 if (radeon_audio_set_avi_packet(encoder, mode) < 0) 714 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
719 return; 715 return;
720 716
721 /* enable audio after to setting up hw */ 717 radeon_audio_set_mute(encoder, false);
722 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
723} 718}
724 719
725static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, 720static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
@@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
729 struct radeon_device *rdev = dev->dev_private; 724 struct radeon_device *rdev = dev->dev_private;
730 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 725 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
731 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 726 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
727 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
728 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
729 struct radeon_connector_atom_dig *dig_connector =
730 radeon_connector->con_priv;
732 731
733 if (!dig || !dig->afmt) 732 if (!dig || !dig->afmt)
734 return; 733 return;
735 734
736 /* disable audio prior to setting up hw */ 735 radeon_audio_write_speaker_allocation(encoder);
737 dig->afmt->pin = radeon_audio_get_pin(encoder); 736 radeon_audio_write_sad_regs(encoder);
738 radeon_audio_enable(rdev, dig->afmt->pin, 0); 737 radeon_audio_write_latency_fields(encoder, mode);
739 738 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
740 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); 739 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
740 else
741 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
741 radeon_audio_set_audio_packet(encoder); 742 radeon_audio_set_audio_packet(encoder);
742 radeon_audio_select_pin(encoder); 743 radeon_audio_select_pin(encoder);
743 744
744 if (radeon_audio_set_avi_packet(encoder, mode) < 0) 745 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
745 return; 746 return;
746
747 /* enable audio after to setting up hw */
748 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
749} 747}
750 748
751void radeon_audio_mode_set(struct drm_encoder *encoder, 749void radeon_audio_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a579ed379f20..4d0f96cc3da4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
256 u32 ring = RADEON_CS_RING_GFX; 256 u32 ring = RADEON_CS_RING_GFX;
257 s32 priority = 0; 257 s32 priority = 0;
258 258
259 INIT_LIST_HEAD(&p->validated);
260
259 if (!cs->num_chunks) { 261 if (!cs->num_chunks) {
260 return 0; 262 return 0;
261 } 263 }
264
262 /* get chunks */ 265 /* get chunks */
263 INIT_LIST_HEAD(&p->validated);
264 p->idx = 0; 266 p->idx = 0;
265 p->ib.sa_bo = NULL; 267 p->ib.sa_bo = NULL;
266 p->const_ib.sa_bo = NULL; 268 p->const_ib.sa_bo = NULL;
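Moving INIT_LIST_HEAD(&p->validated) ahead of the early return means the list head is valid on every exit path, including the "no chunks" one, so any cleanup that walks the list cannot trip over uninitialized pointers. A minimal userspace sketch of why initializing before the first bail-out matters, with a hypothetical parser_fini() that always touches the list:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

struct parser {
	struct list_head validated;
	int num_chunks;
};

/* Cleanup that runs on every exit path, so the list must be valid. */
static void parser_fini(struct parser *p)
{
	if (list_empty(&p->validated))
		printf("nothing to unreserve\n");
}

static int parser_init(struct parser *p)
{
	INIT_LIST_HEAD(&p->validated);	/* before any early return */

	if (!p->num_chunks)
		return 0;

	/* ... get chunks ... */
	return 0;
}

int main(void)
{
	struct parser p = { .num_chunks = 0 };

	parser_init(&p);
	parser_fini(&p);	/* safe even on the early-return path */
	return 0;
}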
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d13d1b5a859f..df09ca7c4889 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence)
1030 return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); 1030 return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
1031} 1031}
1032 1032
1033struct radeon_wait_cb {
1034 struct fence_cb base;
1035 struct task_struct *task;
1036};
1037
1038static void
1039radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
1040{
1041 struct radeon_wait_cb *wait =
1042 container_of(cb, struct radeon_wait_cb, base);
1043
1044 wake_up_process(wait->task);
1045}
1046
1033static signed long radeon_fence_default_wait(struct fence *f, bool intr, 1047static signed long radeon_fence_default_wait(struct fence *f, bool intr,
1034 signed long t) 1048 signed long t)
1035{ 1049{
1036 struct radeon_fence *fence = to_radeon_fence(f); 1050 struct radeon_fence *fence = to_radeon_fence(f);
1037 struct radeon_device *rdev = fence->rdev; 1051 struct radeon_device *rdev = fence->rdev;
1038 bool signaled; 1052 struct radeon_wait_cb cb;
1039 1053
1040 fence_enable_sw_signaling(&fence->base); 1054 cb.task = current;
1041 1055
1042 /* 1056 if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
1043 * This function has to return -EDEADLK, but cannot hold 1057 return t;
1044 * exclusive_lock during the wait because some callers 1058
1045 * may already hold it. This means checking needs_reset without 1059 while (t > 0) {
1046 * lock, and not fiddling with any gpu internals. 1060 if (intr)
1047 * 1061 set_current_state(TASK_INTERRUPTIBLE);
1048 * The callback installed with fence_enable_sw_signaling will 1062 else
1049 * run before our wait_event_*timeout call, so we will see 1063 set_current_state(TASK_UNINTERRUPTIBLE);
1050 * both the signaled fence and the changes to needs_reset. 1064
1051 */ 1065 /*
1066 * radeon_test_signaled must be called after
1067 * set_current_state to prevent a race with wake_up_process
1068 */
1069 if (radeon_test_signaled(fence))
1070 break;
1071
1072 if (rdev->needs_reset) {
1073 t = -EDEADLK;
1074 break;
1075 }
1076
1077 t = schedule_timeout(t);
1078
1079 if (t > 0 && intr && signal_pending(current))
1080 t = -ERESTARTSYS;
1081 }
1082
1083 __set_current_state(TASK_RUNNING);
1084 fence_remove_callback(f, &cb.base);
1052 1085
1053 if (intr)
1054 t = wait_event_interruptible_timeout(rdev->fence_queue,
1055 ((signaled = radeon_test_signaled(fence)) ||
1056 rdev->needs_reset), t);
1057 else
1058 t = wait_event_timeout(rdev->fence_queue,
1059 ((signaled = radeon_test_signaled(fence)) ||
1060 rdev->needs_reset), t);
1061
1062 if (t > 0 && !signaled)
1063 return -EDEADLK;
1064 return t; 1086 return t;
1065} 1087}
1066 1088
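The rewritten radeon_fence_default_wait() installs its own fence callback that wakes the waiting task, then loops: set the task state, re-check the signaled flag and rdev->needs_reset, and only then call schedule_timeout(). Re-checking the predicate after publishing the "waiting" state is what closes the race with wake_up_process(). A loose userspace analogue of the same shape, using a condition variable; the predicate loop and abort flag stand in for radeon_test_signaled() and needs_reset, this is not the kernel API (build with -lpthread):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool signaled;
static bool needs_reset;

/* Returns 0 on success, ETIMEDOUT, or EDEADLK if a reset was requested. */
static int wait_for_fence(int timeout_ms)
{
	struct timespec deadline;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	/* re-check the predicate every time we are woken up or time out */
	while (!signaled && !needs_reset && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &deadline);
	if (needs_reset)
		ret = EDEADLK;
	pthread_mutex_unlock(&lock);

	return signaled ? 0 : ret;
}

int main(void)
{
	printf("wait -> %d\n", wait_for_fence(10));	/* nothing signals: times out */
	return 0;
}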
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d81182ad53ec..97a904835759 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev)
694 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 694 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
695 if (ASIC_IS_DCE2(rdev)) 695 if (ASIC_IS_DCE2(rdev))
696 WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 696 WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
697
698 /* posting read */
699 RREG32(R_000040_GEN_INT_CNTL);
700
697 return 0; 701 return 0;
698} 702}
699 703
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index bcf516a8a2f1..a7fb2735d4a9 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6203,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev)
6203 6203
6204 WREG32(CG_THERMAL_INT, thermal_int); 6204 WREG32(CG_THERMAL_INT, thermal_int);
6205 6205
6206 /* posting read */
6207 RREG32(SRBM_STATUS);
6208
6206 return 0; 6209 return 0;
6207} 6210}
6208 6211
@@ -7127,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
7127 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); 7130 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
7128 7131
7129 if (!vclk || !dclk) { 7132 if (!vclk || !dclk) {
7130 /* keep the Bypass mode, put PLL to sleep */ 7133 /* keep the Bypass mode */
7131 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
7132 return 0; 7134 return 0;
7133 } 7135 }
7134 7136
@@ -7144,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
7144 /* set VCO_MODE to 1 */ 7146 /* set VCO_MODE to 1 */
7145 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); 7147 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
7146 7148
7147 /* toggle UPLL_SLEEP to 1 then back to 0 */ 7149 /* disable sleep mode */
7148 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
7149 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); 7150 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
7150 7151
7151 /* deassert UPLL_RESET */ 7152 /* deassert UPLL_RESET */
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index c27118cab16a..99a9835c9f61 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -912,8 +912,8 @@
912 912
913#define DCCG_AUDIO_DTO0_PHASE 0x05b0 913#define DCCG_AUDIO_DTO0_PHASE 0x05b0
914#define DCCG_AUDIO_DTO0_MODULE 0x05b4 914#define DCCG_AUDIO_DTO0_MODULE 0x05b4
915#define DCCG_AUDIO_DTO1_PHASE 0x05b8 915#define DCCG_AUDIO_DTO1_PHASE 0x05c0
916#define DCCG_AUDIO_DTO1_MODULE 0x05bc 916#define DCCG_AUDIO_DTO1_MODULE 0x05c4
917 917
918#define AFMT_AUDIO_SRC_CONTROL 0x713c 918#define AFMT_AUDIO_SRC_CONTROL 0x713c
919#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) 919#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d395b0bef73b..8d9b7de25613 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
74 pr_err(" has_type: %d\n", man->has_type); 74 pr_err(" has_type: %d\n", man->has_type);
75 pr_err(" use_type: %d\n", man->use_type); 75 pr_err(" use_type: %d\n", man->use_type);
76 pr_err(" flags: 0x%08X\n", man->flags); 76 pr_err(" flags: 0x%08X\n", man->flags);
77 pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset); 77 pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
78 pr_err(" size: %llu\n", man->size); 78 pr_err(" size: %llu\n", man->size);
79 pr_err(" available_caching: 0x%08X\n", man->available_caching); 79 pr_err(" available_caching: 0x%08X\n", man->available_caching);
80 pr_err(" default_caching: 0x%08X\n", man->default_caching); 80 pr_err(" default_caching: 0x%08X\n", man->default_caching);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6c6b655defcf..e13b9cbc304e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
725 goto out_err1; 725 goto out_err1;
726 } 726 }
727 727
728 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
729 (dev_priv->vram_size >> PAGE_SHIFT));
730 if (unlikely(ret != 0)) {
731 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
732 goto out_err2;
733 }
734
735 dev_priv->has_gmr = true;
736 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
737 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
738 VMW_PL_GMR) != 0) {
739 DRM_INFO("No GMR memory available. "
740 "Graphics memory resources are very limited.\n");
741 dev_priv->has_gmr = false;
742 }
743
744 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
745 dev_priv->has_mob = true;
746 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
747 VMW_PL_MOB) != 0) {
748 DRM_INFO("No MOB memory available. "
749 "3D will be disabled.\n");
750 dev_priv->has_mob = false;
751 }
752 }
753
754 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, 728 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
755 dev_priv->mmio_size); 729 dev_priv->mmio_size);
756 730
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
813 goto out_no_fman; 787 goto out_no_fman;
814 } 788 }
815 789
790
791 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
792 (dev_priv->vram_size >> PAGE_SHIFT));
793 if (unlikely(ret != 0)) {
794 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
795 goto out_no_vram;
796 }
797
798 dev_priv->has_gmr = true;
799 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
800 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
801 VMW_PL_GMR) != 0) {
802 DRM_INFO("No GMR memory available. "
803 "Graphics memory resources are very limited.\n");
804 dev_priv->has_gmr = false;
805 }
806
807 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
808 dev_priv->has_mob = true;
809 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
810 VMW_PL_MOB) != 0) {
811 DRM_INFO("No MOB memory available. "
812 "3D will be disabled.\n");
813 dev_priv->has_mob = false;
814 }
815 }
816
816 vmw_kms_save_vga(dev_priv); 817 vmw_kms_save_vga(dev_priv);
817 818
818 /* Start kms and overlay systems, needs fifo. */ 819 /* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@ out_no_fifo:
838 vmw_kms_close(dev_priv); 839 vmw_kms_close(dev_priv);
839out_no_kms: 840out_no_kms:
840 vmw_kms_restore_vga(dev_priv); 841 vmw_kms_restore_vga(dev_priv);
842 if (dev_priv->has_mob)
843 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
844 if (dev_priv->has_gmr)
845 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
846 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
847out_no_vram:
841 vmw_fence_manager_takedown(dev_priv->fman); 848 vmw_fence_manager_takedown(dev_priv->fman);
842out_no_fman: 849out_no_fman:
843 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 850 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@ out_err4:
853 iounmap(dev_priv->mmio_virt); 860 iounmap(dev_priv->mmio_virt);
854out_err3: 861out_err3:
855 arch_phys_wc_del(dev_priv->mmio_mtrr); 862 arch_phys_wc_del(dev_priv->mmio_mtrr);
856 if (dev_priv->has_mob)
857 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
858 if (dev_priv->has_gmr)
859 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
860 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
861out_err2:
862 (void)ttm_bo_device_release(&dev_priv->bdev); 863 (void)ttm_bo_device_release(&dev_priv->bdev);
863out_err1: 864out_err1:
864 vmw_ttm_global_release(dev_priv); 865 vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
887 } 888 }
888 vmw_kms_close(dev_priv); 889 vmw_kms_close(dev_priv);
889 vmw_overlay_close(dev_priv); 890 vmw_overlay_close(dev_priv);
891
892 if (dev_priv->has_mob)
893 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
894 if (dev_priv->has_gmr)
895 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
896 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
897
890 vmw_fence_manager_takedown(dev_priv->fman); 898 vmw_fence_manager_takedown(dev_priv->fman);
891 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 899 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
892 drm_irq_uninstall(dev_priv->dev); 900 drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
898 ttm_object_device_release(&dev_priv->tdev); 906 ttm_object_device_release(&dev_priv->tdev);
899 iounmap(dev_priv->mmio_virt); 907 iounmap(dev_priv->mmio_virt);
900 arch_phys_wc_del(dev_priv->mmio_mtrr); 908 arch_phys_wc_del(dev_priv->mmio_mtrr);
901 if (dev_priv->has_mob)
902 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
903 if (dev_priv->has_gmr)
904 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
905 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
906 (void)ttm_bo_device_release(&dev_priv->bdev); 909 (void)ttm_bo_device_release(&dev_priv->bdev);
907 vmw_ttm_global_release(dev_priv); 910 vmw_ttm_global_release(dev_priv);
908 911
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev)
1235{ 1238{
1236 struct drm_device *dev = pci_get_drvdata(pdev); 1239 struct drm_device *dev = pci_get_drvdata(pdev);
1237 1240
1241 pci_disable_device(pdev);
1238 drm_put_dev(dev); 1242 drm_put_dev(dev);
1239} 1243}
1240 1244
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33176d05db35..654c8daeb5ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
890 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); 890 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
891 if (unlikely(ret != 0)) { 891 if (unlikely(ret != 0)) {
892 DRM_ERROR("Could not find or use MOB buffer.\n"); 892 DRM_ERROR("Could not find or use MOB buffer.\n");
893 return -EINVAL; 893 ret = -EINVAL;
894 goto out_no_reloc;
894 } 895 }
895 bo = &vmw_bo->base; 896 bo = &vmw_bo->base;
896 897
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
914 915
915out_no_reloc: 916out_no_reloc:
916 vmw_dmabuf_unreference(&vmw_bo); 917 vmw_dmabuf_unreference(&vmw_bo);
917 vmw_bo_p = NULL; 918 *vmw_bo_p = NULL;
918 return ret; 919 return ret;
919} 920}
920 921
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
951 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); 952 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
952 if (unlikely(ret != 0)) { 953 if (unlikely(ret != 0)) {
953 DRM_ERROR("Could not find or use GMR region.\n"); 954 DRM_ERROR("Could not find or use GMR region.\n");
954 return -EINVAL; 955 ret = -EINVAL;
956 goto out_no_reloc;
955 } 957 }
956 bo = &vmw_bo->base; 958 bo = &vmw_bo->base;
957 959
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
974 976
975out_no_reloc: 977out_no_reloc:
976 vmw_dmabuf_unreference(&vmw_bo); 978 vmw_dmabuf_unreference(&vmw_bo);
977 vmw_bo_p = NULL; 979 *vmw_bo_p = NULL;
978 return ret; 980 return ret;
979} 981}
980 982
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2780 NULL, arg->command_size, arg->throttle_us, 2782 NULL, arg->command_size, arg->throttle_us,
2781 (void __user *)(unsigned long)arg->fence_rep, 2783 (void __user *)(unsigned long)arg->fence_rep,
2782 NULL); 2784 NULL);
2783 2785 ttm_read_unlock(&dev_priv->reservation_sem);
2784 if (unlikely(ret != 0)) 2786 if (unlikely(ret != 0))
2785 goto out_unlock; 2787 return ret;
2786 2788
2787 vmw_kms_cursor_post_execbuf(dev_priv); 2789 vmw_kms_cursor_post_execbuf(dev_priv);
2788 2790
2789out_unlock: 2791 return 0;
2790 ttm_read_unlock(&dev_priv->reservation_sem);
2791 return ret;
2792} 2792}
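Two fixes are folded into the translate helpers above: error paths now go through out_no_reloc so the looked-up buffer is released, and the output parameter is cleared with *vmw_bo_p = NULL rather than the old vmw_bo_p = NULL, which only overwrote the local copy of the pointer and left the caller's pointer untouched. A small illustration of that pointer-to-pointer pitfall (names are made up):

#include <stdio.h>
#include <stdlib.h>

struct buf { int handle; };

/* On failure the caller's pointer must be cleared through *out. */
static int lookup_buf(int handle, struct buf **out)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b || handle < 0) {
		free(b);
		*out = NULL;	/* "out = NULL;" would only change the local copy */
		return -1;
	}

	b->handle = handle;
	*out = b;
	return 0;
}

int main(void)
{
	struct buf *b = (struct buf *)0xdeadbeef;	/* garbage, as in a careless caller */

	if (lookup_buf(-1, &b))
		printf("lookup failed, b = %p (safe to test for NULL)\n", (void *)b);
	return 0;
}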
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8725b79e7847..07cda8cbbddb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2033 int i; 2033 int i;
2034 struct drm_mode_config *mode_config = &dev->mode_config; 2034 struct drm_mode_config *mode_config = &dev->mode_config;
2035 2035
2036 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2037 if (unlikely(ret != 0))
2038 return ret;
2039
2040 if (!arg->num_outputs) { 2036 if (!arg->num_outputs) {
2041 struct drm_vmw_rect def_rect = {0, 0, 800, 600}; 2037 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
2042 vmw_du_update_layout(dev_priv, 1, &def_rect); 2038 vmw_du_update_layout(dev_priv, 1, &def_rect);
2043 goto out_unlock; 2039 return 0;
2044 } 2040 }
2045 2041
2046 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); 2042 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2047 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), 2043 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2048 GFP_KERNEL); 2044 GFP_KERNEL);
2049 if (unlikely(!rects)) { 2045 if (unlikely(!rects))
2050 ret = -ENOMEM; 2046 return -ENOMEM;
2051 goto out_unlock;
2052 }
2053 2047
2054 user_rects = (void __user *)(unsigned long)arg->rects; 2048 user_rects = (void __user *)(unsigned long)arg->rects;
2055 ret = copy_from_user(rects, user_rects, rects_size); 2049 ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2074 2068
2075out_free: 2069out_free:
2076 kfree(rects); 2070 kfree(rects);
2077out_unlock:
2078 ttm_read_unlock(&dev_priv->reservation_sem);
2079 return ret; 2071 return ret;
2080} 2072}
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b61d6be97602..3ddfb3d0b64d 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di,
459 459
460 clkrate = clk_get_rate(di->clk_ipu); 460 clkrate = clk_get_rate(di->clk_ipu);
461 div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock); 461 div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
462 if (div == 0)
463 div = 1;
462 rate = clkrate / div; 464 rate = clkrate / div;
463 465
464 error = rate / (sig->mode.pixelclock / 1000); 466 error = rate / (sig->mode.pixelclock / 1000);
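DIV_ROUND_CLOSEST(clkrate, pixelclock) evaluates to 0 whenever the requested pixel clock is more than twice the IPU clock, and the following "rate = clkrate / div" would then divide by zero; clamping the divider to 1 avoids that. A one-file sketch of the guard, with the closest-rounding macro reimplemented for positive values:

#include <stdio.h>

/* same rounding as the kernel's DIV_ROUND_CLOSEST for positive operands */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned long pick_rate(unsigned long clkrate, unsigned long pixelclock)
{
	unsigned long div = DIV_ROUND_CLOSEST(clkrate, pixelclock);

	if (div == 0)		/* pixelclock > 2 * clkrate yields div == 0 */
		div = 1;

	return clkrate / div;
}

int main(void)
{
	/* would divide by zero without the clamp */
	printf("%lu\n", pick_rate(100000000UL, 250000000UL));
	return 0;
}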
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index 5f1ff4cc5c34..7d7ae97476e2 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -17,27 +17,31 @@
17#include <linux/acpi.h> 17#include <linux/acpi.h>
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20
20#include <asm/iosf_mbi.h> 21#include <asm/iosf_mbi.h>
22
21#include "i2c-designware-core.h" 23#include "i2c-designware-core.h"
22 24
23#define SEMAPHORE_TIMEOUT 100 25#define SEMAPHORE_TIMEOUT 100
24#define PUNIT_SEMAPHORE 0x7 26#define PUNIT_SEMAPHORE 0x7
27#define PUNIT_SEMAPHORE_BIT BIT(0)
28#define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
25 29
26static unsigned long acquired; 30static unsigned long acquired;
27 31
28static int get_sem(struct device *dev, u32 *sem) 32static int get_sem(struct device *dev, u32 *sem)
29{ 33{
30 u32 reg_val; 34 u32 data;
31 int ret; 35 int ret;
32 36
33 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE, 37 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
34 &reg_val); 38 &data);
35 if (ret) { 39 if (ret) {
36 dev_err(dev, "iosf failed to read punit semaphore\n"); 40 dev_err(dev, "iosf failed to read punit semaphore\n");
37 return ret; 41 return ret;
38 } 42 }
39 43
40 *sem = reg_val & 0x1; 44 *sem = data & PUNIT_SEMAPHORE_BIT;
41 45
42 return 0; 46 return 0;
43} 47}
@@ -52,27 +56,29 @@ static void reset_semaphore(struct device *dev)
52 return; 56 return;
53 } 57 }
54 58
55 data = data & 0xfffffffe; 59 data &= ~PUNIT_SEMAPHORE_BIT;
56 if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, 60 if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
57 PUNIT_SEMAPHORE, data)) 61 PUNIT_SEMAPHORE, data))
58 dev_err(dev, "iosf failed to reset punit semaphore during write\n"); 62 dev_err(dev, "iosf failed to reset punit semaphore during write\n");
59} 63}
60 64
61int baytrail_i2c_acquire(struct dw_i2c_dev *dev) 65static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
62{ 66{
63 u32 sem = 0; 67 u32 sem;
64 int ret; 68 int ret;
65 unsigned long start, end; 69 unsigned long start, end;
66 70
71 might_sleep();
72
67 if (!dev || !dev->dev) 73 if (!dev || !dev->dev)
68 return -ENODEV; 74 return -ENODEV;
69 75
70 if (!dev->acquire_lock) 76 if (!dev->release_lock)
71 return 0; 77 return 0;
72 78
73 /* host driver writes 0x2 to side band semaphore register */ 79 /* host driver writes to side band semaphore register */
74 ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, 80 ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
75 PUNIT_SEMAPHORE, 0x2); 81 PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE);
76 if (ret) { 82 if (ret) {
77 dev_err(dev->dev, "iosf punit semaphore request failed\n"); 83 dev_err(dev->dev, "iosf punit semaphore request failed\n");
78 return ret; 84 return ret;
@@ -81,7 +87,7 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
81 /* host driver waits for bit 0 to be set in semaphore register */ 87 /* host driver waits for bit 0 to be set in semaphore register */
82 start = jiffies; 88 start = jiffies;
83 end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); 89 end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
84 while (!time_after(jiffies, end)) { 90 do {
85 ret = get_sem(dev->dev, &sem); 91 ret = get_sem(dev->dev, &sem);
86 if (!ret && sem) { 92 if (!ret && sem) {
87 acquired = jiffies; 93 acquired = jiffies;
@@ -91,14 +97,14 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
91 } 97 }
92 98
93 usleep_range(1000, 2000); 99 usleep_range(1000, 2000);
94 } 100 } while (time_before(jiffies, end));
95 101
96 dev_err(dev->dev, "punit semaphore timed out, resetting\n"); 102 dev_err(dev->dev, "punit semaphore timed out, resetting\n");
97 reset_semaphore(dev->dev); 103 reset_semaphore(dev->dev);
98 104
99 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, 105 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
100 PUNIT_SEMAPHORE, &sem); 106 PUNIT_SEMAPHORE, &sem);
101 if (!ret) 107 if (ret)
102 dev_err(dev->dev, "iosf failed to read punit semaphore\n"); 108 dev_err(dev->dev, "iosf failed to read punit semaphore\n");
103 else 109 else
104 dev_err(dev->dev, "PUNIT SEM: %d\n", sem); 110 dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
@@ -107,9 +113,8 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
107 113
108 return -ETIMEDOUT; 114 return -ETIMEDOUT;
109} 115}
110EXPORT_SYMBOL(baytrail_i2c_acquire);
111 116
112void baytrail_i2c_release(struct dw_i2c_dev *dev) 117static void baytrail_i2c_release(struct dw_i2c_dev *dev)
113{ 118{
114 if (!dev || !dev->dev) 119 if (!dev || !dev->dev)
115 return; 120 return;
@@ -121,7 +126,6 @@ void baytrail_i2c_release(struct dw_i2c_dev *dev)
121 dev_dbg(dev->dev, "punit semaphore held for %ums\n", 126 dev_dbg(dev->dev, "punit semaphore held for %ums\n",
122 jiffies_to_msecs(jiffies - acquired)); 127 jiffies_to_msecs(jiffies - acquired));
123} 128}
124EXPORT_SYMBOL(baytrail_i2c_release);
125 129
126int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) 130int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
127{ 131{
@@ -137,7 +141,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
137 return 0; 141 return 0;
138 142
139 status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); 143 status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
140
141 if (ACPI_FAILURE(status)) 144 if (ACPI_FAILURE(status))
142 return 0; 145 return 0;
143 146
@@ -153,7 +156,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
153 156
154 return 0; 157 return 0;
155} 158}
156EXPORT_SYMBOL(i2c_dw_eval_lock_support);
157 159
158MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); 160MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
159MODULE_DESCRIPTION("Baytrail I2C Semaphore driver"); 161MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
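Besides making the acquire/release helpers static and introducing named bit macros, the acquire path now polls in a do/while (time_before(...)) loop, so the semaphore is sampled at least once even if the first iteration starts after the deadline has already passed (for example after the thread was scheduled out for longer than the timeout). A standalone sketch of that deadline-polling pattern, with clock_gettime() standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static bool read_semaphore(void)
{
	return false;	/* pretend the other side never releases it */
}

static int acquire(int timeout_ms)
{
	long long end = now_ms() + timeout_ms;

	/* do/while: poll at least once, even if we start past the deadline */
	do {
		if (read_semaphore())
			return 0;
		usleep(1000);
	} while (now_ms() < end);

	return -1;	/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("acquire -> %d\n", acquire(5));
	return 0;
}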
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 210cf4874cb7..edf274cabe81 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev)
679 status = driver->remove(client); 679 status = driver->remove(client);
680 } 680 }
681 681
682 if (dev->of_node)
683 irq_dispose_mapping(client->irq);
684
685 dev_pm_domain_detach(&client->dev, true); 682 dev_pm_domain_detach(&client->dev, true);
686 return status; 683 return status;
687} 684}
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 51672256072b..b96c636470ef 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -58,20 +58,11 @@
58 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ 58 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
59 } 59 }
60 60
61/* LSB is in nV to eliminate floating point */
62static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
63
64/*
65 * scales calculated as:
66 * rates_to_lsb[sample_rate] / (1 << pga);
67 * pga is 1 for 0, 2
68 */
69
70static const int mcp3422_scales[4][4] = { 61static const int mcp3422_scales[4][4] = {
71 { 1000000, 250000, 62500, 15625 }, 62 { 1000000, 500000, 250000, 125000 },
72 { 500000 , 125000, 31250, 7812 }, 63 { 250000 , 125000, 62500 , 31250 },
73 { 250000 , 62500 , 15625, 3906 }, 64 { 62500 , 31250 , 15625 , 7812 },
74 { 125000 , 31250 , 7812 , 1953 } }; 65 { 15625 , 7812 , 3906 , 1953 } };
75 66
76/* Constant msleep times for data acquisitions */ 67/* Constant msleep times for data acquisitions */
77static const int mcp3422_read_times[4] = { 68static const int mcp3422_read_times[4] = {
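The corrected mcp3422_scales table is simply rates_to_lsb[rate] divided by the PGA gain (1, 2, 4 or 8): rows are sample-rate settings, columns are PGA settings. The old table was effectively the transpose of that, which is what the replacement fixes. A quick check that regenerates the new table from the rule in the deleted comment:

#include <stdio.h>

/* LSB size in nV for the four sample-rate settings (from the old comment). */
static const unsigned int rates_to_lsb[4] = { 1000000, 250000, 62500, 15625 };

int main(void)
{
	for (int rate = 0; rate < 4; rate++)
		for (int pga = 0; pga < 4; pga++)
			printf("%7u%s", rates_to_lsb[rate] / (1u << pga),
			       pga == 3 ? "\n" : ", ");
	return 0;
}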
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index b9666f2f5e51..fabd24edc2a1 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -296,7 +296,8 @@ static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data)
296 if (iadc->poll_eoc) { 296 if (iadc->poll_eoc) {
297 ret = iadc_poll_wait_eoc(iadc, wait); 297 ret = iadc_poll_wait_eoc(iadc, wait);
298 } else { 298 } else {
299 ret = wait_for_completion_timeout(&iadc->complete, wait); 299 ret = wait_for_completion_timeout(&iadc->complete,
300 usecs_to_jiffies(wait));
300 if (!ret) 301 if (!ret)
301 ret = -ETIMEDOUT; 302 ret = -ETIMEDOUT;
302 else 303 else
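wait_for_completion_timeout() takes its timeout in jiffies, not microseconds, so the raw "wait" value has to go through usecs_to_jiffies(); passing microseconds directly inflates the timeout by orders of magnitude. A tiny illustration of the size of the mistake, assuming HZ = 100 and a simplified round-up conversion for the example (the real conversion is the kernel's usecs_to_jiffies()):

#include <stdio.h>

#define HZ 100	/* example tick rate; the real value is a kernel config choice */

static unsigned long usecs_to_jiffies_example(unsigned long usec)
{
	/* round up so short waits do not become zero ticks */
	return (usec * HZ + 999999UL) / 1000000UL;
}

int main(void)
{
	unsigned long wait_us = 2000;
	unsigned long ms_per_jiffy = 1000 / HZ;

	printf("intended: %lu jiffies (%lu ms)\n",
	       usecs_to_jiffies_example(wait_us),
	       usecs_to_jiffies_example(wait_us) * ms_per_jiffy);
	printf("bug: passing %lu usecs as jiffies = %lu ms\n",
	       wait_us, wait_us * ms_per_jiffy);
	return 0;
}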
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 52d70435f5a1..55a90082a29b 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -640,6 +640,7 @@ static int ssp_remove(struct spi_device *spi)
640 return 0; 640 return 0;
641} 641}
642 642
643#ifdef CONFIG_PM_SLEEP
643static int ssp_suspend(struct device *dev) 644static int ssp_suspend(struct device *dev)
644{ 645{
645 int ret; 646 int ret;
@@ -688,6 +689,7 @@ static int ssp_resume(struct device *dev)
688 689
689 return 0; 690 return 0;
690} 691}
692#endif /* CONFIG_PM_SLEEP */
691 693
692static const struct dev_pm_ops ssp_pm_ops = { 694static const struct dev_pm_ops ssp_pm_ops = {
693 SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume) 695 SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume)
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index f57562aa396f..15c73e20272d 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi)
322 st = iio_priv(indio_dev); 322 st = iio_priv(indio_dev);
323 spi_set_drvdata(spi, indio_dev); 323 spi_set_drvdata(spi, indio_dev);
324 324
325 st->reg = devm_regulator_get(&spi->dev, "vcc"); 325 st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
326 if (!IS_ERR(st->reg)) { 326 if (!IS_ERR(st->reg)) {
327 ret = regulator_enable(st->reg); 327 ret = regulator_enable(st->reg);
328 if (ret) 328 if (ret)
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 623c145d8a97..7d79a1ac5f5f 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -29,6 +29,7 @@
29#include <linux/wait.h> 29#include <linux/wait.h>
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31#include <linux/completion.h> 31#include <linux/completion.h>
32#include <linux/mutex.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
33#include <linux/gpio.h> 34#include <linux/gpio.h>
34#include <linux/of_gpio.h> 35#include <linux/of_gpio.h>
@@ -39,8 +40,12 @@
39 40
40#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */ 41#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */
41 42
42#define DHT11_EDGES_PREAMBLE 4 43#define DHT11_EDGES_PREAMBLE 2
43#define DHT11_BITS_PER_READ 40 44#define DHT11_BITS_PER_READ 40
45/*
46 * Note that when reading the sensor actually 84 edges are detected, but
47 * since the last edge is not significant, we only store 83:
48 */
44#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1) 49#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
45 50
46/* Data transmission timing (nano seconds) */ 51/* Data transmission timing (nano seconds) */
@@ -57,6 +62,7 @@ struct dht11 {
57 int irq; 62 int irq;
58 63
59 struct completion completion; 64 struct completion completion;
65 struct mutex lock;
60 66
61 s64 timestamp; 67 s64 timestamp;
62 int temperature; 68 int temperature;
@@ -88,7 +94,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
88 unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum; 94 unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
89 95
90 /* Calculate timestamp resolution */ 96 /* Calculate timestamp resolution */
91 for (i = 0; i < dht11->num_edges; ++i) { 97 for (i = 1; i < dht11->num_edges; ++i) {
92 t = dht11->edges[i].ts - dht11->edges[i-1].ts; 98 t = dht11->edges[i].ts - dht11->edges[i-1].ts;
93 if (t > 0 && t < timeres) 99 if (t > 0 && t < timeres)
94 timeres = t; 100 timeres = t;
@@ -138,6 +144,27 @@ static int dht11_decode(struct dht11 *dht11, int offset)
138 return 0; 144 return 0;
139} 145}
140 146
147/*
148 * IRQ handler called on GPIO edges
149 */
150static irqreturn_t dht11_handle_irq(int irq, void *data)
151{
152 struct iio_dev *iio = data;
153 struct dht11 *dht11 = iio_priv(iio);
154
155 /* TODO: Consider making the handler safe for IRQ sharing */
156 if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
157 dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
158 dht11->edges[dht11->num_edges++].value =
159 gpio_get_value(dht11->gpio);
160
161 if (dht11->num_edges >= DHT11_EDGES_PER_READ)
162 complete(&dht11->completion);
163 }
164
165 return IRQ_HANDLED;
166}
167
141static int dht11_read_raw(struct iio_dev *iio_dev, 168static int dht11_read_raw(struct iio_dev *iio_dev,
142 const struct iio_chan_spec *chan, 169 const struct iio_chan_spec *chan,
143 int *val, int *val2, long m) 170 int *val, int *val2, long m)
@@ -145,6 +172,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
145 struct dht11 *dht11 = iio_priv(iio_dev); 172 struct dht11 *dht11 = iio_priv(iio_dev);
146 int ret; 173 int ret;
147 174
175 mutex_lock(&dht11->lock);
148 if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) { 176 if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
149 reinit_completion(&dht11->completion); 177 reinit_completion(&dht11->completion);
150 178
@@ -157,8 +185,17 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
157 if (ret) 185 if (ret)
158 goto err; 186 goto err;
159 187
188 ret = request_irq(dht11->irq, dht11_handle_irq,
189 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
190 iio_dev->name, iio_dev);
191 if (ret)
192 goto err;
193
160 ret = wait_for_completion_killable_timeout(&dht11->completion, 194 ret = wait_for_completion_killable_timeout(&dht11->completion,
161 HZ); 195 HZ);
196
197 free_irq(dht11->irq, iio_dev);
198
162 if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) { 199 if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
163 dev_err(&iio_dev->dev, 200 dev_err(&iio_dev->dev,
164 "Only %d signal edges detected\n", 201 "Only %d signal edges detected\n",
@@ -185,6 +222,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
185 ret = -EINVAL; 222 ret = -EINVAL;
186err: 223err:
187 dht11->num_edges = -1; 224 dht11->num_edges = -1;
225 mutex_unlock(&dht11->lock);
188 return ret; 226 return ret;
189} 227}
190 228
@@ -193,27 +231,6 @@ static const struct iio_info dht11_iio_info = {
193 .read_raw = dht11_read_raw, 231 .read_raw = dht11_read_raw,
194}; 232};
195 233
196/*
197 * IRQ handler called on GPIO edges
198*/
199static irqreturn_t dht11_handle_irq(int irq, void *data)
200{
201 struct iio_dev *iio = data;
202 struct dht11 *dht11 = iio_priv(iio);
203
204 /* TODO: Consider making the handler safe for IRQ sharing */
205 if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
206 dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
207 dht11->edges[dht11->num_edges++].value =
208 gpio_get_value(dht11->gpio);
209
210 if (dht11->num_edges >= DHT11_EDGES_PER_READ)
211 complete(&dht11->completion);
212 }
213
214 return IRQ_HANDLED;
215}
216
217static const struct iio_chan_spec dht11_chan_spec[] = { 234static const struct iio_chan_spec dht11_chan_spec[] = {
218 { .type = IIO_TEMP, 235 { .type = IIO_TEMP,
219 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }, 236 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), },
@@ -256,11 +273,6 @@ static int dht11_probe(struct platform_device *pdev)
256 dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio); 273 dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio);
257 return -EINVAL; 274 return -EINVAL;
258 } 275 }
259 ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq,
260 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
261 pdev->name, iio);
262 if (ret)
263 return ret;
264 276
265 dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1; 277 dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
266 dht11->num_edges = -1; 278 dht11->num_edges = -1;
@@ -268,6 +280,7 @@ static int dht11_probe(struct platform_device *pdev)
268 platform_set_drvdata(pdev, iio); 280 platform_set_drvdata(pdev, iio);
269 281
270 init_completion(&dht11->completion); 282 init_completion(&dht11->completion);
283 mutex_init(&dht11->lock);
271 iio->name = pdev->name; 284 iio->name = pdev->name;
272 iio->dev.parent = &pdev->dev; 285 iio->dev.parent = &pdev->dev;
273 iio->info = &dht11_iio_info; 286 iio->info = &dht11_iio_info;
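The dht11 change above serializes measurements with a mutex and requests the GPIO interrupt only for the duration of a single read, so edges arriving between reads never hit the handler. A condensed sketch of that pattern, not the driver's actual code (the foo_ names and struct layout are hypothetical):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>

struct foo_sensor {
	int irq;
	struct mutex lock;           /* serializes measurements */
	struct completion done;      /* completed by the IRQ handler */
};

static int foo_measure(struct foo_sensor *s, irq_handler_t handler, void *dev_id)
{
	long timeleft;
	int ret;

	mutex_lock(&s->lock);
	reinit_completion(&s->done);

	/* arm the edge interrupt only while a measurement is in flight */
	ret = request_irq(s->irq, handler,
			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			  "foo-sensor", dev_id);
	if (ret)
		goto out;

	timeleft = wait_for_completion_killable_timeout(&s->done, HZ);
	free_irq(s->irq, dev_id);

	if (timeleft < 0)
		ret = timeleft;              /* killed while waiting */
	else if (timeleft == 0)
		ret = -ETIMEDOUT;            /* sensor never answered */
	else
		ret = 0;
out:
	mutex_unlock(&s->lock);
	return ret;
}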
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index b54164677b89..fa3b809aff5e 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -45,12 +45,12 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
45 struct iio_chan_spec const *chan, int *val, 45 struct iio_chan_spec const *chan, int *val,
46 int *val2, long mask) 46 int *val2, long mask)
47{ 47{
48 struct i2c_client *client = iio_priv(indio_dev); 48 struct i2c_client **client = iio_priv(indio_dev);
49 int ret; 49 int ret;
50 50
51 switch (mask) { 51 switch (mask) {
52 case IIO_CHAN_INFO_RAW: 52 case IIO_CHAN_INFO_RAW:
53 ret = i2c_smbus_read_word_data(client, 53 ret = i2c_smbus_read_word_data(*client,
54 chan->type == IIO_TEMP ? 54 chan->type == IIO_TEMP ?
55 SI7020CMD_TEMP_HOLD : 55 SI7020CMD_TEMP_HOLD :
56 SI7020CMD_RH_HOLD); 56 SI7020CMD_RH_HOLD);
@@ -126,7 +126,7 @@ static int si7020_probe(struct i2c_client *client,
126 /* Wait the maximum power-up time after software reset. */ 126 /* Wait the maximum power-up time after software reset. */
127 msleep(15); 127 msleep(15);
128 128
129 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client)); 129 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
130 if (!indio_dev) 130 if (!indio_dev)
131 return -ENOMEM; 131 return -ENOMEM;
132 132
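The si7020 fix above hinges on what the IIO private area actually holds: probe() stores a pointer to the i2c_client there (hence sizeof(*data)), so read_raw() must fetch a pointer-to-pointer from iio_priv() and dereference it once more. A small userspace illustration of the same layout; every name here is made up for the demo:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct client { int addr; };

int main(void)
{
	struct client real = { .addr = 0x40 };
	struct client *stored = &real;

	/* allocate room for one pointer, as sizeof(*data) does in probe() */
	void *priv_area = malloc(sizeof(stored));
	if (!priv_area)
		return 1;
	memcpy(priv_area, &stored, sizeof(stored));

	/* read path: treat the private area as a pointer-to-pointer */
	struct client **data = priv_area;
	printf("client address: 0x%02x\n", (*data)->addr);

	free(priv_area);
	return 0;
}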
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index b70873de04ea..fa795dcd5f75 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -26,6 +26,7 @@
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/debugfs.h> 28#include <linux/debugfs.h>
29#include <linux/bitops.h>
29 30
30#include <linux/iio/iio.h> 31#include <linux/iio/iio.h>
31#include <linux/iio/sysfs.h> 32#include <linux/iio/sysfs.h>
@@ -414,7 +415,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
414 mutex_unlock(&indio_dev->mlock); 415 mutex_unlock(&indio_dev->mlock);
415 if (ret) 416 if (ret)
416 return ret; 417 return ret;
417 val16 = ((val16 & 0xFFF) << 4) >> 4; 418 val16 = sign_extend32(val16, 11);
418 *val = val16; 419 *val = val16;
419 return IIO_VAL_INT; 420 return IIO_VAL_INT;
420 case IIO_CHAN_INFO_OFFSET: 421 case IIO_CHAN_INFO_OFFSET:
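sign_extend32() is declared in include/linux/bitops.h (hence the new include) and makes the 12-bit sign extension explicit instead of relying on the shift-left/shift-right trick. A userspace re-implementation with the same semantics, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* mirrors the kernel's sign_extend32(value, index): bit 'index' is the sign bit */
static int32_t sign_extend32_demo(uint32_t value, int index)
{
	uint8_t shift = 31 - index;
	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* 0xFFF is -1 when read as a 12-bit two's complement value */
	printf("%d\n", sign_extend32_demo(0xFFF, 11));   /* -1    */
	printf("%d\n", sign_extend32_demo(0x7FF, 11));   /* 2047  */
	printf("%d\n", sign_extend32_demo(0x800, 11));   /* -2048 */
	return 0;
}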
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f73e60b7a796..d8d5bed65e07 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -780,7 +780,11 @@ static int inv_mpu_probe(struct i2c_client *client,
780 780
781 i2c_set_clientdata(client, indio_dev); 781 i2c_set_clientdata(client, indio_dev);
782 indio_dev->dev.parent = &client->dev; 782 indio_dev->dev.parent = &client->dev;
783 indio_dev->name = id->name; 783 /* id will be NULL when enumerated via ACPI */
784 if (id)
785 indio_dev->name = (char *)id->name;
786 else
787 indio_dev->name = (char *)dev_name(&client->dev);
784 indio_dev->channels = inv_mpu_channels; 788 indio_dev->channels = inv_mpu_channels;
785 indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); 789 indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
786 790
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index ae68c64bdad3..a224afd6380c 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -73,6 +73,7 @@ config CM36651
73config GP2AP020A00F 73config GP2AP020A00F
74 tristate "Sharp GP2AP020A00F Proximity/ALS sensor" 74 tristate "Sharp GP2AP020A00F Proximity/ALS sensor"
75 depends on I2C 75 depends on I2C
76 select REGMAP_I2C
76 select IIO_BUFFER 77 select IIO_BUFFER
77 select IIO_TRIGGERED_BUFFER 78 select IIO_TRIGGERED_BUFFER
78 select IRQ_WORK 79 select IRQ_WORK
@@ -126,6 +127,7 @@ config HID_SENSOR_PROX
126config JSA1212 127config JSA1212
127 tristate "JSA1212 ALS and proximity sensor driver" 128 tristate "JSA1212 ALS and proximity sensor driver"
128 depends on I2C 129 depends on I2C
130 select REGMAP_I2C
129 help 131 help
130 Say Y here if you want to build a IIO driver for JSA1212 132 Say Y here if you want to build a IIO driver for JSA1212
131 proximity & ALS sensor device. 133 proximity & ALS sensor device.
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 4c7a4c52dd06..a5d6de72c523 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -18,6 +18,8 @@ config AK8975
18 18
19config AK09911 19config AK09911
20 tristate "Asahi Kasei AK09911 3-axis Compass" 20 tristate "Asahi Kasei AK09911 3-axis Compass"
21 depends on I2C
22 depends on GPIOLIB
21 select AK8975 23 select AK8975
22 help 24 help
23 Deprecated: AK09911 is now supported by AK8975 driver. 25 Deprecated: AK09911 is now supported by AK8975 driver.
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 8ff612d160b0..563932500ff1 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
411 411
412 input_set_drvdata(input, keypad); 412 input_set_drvdata(input, keypad);
413 413
414 error = request_threaded_irq(irq, NULL, 414 error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
415 tc3589x_keypad_irq, plat->irqtype, 415 plat->irqtype | IRQF_ONESHOT,
416 "tc3589x-keypad", keypad); 416 "tc3589x-keypad", keypad);
417 if (error < 0) { 417 if (error < 0) {
418 dev_err(&pdev->dev, 418 dev_err(&pdev->dev,
419 "Could not allocate irq %d,error %d\n", 419 "Could not allocate irq %d,error %d\n",
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 59d4dcddf6de..98228773a111 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c,
187 idev->private = m; 187 idev->private = m;
188 idev->input->name = MMA8450_DRV_NAME; 188 idev->input->name = MMA8450_DRV_NAME;
189 idev->input->id.bustype = BUS_I2C; 189 idev->input->id.bustype = BUS_I2C;
190 idev->input->dev.parent = &c->dev;
190 idev->poll = mma8450_poll; 191 idev->poll = mma8450_poll;
191 idev->poll_interval = POLL_INTERVAL; 192 idev->poll_interval = POLL_INTERVAL;
192 idev->poll_interval_max = POLL_INTERVAL_MAX; 193 idev->poll_interval_max = POLL_INTERVAL_MAX;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d28726a0ef85..1bd15ebc01f2 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
2605 return -ENOMEM; 2605 return -ENOMEM;
2606 2606
2607 error = alps_identify(psmouse, priv); 2607 error = alps_identify(psmouse, priv);
2608 if (error) 2608 if (error) {
2609 kfree(priv);
2609 return error; 2610 return error;
2611 }
2610 2612
2611 if (set_properties) { 2613 if (set_properties) {
2612 psmouse->vendor = "ALPS"; 2614 psmouse->vendor = "ALPS";
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 77e9d70a986b..1e2291c378fe 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -20,7 +20,7 @@
20#include <linux/input/mt.h> 20#include <linux/input/mt.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/unaligned/access_ok.h> 23#include <asm/unaligned.h>
24#include "cyapa.h" 24#include "cyapa.h"
25 25
26 26
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index ddf5393a1180..5b611dd71e79 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -17,7 +17,7 @@
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/completion.h> 18#include <linux/completion.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/unaligned/access_ok.h> 20#include <asm/unaligned.h>
21#include <linux/crc-itu-t.h> 21#include <linux/crc-itu-t.h>
22#include "cyapa.h" 22#include "cyapa.h"
23 23
@@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
1926 electrodes_tx = cyapa->electrodes_x; 1926 electrodes_tx = cyapa->electrodes_x;
1927 max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & 1927 max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) &
1928 ~7u) * electrodes_tx; 1928 ~7u) * electrodes_tx;
1929 } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) { 1929 } else {
1930 offset = 2; 1930 offset = 2;
1931 max_element_cnt = cyapa->electrodes_x + 1931 max_element_cnt = cyapa->electrodes_x +
1932 cyapa->electrodes_y; 1932 cyapa->electrodes_y;
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 757f78a94aec..23d259416f2f 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse)
67 67
68#define FOC_MAX_FINGERS 5 68#define FOC_MAX_FINGERS 5
69 69
70#define FOC_MAX_X 2431
71#define FOC_MAX_Y 1663
72
73/* 70/*
74 * Current state of a single finger on the touchpad. 71 * Current state of a single finger on the touchpad.
75 */ 72 */
@@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse)
129 input_mt_slot(dev, i); 126 input_mt_slot(dev, i);
130 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); 127 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
131 if (active) { 128 if (active) {
132 input_report_abs(dev, ABS_MT_POSITION_X, finger->x); 129 unsigned int clamped_x, clamped_y;
130 /*
131 * The touchpad might report invalid data, so we clamp
132 * the resulting values so that we do not confuse
133 * userspace.
134 */
135 clamped_x = clamp(finger->x, 0U, priv->x_max);
136 clamped_y = clamp(finger->y, 0U, priv->y_max);
137 input_report_abs(dev, ABS_MT_POSITION_X, clamped_x);
133 input_report_abs(dev, ABS_MT_POSITION_Y, 138 input_report_abs(dev, ABS_MT_POSITION_Y,
134 FOC_MAX_Y - finger->y); 139 priv->y_max - clamped_y);
135 } 140 }
136 } 141 }
137 input_mt_report_pointer_emulation(dev, true); 142 input_mt_report_pointer_emulation(dev, true);
@@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse,
180 185
181 state->pressed = (packet[0] >> 4) & 1; 186 state->pressed = (packet[0] >> 4) & 1;
182 187
183 /*
184 * packet[5] contains some kind of tool size in the most
185 * significant nibble. 0xff is a special value (latching) that
186 * signals a large contact area.
187 */
188 if (packet[5] == 0xff) {
189 state->fingers[finger].valid = false;
190 return;
191 }
192
193 state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; 188 state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2];
194 state->fingers[finger].y = (packet[3] << 8) | packet[4]; 189 state->fingers[finger].y = (packet[3] << 8) | packet[4];
195 state->fingers[finger].valid = true; 190 state->fingers[finger].valid = true;
@@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse)
381 376
382 return 0; 377 return 0;
383} 378}
379
380void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution)
381{
382 /* not supported yet */
383}
384
385static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate)
386{
387 /* not supported yet */
388}
389
390static void focaltech_set_scale(struct psmouse *psmouse,
391 enum psmouse_scale scale)
392{
393 /* not supported yet */
394}
395
384int focaltech_init(struct psmouse *psmouse) 396int focaltech_init(struct psmouse *psmouse)
385{ 397{
386 struct focaltech_data *priv; 398 struct focaltech_data *priv;
@@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse)
415 psmouse->cleanup = focaltech_reset; 427 psmouse->cleanup = focaltech_reset;
416 /* resync is not supported yet */ 428 /* resync is not supported yet */
417 psmouse->resync_time = 0; 429 psmouse->resync_time = 0;
430 /*
431 * rate/resolution/scale changes are not supported yet, and
432 * the generic implementations of these functions seem to
433 * confuse some touchpads
434 */
435 psmouse->set_resolution = focaltech_set_resolution;
436 psmouse->set_rate = focaltech_set_rate;
437 psmouse->set_scale = focaltech_set_scale;
418 438
419 return 0; 439 return 0;
420 440
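The focaltech change drops the hard-coded FOC_MAX_X/Y constants and instead clamps each reported coordinate to the range the touchpad itself advertised (priv->x_max / priv->y_max) before flipping Y. The kernel's clamp() is a type-checked macro from include/linux/kernel.h; a plain C stand-in showing the effect, with example limits:

#include <stdio.h>

static unsigned int clamp_uint(unsigned int val, unsigned int lo, unsigned int hi)
{
	if (val < lo)
		return lo;
	if (val > hi)
		return hi;
	return val;
}

int main(void)
{
	unsigned int x_max = 2431, y_max = 1663;   /* example limits */

	printf("%u\n", clamp_uint(1200, 0, x_max));          /* 1200, in range     */
	printf("%u\n", clamp_uint(4095, 0, x_max));          /* 2431, clamped high */
	printf("%u\n", y_max - clamp_uint(2000, 0, y_max));  /* 0, inverted Y      */
	return 0;
}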
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 4ccd01d7a48d..8bc61237bc1b 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
454} 454}
455 455
456/* 456/*
457 * Here we set the mouse scaling.
458 */
459
460static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale)
461{
462 ps2_command(&psmouse->ps2dev, NULL,
463 scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 :
464 PSMOUSE_CMD_SETSCALE11);
465}
466
467/*
457 * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. 468 * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
458 */ 469 */
459 470
@@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
689 700
690 psmouse->set_rate = psmouse_set_rate; 701 psmouse->set_rate = psmouse_set_rate;
691 psmouse->set_resolution = psmouse_set_resolution; 702 psmouse->set_resolution = psmouse_set_resolution;
703 psmouse->set_scale = psmouse_set_scale;
692 psmouse->poll = psmouse_poll; 704 psmouse->poll = psmouse_poll;
693 psmouse->protocol_handler = psmouse_process_byte; 705 psmouse->protocol_handler = psmouse_process_byte;
694 psmouse->pktsize = 3; 706 psmouse->pktsize = 3;
@@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse)
1160 if (psmouse_max_proto != PSMOUSE_PS2) { 1172 if (psmouse_max_proto != PSMOUSE_PS2) {
1161 psmouse->set_rate(psmouse, psmouse->rate); 1173 psmouse->set_rate(psmouse, psmouse->rate);
1162 psmouse->set_resolution(psmouse, psmouse->resolution); 1174 psmouse->set_resolution(psmouse, psmouse->resolution);
1163 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); 1175 psmouse->set_scale(psmouse, PSMOUSE_SCALE11);
1164 } 1176 }
1165} 1177}
1166 1178
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index c2ff137ecbdb..d02e1bdc9ae4 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -36,6 +36,11 @@ typedef enum {
36 PSMOUSE_FULL_PACKET 36 PSMOUSE_FULL_PACKET
37} psmouse_ret_t; 37} psmouse_ret_t;
38 38
39enum psmouse_scale {
40 PSMOUSE_SCALE11,
41 PSMOUSE_SCALE21
42};
43
39struct psmouse { 44struct psmouse {
40 void *private; 45 void *private;
41 struct input_dev *dev; 46 struct input_dev *dev;
@@ -67,6 +72,7 @@ struct psmouse {
67 psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); 72 psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse);
68 void (*set_rate)(struct psmouse *psmouse, unsigned int rate); 73 void (*set_rate)(struct psmouse *psmouse, unsigned int rate);
69 void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); 74 void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution);
75 void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale);
70 76
71 int (*reconnect)(struct psmouse *psmouse); 77 int (*reconnect)(struct psmouse *psmouse);
72 void (*disconnect)(struct psmouse *psmouse); 78 void (*disconnect)(struct psmouse *psmouse);
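The new set_scale hook follows the same pattern as set_rate and set_resolution: psmouse_apply_defaults() installs the generic implementation, and a protocol driver such as focaltech can override it with a stub when the PS/2 scale command confuses the hardware. A standalone illustration of that callback-override pattern; the struct and names below are invented for the demo:

#include <stdio.h>

enum scale { SCALE11, SCALE21 };

struct mouse {
	void (*set_scale)(struct mouse *m, enum scale s);
};

static void generic_set_scale(struct mouse *m, enum scale s)
{
	(void)m;
	printf("sending scale %s command\n", s == SCALE21 ? "2:1" : "1:1");
}

static void noop_set_scale(struct mouse *m, enum scale s)
{
	/* hardware cannot handle the command; do nothing */
	(void)m;
	(void)s;
}

int main(void)
{
	struct mouse generic = { .set_scale = generic_set_scale };
	struct mouse quirky  = { .set_scale = noop_set_scale };

	generic.set_scale(&generic, SCALE11);
	quirky.set_scale(&quirky, SCALE11);
	return 0;
}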
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 58917525126e..6261fd6d7c3c 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I
943 tristate "Allwinner sun4i resistive touchscreen controller support" 943 tristate "Allwinner sun4i resistive touchscreen controller support"
944 depends on ARCH_SUNXI || COMPILE_TEST 944 depends on ARCH_SUNXI || COMPILE_TEST
945 depends on HWMON 945 depends on HWMON
946 depends on THERMAL || !THERMAL_OF
946 help 947 help
947 This selects support for the resistive touchscreen controller 948 This selects support for the resistive touchscreen controller
948 found on Allwinner sunxi SoCs. 949 found on Allwinner sunxi SoCs.
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index baa0d9786f50..1ae4e547b419 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE
23config IOMMU_IO_PGTABLE_LPAE 23config IOMMU_IO_PGTABLE_LPAE
24 bool "ARMv7/v8 Long Descriptor Format" 24 bool "ARMv7/v8 Long Descriptor Format"
25 select IOMMU_IO_PGTABLE 25 select IOMMU_IO_PGTABLE
26 depends on ARM || ARM64 || COMPILE_TEST
26 help 27 help
27 Enable support for the ARM long descriptor pagetable format. 28 Enable support for the ARM long descriptor pagetable format.
28 This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page 29 This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -63,6 +64,7 @@ config MSM_IOMMU
63 bool "MSM IOMMU Support" 64 bool "MSM IOMMU Support"
64 depends on ARM 65 depends on ARM
65 depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST 66 depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
67 depends on BROKEN
66 select IOMMU_API 68 select IOMMU_API
67 help 69 help
68 Support for the IOMMUs found on certain Qualcomm SOCs. 70 Support for the IOMMUs found on certain Qualcomm SOCs.
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 7ce52737c7a1..dc14fec4ede1 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = {
1186 1186
1187static int __init exynos_iommu_init(void) 1187static int __init exynos_iommu_init(void)
1188{ 1188{
1189 struct device_node *np;
1189 int ret; 1190 int ret;
1190 1191
1192 np = of_find_matching_node(NULL, sysmmu_of_match);
1193 if (!np)
1194 return 0;
1195
1196 of_node_put(np);
1197
1191 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", 1198 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1192 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); 1199 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1193 if (!lv2table_kmem_cache) { 1200 if (!lv2table_kmem_cache) {
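The exynos initcall (and, further down, the omap and rockchip ones) now bails out early when the device tree contains no matching node, so the driver no longer allocates kmem caches or registers bus ops on unrelated platforms. The shape of that guard, as a sketch; demo_of_match and its compatible string are hypothetical:

#include <linux/init.h>
#include <linux/of.h>

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-iommu" },   /* hypothetical compatible */
	{ },
};

static int __init demo_init(void)
{
	struct device_node *np;

	/* no matching node in the DT: nothing to do on this platform */
	np = of_find_matching_node(NULL, demo_of_match);
	if (!np)
		return 0;
	of_node_put(np);

	/* ... allocate caches, register bus ops, ... */
	return 0;
}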
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 5a500edf00cc..b610a8dee238 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -56,7 +56,8 @@
56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ 56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
57 * (d)->bits_per_level) + (d)->pg_shift) 57 * (d)->bits_per_level) + (d)->pg_shift)
58 58
59#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) 59#define ARM_LPAE_PAGES_PER_PGD(d) \
60 DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
60 61
61/* 62/*
62 * Calculate the index at level l used to map virtual address a using the 63 * Calculate the index at level l used to map virtual address a using the
@@ -66,7 +67,7 @@
66 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) 67 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
67 68
68#define ARM_LPAE_LVL_IDX(a,l,d) \ 69#define ARM_LPAE_LVL_IDX(a,l,d) \
69 (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ 70 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
70 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) 71 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
71 72
72/* Calculate the block/page mapping size at level l for pagetable in d. */ 73/* Calculate the block/page mapping size at level l for pagetable in d. */
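The ARM_LPAE_PAGES_PER_PGD fix matters when the top-level table is smaller than one page: a plain right shift truncates to zero, while DIV_ROUND_UP still reports the single page backing it. DIV_ROUND_UP comes from include/linux/kernel.h; a quick standalone check of the difference, using illustrative sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long pgd_size = 64;    /* e.g. 8 entries of 8 bytes */
	unsigned long pg_shift = 12;    /* 4K pages                  */

	/* the old formula reports 0 pages for a sub-page PGD */
	printf("shift:    %lu\n", pgd_size >> pg_shift);
	/* the new formula still accounts for the page backing it */
	printf("round up: %lu\n", DIV_ROUND_UP(pgd_size, 1UL << pg_shift));
	return 0;
}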
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index f59f857b702e..a4ba851825c2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void)
1376 struct kmem_cache *p; 1376 struct kmem_cache *p;
1377 const unsigned long flags = SLAB_HWCACHE_ALIGN; 1377 const unsigned long flags = SLAB_HWCACHE_ALIGN;
1378 size_t align = 1 << 10; /* L2 pagetable alignement */ 1378 size_t align = 1 << 10; /* L2 pagetable alignement */
1379 struct device_node *np;
1380
1381 np = of_find_matching_node(NULL, omap_iommu_of_match);
1382 if (!np)
1383 return 0;
1384
1385 of_node_put(np);
1379 1386
1380 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, 1387 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1381 iopte_cachep_ctor); 1388 iopte_cachep_ctor);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 6a8b1ec4a48a..9f74fddcd304 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = {
1015 1015
1016static int __init rk_iommu_init(void) 1016static int __init rk_iommu_init(void)
1017{ 1017{
1018 struct device_node *np;
1018 int ret; 1019 int ret;
1019 1020
1021 np = of_find_matching_node(NULL, rk_iommu_dt_ids);
1022 if (!np)
1023 return 0;
1024
1025 of_node_put(np);
1026
1020 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); 1027 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1021 if (ret) 1028 if (ret)
1022 return ret; 1029 return ret;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 463c235acbdc..4387dae14e45 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base;
69static void __iomem *main_int_base; 69static void __iomem *main_int_base;
70static struct irq_domain *armada_370_xp_mpic_domain; 70static struct irq_domain *armada_370_xp_mpic_domain;
71static u32 doorbell_mask_reg; 71static u32 doorbell_mask_reg;
72static int parent_irq;
72#ifdef CONFIG_PCI_MSI 73#ifdef CONFIG_PCI_MSI
73static struct irq_domain *armada_370_xp_msi_domain; 74static struct irq_domain *armada_370_xp_msi_domain;
74static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); 75static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
356{ 357{
357 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) 358 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
358 armada_xp_mpic_smp_cpu_init(); 359 armada_xp_mpic_smp_cpu_init();
360
359 return NOTIFY_OK; 361 return NOTIFY_OK;
360} 362}
361 363
@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
364 .priority = 100, 366 .priority = 100,
365}; 367};
366 368
369static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
370 unsigned long action, void *hcpu)
371{
372 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
373 enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
374
375 return NOTIFY_OK;
376}
377
378static struct notifier_block mpic_cascaded_cpu_notifier = {
379 .notifier_call = mpic_cascaded_secondary_init,
380 .priority = 100,
381};
382
367#endif /* CONFIG_SMP */ 383#endif /* CONFIG_SMP */
368 384
369static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { 385static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
539 struct device_node *parent) 555 struct device_node *parent)
540{ 556{
541 struct resource main_int_res, per_cpu_int_res; 557 struct resource main_int_res, per_cpu_int_res;
542 int parent_irq, nr_irqs, i; 558 int nr_irqs, i;
543 u32 control; 559 u32 control;
544 560
545 BUG_ON(of_address_to_resource(node, 0, &main_int_res)); 561 BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
587 register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); 603 register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
588#endif 604#endif
589 } else { 605 } else {
606#ifdef CONFIG_SMP
607 register_cpu_notifier(&mpic_cascaded_cpu_notifier);
608#endif
590 irq_set_chained_handler(parent_irq, 609 irq_set_chained_handler(parent_irq,
591 armada_370_xp_mpic_handle_cascade_irq); 610 armada_370_xp_mpic_handle_cascade_irq);
592 } 611 }
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d8996bdf0f61..596b0a9eee99 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its,
416{ 416{
417 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; 417 struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
418 struct its_collection *sync_col; 418 struct its_collection *sync_col;
419 unsigned long flags;
419 420
420 raw_spin_lock(&its->lock); 421 raw_spin_lock_irqsave(&its->lock, flags);
421 422
422 cmd = its_allocate_entry(its); 423 cmd = its_allocate_entry(its);
423 if (!cmd) { /* We're soooooo screewed... */ 424 if (!cmd) { /* We're soooooo screewed... */
424 pr_err_ratelimited("ITS can't allocate, dropping command\n"); 425 pr_err_ratelimited("ITS can't allocate, dropping command\n");
425 raw_spin_unlock(&its->lock); 426 raw_spin_unlock_irqrestore(&its->lock, flags);
426 return; 427 return;
427 } 428 }
428 sync_col = builder(cmd, desc); 429 sync_col = builder(cmd, desc);
@@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its,
442 443
443post: 444post:
444 next_cmd = its_post_commands(its); 445 next_cmd = its_post_commands(its);
445 raw_spin_unlock(&its->lock); 446 raw_spin_unlock_irqrestore(&its->lock, flags);
446 447
447 its_wait_for_range_completion(its, cmd, next_cmd); 448 its_wait_for_range_completion(its, cmd, next_cmd);
448} 449}
@@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its)
799{ 800{
800 int err; 801 int err;
801 int i; 802 int i;
802 int psz = PAGE_SIZE; 803 int psz = SZ_64K;
803 u64 shr = GITS_BASER_InnerShareable; 804 u64 shr = GITS_BASER_InnerShareable;
804 805
805 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 806 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
806 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); 807 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
807 u64 type = GITS_BASER_TYPE(val); 808 u64 type = GITS_BASER_TYPE(val);
808 u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 809 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
810 int order = get_order(psz);
811 int alloc_size;
809 u64 tmp; 812 u64 tmp;
810 void *base; 813 void *base;
811 814
812 if (type == GITS_BASER_TYPE_NONE) 815 if (type == GITS_BASER_TYPE_NONE)
813 continue; 816 continue;
814 817
815 /* We're lazy and only allocate a single page for now */ 818 /*
816 base = (void *)get_zeroed_page(GFP_KERNEL); 819 * Allocate as many entries as required to fit the
820 * range of device IDs that the ITS can grok... The ID
821 * space being incredibly sparse, this results in a
822 * massive waste of memory.
823 *
824 * For other tables, only allocate a single page.
825 */
826 if (type == GITS_BASER_TYPE_DEVICE) {
827 u64 typer = readq_relaxed(its->base + GITS_TYPER);
828 u32 ids = GITS_TYPER_DEVBITS(typer);
829
830 order = get_order((1UL << ids) * entry_size);
831 if (order >= MAX_ORDER) {
832 order = MAX_ORDER - 1;
833 pr_warn("%s: Device Table too large, reduce its page order to %u\n",
834 its->msi_chip.of_node->full_name, order);
835 }
836 }
837
838 alloc_size = (1 << order) * PAGE_SIZE;
839 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
817 if (!base) { 840 if (!base) {
818 err = -ENOMEM; 841 err = -ENOMEM;
819 goto out_free; 842 goto out_free;
@@ -841,7 +864,7 @@ retry_baser:
841 break; 864 break;
842 } 865 }
843 866
844 val |= (PAGE_SIZE / psz) - 1; 867 val |= (alloc_size / psz) - 1;
845 868
846 writeq_relaxed(val, its->base + GITS_BASER + i * 8); 869 writeq_relaxed(val, its->base + GITS_BASER + i * 8);
847 tmp = readq_relaxed(its->base + GITS_BASER + i * 8); 870 tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -882,7 +905,7 @@ retry_baser:
882 } 905 }
883 906
884 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", 907 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
885 (int)(PAGE_SIZE / entry_size), 908 (int)(alloc_size / entry_size),
886 its_base_type_string[type], 909 its_base_type_string[type],
887 (unsigned long)virt_to_phys(base), 910 (unsigned long)virt_to_phys(base),
888 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 911 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
@@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void)
1020static struct its_device *its_find_device(struct its_node *its, u32 dev_id) 1043static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1021{ 1044{
1022 struct its_device *its_dev = NULL, *tmp; 1045 struct its_device *its_dev = NULL, *tmp;
1046 unsigned long flags;
1023 1047
1024 raw_spin_lock(&its->lock); 1048 raw_spin_lock_irqsave(&its->lock, flags);
1025 1049
1026 list_for_each_entry(tmp, &its->its_device_list, entry) { 1050 list_for_each_entry(tmp, &its->its_device_list, entry) {
1027 if (tmp->device_id == dev_id) { 1051 if (tmp->device_id == dev_id) {
@@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1030 } 1054 }
1031 } 1055 }
1032 1056
1033 raw_spin_unlock(&its->lock); 1057 raw_spin_unlock_irqrestore(&its->lock, flags);
1034 1058
1035 return its_dev; 1059 return its_dev;
1036} 1060}
@@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1040{ 1064{
1041 struct its_device *dev; 1065 struct its_device *dev;
1042 unsigned long *lpi_map; 1066 unsigned long *lpi_map;
1067 unsigned long flags;
1043 void *itt; 1068 void *itt;
1044 int lpi_base; 1069 int lpi_base;
1045 int nr_lpis; 1070 int nr_lpis;
@@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1056 nr_ites = max(2UL, roundup_pow_of_two(nvecs)); 1081 nr_ites = max(2UL, roundup_pow_of_two(nvecs));
1057 sz = nr_ites * its->ite_size; 1082 sz = nr_ites * its->ite_size;
1058 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 1083 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1059 itt = kmalloc(sz, GFP_KERNEL); 1084 itt = kzalloc(sz, GFP_KERNEL);
1060 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 1085 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
1061 1086
1062 if (!dev || !itt || !lpi_map) { 1087 if (!dev || !itt || !lpi_map) {
@@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1075 dev->device_id = dev_id; 1100 dev->device_id = dev_id;
1076 INIT_LIST_HEAD(&dev->entry); 1101 INIT_LIST_HEAD(&dev->entry);
1077 1102
1078 raw_spin_lock(&its->lock); 1103 raw_spin_lock_irqsave(&its->lock, flags);
1079 list_add(&dev->entry, &its->its_device_list); 1104 list_add(&dev->entry, &its->its_device_list);
1080 raw_spin_unlock(&its->lock); 1105 raw_spin_unlock_irqrestore(&its->lock, flags);
1081 1106
1082 /* Bind the device to the first possible CPU */ 1107 /* Bind the device to the first possible CPU */
1083 cpu = cpumask_first(cpu_online_mask); 1108 cpu = cpumask_first(cpu_online_mask);
@@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1091 1116
1092static void its_free_device(struct its_device *its_dev) 1117static void its_free_device(struct its_device *its_dev)
1093{ 1118{
1094 raw_spin_lock(&its_dev->its->lock); 1119 unsigned long flags;
1120
1121 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
1095 list_del(&its_dev->entry); 1122 list_del(&its_dev->entry);
1096 raw_spin_unlock(&its_dev->its->lock); 1123 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
1097 kfree(its_dev->itt); 1124 kfree(its_dev->itt);
1098 kfree(its_dev); 1125 kfree(its_dev);
1099} 1126}
@@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1112 return 0; 1139 return 0;
1113} 1140}
1114 1141
1142struct its_pci_alias {
1143 struct pci_dev *pdev;
1144 u32 dev_id;
1145 u32 count;
1146};
1147
1148static int its_pci_msi_vec_count(struct pci_dev *pdev)
1149{
1150 int msi, msix;
1151
1152 msi = max(pci_msi_vec_count(pdev), 0);
1153 msix = max(pci_msix_vec_count(pdev), 0);
1154
1155 return max(msi, msix);
1156}
1157
1158static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
1159{
1160 struct its_pci_alias *dev_alias = data;
1161
1162 dev_alias->dev_id = alias;
1163 if (pdev != dev_alias->pdev)
1164 dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
1165
1166 return 0;
1167}
1168
1115static int its_msi_prepare(struct irq_domain *domain, struct device *dev, 1169static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1116 int nvec, msi_alloc_info_t *info) 1170 int nvec, msi_alloc_info_t *info)
1117{ 1171{
1118 struct pci_dev *pdev; 1172 struct pci_dev *pdev;
1119 struct its_node *its; 1173 struct its_node *its;
1120 u32 dev_id;
1121 struct its_device *its_dev; 1174 struct its_device *its_dev;
1175 struct its_pci_alias dev_alias;
1122 1176
1123 if (!dev_is_pci(dev)) 1177 if (!dev_is_pci(dev))
1124 return -EINVAL; 1178 return -EINVAL;
1125 1179
1126 pdev = to_pci_dev(dev); 1180 pdev = to_pci_dev(dev);
1127 dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); 1181 dev_alias.pdev = pdev;
1182 dev_alias.count = nvec;
1183
1184 pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
1128 its = domain->parent->host_data; 1185 its = domain->parent->host_data;
1129 1186
1130 its_dev = its_find_device(its, dev_id); 1187 its_dev = its_find_device(its, dev_alias.dev_id);
1131 if (WARN_ON(its_dev)) 1188 if (its_dev) {
1132 return -EINVAL; 1189 /*
1190 * We already have seen this ID, probably through
1191 * another alias (PCI bridge of some sort). No need to
1192 * create the device.
1193 */
1194 dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
1195 goto out;
1196 }
1133 1197
1134 its_dev = its_create_device(its, dev_id, nvec); 1198 its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
1135 if (!its_dev) 1199 if (!its_dev)
1136 return -ENOMEM; 1200 return -ENOMEM;
1137 1201
1138 dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 1202 dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
1139 1203 dev_alias.count, ilog2(dev_alias.count));
1204out:
1140 info->scratchpad[0].ptr = its_dev; 1205 info->scratchpad[0].ptr = its_dev;
1141 info->scratchpad[1].ptr = dev; 1206 info->scratchpad[1].ptr = dev;
1142 return 0; 1207 return 0;
@@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = {
1255 .deactivate = its_irq_domain_deactivate, 1320 .deactivate = its_irq_domain_deactivate,
1256}; 1321};
1257 1322
1323static int its_force_quiescent(void __iomem *base)
1324{
1325 u32 count = 1000000; /* 1s */
1326 u32 val;
1327
1328 val = readl_relaxed(base + GITS_CTLR);
1329 if (val & GITS_CTLR_QUIESCENT)
1330 return 0;
1331
1332 /* Disable the generation of all interrupts to this ITS */
1333 val &= ~GITS_CTLR_ENABLE;
1334 writel_relaxed(val, base + GITS_CTLR);
1335
1336 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
1337 while (1) {
1338 val = readl_relaxed(base + GITS_CTLR);
1339 if (val & GITS_CTLR_QUIESCENT)
1340 return 0;
1341
1342 count--;
1343 if (!count)
1344 return -EBUSY;
1345
1346 cpu_relax();
1347 udelay(1);
1348 }
1349}
1350
1258static int its_probe(struct device_node *node, struct irq_domain *parent) 1351static int its_probe(struct device_node *node, struct irq_domain *parent)
1259{ 1352{
1260 struct resource res; 1353 struct resource res;
@@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
1283 goto out_unmap; 1376 goto out_unmap;
1284 } 1377 }
1285 1378
1379 err = its_force_quiescent(its_base);
1380 if (err) {
1381 pr_warn("%s: failed to quiesce, giving up\n",
1382 node->full_name);
1383 goto out_unmap;
1384 }
1385
1286 pr_info("ITS: %s\n", node->full_name); 1386 pr_info("ITS: %s\n", node->full_name);
1287 1387
1288 its = kzalloc(sizeof(*its), GFP_KERNEL); 1388 its = kzalloc(sizeof(*its), GFP_KERNEL);
@@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
1323 writeq_relaxed(baser, its->base + GITS_CBASER); 1423 writeq_relaxed(baser, its->base + GITS_CBASER);
1324 tmp = readq_relaxed(its->base + GITS_CBASER); 1424 tmp = readq_relaxed(its->base + GITS_CBASER);
1325 writeq_relaxed(0, its->base + GITS_CWRITER); 1425 writeq_relaxed(0, its->base + GITS_CWRITER);
1326 writel_relaxed(1, its->base + GITS_CTLR); 1426 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
1327 1427
1328 if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { 1428 if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
1329 pr_info("ITS: using cache flushing for cmd queue\n"); 1429 pr_info("ITS: using cache flushing for cmd queue\n");
@@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void)
1382 1482
1383int its_cpu_init(void) 1483int its_cpu_init(void)
1384{ 1484{
1385 if (!gic_rdists_supports_plpis()) {
1386 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
1387 return -ENXIO;
1388 }
1389
1390 if (!list_empty(&its_nodes)) { 1485 if (!list_empty(&its_nodes)) {
1486 if (!gic_rdists_supports_plpis()) {
1487 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
1488 return -ENXIO;
1489 }
1391 its_cpu_init_lpis(); 1490 its_cpu_init_lpis();
1392 its_cpu_init_collection(); 1491 its_cpu_init_collection();
1393 } 1492 }
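Among the ITS changes, the device table is now sized from the number of device-ID bits the hardware reports (GITS_TYPER_DEVBITS) rather than a single page, capped at MAX_ORDER. A rough standalone calculation of what that allocation comes to; the devbits and entry_size values below are only examples, not taken from any particular implementation:

#include <stdio.h>

int main(void)
{
	unsigned int devbits = 16;      /* example: 16 device-ID bits  */
	unsigned long entry_size = 8;   /* example: 8 bytes per entry  */
	unsigned long page_size = 4096;

	unsigned long bytes = (1UL << devbits) * entry_size;

	/* get_order() rounds the allocation up to 2^order pages */
	unsigned int order = 0;
	while ((page_size << order) < bytes)
		order++;

	printf("%lu bytes -> order %u (%lu pages)\n",
	       bytes, order, 1UL << order);
	return 0;
}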
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1c6dea2fbc34..fd8850def1b8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
466 tlist |= 1 << (mpidr & 0xf); 466 tlist |= 1 << (mpidr & 0xf);
467 467
468 cpu = cpumask_next(cpu, mask); 468 cpu = cpumask_next(cpu, mask);
469 if (cpu == nr_cpu_ids) 469 if (cpu >= nr_cpu_ids)
470 goto out; 470 goto out;
471 471
472 mpidr = cpu_logical_map(cpu); 472 mpidr = cpu_logical_map(cpu);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4634cf7d0ec3..471e1cdc1933 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d)
154static void gic_mask_irq(struct irq_data *d) 154static void gic_mask_irq(struct irq_data *d)
155{ 155{
156 u32 mask = 1 << (gic_irq(d) % 32); 156 u32 mask = 1 << (gic_irq(d) % 32);
157 unsigned long flags;
157 158
158 raw_spin_lock(&irq_controller_lock); 159 raw_spin_lock_irqsave(&irq_controller_lock, flags);
159 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); 160 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
160 if (gic_arch_extn.irq_mask) 161 if (gic_arch_extn.irq_mask)
161 gic_arch_extn.irq_mask(d); 162 gic_arch_extn.irq_mask(d);
162 raw_spin_unlock(&irq_controller_lock); 163 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
163} 164}
164 165
165static void gic_unmask_irq(struct irq_data *d) 166static void gic_unmask_irq(struct irq_data *d)
166{ 167{
167 u32 mask = 1 << (gic_irq(d) % 32); 168 u32 mask = 1 << (gic_irq(d) % 32);
169 unsigned long flags;
168 170
169 raw_spin_lock(&irq_controller_lock); 171 raw_spin_lock_irqsave(&irq_controller_lock, flags);
170 if (gic_arch_extn.irq_unmask) 172 if (gic_arch_extn.irq_unmask)
171 gic_arch_extn.irq_unmask(d); 173 gic_arch_extn.irq_unmask(d);
172 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); 174 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
173 raw_spin_unlock(&irq_controller_lock); 175 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
174} 176}
175 177
176static void gic_eoi_irq(struct irq_data *d) 178static void gic_eoi_irq(struct irq_data *d)
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
188{ 190{
189 void __iomem *base = gic_dist_base(d); 191 void __iomem *base = gic_dist_base(d);
190 unsigned int gicirq = gic_irq(d); 192 unsigned int gicirq = gic_irq(d);
193 unsigned long flags;
191 int ret; 194 int ret;
192 195
193 /* Interrupt configuration for SGIs can't be changed */ 196 /* Interrupt configuration for SGIs can't be changed */
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
199 type != IRQ_TYPE_EDGE_RISING) 202 type != IRQ_TYPE_EDGE_RISING)
200 return -EINVAL; 203 return -EINVAL;
201 204
202 raw_spin_lock(&irq_controller_lock); 205 raw_spin_lock_irqsave(&irq_controller_lock, flags);
203 206
204 if (gic_arch_extn.irq_set_type) 207 if (gic_arch_extn.irq_set_type)
205 gic_arch_extn.irq_set_type(d, type); 208 gic_arch_extn.irq_set_type(d, type);
206 209
207 ret = gic_configure_irq(gicirq, type, base, NULL); 210 ret = gic_configure_irq(gicirq, type, base, NULL);
208 211
209 raw_spin_unlock(&irq_controller_lock); 212 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
210 213
211 return ret; 214 return ret;
212} 215}
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
227 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); 230 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
228 unsigned int cpu, shift = (gic_irq(d) % 4) * 8; 231 unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
229 u32 val, mask, bit; 232 u32 val, mask, bit;
233 unsigned long flags;
230 234
231 if (!force) 235 if (!force)
232 cpu = cpumask_any_and(mask_val, cpu_online_mask); 236 cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
236 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 240 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
237 return -EINVAL; 241 return -EINVAL;
238 242
239 raw_spin_lock(&irq_controller_lock); 243 raw_spin_lock_irqsave(&irq_controller_lock, flags);
240 mask = 0xff << shift; 244 mask = 0xff << shift;
241 bit = gic_cpu_map[cpu] << shift; 245 bit = gic_cpu_map[cpu] << shift;
242 val = readl_relaxed(reg) & ~mask; 246 val = readl_relaxed(reg) & ~mask;
243 writel_relaxed(val | bit, reg); 247 writel_relaxed(val | bit, reg);
244 raw_spin_unlock(&irq_controller_lock); 248 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
245 249
246 return IRQ_SET_MASK_OK; 250 return IRQ_SET_MASK_OK;
247} 251}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 3c92780bda09..ff48da61c94c 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
1755 enable_hwirq(hc); 1755 enable_hwirq(hc);
1756 spin_unlock_irqrestore(&hc->lock, flags); 1756 spin_unlock_irqrestore(&hc->lock, flags);
1757 /* Timeout 80ms */ 1757 /* Timeout 80ms */
1758 current->state = TASK_UNINTERRUPTIBLE; 1758 set_current_state(TASK_UNINTERRUPTIBLE);
1759 schedule_timeout((80 * HZ) / 1000); 1759 schedule_timeout((80 * HZ) / 1000);
1760 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", 1760 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1761 hc->irq, hc->irqcnt); 1761 hc->irq, hc->irqcnt);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 9306219d5675..6ad049a08e4d 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -341,6 +341,8 @@ void mei_stop(struct mei_device *dev)
341 341
342 dev->dev_state = MEI_DEV_POWER_DOWN; 342 dev->dev_state = MEI_DEV_POWER_DOWN;
343 mei_reset(dev); 343 mei_reset(dev);
344 /* move device to disabled state unconditionally */
345 dev->dev_state = MEI_DEV_DISABLED;
344 346
345 mutex_unlock(&dev->device_lock); 347 mutex_unlock(&dev->device_lock);
346 348
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b76a173cd95..5897d8d8fa5a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI
526 526
527config MTD_NAND_HISI504 527config MTD_NAND_HISI504
528 tristate "Support for NAND controller on Hisilicon SoC Hip04" 528 tristate "Support for NAND controller on Hisilicon SoC Hip04"
529 depends on HAS_DMA
529 help 530 help
530 Enables support for NAND controller on Hisilicon SoC Hip04. 531 Enables support for NAND controller on Hisilicon SoC Hip04.
531 532
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 96b0b1d27df1..10b1f7a4fe50 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
480 nand_writel(info, NDCR, ndcr | int_mask); 480 nand_writel(info, NDCR, ndcr | int_mask);
481} 481}
482 482
483static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
484{
485 if (info->ecc_bch) {
486 int timeout;
487
488 /*
489 * According to the datasheet, when reading from NDDB
490 * with BCH enabled, after each 32 bytes reads, we
491 * have to make sure that the NDSR.RDDREQ bit is set.
492 *
493 * Drain the FIFO 8 32 bits reads at a time, and skip
494 * the polling on the last read.
495 */
496 while (len > 8) {
497 __raw_readsl(info->mmio_base + NDDB, data, 8);
498
499 for (timeout = 0;
500 !(nand_readl(info, NDSR) & NDSR_RDDREQ);
501 timeout++) {
502 if (timeout >= 5) {
503 dev_err(&info->pdev->dev,
504 "Timeout on RDDREQ while draining the FIFO\n");
505 return;
506 }
507
508 mdelay(1);
509 }
510
511 data += 32;
512 len -= 8;
513 }
514 }
515
516 __raw_readsl(info->mmio_base + NDDB, data, len);
517}
518
483static void handle_data_pio(struct pxa3xx_nand_info *info) 519static void handle_data_pio(struct pxa3xx_nand_info *info)
484{ 520{
485 unsigned int do_bytes = min(info->data_size, info->chunk_size); 521 unsigned int do_bytes = min(info->data_size, info->chunk_size);
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
496 DIV_ROUND_UP(info->oob_size, 4)); 532 DIV_ROUND_UP(info->oob_size, 4));
497 break; 533 break;
498 case STATE_PIO_READING: 534 case STATE_PIO_READING:
499 __raw_readsl(info->mmio_base + NDDB, 535 drain_fifo(info,
500 info->data_buff + info->data_buff_pos, 536 info->data_buff + info->data_buff_pos,
501 DIV_ROUND_UP(do_bytes, 4)); 537 DIV_ROUND_UP(do_bytes, 4));
502 538
503 if (info->oob_size > 0) 539 if (info->oob_size > 0)
504 __raw_readsl(info->mmio_base + NDDB, 540 drain_fifo(info,
505 info->oob_buff + info->oob_buff_pos, 541 info->oob_buff + info->oob_buff_pos,
506 DIV_ROUND_UP(info->oob_size, 4)); 542 DIV_ROUND_UP(info->oob_size, 4));
507 break; 543 break;
508 default: 544 default:
509 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 545 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev)
1572 int ret, irq, cs; 1608 int ret, irq, cs;
1573 1609
1574 pdata = dev_get_platdata(&pdev->dev); 1610 pdata = dev_get_platdata(&pdev->dev);
1611 if (pdata->num_cs <= 0)
1612 return -ENODEV;
1575 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + 1613 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1576 sizeof(*host)) * pdata->num_cs, GFP_KERNEL); 1614 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1577 if (!info) 1615 if (!info)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 84673ebcf428..df51d6025a90 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -157,7 +157,7 @@ config IPVLAN
157 making it transparent to the connected L2 switch. 157 making it transparent to the connected L2 switch.
158 158
159 Ipvlan devices can be added using the "ip" command from the 159 Ipvlan devices can be added using the "ip" command from the
160 iproute2 package starting with the iproute2-X.Y.ZZ release: 160 iproute2 package starting with the iproute2-3.19 release:
161 161
162 "ip link add link <main-dev> [ NAME ] type ipvlan" 162 "ip link add link <main-dev> [ NAME ] type ipvlan"
163 163
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 4ce6ca5f3d36..dc6b78e5342f 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -40,7 +40,7 @@ config DEV_APPLETALK
40 40
41config LTPC 41config LTPC
42 tristate "Apple/Farallon LocalTalk PC support" 42 tristate "Apple/Farallon LocalTalk PC support"
43 depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API 43 depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
44 help 44 help
45 This allows you to use the AppleTalk PC card to connect to LocalTalk 45 This allows you to use the AppleTalk PC card to connect to LocalTalk
46 networks. The card is also known as the Farallon PhoneNet PC card. 46 networks. The card is also known as the Farallon PhoneNet PC card.
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c82e02e3dae..b0f69248cb71 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
579 skb->pkt_type = PACKET_BROADCAST; 579 skb->pkt_type = PACKET_BROADCAST;
580 skb->ip_summed = CHECKSUM_UNNECESSARY; 580 skb->ip_summed = CHECKSUM_UNNECESSARY;
581 581
582 skb_reset_mac_header(skb);
583 skb_reset_network_header(skb);
584 skb_reset_transport_header(skb);
585
582 can_skb_reserve(skb); 586 can_skb_reserve(skb);
583 can_skb_prv(skb)->ifindex = dev->ifindex; 587 can_skb_prv(skb)->ifindex = dev->ifindex;
584 588
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
603 skb->pkt_type = PACKET_BROADCAST; 607 skb->pkt_type = PACKET_BROADCAST;
604 skb->ip_summed = CHECKSUM_UNNECESSARY; 608 skb->ip_summed = CHECKSUM_UNNECESSARY;
605 609
610 skb_reset_mac_header(skb);
611 skb_reset_network_header(skb);
612 skb_reset_transport_header(skb);
613
606 can_skb_reserve(skb); 614 can_skb_reserve(skb);
607 can_skb_prv(skb)->ifindex = dev->ifindex; 615 can_skb_prv(skb)->ifindex = dev->ifindex;
608 616
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 2928f7003041..a316fa4b91ab 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 Valeo S.A. 14 * Copyright (C) 2015 Valeo S.A.
15 */ 15 */
16 16
17#include <linux/kernel.h>
17#include <linux/completion.h> 18#include <linux/completion.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
@@ -584,8 +585,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
584 while (pos <= actual_len - MSG_HEADER_LEN) { 585 while (pos <= actual_len - MSG_HEADER_LEN) {
585 tmp = buf + pos; 586 tmp = buf + pos;
586 587
587 if (!tmp->len) 588 /* Handle messages crossing the USB endpoint max packet
588 break; 589 * size boundary. Check kvaser_usb_read_bulk_callback()
590 * for further details.
591 */
592 if (tmp->len == 0) {
593 pos = round_up(pos,
594 dev->bulk_in->wMaxPacketSize);
595 continue;
596 }
589 597
590 if (pos + tmp->len > actual_len) { 598 if (pos + tmp->len > actual_len) {
591 dev_err(dev->udev->dev.parent, 599 dev_err(dev->udev->dev.parent,
@@ -787,7 +795,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
787 netdev_err(netdev, "Error transmitting URB\n"); 795 netdev_err(netdev, "Error transmitting URB\n");
788 usb_unanchor_urb(urb); 796 usb_unanchor_urb(urb);
789 usb_free_urb(urb); 797 usb_free_urb(urb);
790 kfree(buf);
791 return err; 798 return err;
792 } 799 }
793 800
@@ -1317,8 +1324,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1317 while (pos <= urb->actual_length - MSG_HEADER_LEN) { 1324 while (pos <= urb->actual_length - MSG_HEADER_LEN) {
1318 msg = urb->transfer_buffer + pos; 1325 msg = urb->transfer_buffer + pos;
1319 1326
1320 if (!msg->len) 1327 /* The Kvaser firmware can only read and write messages that
1321 break; 1328 * does not cross the USB's endpoint wMaxPacketSize boundary.
1329 * If a follow-up command crosses such boundary, firmware puts
1330 * a placeholder zero-length command in its place then aligns
1331 * the real command to the next max packet size.
1332 *
1333 * Handle such cases or we're going to miss a significant
1334 * number of events in case of a heavy rx load on the bus.
1335 */
1336 if (msg->len == 0) {
1337 pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
1338 continue;
1339 }
1322 1340
1323 if (pos + msg->len > urb->actual_length) { 1341 if (pos + msg->len > urb->actual_length) {
1324 dev_err(dev->udev->dev.parent, "Format error\n"); 1342 dev_err(dev->udev->dev.parent, "Format error\n");
@@ -1326,7 +1344,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1326 } 1344 }
1327 1345
1328 kvaser_usb_handle_message(dev, msg); 1346 kvaser_usb_handle_message(dev, msg);
1329
1330 pos += msg->len; 1347 pos += msg->len;
1331 } 1348 }
1332 1349
@@ -1615,8 +1632,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1615 struct urb *urb; 1632 struct urb *urb;
1616 void *buf; 1633 void *buf;
1617 struct kvaser_msg *msg; 1634 struct kvaser_msg *msg;
1618 int i, err; 1635 int i, err, ret = NETDEV_TX_OK;
1619 int ret = NETDEV_TX_OK;
1620 u8 *msg_tx_can_flags = NULL; /* GCC */ 1636 u8 *msg_tx_can_flags = NULL; /* GCC */
1621 1637
1622 if (can_dropped_invalid_skb(netdev, skb)) 1638 if (can_dropped_invalid_skb(netdev, skb))
@@ -1634,7 +1650,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1634 if (!buf) { 1650 if (!buf) {
1635 stats->tx_dropped++; 1651 stats->tx_dropped++;
1636 dev_kfree_skb(skb); 1652 dev_kfree_skb(skb);
1637 goto nobufmem; 1653 goto freeurb;
1638 } 1654 }
1639 1655
1640 msg = buf; 1656 msg = buf;
@@ -1681,8 +1697,10 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1681 /* This should never happen; it implies a flow control bug */ 1697 /* This should never happen; it implies a flow control bug */
1682 if (!context) { 1698 if (!context) {
1683 netdev_warn(netdev, "cannot find free context\n"); 1699 netdev_warn(netdev, "cannot find free context\n");
1700
1701 kfree(buf);
1684 ret = NETDEV_TX_BUSY; 1702 ret = NETDEV_TX_BUSY;
1685 goto releasebuf; 1703 goto freeurb;
1686 } 1704 }
1687 1705
1688 context->priv = priv; 1706 context->priv = priv;
@@ -1719,16 +1737,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1719 else 1737 else
1720 netdev_warn(netdev, "Failed tx_urb %d\n", err); 1738 netdev_warn(netdev, "Failed tx_urb %d\n", err);
1721 1739
1722 goto releasebuf; 1740 goto freeurb;
1723 } 1741 }
1724 1742
1725 usb_free_urb(urb); 1743 ret = NETDEV_TX_OK;
1726
1727 return NETDEV_TX_OK;
1728 1744
1729releasebuf: 1745freeurb:
1730 kfree(buf);
1731nobufmem:
1732 usb_free_urb(urb); 1746 usb_free_urb(urb);
1733 return ret; 1747 return ret;
1734} 1748}
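
The two kvaser_usb hunks above replace the old "bail out on a zero-length message" behaviour with a skip to the next wMaxPacketSize boundary, where the firmware re-aligns the real command. Below is a minimal, self-contained C sketch of that parsing rule; the two-byte header layout, the field names and the 64-byte packet size are illustrative assumptions, not the driver's actual definitions.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Power-of-two rounding, mirroring the kernel's round_up() semantics. */
#define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Assumed wire format: a length byte, an id byte, then payload. */
struct msg_hdr {
        uint8_t len;    /* total message length; 0 = boundary placeholder */
        uint8_t id;
};

#define MSG_HEADER_LEN  sizeof(struct msg_hdr)
#define MAX_PACKET      64      /* assumed bulk-in wMaxPacketSize */

static void parse_buffer(const uint8_t *buf, size_t actual_len)
{
        size_t pos = 0;

        while (pos + MSG_HEADER_LEN <= actual_len) {
                const struct msg_hdr *msg = (const void *)(buf + pos);

                if (msg->len == 0) {
                        /* Placeholder: the real command was re-aligned to the
                         * next max-packet boundary; jump there instead of
                         * giving up on the rest of the buffer.  (pos + 1 keeps
                         * the loop moving even at an already-aligned offset.)
                         */
                        pos = ROUND_UP(pos + 1, MAX_PACKET);
                        continue;
                }

                if (pos + msg->len > actual_len) {
                        fprintf(stderr, "format error\n");
                        break;
                }

                printf("msg id %u, len %u, offset %zu\n", msg->id, msg->len, pos);
                pos += msg->len;
        }
}

int main(void)
{
        uint8_t buf[70] = { 0 };

        buf[0] = 62; buf[1] = 1;        /* first command fills most of packet 0 */
        /* buf[62] stays 0: placeholder where a command would have crossed */
        buf[64] = 6; buf[65] = 2;       /* real command re-aligned to offset 64 */

        parse_buffer(buf, sizeof(buf));
        return 0;
}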
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 962c3f027383..0bac0f14edc3 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
879 879
880 pdev->usb_if = ppdev->usb_if; 880 pdev->usb_if = ppdev->usb_if;
881 pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; 881 pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
882
883 /* do a copy of the ctrlmode[_supported] too */
884 dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
885 dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
882 } 886 }
883 887
884 pdev->usb_if->dev[dev->ctrl_idx] = dev; 888 pdev->usb_if->dev[dev->ctrl_idx] = dev;
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index ee9f650d5026..7b7053d3c5fa 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
105{ \ 105{ \
106 u32 indir, dir; \ 106 u32 indir, dir; \
107 spin_lock(&priv->indir_lock); \ 107 spin_lock(&priv->indir_lock); \
108 indir = reg_readl(priv, REG_DIR_DATA_READ); \
109 dir = __raw_readl(priv->name + off); \ 108 dir = __raw_readl(priv->name + off); \
109 indir = reg_readl(priv, REG_DIR_DATA_READ); \
110 spin_unlock(&priv->indir_lock); \ 110 spin_unlock(&priv->indir_lock); \
111 return (u64)indir << 32 | dir; \ 111 return (u64)indir << 32 | dir; \
112} \ 112} \
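
The bcm_sf2 hunk reorders the two halves of a 64-bit register read so the direct (low) word is fetched before the indirect high word in REG_DIR_DATA_READ. The usual reason for such an ordering requirement, assumed here, is that the low-word access latches the high word so both reads see one consistent snapshot. The toy model below illustrates that idea in plain C; the latch behaviour and names are illustrative only, not taken from the switch documentation.

#include <stdint.h>
#include <stdio.h>

/* Toy model of a 64-bit counter exposed through two 32-bit registers. */
static uint64_t hw_counter = 0x00000001fffffff0ULL;
static uint32_t latched_high;

static uint32_t read_low(void)
{
        /* Reading the low half latches the high half atomically. */
        latched_high = (uint32_t)(hw_counter >> 32);
        return (uint32_t)hw_counter;
}

static uint32_t read_latched_high(void)
{
        return latched_high;
}

static uint64_t read_counter64(void)
{
        uint32_t lo = read_low();            /* must come first: latches 'high' */
        uint32_t hi = read_latched_high();

        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        printf("counter = 0x%016llx\n",
               (unsigned long long)read_counter64());
        return 0;
}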
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05543f1..ec6eac1f8c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
484 link->open++; 484 link->open++;
485 485
486 info->link_status = 0x00; 486 info->link_status = 0x00;
487 init_timer(&info->watchdog); 487 setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
488 info->watchdog.function = ei_watchdog; 488 mod_timer(&info->watchdog, jiffies + HZ);
489 info->watchdog.data = (u_long)dev;
490 info->watchdog.expires = jiffies + HZ;
491 add_timer(&info->watchdog);
492 489
493 return ax_open(dev); 490 return ax_open(dev);
494} /* axnet_open */ 491} /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d4fd6c..2777289a26c0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
918 918
919 info->phy_id = info->eth_phy; 919 info->phy_id = info->eth_phy;
920 info->link_status = 0x00; 920 info->link_status = 0x00;
921 init_timer(&info->watchdog); 921 setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
922 info->watchdog.function = ei_watchdog; 922 mod_timer(&info->watchdog, jiffies + HZ);
923 info->watchdog.data = (u_long)dev;
924 info->watchdog.expires = jiffies + HZ;
925 add_timer(&info->watchdog);
926 923
927 return ei_open(dev); 924 return ei_open(dev);
928} /* pcnet_open */ 925} /* pcnet_open */
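
Both PCMCIA hunks above collapse the open-coded init_timer()/add_timer() sequence into setup_timer() plus mod_timer(). The kernel-context sketch below shows the two equivalent forms side by side for the pre-4.15 timer API used in this tree; the surrounding functions and the callback are placeholders, and this is not a standalone program.

#include <linux/timer.h>
#include <linux/jiffies.h>

static void ei_watchdog_example(unsigned long data)
{
        /* poll link state, re-arm the timer, and so on */
}

/* Open-coded form the patches remove: */
static void arm_watchdog_old(struct timer_list *t, unsigned long dev)
{
        init_timer(t);
        t->function = ei_watchdog_example;
        t->data = dev;
        t->expires = jiffies + HZ;
        add_timer(t);
}

/* Equivalent, shorter form the patches switch to: */
static void arm_watchdog_new(struct timer_list *t, unsigned long dev)
{
        setup_timer(t, ei_watchdog_example, dev);
        mod_timer(t, jiffies + HZ);
}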
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c6e2ac..6725dc00750b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
376 u16 pktlength; 376 u16 pktlength;
377 u16 pktstatus; 377 u16 pktstatus;
378 378
379 while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { 379 while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
380 (count < limit)) {
380 pktstatus = rxstatus >> 16; 381 pktstatus = rxstatus >> 16;
381 pktlength = rxstatus & 0xffff; 382 pktlength = rxstatus & 0xffff;
382 383
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
491 struct altera_tse_private *priv = 492 struct altera_tse_private *priv =
492 container_of(napi, struct altera_tse_private, napi); 493 container_of(napi, struct altera_tse_private, napi);
493 int rxcomplete = 0; 494 int rxcomplete = 0;
494 int txcomplete = 0;
495 unsigned long int flags; 495 unsigned long int flags;
496 496
497 txcomplete = tse_tx_complete(priv); 497 tse_tx_complete(priv);
498 498
499 rxcomplete = tse_rx(priv, budget); 499 rxcomplete = tse_rx(priv, budget);
500 500
501 if (rxcomplete >= budget || txcomplete > 0) 501 if (rxcomplete < budget) {
502 return rxcomplete;
503 502
504 napi_gro_flush(napi, false); 503 napi_gro_flush(napi, false);
505 __napi_complete(napi); 504 __napi_complete(napi);
506 505
507 netdev_dbg(priv->dev, 506 netdev_dbg(priv->dev,
508 "NAPI Complete, did %d packets with budget %d\n", 507 "NAPI Complete, did %d packets with budget %d\n",
509 txcomplete+rxcomplete, budget); 508 rxcomplete, budget);
510 509
511 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); 510 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
512 priv->dmaops->enable_rxirq(priv); 511 priv->dmaops->enable_rxirq(priv);
513 priv->dmaops->enable_txirq(priv); 512 priv->dmaops->enable_txirq(priv);
514 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); 513 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
515 return rxcomplete + txcomplete; 514 }
515 return rxcomplete;
516} 516}
517 517
518/* DMA TX & RX FIFO interrupt routing 518/* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
521{ 521{
522 struct net_device *dev = dev_id; 522 struct net_device *dev = dev_id;
523 struct altera_tse_private *priv; 523 struct altera_tse_private *priv;
524 unsigned long int flags;
525 524
526 if (unlikely(!dev)) { 525 if (unlikely(!dev)) {
527 pr_err("%s: invalid dev pointer\n", __func__); 526 pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
529 } 528 }
530 priv = netdev_priv(dev); 529 priv = netdev_priv(dev);
531 530
532 /* turn off desc irqs and enable napi rx */ 531 spin_lock(&priv->rxdma_irq_lock);
533 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); 532 /* reset IRQs */
533 priv->dmaops->clear_rxirq(priv);
534 priv->dmaops->clear_txirq(priv);
535 spin_unlock(&priv->rxdma_irq_lock);
534 536
535 if (likely(napi_schedule_prep(&priv->napi))) { 537 if (likely(napi_schedule_prep(&priv->napi))) {
538 spin_lock(&priv->rxdma_irq_lock);
536 priv->dmaops->disable_rxirq(priv); 539 priv->dmaops->disable_rxirq(priv);
537 priv->dmaops->disable_txirq(priv); 540 priv->dmaops->disable_txirq(priv);
541 spin_unlock(&priv->rxdma_irq_lock);
538 __napi_schedule(&priv->napi); 542 __napi_schedule(&priv->napi);
539 } 543 }
540 544
541 /* reset IRQs */
542 priv->dmaops->clear_rxirq(priv);
543 priv->dmaops->clear_txirq(priv);
544
545 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
546 545
547 return IRQ_HANDLED; 546 return IRQ_HANDLED;
548} 547}
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1399 } 1398 }
1400 1399
1401 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", 1400 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1402 &priv->rx_fifo_depth)) { 1401 &priv->tx_fifo_depth)) {
1403 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); 1402 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1404 ret = -ENXIO; 1403 ret = -ENXIO;
1405 goto err_free_netdev; 1404 goto err_free_netdev;
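
The altera_tse changes bring the driver in line with the usual NAPI contract: the rx path never processes more than the budget, tx reclaim no longer counts toward it, and the poll routine only completes NAPI and re-arms interrupts when it came in under budget. A hedged kernel-context sketch of that poll shape follows; the example_* types and helpers are placeholders, not the driver's symbols.

#include <linux/netdevice.h>

struct example_priv {
        struct napi_struct napi;
        /* rings, register mappings, ... */
};

static void example_tx_reclaim(struct example_priv *priv);
static int example_rx(struct example_priv *priv, int budget);
static void example_enable_irqs(struct example_priv *priv);

/*
 * Only rx work is weighed against the budget; tx reclaim runs
 * unconditionally and no longer keeps the poll loop alive on its own.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int rx_done;

        example_tx_reclaim(priv);               /* clean the tx ring, don't count it */
        rx_done = example_rx(priv, budget);     /* bounded by budget, as in tse_rx() */

        if (rx_done < budget) {
                /* rx ring is drained: stop polling and re-arm interrupts */
                napi_complete(napi);
                example_enable_irqs(priv);
        }

        return rx_done;
}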
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..885b02b5be07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
609 } 609 }
610} 610}
611 611
612static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
613{
614 struct xgbe_channel *channel;
615 struct net_device *netdev = pdata->netdev;
616 unsigned int i;
617 int ret;
618
619 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
620 netdev->name, pdata);
621 if (ret) {
622 netdev_alert(netdev, "error requesting irq %d\n",
623 pdata->dev_irq);
624 return ret;
625 }
626
627 if (!pdata->per_channel_irq)
628 return 0;
629
630 channel = pdata->channel;
631 for (i = 0; i < pdata->channel_count; i++, channel++) {
632 snprintf(channel->dma_irq_name,
633 sizeof(channel->dma_irq_name) - 1,
634 "%s-TxRx-%u", netdev_name(netdev),
635 channel->queue_index);
636
637 ret = devm_request_irq(pdata->dev, channel->dma_irq,
638 xgbe_dma_isr, 0,
639 channel->dma_irq_name, channel);
640 if (ret) {
641 netdev_alert(netdev, "error requesting irq %d\n",
642 channel->dma_irq);
643 goto err_irq;
644 }
645 }
646
647 return 0;
648
649err_irq:
650 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
651 for (i--, channel--; i < pdata->channel_count; i--, channel--)
652 devm_free_irq(pdata->dev, channel->dma_irq, channel);
653
654 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
655
656 return ret;
657}
658
659static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
660{
661 struct xgbe_channel *channel;
662 unsigned int i;
663
664 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
665
666 if (!pdata->per_channel_irq)
667 return;
668
669 channel = pdata->channel;
670 for (i = 0; i < pdata->channel_count; i++, channel++)
671 devm_free_irq(pdata->dev, channel->dma_irq, channel);
672}
673
612void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) 674void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
613{ 675{
614 struct xgbe_hw_if *hw_if = &pdata->hw_if; 676 struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
810 return -EINVAL; 872 return -EINVAL;
811 } 873 }
812 874
813 phy_stop(pdata->phydev);
814
815 spin_lock_irqsave(&pdata->lock, flags); 875 spin_lock_irqsave(&pdata->lock, flags);
816 876
817 if (caller == XGMAC_DRIVER_CONTEXT) 877 if (caller == XGMAC_DRIVER_CONTEXT)
818 netif_device_detach(netdev); 878 netif_device_detach(netdev);
819 879
820 netif_tx_stop_all_queues(netdev); 880 netif_tx_stop_all_queues(netdev);
821 xgbe_napi_disable(pdata, 0);
822 881
823 /* Powerdown Tx/Rx */
824 hw_if->powerdown_tx(pdata); 882 hw_if->powerdown_tx(pdata);
825 hw_if->powerdown_rx(pdata); 883 hw_if->powerdown_rx(pdata);
826 884
885 xgbe_napi_disable(pdata, 0);
886
887 phy_stop(pdata->phydev);
888
827 pdata->power_down = 1; 889 pdata->power_down = 1;
828 890
829 spin_unlock_irqrestore(&pdata->lock, flags); 891 spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
854 916
855 phy_start(pdata->phydev); 917 phy_start(pdata->phydev);
856 918
857 /* Enable Tx/Rx */ 919 xgbe_napi_enable(pdata, 0);
920
858 hw_if->powerup_tx(pdata); 921 hw_if->powerup_tx(pdata);
859 hw_if->powerup_rx(pdata); 922 hw_if->powerup_rx(pdata);
860 923
861 if (caller == XGMAC_DRIVER_CONTEXT) 924 if (caller == XGMAC_DRIVER_CONTEXT)
862 netif_device_attach(netdev); 925 netif_device_attach(netdev);
863 926
864 xgbe_napi_enable(pdata, 0);
865 netif_tx_start_all_queues(netdev); 927 netif_tx_start_all_queues(netdev);
866 928
867 spin_unlock_irqrestore(&pdata->lock, flags); 929 spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
875{ 937{
876 struct xgbe_hw_if *hw_if = &pdata->hw_if; 938 struct xgbe_hw_if *hw_if = &pdata->hw_if;
877 struct net_device *netdev = pdata->netdev; 939 struct net_device *netdev = pdata->netdev;
940 int ret;
878 941
879 DBGPR("-->xgbe_start\n"); 942 DBGPR("-->xgbe_start\n");
880 943
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
884 947
885 phy_start(pdata->phydev); 948 phy_start(pdata->phydev);
886 949
950 xgbe_napi_enable(pdata, 1);
951
952 ret = xgbe_request_irqs(pdata);
953 if (ret)
954 goto err_napi;
955
887 hw_if->enable_tx(pdata); 956 hw_if->enable_tx(pdata);
888 hw_if->enable_rx(pdata); 957 hw_if->enable_rx(pdata);
889 958
890 xgbe_init_tx_timers(pdata); 959 xgbe_init_tx_timers(pdata);
891 960
892 xgbe_napi_enable(pdata, 1);
893 netif_tx_start_all_queues(netdev); 961 netif_tx_start_all_queues(netdev);
894 962
895 DBGPR("<--xgbe_start\n"); 963 DBGPR("<--xgbe_start\n");
896 964
897 return 0; 965 return 0;
966
967err_napi:
968 xgbe_napi_disable(pdata, 1);
969
970 phy_stop(pdata->phydev);
971
972 hw_if->exit(pdata);
973
974 return ret;
898} 975}
899 976
900static void xgbe_stop(struct xgbe_prv_data *pdata) 977static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
907 984
908 DBGPR("-->xgbe_stop\n"); 985 DBGPR("-->xgbe_stop\n");
909 986
910 phy_stop(pdata->phydev);
911
912 netif_tx_stop_all_queues(netdev); 987 netif_tx_stop_all_queues(netdev);
913 xgbe_napi_disable(pdata, 1);
914 988
915 xgbe_stop_tx_timers(pdata); 989 xgbe_stop_tx_timers(pdata);
916 990
917 hw_if->disable_tx(pdata); 991 hw_if->disable_tx(pdata);
918 hw_if->disable_rx(pdata); 992 hw_if->disable_rx(pdata);
919 993
994 xgbe_free_irqs(pdata);
995
996 xgbe_napi_disable(pdata, 1);
997
998 phy_stop(pdata->phydev);
999
1000 hw_if->exit(pdata);
1001
920 channel = pdata->channel; 1002 channel = pdata->channel;
921 for (i = 0; i < pdata->channel_count; i++, channel++) { 1003 for (i = 0; i < pdata->channel_count; i++, channel++) {
922 if (!channel->tx_ring) 1004 if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
931 1013
932static void xgbe_restart_dev(struct xgbe_prv_data *pdata) 1014static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
933{ 1015{
934 struct xgbe_channel *channel;
935 struct xgbe_hw_if *hw_if = &pdata->hw_if;
936 unsigned int i;
937
938 DBGPR("-->xgbe_restart_dev\n"); 1016 DBGPR("-->xgbe_restart_dev\n");
939 1017
940 /* If not running, "restart" will happen on open */ 1018 /* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
942 return; 1020 return;
943 1021
944 xgbe_stop(pdata); 1022 xgbe_stop(pdata);
945 synchronize_irq(pdata->dev_irq);
946 if (pdata->per_channel_irq) {
947 channel = pdata->channel;
948 for (i = 0; i < pdata->channel_count; i++, channel++)
949 synchronize_irq(channel->dma_irq);
950 }
951 1023
952 xgbe_free_tx_data(pdata); 1024 xgbe_free_tx_data(pdata);
953 xgbe_free_rx_data(pdata); 1025 xgbe_free_rx_data(pdata);
954 1026
955 /* Issue software reset to device */
956 hw_if->exit(pdata);
957
958 xgbe_start(pdata); 1027 xgbe_start(pdata);
959 1028
960 DBGPR("<--xgbe_restart_dev\n"); 1029 DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1283static int xgbe_open(struct net_device *netdev) 1352static int xgbe_open(struct net_device *netdev)
1284{ 1353{
1285 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1354 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1286 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1287 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1355 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1288 struct xgbe_channel *channel = NULL;
1289 unsigned int i = 0;
1290 int ret; 1356 int ret;
1291 1357
1292 DBGPR("-->xgbe_open\n"); 1358 DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
1329 INIT_WORK(&pdata->restart_work, xgbe_restart); 1395 INIT_WORK(&pdata->restart_work, xgbe_restart);
1330 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); 1396 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1331 1397
1332 /* Request interrupts */
1333 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
1334 netdev->name, pdata);
1335 if (ret) {
1336 netdev_alert(netdev, "error requesting irq %d\n",
1337 pdata->dev_irq);
1338 goto err_rings;
1339 }
1340
1341 if (pdata->per_channel_irq) {
1342 channel = pdata->channel;
1343 for (i = 0; i < pdata->channel_count; i++, channel++) {
1344 snprintf(channel->dma_irq_name,
1345 sizeof(channel->dma_irq_name) - 1,
1346 "%s-TxRx-%u", netdev_name(netdev),
1347 channel->queue_index);
1348
1349 ret = devm_request_irq(pdata->dev, channel->dma_irq,
1350 xgbe_dma_isr, 0,
1351 channel->dma_irq_name, channel);
1352 if (ret) {
1353 netdev_alert(netdev,
1354 "error requesting irq %d\n",
1355 channel->dma_irq);
1356 goto err_irq;
1357 }
1358 }
1359 }
1360
1361 ret = xgbe_start(pdata); 1398 ret = xgbe_start(pdata);
1362 if (ret) 1399 if (ret)
1363 goto err_start; 1400 goto err_rings;
1364 1401
1365 DBGPR("<--xgbe_open\n"); 1402 DBGPR("<--xgbe_open\n");
1366 1403
1367 return 0; 1404 return 0;
1368 1405
1369err_start:
1370 hw_if->exit(pdata);
1371
1372err_irq:
1373 if (pdata->per_channel_irq) {
1374 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
1375 for (i--, channel--; i < pdata->channel_count; i--, channel--)
1376 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1377 }
1378
1379 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1380
1381err_rings: 1406err_rings:
1382 desc_if->free_ring_resources(pdata); 1407 desc_if->free_ring_resources(pdata);
1383 1408
@@ -1399,30 +1424,16 @@ err_phy_init:
1399static int xgbe_close(struct net_device *netdev) 1424static int xgbe_close(struct net_device *netdev)
1400{ 1425{
1401 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1426 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1402 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1403 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1427 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1404 struct xgbe_channel *channel;
1405 unsigned int i;
1406 1428
1407 DBGPR("-->xgbe_close\n"); 1429 DBGPR("-->xgbe_close\n");
1408 1430
1409 /* Stop the device */ 1431 /* Stop the device */
1410 xgbe_stop(pdata); 1432 xgbe_stop(pdata);
1411 1433
1412 /* Issue software reset to device */
1413 hw_if->exit(pdata);
1414
1415 /* Free the ring descriptors and buffers */ 1434 /* Free the ring descriptors and buffers */
1416 desc_if->free_ring_resources(pdata); 1435 desc_if->free_ring_resources(pdata);
1417 1436
1418 /* Release the interrupts */
1419 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1420 if (pdata->per_channel_irq) {
1421 channel = pdata->channel;
1422 for (i = 0; i < pdata->channel_count; i++, channel++)
1423 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1424 }
1425
1426 /* Free the channel and ring structures */ 1437 /* Free the channel and ring structures */
1427 xgbe_free_channels(pdata); 1438 xgbe_free_channels(pdata);
1428 1439
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 869d97fcf781..b927021c6c40 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
593 if (!xgene_ring_mgr_init(pdata)) 593 if (!xgene_ring_mgr_init(pdata))
594 return -ENODEV; 594 return -ENODEV;
595 595
596 if (!efi_enabled(EFI_BOOT)) { 596 if (pdata->clk) {
597 clk_prepare_enable(pdata->clk); 597 clk_prepare_enable(pdata->clk);
598 clk_disable_unprepare(pdata->clk); 598 clk_disable_unprepare(pdata->clk);
599 clk_prepare_enable(pdata->clk); 599 clk_prepare_enable(pdata->clk);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4de62b210c85..635a83be7e5e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev)
1025#ifdef CONFIG_ACPI 1025#ifdef CONFIG_ACPI
1026static const struct acpi_device_id xgene_enet_acpi_match[] = { 1026static const struct acpi_device_id xgene_enet_acpi_match[] = {
1027 { "APMC0D05", }, 1027 { "APMC0D05", },
1028 { "APMC0D30", },
1029 { "APMC0D31", },
1028 { } 1030 { }
1029}; 1031};
1030MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); 1032MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1033#ifdef CONFIG_OF 1035#ifdef CONFIG_OF
1034static struct of_device_id xgene_enet_of_match[] = { 1036static struct of_device_id xgene_enet_of_match[] = {
1035 {.compatible = "apm,xgene-enet",}, 1037 {.compatible = "apm,xgene-enet",},
1038 {.compatible = "apm,xgene1-sgenet",},
1039 {.compatible = "apm,xgene1-xgenet",},
1036 {}, 1040 {},
1037}; 1041};
1038 1042
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 21206d33b638..a7f2cc3e485e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
486{ 486{
487 struct bcm_enet_priv *priv; 487 struct bcm_enet_priv *priv;
488 struct net_device *dev; 488 struct net_device *dev;
489 int tx_work_done, rx_work_done; 489 int rx_work_done;
490 490
491 priv = container_of(napi, struct bcm_enet_priv, napi); 491 priv = container_of(napi, struct bcm_enet_priv, napi);
492 dev = priv->net_dev; 492 dev = priv->net_dev;
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
498 ENETDMAC_IR, priv->tx_chan); 498 ENETDMAC_IR, priv->tx_chan);
499 499
500 /* reclaim sent skb */ 500 /* reclaim sent skb */
501 tx_work_done = bcm_enet_tx_reclaim(dev, 0); 501 bcm_enet_tx_reclaim(dev, 0);
502 502
503 spin_lock(&priv->rx_lock); 503 spin_lock(&priv->rx_lock);
504 rx_work_done = bcm_enet_receive_queue(dev, budget); 504 rx_work_done = bcm_enet_receive_queue(dev, budget);
505 spin_unlock(&priv->rx_lock); 505 spin_unlock(&priv->rx_lock);
506 506
507 if (rx_work_done >= budget || tx_work_done > 0) { 507 if (rx_work_done >= budget) {
508 /* rx/tx queue is not yet empty/clean */ 508 /* rx queue is not yet empty/clean */
509 return rx_work_done; 509 return rx_work_done;
510 } 510 }
511 511
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4a4d0e..783543ad1fcf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
274 /* RBUF misc statistics */ 274 /* RBUF misc statistics */
275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
277 STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 277 STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
278 STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), 278 STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
279 STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), 279 STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
280}; 280};
281 281
282#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 282#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
345 s = &bcm_sysport_gstrings_stats[i]; 345 s = &bcm_sysport_gstrings_stats[i];
346 switch (s->type) { 346 switch (s->type) {
347 case BCM_SYSPORT_STAT_NETDEV: 347 case BCM_SYSPORT_STAT_NETDEV:
348 case BCM_SYSPORT_STAT_SOFT:
348 continue; 349 continue;
349 case BCM_SYSPORT_STAT_MIB_RX: 350 case BCM_SYSPORT_STAT_MIB_RX:
350 case BCM_SYSPORT_STAT_MIB_TX: 351 case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417d82a5..7e3d87a88c76 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
570 BCM_SYSPORT_STAT_RUNT, 570 BCM_SYSPORT_STAT_RUNT,
571 BCM_SYSPORT_STAT_RXCHK, 571 BCM_SYSPORT_STAT_RXCHK,
572 BCM_SYSPORT_STAT_RBUF, 572 BCM_SYSPORT_STAT_RBUF,
573 BCM_SYSPORT_STAT_SOFT,
573}; 574};
574 575
575/* Macros to help define ethtool statistics */ 576/* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
590#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) 591#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
591#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) 592#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
592#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) 593#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
594#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
593 595
594#define STAT_RXCHK(str, m, ofs) { \ 596#define STAT_RXCHK(str, m, ofs) { \
595 .stat_string = str, \ 597 .stat_string = str, \
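
The bcmsysport change (and the matching bcmgenet one further down) tags software-maintained counters with a dedicated SOFT stat type so the MIB refresh loop stops overwriting them with hardware reads. The small runnable C program below shows the table-plus-type pattern; the names, offsets and the stand-in register read are illustrative assumptions.

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum ex_stat_type { EX_STAT_NETDEV, EX_STAT_HW_MIB, EX_STAT_SOFT };

struct ex_stat {
        const char *name;
        enum ex_stat_type type;
        unsigned int reg_offset;        /* only meaningful for EX_STAT_HW_MIB */
};

static const struct ex_stat ex_stats[] = {
        { "rx_packets",           EX_STAT_NETDEV, 0    },
        { "rbuf_ovflow_cnt",      EX_STAT_HW_MIB, 0x40 },
        { "alloc_rx_buff_failed", EX_STAT_SOFT,   0    },
};

static void ex_read_hw_counter(unsigned int off)
{
        printf("reading MIB register at 0x%x\n", off);  /* stand-in for I/O */
}

int main(void)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(ex_stats); i++) {
                switch (ex_stats[i].type) {
                case EX_STAT_NETDEV:
                case EX_STAT_SOFT:
                        /* maintained in software; never refreshed from hw */
                        continue;
                case EX_STAT_HW_MIB:
                        ex_read_hw_counter(ex_stats[i].reg_offset);
                        break;
                }
        }
        return 0;
}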
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 676ffe093180..0469f72c6e7e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
302 slot->skb = skb; 302 slot->skb = skb;
303 slot->dma_addr = dma_addr; 303 slot->dma_addr = dma_addr;
304 304
305 if (slot->dma_addr & 0xC0000000)
306 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
307
308 return 0; 305 return 0;
309} 306}
310 307
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
505 ring->mmio_base); 502 ring->mmio_base);
506 goto err_dma_free; 503 goto err_dma_free;
507 } 504 }
508 if (ring->dma_base & 0xC0000000)
509 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
510 505
511 ring->unaligned = bgmac_dma_unaligned(bgmac, ring, 506 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
512 BGMAC_DMA_RING_TX); 507 BGMAC_DMA_RING_TX);
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
536 err = -ENOMEM; 531 err = -ENOMEM;
537 goto err_dma_free; 532 goto err_dma_free;
538 } 533 }
539 if (ring->dma_base & 0xC0000000)
540 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
541 534
542 ring->unaligned = bgmac_dma_unaligned(bgmac, ring, 535 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
543 BGMAC_DMA_RING_RX); 536 BGMAC_DMA_RING_RX);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7155e1d2c208..bef750a09027 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12722 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 12722 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12723 PCICFG_VENDOR_ID_OFFSET); 12723 PCICFG_VENDOR_ID_OFFSET);
12724 12724
12725 /* Set PCIe reset type to fundamental for EEH recovery */
12726 pdev->needs_freset = 1;
12727
12725 /* AER (Advanced Error reporting) configuration */ 12728 /* AER (Advanced Error reporting) configuration */
12726 rc = pci_enable_pcie_error_reporting(pdev); 12729 rc = pci_enable_pcie_error_reporting(pdev);
12727 if (!rc) 12730 if (!rc)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff83c46bc389..6befde61c203 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
487 BCMGENET_STAT_MIB_TX, 487 BCMGENET_STAT_MIB_TX,
488 BCMGENET_STAT_RUNT, 488 BCMGENET_STAT_RUNT,
489 BCMGENET_STAT_MISC, 489 BCMGENET_STAT_MISC,
490 BCMGENET_STAT_SOFT,
490}; 491};
491 492
492struct bcmgenet_stats { 493struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
515#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) 516#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
516#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) 517#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
517#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) 518#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
519#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
518 520
519#define STAT_GENET_MISC(str, m, offset) { \ 521#define STAT_GENET_MISC(str, m, offset) { \
520 .stat_string = str, \ 522 .stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
614 UMAC_RBUF_OVFL_CNT), 616 UMAC_RBUF_OVFL_CNT),
615 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), 617 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
616 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), 618 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
617 STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 619 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
618 STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), 620 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
619 STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), 621 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
620}; 622};
621 623
622#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) 624#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
668 s = &bcmgenet_gstrings_stats[i]; 670 s = &bcmgenet_gstrings_stats[i];
669 switch (s->type) { 671 switch (s->type) {
670 case BCMGENET_STAT_NETDEV: 672 case BCMGENET_STAT_NETDEV:
673 case BCMGENET_STAT_SOFT:
671 continue; 674 continue;
672 case BCMGENET_STAT_MIB_RX: 675 case BCMGENET_STAT_MIB_RX:
673 case BCMGENET_STAT_MIB_TX: 676 case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
971} 974}
972 975
973/* Unlocked version of the reclaim routine */ 976/* Unlocked version of the reclaim routine */
974static void __bcmgenet_tx_reclaim(struct net_device *dev, 977static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
975 struct bcmgenet_tx_ring *ring) 978 struct bcmgenet_tx_ring *ring)
976{ 979{
977 struct bcmgenet_priv *priv = netdev_priv(dev); 980 struct bcmgenet_priv *priv = netdev_priv(dev);
978 int last_tx_cn, last_c_index, num_tx_bds; 981 int last_tx_cn, last_c_index, num_tx_bds;
979 struct enet_cb *tx_cb_ptr; 982 struct enet_cb *tx_cb_ptr;
980 struct netdev_queue *txq; 983 struct netdev_queue *txq;
984 unsigned int pkts_compl = 0;
981 unsigned int bds_compl; 985 unsigned int bds_compl;
982 unsigned int c_index; 986 unsigned int c_index;
983 987
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
1005 tx_cb_ptr = ring->cbs + last_c_index; 1009 tx_cb_ptr = ring->cbs + last_c_index;
1006 bds_compl = 0; 1010 bds_compl = 0;
1007 if (tx_cb_ptr->skb) { 1011 if (tx_cb_ptr->skb) {
1012 pkts_compl++;
1008 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; 1013 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
1009 dev->stats.tx_bytes += tx_cb_ptr->skb->len; 1014 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
1010 dma_unmap_single(&dev->dev, 1015 dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
1028 last_c_index &= (num_tx_bds - 1); 1033 last_c_index &= (num_tx_bds - 1);
1029 } 1034 }
1030 1035
1031 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) 1036 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1032 ring->int_disable(priv, ring); 1037 if (netif_tx_queue_stopped(txq))
1033 1038 netif_tx_wake_queue(txq);
1034 if (netif_tx_queue_stopped(txq)) 1039 }
1035 netif_tx_wake_queue(txq);
1036 1040
1037 ring->c_index = c_index; 1041 ring->c_index = c_index;
1042
1043 return pkts_compl;
1038} 1044}
1039 1045
1040static void bcmgenet_tx_reclaim(struct net_device *dev, 1046static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1041 struct bcmgenet_tx_ring *ring) 1047 struct bcmgenet_tx_ring *ring)
1042{ 1048{
1049 unsigned int released;
1043 unsigned long flags; 1050 unsigned long flags;
1044 1051
1045 spin_lock_irqsave(&ring->lock, flags); 1052 spin_lock_irqsave(&ring->lock, flags);
1046 __bcmgenet_tx_reclaim(dev, ring); 1053 released = __bcmgenet_tx_reclaim(dev, ring);
1047 spin_unlock_irqrestore(&ring->lock, flags); 1054 spin_unlock_irqrestore(&ring->lock, flags);
1055
1056 return released;
1057}
1058
1059static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1060{
1061 struct bcmgenet_tx_ring *ring =
1062 container_of(napi, struct bcmgenet_tx_ring, napi);
1063 unsigned int work_done = 0;
1064
1065 work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
1066
1067 if (work_done == 0) {
1068 napi_complete(napi);
1069 ring->int_enable(ring->priv, ring);
1070
1071 return 0;
1072 }
1073
1074 return budget;
1048} 1075}
1049 1076
1050static void bcmgenet_tx_reclaim_all(struct net_device *dev) 1077static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1302 bcmgenet_tdma_ring_writel(priv, ring->index, 1329 bcmgenet_tdma_ring_writel(priv, ring->index,
1303 ring->prod_index, TDMA_PROD_INDEX); 1330 ring->prod_index, TDMA_PROD_INDEX);
1304 1331
1305 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { 1332 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
1306 netif_tx_stop_queue(txq); 1333 netif_tx_stop_queue(txq);
1307 ring->int_enable(priv, ring);
1308 }
1309 1334
1310out: 1335out:
1311 spin_unlock_irqrestore(&ring->lock, flags); 1336 spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1621 struct device *kdev = &priv->pdev->dev; 1646 struct device *kdev = &priv->pdev->dev;
1622 int ret; 1647 int ret;
1623 u32 reg, cpu_mask_clear; 1648 u32 reg, cpu_mask_clear;
1649 int index;
1624 1650
1625 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); 1651 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1626 1652
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1647 1673
1648 bcmgenet_intr_disable(priv); 1674 bcmgenet_intr_disable(priv);
1649 1675
1650 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; 1676 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
1651 1677
1652 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); 1678 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
1653 1679
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
1674 1700
1675 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); 1701 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
1676 1702
1703 for (index = 0; index < priv->hw_params->tx_queues; index++)
1704 bcmgenet_intrl2_1_writel(priv, (1 << index),
1705 INTRL2_CPU_MASK_CLEAR);
1706
1677 /* Enable rx/tx engine.*/ 1707 /* Enable rx/tx engine.*/
1678 dev_dbg(kdev, "done init umac\n"); 1708 dev_dbg(kdev, "done init umac\n");
1679 1709
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1693 unsigned int first_bd; 1723 unsigned int first_bd;
1694 1724
1695 spin_lock_init(&ring->lock); 1725 spin_lock_init(&ring->lock);
1726 ring->priv = priv;
1727 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1696 ring->index = index; 1728 ring->index = index;
1697 if (index == DESC_INDEX) { 1729 if (index == DESC_INDEX) {
1698 ring->queue = 0; 1730 ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1738 TDMA_WRITE_PTR); 1770 TDMA_WRITE_PTR);
1739 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 1771 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1740 DMA_END_ADDR); 1772 DMA_END_ADDR);
1773
1774 napi_enable(&ring->napi);
1775}
1776
1777static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
1778 unsigned int index)
1779{
1780 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1781
1782 napi_disable(&ring->napi);
1783 netif_napi_del(&ring->napi);
1741} 1784}
1742 1785
1743/* Initialize a RDMA ring */ 1786/* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
1907 return ret; 1950 return ret;
1908} 1951}
1909 1952
1910static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1953static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1911{ 1954{
1912 int i; 1955 int i;
1913 1956
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1926 kfree(priv->tx_cbs); 1969 kfree(priv->tx_cbs);
1927} 1970}
1928 1971
1972static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1973{
1974 int i;
1975
1976 bcmgenet_fini_tx_ring(priv, DESC_INDEX);
1977
1978 for (i = 0; i < priv->hw_params->tx_queues; i++)
1979 bcmgenet_fini_tx_ring(priv, i);
1980
1981 __bcmgenet_fini_dma(priv);
1982}
1983
1929/* init_edma: Initialize DMA control register */ 1984/* init_edma: Initialize DMA control register */
1930static int bcmgenet_init_dma(struct bcmgenet_priv *priv) 1985static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1931{ 1986{
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1952 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), 2007 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
1953 GFP_KERNEL); 2008 GFP_KERNEL);
1954 if (!priv->tx_cbs) { 2009 if (!priv->tx_cbs) {
1955 bcmgenet_fini_dma(priv); 2010 __bcmgenet_fini_dma(priv);
1956 return -ENOMEM; 2011 return -ENOMEM;
1957 } 2012 }
1958 2013
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
1975 struct bcmgenet_priv, napi); 2030 struct bcmgenet_priv, napi);
1976 unsigned int work_done; 2031 unsigned int work_done;
1977 2032
1978 /* tx reclaim */
1979 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
1980
1981 work_done = bcmgenet_desc_rx(priv, budget); 2033 work_done = bcmgenet_desc_rx(priv, budget);
1982 2034
1983 /* Advancing our consumer index*/ 2035 /* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
2022static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) 2074static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2023{ 2075{
2024 struct bcmgenet_priv *priv = dev_id; 2076 struct bcmgenet_priv *priv = dev_id;
2077 struct bcmgenet_tx_ring *ring;
2025 unsigned int index; 2078 unsigned int index;
2026 2079
2027 /* Save irq status for bottom-half processing. */ 2080 /* Save irq status for bottom-half processing. */
2028 priv->irq1_stat = 2081 priv->irq1_stat =
2029 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & 2082 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2030 ~priv->int1_mask; 2083 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2031 /* clear interrupts */ 2084 /* clear interrupts */
2032 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); 2085 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2033 2086
2034 netif_dbg(priv, intr, priv->dev, 2087 netif_dbg(priv, intr, priv->dev,
2035 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); 2088 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2089
2036 /* Check the MBDONE interrupts. 2090 /* Check the MBDONE interrupts.
2037 * packet is done, reclaim descriptors 2091 * packet is done, reclaim descriptors
2038 */ 2092 */
2039 if (priv->irq1_stat & 0x0000ffff) { 2093 for (index = 0; index < priv->hw_params->tx_queues; index++) {
2040 index = 0; 2094 if (!(priv->irq1_stat & BIT(index)))
2041 for (index = 0; index < 16; index++) { 2095 continue;
2042 if (priv->irq1_stat & (1 << index)) 2096
2043 bcmgenet_tx_reclaim(priv->dev, 2097 ring = &priv->tx_rings[index];
2044 &priv->tx_rings[index]); 2098
2099 if (likely(napi_schedule_prep(&ring->napi))) {
2100 ring->int_disable(priv, ring);
2101 __napi_schedule(&ring->napi);
2045 } 2102 }
2046 } 2103 }
2104
2047 return IRQ_HANDLED; 2105 return IRQ_HANDLED;
2048} 2106}
2049 2107
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2075 } 2133 }
2076 if (priv->irq0_stat & 2134 if (priv->irq0_stat &
2077 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { 2135 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
2078 /* Tx reclaim */ 2136 struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
2079 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); 2137
2138 if (likely(napi_schedule_prep(&ring->napi))) {
2139 ring->int_disable(priv, ring);
2140 __napi_schedule(&ring->napi);
2141 }
2080 } 2142 }
2081 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | 2143 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2082 UMAC_IRQ_PHY_DET_F | 2144 UMAC_IRQ_PHY_DET_F |
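
The bcmgenet rework gives every tx queue its own NAPI instance: the hard interrupt only masks that ring's interrupt and schedules its NAPI, and reclaim happens in the poll routine, which re-enables the interrupt once no completions are left. A kernel-context sketch of that pattern follows; the ex_* types and helpers are placeholders for the driver's ring and interrupt-mask accessors.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct ex_tx_ring {
        struct napi_struct napi;
        /* descriptor ring state, interrupt mask helpers, ... */
};

static void ex_ring_int_disable(struct ex_tx_ring *ring);
static void ex_ring_int_enable(struct ex_tx_ring *ring);
static unsigned int ex_tx_reclaim(struct ex_tx_ring *ring);

static irqreturn_t ex_tx_isr(int irq, void *dev_id)
{
        struct ex_tx_ring *ring = dev_id;

        if (likely(napi_schedule_prep(&ring->napi))) {
                ex_ring_int_disable(ring);      /* mask until the poll finishes */
                __napi_schedule(&ring->napi);
        }
        return IRQ_HANDLED;
}

static int ex_tx_poll(struct napi_struct *napi, int budget)
{
        struct ex_tx_ring *ring = container_of(napi, struct ex_tx_ring, napi);
        unsigned int done = ex_tx_reclaim(ring);        /* packets completed */

        if (done == 0) {
                napi_complete(napi);
                ex_ring_int_enable(ring);       /* re-arm this ring's interrupt only */
                return 0;
        }
        return budget;  /* completions still pending; stay scheduled */
}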
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b36ddec0cc0a..0d370d168aee 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
520 520
521struct bcmgenet_tx_ring { 521struct bcmgenet_tx_ring {
522 spinlock_t lock; /* ring lock */ 522 spinlock_t lock; /* ring lock */
523 struct napi_struct napi; /* NAPI per tx queue */
523 unsigned int index; /* ring index */ 524 unsigned int index; /* ring index */
524 unsigned int queue; /* queue index */ 525 unsigned int queue; /* queue index */
525 struct enet_cb *cbs; /* tx ring buffer control block*/ 526 struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
534 struct bcmgenet_tx_ring *); 535 struct bcmgenet_tx_ring *);
535 void (*int_disable)(struct bcmgenet_priv *priv, 536 void (*int_disable)(struct bcmgenet_priv *priv,
536 struct bcmgenet_tx_ring *); 537 struct bcmgenet_tx_ring *);
538 struct bcmgenet_priv *priv;
537}; 539};
538 540
539/* device context */ 541/* device context */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 149a0d70c108..b97122926d3a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
73 if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) 73 if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
74 return -EINVAL; 74 return -EINVAL;
75 75
76 reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
76 if (wol->wolopts & WAKE_MAGICSECURE) { 77 if (wol->wolopts & WAKE_MAGICSECURE) {
77 bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), 78 bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
78 UMAC_MPD_PW_MS); 79 UMAC_MPD_PW_MS);
79 bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), 80 bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
80 UMAC_MPD_PW_LS); 81 UMAC_MPD_PW_LS);
81 reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
82 reg |= MPD_PW_EN; 82 reg |= MPD_PW_EN;
83 bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); 83 } else {
84 reg &= ~MPD_PW_EN;
84 } 85 }
86 bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
85 87
86 /* Flag the device and relevant IRQ as wakeup capable */ 88 /* Flag the device and relevant IRQ as wakeup capable */
87 if (wol->wolopts) { 89 if (wol->wolopts) {
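
The WoL hunk turns the MPD_PW_EN update into a single read-modify-write that covers both branches, so disabling WAKE_MAGICSECURE actually clears a previously set bit instead of leaving the register untouched. The runnable sketch below shows the same pattern against a stand-in register; the bit position and names are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define EX_MPD_PW_EN    (1u << 5)       /* assumed bit position, illustrative only */

static uint32_t mpd_ctrl;               /* stand-in for the UMAC_MPD_CTRL register */

/*
 * Read-modify-write covering both branches, as in the WoL hunk above: the
 * register is read once, the password-enable bit is set or cleared, and the
 * result is written back unconditionally.
 */
static void set_magicsecure(bool enable)
{
        uint32_t reg = mpd_ctrl;        /* readl(...UMAC_MPD_CTRL) in the driver */

        if (enable)
                reg |= EX_MPD_PW_EN;
        else
                reg &= ~EX_MPD_PW_EN;

        mpd_ctrl = reg;                 /* writel(...) in the driver */
}

int main(void)
{
        set_magicsecure(true);
        printf("ctrl = 0x%08x\n", mpd_ctrl);
        set_magicsecure(false);
        printf("ctrl = 0x%08x\n", mpd_ctrl);
        return 0;
}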
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ad76b8e35a00..81d41539fcba 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = {
2113}; 2113};
2114 2114
2115#if defined(CONFIG_OF) 2115#if defined(CONFIG_OF)
2116static struct macb_config pc302gem_config = { 2116static const struct macb_config pc302gem_config = {
2117 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 2117 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2118 .dma_burst_length = 16, 2118 .dma_burst_length = 16,
2119}; 2119};
2120 2120
2121static struct macb_config sama5d3_config = { 2121static const struct macb_config sama5d3_config = {
2122 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 2122 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2123 .dma_burst_length = 16, 2123 .dma_burst_length = 16,
2124}; 2124};
2125 2125
2126static struct macb_config sama5d4_config = { 2126static const struct macb_config sama5d4_config = {
2127 .caps = 0, 2127 .caps = 0,
2128 .dma_burst_length = 4, 2128 .dma_burst_length = 4,
2129}; 2129};
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp)
2154 if (bp->pdev->dev.of_node) { 2154 if (bp->pdev->dev.of_node) {
2155 match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); 2155 match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
2156 if (match && match->data) { 2156 if (match && match->data) {
2157 config = (const struct macb_config *)match->data; 2157 config = match->data;
2158 2158
2159 bp->caps = config->caps; 2159 bp->caps = config->caps;
2160 /* 2160 /*
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 31dc080f2437..ff85619a9732 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -351,7 +351,7 @@
351 351
352/* Bitfields in MID */ 352/* Bitfields in MID */
353#define MACB_IDNUM_OFFSET 16 353#define MACB_IDNUM_OFFSET 16
354#define MACB_IDNUM_SIZE 16 354#define MACB_IDNUM_SIZE 12
355#define MACB_REV_OFFSET 0 355#define MACB_REV_OFFSET 0
356#define MACB_REV_SIZE 16 356#define MACB_REV_SIZE 16
357 357
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a8434246..c308429dd9c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
35} 35}
36 36
37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, 37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
38 int addr_len) 38 u8 v6)
39{ 39{
40 return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) : 40 return v6 ? ipv6_clip_hash(ctbl, addr) :
41 ipv6_clip_hash(ctbl, addr); 41 ipv4_clip_hash(ctbl, addr);
42} 42}
43 43
44static int clip6_get_mbox(const struct net_device *dev, 44static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
78 struct clip_entry *ce, *cte; 78 struct clip_entry *ce, *cte;
79 u32 *addr = (u32 *)lip; 79 u32 *addr = (u32 *)lip;
80 int hash; 80 int hash;
81 int addr_len; 81 int ret = -1;
82 int ret = 0;
83 82
84 if (!ctbl) 83 if (!ctbl)
85 return 0; 84 return 0;
86 85
87 if (v6) 86 hash = clip_addr_hash(ctbl, addr, v6);
88 addr_len = 16;
89 else
90 addr_len = 4;
91
92 hash = clip_addr_hash(ctbl, addr, addr_len);
93 87
94 read_lock_bh(&ctbl->lock); 88 read_lock_bh(&ctbl->lock);
95 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { 89 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
96 if (addr_len == cte->addr_len && 90 if (cte->addr6.sin6_family == AF_INET6 && v6)
97 memcmp(lip, cte->addr, cte->addr_len) == 0) { 91 ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
92 sizeof(struct in6_addr));
93 else if (cte->addr.sin_family == AF_INET && !v6)
94 ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
95 sizeof(struct in_addr));
96 if (!ret) {
98 ce = cte; 97 ce = cte;
99 read_unlock_bh(&ctbl->lock); 98 read_unlock_bh(&ctbl->lock);
100 goto found; 99 goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
111 spin_lock_init(&ce->lock); 110 spin_lock_init(&ce->lock);
112 atomic_set(&ce->refcnt, 0); 111 atomic_set(&ce->refcnt, 0);
113 atomic_dec(&ctbl->nfree); 112 atomic_dec(&ctbl->nfree);
114 ce->addr_len = addr_len;
115 memcpy(ce->addr, lip, addr_len);
116 list_add_tail(&ce->list, &ctbl->hash_list[hash]); 113 list_add_tail(&ce->list, &ctbl->hash_list[hash]);
117 if (v6) { 114 if (v6) {
115 ce->addr6.sin6_family = AF_INET6;
116 memcpy(ce->addr6.sin6_addr.s6_addr,
117 lip, sizeof(struct in6_addr));
118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); 118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
119 if (ret) { 119 if (ret) {
120 write_unlock_bh(&ctbl->lock); 120 write_unlock_bh(&ctbl->lock);
121 return ret; 121 return ret;
122 } 122 }
123 } else {
124 ce->addr.sin_family = AF_INET;
125 memcpy((char *)(&ce->addr.sin_addr), lip,
126 sizeof(struct in_addr));
123 } 127 }
124 } else { 128 } else {
125 write_unlock_bh(&ctbl->lock); 129 write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
140 struct clip_entry *ce, *cte; 144 struct clip_entry *ce, *cte;
141 u32 *addr = (u32 *)lip; 145 u32 *addr = (u32 *)lip;
142 int hash; 146 int hash;
143 int addr_len; 147 int ret = -1;
144
145 if (v6)
146 addr_len = 16;
147 else
148 addr_len = 4;
149 148
150 hash = clip_addr_hash(ctbl, addr, addr_len); 149 hash = clip_addr_hash(ctbl, addr, v6);
151 150
152 read_lock_bh(&ctbl->lock); 151 read_lock_bh(&ctbl->lock);
153 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { 152 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
154 if (addr_len == cte->addr_len && 153 if (cte->addr6.sin6_family == AF_INET6 && v6)
155 memcmp(lip, cte->addr, cte->addr_len) == 0) { 154 ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
155 sizeof(struct in6_addr));
156 else if (cte->addr.sin_family == AF_INET && !v6)
157 ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
158 sizeof(struct in_addr));
159 if (!ret) {
156 ce = cte; 160 ce = cte;
157 read_unlock_bh(&ctbl->lock); 161 read_unlock_bh(&ctbl->lock);
158 goto found; 162 goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
249 for (i = 0 ; i < ctbl->clipt_size; ++i) { 253 for (i = 0 ; i < ctbl->clipt_size; ++i) {
250 list_for_each_entry(ce, &ctbl->hash_list[i], list) { 254 list_for_each_entry(ce, &ctbl->hash_list[i], list) {
251 ip[0] = '\0'; 255 ip[0] = '\0';
252 if (ce->addr_len == 16) 256 sprintf(ip, "%pISc", &ce->addr);
253 sprintf(ip, "%pI6c", ce->addr);
254 else
255 sprintf(ip, "%pI4c", ce->addr);
256 seq_printf(seq, "%-25s %u\n", ip, 257 seq_printf(seq, "%-25s %u\n", ip,
257 atomic_read(&ce->refcnt)); 258 atomic_read(&ce->refcnt));
258 } 259 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba0161cf8..35eb43c6bcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
14 spinlock_t lock; /* Hold while modifying clip reference */ 14 spinlock_t lock; /* Hold while modifying clip reference */
15 atomic_t refcnt; 15 atomic_t refcnt;
16 struct list_head list; 16 struct list_head list;
17 u32 addr[4]; 17 union {
18 int addr_len; 18 struct sockaddr_in addr;
19 struct sockaddr_in6 addr6;
20 };
19}; 21};
20 22
21struct clip_tbl { 23struct clip_tbl {
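
The clip_tbl rework stores each entry as a sockaddr_in/sockaddr_in6 union keyed by the address family, and matches entries by comparing the embedded in_addr/in6_addr (the kernel side also gains %pISc printing). The runnable user-space sketch below shows the same union-and-compare pattern; the struct and function names are illustrative, not the driver's.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Entry holding either an IPv4 or an IPv6 address, as in clip_tbl.h. */
struct clip_like_entry {
        union {
                struct sockaddr_in  addr;
                struct sockaddr_in6 addr6;
        };
};

static int entry_matches(const struct clip_like_entry *e,
                         const void *ip, int v6)
{
        if (v6 && e->addr6.sin6_family == AF_INET6)
                return memcmp(ip, &e->addr6.sin6_addr,
                              sizeof(struct in6_addr)) == 0;
        if (!v6 && e->addr.sin_family == AF_INET)
                return memcmp(ip, &e->addr.sin_addr,
                              sizeof(struct in_addr)) == 0;
        return 0;
}

int main(void)
{
        struct clip_like_entry e = { 0 };
        struct in_addr probe;

        e.addr.sin_family = AF_INET;
        inet_pton(AF_INET, "192.0.2.1", &e.addr.sin_addr);
        inet_pton(AF_INET, "192.0.2.1", &probe);

        printf("match: %d\n", entry_matches(&e, &probe, 0));
        return 0;
}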
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17efe6e..97842d03675b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
1103#define T4_MEMORY_WRITE 0 1103#define T4_MEMORY_WRITE 0
1104#define T4_MEMORY_READ 1 1104#define T4_MEMORY_READ 1
1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, 1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
1106 __be32 *buf, int dir); 1106 void *buf, int dir);
1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, 1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1108 u32 len, __be32 *buf) 1108 u32 len, __be32 *buf)
1109{ 1109{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b65265e..853c38997c82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
450 * @addr: address within indicated memory type 450 * @addr: address within indicated memory type
451 * @len: amount of memory to transfer 451 * @len: amount of memory to transfer
452 * @buf: host memory buffer 452 * @hbuf: host memory buffer
453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) 453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
454 * 454 *
455 * Reads/writes an [almost] arbitrary memory region in the firmware: the 455 * Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
460 * caller's responsibility to perform appropriate byte order conversions. 460 * caller's responsibility to perform appropriate byte order conversions.
461 */ 461 */
462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, 462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
463 u32 len, __be32 *buf, int dir) 463 u32 len, void *hbuf, int dir)
464{ 464{
465 u32 pos, offset, resid, memoffset; 465 u32 pos, offset, resid, memoffset;
466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; 466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
467 u32 *buf;
467 468
468 /* Argument sanity checks ... 469 /* Argument sanity checks ...
469 */ 470 */
470 if (addr & 0x3) 471 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
471 return -EINVAL; 472 return -EINVAL;
473 buf = (u32 *)hbuf;
472 474
473 /* It's convenient to be able to handle lengths which aren't a 475 /* It's convenient to be able to handle lengths which aren't a
474 * multiple of 32-bits because we often end up transferring files to 476 * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
532 534
533 /* Transfer data to/from the adapter as long as there's an integral 535 /* Transfer data to/from the adapter as long as there's an integral
534 * number of 32-bit transfers to complete. 536 * number of 32-bit transfers to complete.
537 *
538 * A note on Endianness issues:
539 *
540 * The "register" reads and writes below from/to the PCI-E Memory
541 * Window invoke the standard adapter Big-Endian to PCI-E Link
542 * Little-Endian "swizzle." As a result, if we have the following
543 * data in adapter memory:
544 *
545 * Memory: ... | b0 | b1 | b2 | b3 | ...
546 * Address: i+0 i+1 i+2 i+3
547 *
548 * Then a read of the adapter memory via the PCI-E Memory Window
549 * will yield:
550 *
551 * x = readl(i)
552 * 31 0
553 * [ b3 | b2 | b1 | b0 ]
554 *
555 * If this value is stored into local memory on a Little-Endian system
556 * it will show up correctly in local memory as:
557 *
558 * ( ..., b0, b1, b2, b3, ... )
559 *
560 * But on a Big-Endian system, the store will show up in memory
561 * incorrectly swizzled as:
562 *
563 * ( ..., b3, b2, b1, b0, ... )
564 *
565 * So we need to account for this in the reads and writes to the
566 * PCI-E Memory Window below by undoing the register read/write
567 * swizzles.
535 */ 568 */
536 while (len > 0) { 569 while (len > 0) {
537 if (dir == T4_MEMORY_READ) 570 if (dir == T4_MEMORY_READ)
538 *buf++ = (__force __be32) t4_read_reg(adap, 571 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
539 mem_base + offset); 572 mem_base + offset));
540 else 573 else
541 t4_write_reg(adap, mem_base + offset, 574 t4_write_reg(adap, mem_base + offset,
542 (__force u32) *buf++); 575 (__force u32)cpu_to_le32(*buf++));
543 offset += sizeof(__be32); 576 offset += sizeof(__be32);
544 len -= sizeof(__be32); 577 len -= sizeof(__be32);
545 578
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
568 */ 601 */
569 if (resid) { 602 if (resid) {
570 union { 603 union {
571 __be32 word; 604 u32 word;
572 char byte[4]; 605 char byte[4];
573 } last; 606 } last;
574 unsigned char *bp; 607 unsigned char *bp;
575 int i; 608 int i;
576 609
577 if (dir == T4_MEMORY_READ) { 610 if (dir == T4_MEMORY_READ) {
578 last.word = (__force __be32) t4_read_reg(adap, 611 last.word = le32_to_cpu(
579 mem_base + offset); 612 (__force __le32)t4_read_reg(adap,
613 mem_base + offset));
580 for (bp = (unsigned char *)buf, i = resid; i < 4; i++) 614 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
581 bp[i] = last.byte[i]; 615 bp[i] = last.byte[i];
582 } else { 616 } else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
584 for (i = resid; i < 4; i++) 618 for (i = resid; i < 4; i++)
585 last.byte[i] = 0; 619 last.byte[i] = 0;
586 t4_write_reg(adap, mem_base + offset, 620 t4_write_reg(adap, mem_base + offset,
587 (__force u32) last.word); 621 (__force u32)cpu_to_le32(last.word));
588 } 622 }
589 } 623 }
590 624
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9cbe038a388e..a5179bfcdc2c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
272 } 272 }
273 273
274 if (ENIC_TEST_INTR(pba, notify_intr)) { 274 if (ENIC_TEST_INTR(pba, notify_intr)) {
275 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
276 enic_notify_check(enic); 275 enic_notify_check(enic);
276 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
277 } 277 }
278 278
279 if (ENIC_TEST_INTR(pba, err_intr)) { 279 if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
346 struct enic *enic = data; 346 struct enic *enic = data;
347 unsigned int intr = enic_msix_notify_intr(enic); 347 unsigned int intr = enic_msix_notify_intr(enic);
348 348
349 vnic_intr_return_all_credits(&enic->intr[intr]);
350 enic_notify_check(enic); 349 enic_notify_check(enic);
350 vnic_intr_return_all_credits(&enic->intr[intr]);
351 351
352 return IRQ_HANDLED; 352 return IRQ_HANDLED;
353} 353}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9bb6220663b2..99492b7e3713 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1597,7 +1597,7 @@ fec_enet_interrupt(int irq, void *dev_id)
1597 writel(int_events, fep->hwp + FEC_IEVENT); 1597 writel(int_events, fep->hwp + FEC_IEVENT);
1598 fec_enet_collect_events(fep, int_events); 1598 fec_enet_collect_events(fep, int_events);
1599 1599
1600 if (fep->work_tx || fep->work_rx) { 1600 if ((fep->work_tx || fep->work_rx) && fep->link) {
1601 ret = IRQ_HANDLED; 1601 ret = IRQ_HANDLED;
1602 1602
1603 if (napi_schedule_prep(&fep->napi)) { 1603 if (napi_schedule_prep(&fep->napi)) {
@@ -3383,7 +3383,6 @@ fec_drv_remove(struct platform_device *pdev)
3383 regulator_disable(fep->reg_phy); 3383 regulator_disable(fep->reg_phy);
3384 if (fep->ptp_clock) 3384 if (fep->ptp_clock)
3385 ptp_clock_unregister(fep->ptp_clock); 3385 ptp_clock_unregister(fep->ptp_clock);
3386 fec_enet_clk_enable(ndev, false);
3387 of_node_put(fep->phy_node); 3386 of_node_put(fep->phy_node);
3388 free_netdev(ndev); 3387 free_netdev(ndev);
3389 3388
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df78882e48..7bf3682cdf47 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np,
747 return 0; 747 return 0;
748} 748}
749 749
750static int gfar_of_group_count(struct device_node *np)
751{
752 struct device_node *child;
753 int num = 0;
754
755 for_each_available_child_of_node(np, child)
756 if (!of_node_cmp(child->name, "queue-group"))
757 num++;
758
759 return num;
760}
761
750static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 762static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
751{ 763{
752 const char *model; 764 const char *model;
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
784 num_rx_qs = 1; 796 num_rx_qs = 1;
785 } else { /* MQ_MG_MODE */ 797 } else { /* MQ_MG_MODE */
786 /* get the actual number of supported groups */ 798 /* get the actual number of supported groups */
787 unsigned int num_grps = of_get_available_child_count(np); 799 unsigned int num_grps = gfar_of_group_count(np);
788 800
789 if (num_grps == 0 || num_grps > MAXGROUPS) { 801 if (num_grps == 0 || num_grps > MAXGROUPS) {
790 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", 802 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
851 863
852 /* Parse and initialize group specific information */ 864 /* Parse and initialize group specific information */
853 if (priv->mode == MQ_MG_MODE) { 865 if (priv->mode == MQ_MG_MODE) {
854 for_each_child_of_node(np, child) { 866 for_each_available_child_of_node(np, child) {
867 if (of_node_cmp(child->name, "queue-group"))
868 continue;
869
855 err = gfar_parse_group(child, priv, model); 870 err = gfar_parse_group(child, priv, model);
856 if (err) 871 if (err)
857 goto err_grp_init; 872 goto err_grp_init;
@@ -3162,8 +3177,8 @@ static void adjust_link(struct net_device *dev)
3162 struct phy_device *phydev = priv->phydev; 3177 struct phy_device *phydev = priv->phydev;
3163 3178
3164 if (unlikely(phydev->link != priv->oldlink || 3179 if (unlikely(phydev->link != priv->oldlink ||
3165 phydev->duplex != priv->oldduplex || 3180 (phydev->link && (phydev->duplex != priv->oldduplex ||
3166 phydev->speed != priv->oldspeed)) 3181 phydev->speed != priv->oldspeed))))
3167 gfar_update_link_state(priv); 3182 gfar_update_link_state(priv);
3168} 3183}
3169 3184
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb7a962..c05e50759621 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
3262 device_remove_file(&dev->dev, &dev_attr_remove_port); 3262 device_remove_file(&dev->dev, &dev_attr_remove_port);
3263} 3263}
3264 3264
3265static int ehea_reboot_notifier(struct notifier_block *nb,
3266 unsigned long action, void *unused)
3267{
3268 if (action == SYS_RESTART) {
3269 pr_info("Reboot: freeing all eHEA resources\n");
3270 ibmebus_unregister_driver(&ehea_driver);
3271 }
3272 return NOTIFY_DONE;
3273}
3274
3275static struct notifier_block ehea_reboot_nb = {
3276 .notifier_call = ehea_reboot_notifier,
3277};
3278
3279static int ehea_mem_notifier(struct notifier_block *nb,
3280 unsigned long action, void *data)
3281{
3282 int ret = NOTIFY_BAD;
3283 struct memory_notify *arg = data;
3284
3285 mutex_lock(&dlpar_mem_lock);
3286
3287 switch (action) {
3288 case MEM_CANCEL_OFFLINE:
3289 pr_info("memory offlining canceled");
3290 /* Fall through: re-add canceled memory block */
3291
3292 case MEM_ONLINE:
3293 pr_info("memory is going online");
3294 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3295 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3296 goto out_unlock;
3297 ehea_rereg_mrs();
3298 break;
3299
3300 case MEM_GOING_OFFLINE:
3301 pr_info("memory is going offline");
3302 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3303 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3304 goto out_unlock;
3305 ehea_rereg_mrs();
3306 break;
3307
3308 default:
3309 break;
3310 }
3311
3312 ehea_update_firmware_handles();
3313 ret = NOTIFY_OK;
3314
3315out_unlock:
3316 mutex_unlock(&dlpar_mem_lock);
3317 return ret;
3318}
3319
3320static struct notifier_block ehea_mem_nb = {
3321 .notifier_call = ehea_mem_notifier,
3322};
3323
3324static void ehea_crash_handler(void)
3325{
3326 int i;
3327
3328 if (ehea_fw_handles.arr)
3329 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3330 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3331 ehea_fw_handles.arr[i].fwh,
3332 FORCE_FREE);
3333
3334 if (ehea_bcmc_regs.arr)
3335 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3336 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3337 ehea_bcmc_regs.arr[i].port_id,
3338 ehea_bcmc_regs.arr[i].reg_type,
3339 ehea_bcmc_regs.arr[i].macaddr,
3340 0, H_DEREG_BCMC);
3341}
3342
3343static atomic_t ehea_memory_hooks_registered;
3344
3345/* Register memory hooks on probe of first adapter */
3346static int ehea_register_memory_hooks(void)
3347{
3348 int ret = 0;
3349
3350 if (atomic_inc_and_test(&ehea_memory_hooks_registered))
3351 return 0;
3352
3353 ret = ehea_create_busmap();
3354 if (ret) {
3355 pr_info("ehea_create_busmap failed\n");
3356 goto out;
3357 }
3358
3359 ret = register_reboot_notifier(&ehea_reboot_nb);
3360 if (ret) {
3361 pr_info("register_reboot_notifier failed\n");
3362 goto out;
3363 }
3364
3365 ret = register_memory_notifier(&ehea_mem_nb);
3366 if (ret) {
3367 pr_info("register_memory_notifier failed\n");
3368 goto out2;
3369 }
3370
3371 ret = crash_shutdown_register(ehea_crash_handler);
3372 if (ret) {
3373 pr_info("crash_shutdown_register failed\n");
3374 goto out3;
3375 }
3376
3377 return 0;
3378
3379out3:
3380 unregister_memory_notifier(&ehea_mem_nb);
3381out2:
3382 unregister_reboot_notifier(&ehea_reboot_nb);
3383out:
3384 return ret;
3385}
3386
3387static void ehea_unregister_memory_hooks(void)
3388{
3389 if (atomic_read(&ehea_memory_hooks_registered))
3390 return;
3391
3392 unregister_reboot_notifier(&ehea_reboot_nb);
3393 if (crash_shutdown_unregister(ehea_crash_handler))
3394 pr_info("failed unregistering crash handler\n");
3395 unregister_memory_notifier(&ehea_mem_nb);
3396}
3397
3265static int ehea_probe_adapter(struct platform_device *dev) 3398static int ehea_probe_adapter(struct platform_device *dev)
3266{ 3399{
3267 struct ehea_adapter *adapter; 3400 struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
3269 int ret; 3402 int ret;
3270 int i; 3403 int i;
3271 3404
3405 ret = ehea_register_memory_hooks();
3406 if (ret)
3407 return ret;
3408
3272 if (!dev || !dev->dev.of_node) { 3409 if (!dev || !dev->dev.of_node) {
3273 pr_err("Invalid ibmebus device probed\n"); 3410 pr_err("Invalid ibmebus device probed\n");
3274 return -EINVAL; 3411 return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
3392 return 0; 3529 return 0;
3393} 3530}
3394 3531
3395static void ehea_crash_handler(void)
3396{
3397 int i;
3398
3399 if (ehea_fw_handles.arr)
3400 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3401 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3402 ehea_fw_handles.arr[i].fwh,
3403 FORCE_FREE);
3404
3405 if (ehea_bcmc_regs.arr)
3406 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3407 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3408 ehea_bcmc_regs.arr[i].port_id,
3409 ehea_bcmc_regs.arr[i].reg_type,
3410 ehea_bcmc_regs.arr[i].macaddr,
3411 0, H_DEREG_BCMC);
3412}
3413
3414static int ehea_mem_notifier(struct notifier_block *nb,
3415 unsigned long action, void *data)
3416{
3417 int ret = NOTIFY_BAD;
3418 struct memory_notify *arg = data;
3419
3420 mutex_lock(&dlpar_mem_lock);
3421
3422 switch (action) {
3423 case MEM_CANCEL_OFFLINE:
3424 pr_info("memory offlining canceled");
3425 /* Readd canceled memory block */
3426 case MEM_ONLINE:
3427 pr_info("memory is going online");
3428 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3429 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3430 goto out_unlock;
3431 ehea_rereg_mrs();
3432 break;
3433 case MEM_GOING_OFFLINE:
3434 pr_info("memory is going offline");
3435 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3436 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3437 goto out_unlock;
3438 ehea_rereg_mrs();
3439 break;
3440 default:
3441 break;
3442 }
3443
3444 ehea_update_firmware_handles();
3445 ret = NOTIFY_OK;
3446
3447out_unlock:
3448 mutex_unlock(&dlpar_mem_lock);
3449 return ret;
3450}
3451
3452static struct notifier_block ehea_mem_nb = {
3453 .notifier_call = ehea_mem_notifier,
3454};
3455
3456static int ehea_reboot_notifier(struct notifier_block *nb,
3457 unsigned long action, void *unused)
3458{
3459 if (action == SYS_RESTART) {
3460 pr_info("Reboot: freeing all eHEA resources\n");
3461 ibmebus_unregister_driver(&ehea_driver);
3462 }
3463 return NOTIFY_DONE;
3464}
3465
3466static struct notifier_block ehea_reboot_nb = {
3467 .notifier_call = ehea_reboot_notifier,
3468};
3469
3470static int check_module_parm(void) 3532static int check_module_parm(void)
3471{ 3533{
3472 int ret = 0; 3534 int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
3520 if (ret) 3582 if (ret)
3521 goto out; 3583 goto out;
3522 3584
3523 ret = ehea_create_busmap();
3524 if (ret)
3525 goto out;
3526
3527 ret = register_reboot_notifier(&ehea_reboot_nb);
3528 if (ret)
3529 pr_info("failed registering reboot notifier\n");
3530
3531 ret = register_memory_notifier(&ehea_mem_nb);
3532 if (ret)
3533 pr_info("failed registering memory remove notifier\n");
3534
3535 ret = crash_shutdown_register(ehea_crash_handler);
3536 if (ret)
3537 pr_info("failed registering crash handler\n");
3538
3539 ret = ibmebus_register_driver(&ehea_driver); 3585 ret = ibmebus_register_driver(&ehea_driver);
3540 if (ret) { 3586 if (ret) {
3541 pr_err("failed registering eHEA device driver on ebus\n"); 3587 pr_err("failed registering eHEA device driver on ebus\n");
3542 goto out2; 3588 goto out;
3543 } 3589 }
3544 3590
3545 ret = driver_create_file(&ehea_driver.driver, 3591 ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
3547 if (ret) { 3593 if (ret) {
3548 pr_err("failed to register capabilities attribute, ret=%d\n", 3594 pr_err("failed to register capabilities attribute, ret=%d\n",
3549 ret); 3595 ret);
3550 goto out3; 3596 goto out2;
3551 } 3597 }
3552 3598
3553 return ret; 3599 return ret;
3554 3600
3555out3:
3556 ibmebus_unregister_driver(&ehea_driver);
3557out2: 3601out2:
3558 unregister_memory_notifier(&ehea_mem_nb); 3602 ibmebus_unregister_driver(&ehea_driver);
3559 unregister_reboot_notifier(&ehea_reboot_nb);
3560 crash_shutdown_unregister(ehea_crash_handler);
3561out: 3603out:
3562 return ret; 3604 return ret;
3563} 3605}
3564 3606
3565static void __exit ehea_module_exit(void) 3607static void __exit ehea_module_exit(void)
3566{ 3608{
3567 int ret;
3568
3569 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3609 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3570 ibmebus_unregister_driver(&ehea_driver); 3610 ibmebus_unregister_driver(&ehea_driver);
3571 unregister_reboot_notifier(&ehea_reboot_nb); 3611 ehea_unregister_memory_hooks();
3572 ret = crash_shutdown_unregister(ehea_crash_handler);
3573 if (ret)
3574 pr_info("failed unregistering crash handler\n");
3575 unregister_memory_notifier(&ehea_mem_nb);
3576 kfree(ehea_fw_handles.arr); 3612 kfree(ehea_fw_handles.arr);
3577 kfree(ehea_bcmc_regs.arr); 3613 kfree(ehea_bcmc_regs.arr);
3578 ehea_destroy_busmap(); 3614 ehea_destroy_busmap();
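
The eHEA rework above moves the reboot, memory-hotplug and crash-handler registration out of module init and into the first successful adapter probe, with a matching unregister path on module exit. A minimal user-space sketch of the register-once-on-first-probe idea; pthread_once() is used here purely as the guard, whereas the driver tracks the state with an atomic counter, so treat the primitive as an assumption:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t hooks_once = PTHREAD_ONCE_INIT;

static void register_hooks(void)
{
	/* stand-in for register_reboot_notifier() / register_memory_notifier()
	 * / crash_shutdown_register() in the driver */
	printf("hooks registered\n");
}

static int probe_adapter(int id)
{
	pthread_once(&hooks_once, register_hooks);	/* first caller only */
	printf("adapter %d probed\n", id);
	return 0;
}

int main(void)
{
	probe_adapter(0);
	probe_adapter(1);	/* "hooks registered" is not printed again */
	return 0;
}

Registering lazily means a kernel with the driver built in but no eHEA hardware present no longer takes the reboot/memory-notifier and crash-handler slots at all.
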
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc019e7..072426a72745 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1327 return ret; 1327 return ret;
1328} 1328}
1329 1329
1330static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1331{
1332 struct ibmveth_adapter *adapter = netdev_priv(dev);
1333 struct sockaddr *addr = p;
1334 u64 mac_address;
1335 int rc;
1336
1337 if (!is_valid_ether_addr(addr->sa_data))
1338 return -EADDRNOTAVAIL;
1339
1340 mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1341 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1342 if (rc) {
1343 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1344 return rc;
1345 }
1346
1347 ether_addr_copy(dev->dev_addr, addr->sa_data);
1348
1349 return 0;
1350}
1351
1330static const struct net_device_ops ibmveth_netdev_ops = { 1352static const struct net_device_ops ibmveth_netdev_ops = {
1331 .ndo_open = ibmveth_open, 1353 .ndo_open = ibmveth_open,
1332 .ndo_stop = ibmveth_close, 1354 .ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1337 .ndo_fix_features = ibmveth_fix_features, 1359 .ndo_fix_features = ibmveth_fix_features,
1338 .ndo_set_features = ibmveth_set_features, 1360 .ndo_set_features = ibmveth_set_features,
1339 .ndo_validate_addr = eth_validate_addr, 1361 .ndo_validate_addr = eth_validate_addr,
1340 .ndo_set_mac_address = eth_mac_addr, 1362 .ndo_set_mac_address = ibmveth_set_mac_addr,
1341#ifdef CONFIG_NET_POLL_CONTROLLER 1363#ifdef CONFIG_NET_POLL_CONTROLLER
1342 .ndo_poll_controller = ibmveth_poll_controller, 1364 .ndo_poll_controller = ibmveth_poll_controller,
1343#endif 1365#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffebf8d8..6aea65dae5ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
868 * The grst delay value is in 100ms units, and we'll wait a 868 * The grst delay value is in 100ms units, and we'll wait a
869 * couple counts longer to be sure we don't just miss the end. 869 * couple counts longer to be sure we don't just miss the end.
870 */ 870 */
871 grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK 871 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
872 >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 872 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
873 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
873 for (cnt = 0; cnt < grst_del + 2; cnt++) { 874 for (cnt = 0; cnt < grst_del + 2; cnt++) {
874 reg = rd32(hw, I40E_GLGEN_RSTAT); 875 reg = rd32(hw, I40E_GLGEN_RSTAT);
875 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 876 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
2846 2847
2847 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2848 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2848 2849
2849 if (!status) 2850 if (!status && filter_index)
2850 *filter_index = resp->index; 2851 *filter_index = resp->index;
2851 2852
2852 return status; 2853 return status;
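
The grst_del fix above and the i40e_dcb_nl one that follows are the same bug: '>>' binds tighter than '&', so without the parentheses the shift was applied to the mask rather than to the masked register value. A tiny stand-alone check (the field layout below is made up for illustration; the real GRSTDEL mask and shift live in the register headers):

#include <assert.h>
#include <stdint.h>

#define FIELD_SHIFT 26
#define FIELD_MASK  (0x7u << FIELD_SHIFT)

int main(void)
{
	uint32_t reg = 0x5u << FIELD_SHIFT | 0x123;	/* field = 5 + noise */

	/* wrong: the mask is shifted first, the expression becomes reg & 0x7 */
	uint32_t wrong = reg & FIELD_MASK >> FIELD_SHIFT;
	/* right: mask the register, then shift the field down */
	uint32_t right = (reg & FIELD_MASK) >> FIELD_SHIFT;

	assert(right == 5);
	assert(wrong != 5);	/* 0x123 & 0x7 == 3 here */
	return 0;
}

With the parentheses the field reads back as 5; without them the expression collapses to the low bits of the register.
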
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb63ce98..a11c70ca5a28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
40 u32 val; 40 u32 val;
41 41
42 val = rd32(hw, I40E_PRTDCB_GENC); 42 val = rd32(hw, I40E_PRTDCB_GENC);
43 *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> 43 *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
44 I40E_PRTDCB_GENC_PFCLDA_SHIFT); 44 I40E_PRTDCB_GENC_PFCLDA_SHIFT);
45} 45}
46 46
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f983971..c17ee77100d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
989 if (!cmd_buf) 989 if (!cmd_buf)
990 return count; 990 return count;
991 bytes_not_copied = copy_from_user(cmd_buf, buffer, count); 991 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
992 if (bytes_not_copied < 0) 992 if (bytes_not_copied < 0) {
993 kfree(cmd_buf);
993 return bytes_not_copied; 994 return bytes_not_copied;
995 }
994 if (bytes_not_copied > 0) 996 if (bytes_not_copied > 0)
995 count -= bytes_not_copied; 997 count -= bytes_not_copied;
996 cmd_buf[count] = '\0'; 998 cmd_buf[count] = '\0';
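
The debugfs fix plugs a small leak: cmd_buf was allocated just above, so the early return for a failed copy_from_user() has to free it first. The general shape of the rule, as a user-space sketch (read_input() stands in for copy_from_user() and simply fails here):

#include <stdio.h>
#include <stdlib.h>

/* stand-in for copy_from_user(): pretend the copy failed */
static int read_input(char *dst, size_t len)
{
	(void)dst;
	(void)len;
	return -1;
}

static int handle_command(size_t count)
{
	char *cmd = malloc(count + 1);
	int err;

	if (!cmd)
		return -1;

	err = read_input(cmd, count);
	if (err < 0) {
		free(cmd);		/* the leak fixed above */
		return err;
	}

	cmd[count] = '\0';
	/* ... parse cmd ... */
	free(cmd);
	return 0;
}

int main(void)
{
	printf("%d\n", handle_command(16));	/* -1, and nothing leaks */
	return 0;
}
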
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281be1c9f..dadda3c5d658 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1512 vsi->tc_config.numtc = numtc; 1512 vsi->tc_config.numtc = numtc;
1513 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1513 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1514 /* Number of queues per enabled TC */ 1514 /* Number of queues per enabled TC */
1515 num_tc_qps = vsi->alloc_queue_pairs/numtc; 1515 /* In MFP case we can have a much lower count of MSIx
1516 * vectors available and so we need to lower the used
1517 * q count.
1518 */
1519 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1520 num_tc_qps = qcount / numtc;
1516 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); 1521 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1517 1522
1518 /* Setup queue offset/count for all TCs for given VSI */ 1523 /* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2684 u16 qoffset, qcount; 2689 u16 qoffset, qcount;
2685 int i, n; 2690 int i, n;
2686 2691
2687 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2692 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2688 return; 2693 /* Reset the TC information */
2694 for (i = 0; i < vsi->num_queue_pairs; i++) {
2695 rx_ring = vsi->rx_rings[i];
2696 tx_ring = vsi->tx_rings[i];
2697 rx_ring->dcb_tc = 0;
2698 tx_ring->dcb_tc = 0;
2699 }
2700 }
2689 2701
2690 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2702 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2691 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2703 if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3830{ 3842{
3831 int i; 3843 int i;
3832 3844
3845 i40e_stop_misc_vector(pf);
3846 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3847 synchronize_irq(pf->msix_entries[0].vector);
3848 free_irq(pf->msix_entries[0].vector, pf);
3849 }
3850
3833 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3851 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3834 for (i = 0; i < pf->num_alloc_vsi; i++) 3852 for (i = 0; i < pf->num_alloc_vsi; i++)
3835 if (pf->vsi[i]) 3853 if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5254 5272
5255 /* Wait for the PF's Tx queues to be disabled */ 5273 /* Wait for the PF's Tx queues to be disabled */
5256 ret = i40e_pf_wait_txq_disabled(pf); 5274 ret = i40e_pf_wait_txq_disabled(pf);
5257 if (!ret) 5275 if (ret) {
5276 /* Schedule PF reset to recover */
5277 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5278 i40e_service_event_schedule(pf);
5279 } else {
5258 i40e_pf_unquiesce_all_vsi(pf); 5280 i40e_pf_unquiesce_all_vsi(pf);
5281 }
5282
5259exit: 5283exit:
5260 return ret; 5284 return ret;
5261} 5285}
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
5587 int i, v; 5611 int i, v;
5588 5612
5589 /* If we're down or resetting, just bail */ 5613 /* If we're down or resetting, just bail */
5590 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5614 if (test_bit(__I40E_DOWN, &pf->state) ||
5615 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5591 return; 5616 return;
5592 5617
5593 /* for each VSI/netdev 5618 /* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
9533 set_bit(__I40E_DOWN, &pf->state); 9558 set_bit(__I40E_DOWN, &pf->state);
9534 del_timer_sync(&pf->service_timer); 9559 del_timer_sync(&pf->service_timer);
9535 cancel_work_sync(&pf->service_task); 9560 cancel_work_sync(&pf->service_task);
9561 i40e_fdir_teardown(pf);
9536 9562
9537 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 9563 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9538 i40e_free_vfs(pf); 9564 i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
9559 if (pf->vsi[pf->lan_vsi]) 9585 if (pf->vsi[pf->lan_vsi])
9560 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 9586 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9561 9587
9562 i40e_stop_misc_vector(pf);
9563 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9564 synchronize_irq(pf->msix_entries[0].vector);
9565 free_irq(pf->msix_entries[0].vector, pf);
9566 }
9567
9568 /* shutdown and destroy the HMC */ 9588 /* shutdown and destroy the HMC */
9569 if (pf->hw.hmc.hmc_obj) { 9589 if (pf->hw.hmc.hmc_obj) {
9570 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 9590 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
9718 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 9738 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9719 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 9739 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9720 9740
9741 i40e_clear_interrupt_scheme(pf);
9742
9721 if (system_state == SYSTEM_POWER_OFF) { 9743 if (system_state == SYSTEM_POWER_OFF) {
9722 pci_wake_from_d3(pdev, pf->wol_en); 9744 pci_wake_from_d3(pdev, pf->wol_en);
9723 pci_set_power_state(pdev, PCI_D3hot); 9745 pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e45a47..5defe0d63514 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
679{ 679{
680 i40e_status status; 680 i40e_status status;
681 enum i40e_nvmupd_cmd upd_cmd; 681 enum i40e_nvmupd_cmd upd_cmd;
682 bool retry_attempt = false;
682 683
683 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); 684 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
684 685
686retry:
685 switch (upd_cmd) { 687 switch (upd_cmd) {
686 case I40E_NVMUPD_WRITE_CON: 688 case I40E_NVMUPD_WRITE_CON:
687 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 689 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
725 *errno = -ESRCH; 727 *errno = -ESRCH;
726 break; 728 break;
727 } 729 }
730
731 /* In some circumstances, a multi-write transaction takes longer
732 * than the default 3 minute timeout on the write semaphore. If
733 * the write failed with an EBUSY status, this is likely the problem,
734 * so here we try to reacquire the semaphore then retry the write.
735 * We only do one retry, then give up.
736 */
737 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
738 !retry_attempt) {
739 i40e_status old_status = status;
740 u32 old_asq_status = hw->aq.asq_last_status;
741 u32 gtime;
742
743 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
744 if (gtime >= hw->nvm.hw_semaphore_timeout) {
745 i40e_debug(hw, I40E_DEBUG_ALL,
746 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
747 gtime, hw->nvm.hw_semaphore_timeout);
748 i40e_release_nvm(hw);
749 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
750 if (status) {
751 i40e_debug(hw, I40E_DEBUG_ALL,
752 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
753 hw->aq.asq_last_status);
754 status = old_status;
755 hw->aq.asq_last_status = old_asq_status;
756 } else {
757 retry_attempt = true;
758 goto retry;
759 }
760 }
761 }
762
728 return status; 763 return status;
729} 764}
730 765
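
The comment in the NVM hunk above explains the retry: a long multi-write transaction can outlive the write semaphore, so an EBUSY failure is retried exactly once after releasing and reacquiring the semaphore, and the original status is restored if the reacquire itself fails. A user-space sketch of that retry-once control flow (acquire/release/do_write are stand-ins, not the driver's helpers):

#include <stdbool.h>
#include <stdio.h>

static int attempts;

static bool reacquire(void) { return true; }
static void release(void)   { }
static int  do_write(void)  { return attempts++ == 0 ? -16 /* EBUSY */ : 0; }

static int write_with_retry(void)
{
	bool retried = false;
	int status;

retry:
	status = do_write();
	if (status && !retried) {
		/* assume the write semaphore expired: drop it, take it
		 * again, and retry the whole write exactly once */
		release();
		if (reacquire()) {
			retried = true;
			goto retry;
		}
		/* reacquire failed: fall through with the original status */
	}
	return status;
}

int main(void)
{
	printf("status = %d after %d attempts\n", write_with_retry(), attempts);
	return 0;
}
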
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d36f0f..bbf1b1247ac4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
586} 586}
587 587
588/** 588/**
589 * i40e_get_head - Retrieve head from head writeback
590 * @tx_ring: tx ring to fetch head of
591 *
592 * Returns value of Tx ring head based on value stored
593 * in head write-back location
594 **/
595static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
596{
597 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
598
599 return le32_to_cpu(*(volatile __le32 *)head);
600}
601
602/**
589 * i40e_get_tx_pending - how many tx descriptors not processed 603 * i40e_get_tx_pending - how many tx descriptors not processed
590 * @tx_ring: the ring of descriptors 604 * @tx_ring: the ring of descriptors
591 * 605 *
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
594 **/ 608 **/
595static u32 i40e_get_tx_pending(struct i40e_ring *ring) 609static u32 i40e_get_tx_pending(struct i40e_ring *ring)
596{ 610{
597 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 611 u32 head, tail;
598 ? ring->next_to_use 612
599 : ring->next_to_use + ring->count); 613 head = i40e_get_head(ring);
600 return ntu - ring->next_to_clean; 614 tail = readl(ring->tail);
615
616 if (head != tail)
617 return (head < tail) ?
618 tail - head : (tail + ring->count - head);
619
620 return 0;
601} 621}
602 622
603/** 623/**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
606 **/ 626 **/
607static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 627static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
608{ 628{
629 u32 tx_done = tx_ring->stats.packets;
630 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
609 u32 tx_pending = i40e_get_tx_pending(tx_ring); 631 u32 tx_pending = i40e_get_tx_pending(tx_ring);
610 struct i40e_pf *pf = tx_ring->vsi->back; 632 struct i40e_pf *pf = tx_ring->vsi->back;
611 bool ret = false; 633 bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
623 * run the check_tx_hang logic with a transmit completion 645 * run the check_tx_hang logic with a transmit completion
624 * pending but without time to complete it yet. 646 * pending but without time to complete it yet.
625 */ 647 */
626 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 648 if ((tx_done_old == tx_done) && tx_pending) {
627 (tx_pending >= I40E_MIN_DESC_PENDING)) {
628 /* make sure it is true for two checks in a row */ 649 /* make sure it is true for two checks in a row */
629 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 650 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
630 &tx_ring->state); 651 &tx_ring->state);
631 } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 652 } else if (tx_done_old == tx_done &&
632 (tx_pending < I40E_MIN_DESC_PENDING) && 653 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
633 (tx_pending > 0)) {
634 if (I40E_DEBUG_FLOW & pf->hw.debug_mask) 654 if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
635 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", 655 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
636 tx_pending, tx_ring->queue_index); 656 tx_pending, tx_ring->queue_index);
637 pf->tx_sluggish_count++; 657 pf->tx_sluggish_count++;
638 } else { 658 } else {
639 /* update completed stats and disarm the hang check */ 659 /* update completed stats and disarm the hang check */
640 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 660 tx_ring->tx_stats.tx_done_old = tx_done;
641 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 661 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
642 } 662 }
643 663
644 return ret; 664 return ret;
645} 665}
646 666
647/**
648 * i40e_get_head - Retrieve head from head writeback
649 * @tx_ring: tx ring to fetch head of
650 *
651 * Returns value of Tx ring head based on value stored
652 * in head write-back location
653 **/
654static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
655{
656 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
657
658 return le32_to_cpu(*(volatile __le32 *)head);
659}
660
661#define WB_STRIDE 0x3 667#define WB_STRIDE 0x3
662 668
663/** 669/**
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2140} 2146}
2141 2147
2142/** 2148/**
2149 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2150 * @skb: send buffer
2151 * @tx_flags: collected send information
2152 * @hdr_len: size of the packet header
2153 *
2154 * Note: Our HW can't scatter-gather more than 8 fragments to build
2155 * a packet on the wire and so we need to figure out the cases where we
2156 * need to linearize the skb.
2157 **/
2158static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2159 const u8 hdr_len)
2160{
2161 struct skb_frag_struct *frag;
2162 bool linearize = false;
2163 unsigned int size = 0;
2164 u16 num_frags;
2165 u16 gso_segs;
2166
2167 num_frags = skb_shinfo(skb)->nr_frags;
2168 gso_segs = skb_shinfo(skb)->gso_segs;
2169
2170 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2171 u16 j = 1;
2172
2173 if (num_frags < (I40E_MAX_BUFFER_TXD))
2174 goto linearize_chk_done;
2175 /* try the simple math, if we have too many frags per segment */
2176 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2177 I40E_MAX_BUFFER_TXD) {
2178 linearize = true;
2179 goto linearize_chk_done;
2180 }
2181 frag = &skb_shinfo(skb)->frags[0];
2182 size = hdr_len;
2183 /* we might still have more fragments per segment */
2184 do {
2185 size += skb_frag_size(frag);
2186 frag++; j++;
2187 if (j == I40E_MAX_BUFFER_TXD) {
2188 if (size < skb_shinfo(skb)->gso_size) {
2189 linearize = true;
2190 break;
2191 }
2192 j = 1;
2193 size -= skb_shinfo(skb)->gso_size;
2194 if (size)
2195 j++;
2196 size += hdr_len;
2197 }
2198 num_frags--;
2199 } while (num_frags);
2200 } else {
2201 if (num_frags >= I40E_MAX_BUFFER_TXD)
2202 linearize = true;
2203 }
2204
2205linearize_chk_done:
2206 return linearize;
2207}
2208
2209/**
2143 * i40e_tx_map - Build the Tx descriptor 2210 * i40e_tx_map - Build the Tx descriptor
2144 * @tx_ring: ring to send buffer on 2211 * @tx_ring: ring to send buffer on
2145 * @skb: send buffer 2212 * @skb: send buffer
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2396 if (tsyn) 2463 if (tsyn)
2397 tx_flags |= I40E_TX_FLAGS_TSYN; 2464 tx_flags |= I40E_TX_FLAGS_TSYN;
2398 2465
2466 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
2467 if (skb_linearize(skb))
2468 goto out_drop;
2469
2399 skb_tx_timestamp(skb); 2470 skb_tx_timestamp(skb);
2400 2471
2401 /* always enable CRC insertion offload */ 2472 /* always enable CRC insertion offload */
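
Two things stand out in the i40e_txrx.c hunks above: i40e_get_tx_pending() now derives the outstanding-descriptor count from the head write-back value and the tail register rather than from next_to_clean/next_to_use, and the new i40e_chk_linearize() enforces the hardware's 8-buffers-per-packet limit before mapping (its math is illustrated after the i40evf copy of the same function below). The head/tail arithmetic has to cope with the tail wrapping past the end of the ring; a small stand-alone check of that math (ring size and indices are invented):

#include <assert.h>
#include <stdint.h>

static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	assert(tx_pending(10, 15, 512) == 5);	/* no wrap */
	assert(tx_pending(510, 3, 512) == 5);	/* tail wrapped past the end */
	assert(tx_pending(7, 7, 512) == 0);	/* ring drained */
	return 0;
}
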
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b00231d2f1..dff0baeb1ecc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
112 112
113#define i40e_rx_desc i40e_32byte_rx_desc 113#define i40e_rx_desc i40e_32byte_rx_desc
114 114
115#define I40E_MAX_BUFFER_TXD 8
115#define I40E_MIN_TX_LEN 17 116#define I40E_MIN_TX_LEN 17
116#define I40E_MAX_DATA_PER_TXD 8192 117#define I40E_MAX_DATA_PER_TXD 8192
117 118
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..708891571dae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
126} 126}
127 127
128/** 128/**
129 * i40e_get_head - Retrieve head from head writeback
130 * @tx_ring: tx ring to fetch head of
131 *
132 * Returns value of Tx ring head based on value stored
133 * in head write-back location
134 **/
135static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
136{
137 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
138
139 return le32_to_cpu(*(volatile __le32 *)head);
140}
141
142/**
129 * i40e_get_tx_pending - how many tx descriptors not processed 143 * i40e_get_tx_pending - how many tx descriptors not processed
130 * @tx_ring: the ring of descriptors 144 * @tx_ring: the ring of descriptors
131 * 145 *
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
134 **/ 148 **/
135static u32 i40e_get_tx_pending(struct i40e_ring *ring) 149static u32 i40e_get_tx_pending(struct i40e_ring *ring)
136{ 150{
137 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 151 u32 head, tail;
138 ? ring->next_to_use 152
139 : ring->next_to_use + ring->count); 153 head = i40e_get_head(ring);
140 return ntu - ring->next_to_clean; 154 tail = readl(ring->tail);
155
156 if (head != tail)
157 return (head < tail) ?
158 tail - head : (tail + ring->count - head);
159
160 return 0;
141} 161}
142 162
143/** 163/**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
146 **/ 166 **/
147static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 167static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
148{ 168{
169 u32 tx_done = tx_ring->stats.packets;
170 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
149 u32 tx_pending = i40e_get_tx_pending(tx_ring); 171 u32 tx_pending = i40e_get_tx_pending(tx_ring);
150 bool ret = false; 172 bool ret = false;
151 173
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
162 * run the check_tx_hang logic with a transmit completion 184 * run the check_tx_hang logic with a transmit completion
163 * pending but without time to complete it yet. 185 * pending but without time to complete it yet.
164 */ 186 */
165 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 187 if ((tx_done_old == tx_done) && tx_pending) {
166 (tx_pending >= I40E_MIN_DESC_PENDING)) {
167 /* make sure it is true for two checks in a row */ 188 /* make sure it is true for two checks in a row */
168 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 189 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
169 &tx_ring->state); 190 &tx_ring->state);
170 } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || 191 } else if (tx_done_old == tx_done &&
171 !(tx_pending < I40E_MIN_DESC_PENDING) || 192 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
172 !(tx_pending > 0)) {
173 /* update completed stats and disarm the hang check */ 193 /* update completed stats and disarm the hang check */
174 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 194 tx_ring->tx_stats.tx_done_old = tx_done;
175 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 195 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
176 } 196 }
177 197
178 return ret; 198 return ret;
179} 199}
180 200
181/**
182 * i40e_get_head - Retrieve head from head writeback
183 * @tx_ring: tx ring to fetch head of
184 *
185 * Returns value of Tx ring head based on value stored
186 * in head write-back location
187 **/
188static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
189{
190 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
191
192 return le32_to_cpu(*(volatile __le32 *)head);
193}
194
195#define WB_STRIDE 0x3 201#define WB_STRIDE 0x3
196 202
197/** 203/**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1206 if (err < 0) 1212 if (err < 0)
1207 return err; 1213 return err;
1208 1214
1209 if (protocol == htons(ETH_P_IP)) { 1215 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1210 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1216 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
1217
1218 if (iph->version == 4) {
1211 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1219 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1212 iph->tot_len = 0; 1220 iph->tot_len = 0;
1213 iph->check = 0; 1221 iph->check = 0;
1214 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1222 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1215 0, IPPROTO_TCP, 0); 1223 0, IPPROTO_TCP, 0);
1216 } else if (skb_is_gso_v6(skb)) { 1224 } else if (ipv6h->version == 6) {
1217
1218 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1219 : ipv6_hdr(skb);
1220 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1225 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1221 ipv6h->payload_len = 0; 1226 ipv6h->payload_len = 0;
1222 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 1227 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1274 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1279 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1275 } 1280 }
1276 } else if (tx_flags & I40E_TX_FLAGS_IPV6) { 1281 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1277 if (tx_flags & I40E_TX_FLAGS_TSO) { 1282 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1278 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1283 if (tx_flags & I40E_TX_FLAGS_TSO)
1279 ip_hdr(skb)->check = 0; 1284 ip_hdr(skb)->check = 0;
1280 } else {
1281 *cd_tunneling |=
1282 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1283 }
1284 } 1285 }
1285 1286
1286 /* Now set the ctx descriptor fields */ 1287 /* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1290 ((skb_inner_network_offset(skb) - 1291 ((skb_inner_network_offset(skb) -
1291 skb_transport_offset(skb)) >> 1) << 1292 skb_transport_offset(skb)) >> 1) <<
1292 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 1293 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1294 if (this_ip_hdr->version == 6) {
1295 tx_flags &= ~I40E_TX_FLAGS_IPV4;
1296 tx_flags |= I40E_TX_FLAGS_IPV6;
1297 }
1298
1293 1299
1294 } else { 1300 } else {
1295 network_hdr_len = skb_network_header_len(skb); 1301 network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1380 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 1386 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1381} 1387}
1382 1388
1389 /**
1390 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
1391 * @skb: send buffer
1392 * @tx_flags: collected send information
1393 * @hdr_len: size of the packet header
1394 *
1395 * Note: Our HW can't scatter-gather more than 8 fragments to build
1396 * a packet on the wire and so we need to figure out the cases where we
1397 * need to linearize the skb.
1398 **/
1399static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1400 const u8 hdr_len)
1401{
1402 struct skb_frag_struct *frag;
1403 bool linearize = false;
1404 unsigned int size = 0;
1405 u16 num_frags;
1406 u16 gso_segs;
1407
1408 num_frags = skb_shinfo(skb)->nr_frags;
1409 gso_segs = skb_shinfo(skb)->gso_segs;
1410
1411 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
1412 u16 j = 1;
1413
1414 if (num_frags < (I40E_MAX_BUFFER_TXD))
1415 goto linearize_chk_done;
1416 /* try the simple math, if we have too many frags per segment */
1417 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
1418 I40E_MAX_BUFFER_TXD) {
1419 linearize = true;
1420 goto linearize_chk_done;
1421 }
1422 frag = &skb_shinfo(skb)->frags[0];
1423 size = hdr_len;
1424 /* we might still have more fragments per segment */
1425 do {
1426 size += skb_frag_size(frag);
1427 frag++; j++;
1428 if (j == I40E_MAX_BUFFER_TXD) {
1429 if (size < skb_shinfo(skb)->gso_size) {
1430 linearize = true;
1431 break;
1432 }
1433 j = 1;
1434 size -= skb_shinfo(skb)->gso_size;
1435 if (size)
1436 j++;
1437 size += hdr_len;
1438 }
1439 num_frags--;
1440 } while (num_frags);
1441 } else {
1442 if (num_frags >= I40E_MAX_BUFFER_TXD)
1443 linearize = true;
1444 }
1445
1446linearize_chk_done:
1447 return linearize;
1448}
1449
1383/** 1450/**
1384 * i40e_tx_map - Build the Tx descriptor 1451 * i40e_tx_map - Build the Tx descriptor
1385 * @tx_ring: ring to send buffer on 1452 * @tx_ring: ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1654 else if (tso) 1721 else if (tso)
1655 tx_flags |= I40E_TX_FLAGS_TSO; 1722 tx_flags |= I40E_TX_FLAGS_TSO;
1656 1723
1724 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
1725 if (skb_linearize(skb))
1726 goto out_drop;
1727
1657 skb_tx_timestamp(skb); 1728 skb_tx_timestamp(skb);
1658 1729
1659 /* always enable CRC insertion offload */ 1730 /* always enable CRC insertion offload */
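
The i40evf copy of i40e_chk_linearize() mirrors the PF version above: the hardware can chain at most 8 data buffers per packet on the wire, so a TSO skb whose fragments cannot be split 8-per-segment is linearized before transmit. The first, cheap test is just an average-frags-per-segment bound; a worked example of that check (MAX_TXD mirrors I40E_MAX_BUFFER_TXD, the numbers are invented):

#include <stdio.h>

#define MAX_TXD 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int needs_linearize_quick(unsigned int num_frags, unsigned int gso_segs)
{
	return DIV_ROUND_UP(num_frags + gso_segs, gso_segs) > MAX_TXD;
}

int main(void)
{
	/* 17 frags over 2 segments -> up to 10 buffers per segment
	 * including the header, more than the HW's 8 -> linearize */
	printf("%d\n", needs_linearize_quick(17, 2));	/* 1 */
	/* 10 frags over 2 segments -> 6 per segment, fits */
	printf("%d\n", needs_linearize_quick(10, 2));	/* 0 */
	return 0;
}

When the quick bound passes, the driver still walks the fragment list per segment, since uneven fragment sizes can overflow a single segment even when the average fits.
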
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903b2b6d..c950a038237c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
112 112
113#define i40e_rx_desc i40e_32byte_rx_desc 113#define i40e_rx_desc i40e_32byte_rx_desc
114 114
115#define I40E_MAX_BUFFER_TXD 8
115#define I40E_MIN_TX_LEN 17 116#define I40E_MIN_TX_LEN 17
116#define I40E_MAX_DATA_PER_TXD 8192 117#define I40E_MAX_DATA_PER_TXD 8192
117 118
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2d8ee66138e8..a61009f4b2df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
81{ 81{
82 u32 loopback_ok = 0; 82 u32 loopback_ok = 0;
83 int i; 83 int i;
84 84 bool gro_enabled;
85 85
86 priv->loopback_ok = 0; 86 priv->loopback_ok = 0;
87 priv->validate_loopback = 1; 87 priv->validate_loopback = 1;
88 gro_enabled = priv->dev->features & NETIF_F_GRO;
88 89
89 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 90 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
91 priv->dev->features &= ~NETIF_F_GRO;
90 92
91 /* xmit */ 93 /* xmit */
92 if (mlx4_en_test_loopback_xmit(priv)) { 94 if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
108mlx4_en_test_loopback_exit: 110mlx4_en_test_loopback_exit:
109 111
110 priv->validate_loopback = 0; 112 priv->validate_loopback = 0;
113
114 if (gro_enabled)
115 priv->dev->features |= NETIF_F_GRO;
116
111 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 117 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
112 return !loopback_ok; 118 return !loopback_ok;
113} 119}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553bd905..eda29dbbfcd2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
412 412
413EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 413EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
414 414
415#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
416int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 415int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
417 enum mlx4_update_qp_attr attr, 416 enum mlx4_update_qp_attr attr,
418 struct mlx4_update_qp_params *params) 417 struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d26cd4a..d97ca88c55b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
713 struct mlx4_vport_oper_state *vp_oper; 713 struct mlx4_vport_oper_state *vp_oper;
714 struct mlx4_priv *priv; 714 struct mlx4_priv *priv;
715 u32 qp_type; 715 u32 qp_type;
716 int port; 716 int port, err = 0;
717 717
718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; 718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
719 priv = mlx4_priv(dev); 719 priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
738 } else { 738 } else {
739 struct mlx4_update_qp_params params = {.flags = 0}; 739 struct mlx4_update_qp_params params = {.flags = 0};
740 740
741 mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); 741 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
742 if (err)
743 goto out;
742 } 744 }
743 } 745 }
744 746
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
773 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; 775 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
774 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; 776 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
775 } 777 }
776 return 0; 778out:
779 return err;
777} 780}
778 781
779static int mpt_mask(struct mlx4_dev *dev) 782static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d25547..57a6e6cd74fc 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
1239 if (mac->phydev) 1239 if (mac->phydev)
1240 phy_start(mac->phydev); 1240 phy_start(mac->phydev);
1241 1241
1242 init_timer(&mac->tx->clean_timer); 1242 setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
1243 mac->tx->clean_timer.function = pasemi_mac_tx_timer; 1243 (unsigned long)mac->tx);
1244 mac->tx->clean_timer.data = (unsigned long)mac->tx; 1244 mod_timer(&mac->tx->clean_timer, jiffies + HZ);
1245 mac->tx->clean_timer.expires = jiffies+HZ;
1246 add_timer(&mac->tx->clean_timer);
1247 1245
1248 return 0; 1246 return 0;
1249 1247
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae94692..0a5e204a0179 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
354 354
355} __attribute__ ((aligned(64))); 355} __attribute__ ((aligned(64)));
356 356
357/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ 357/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
358struct rcv_desc { 358struct rcv_desc {
359 __le16 reference_handle; 359 __le16 reference_handle;
360 __le16 reserved; 360 __le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
499#define NETXEN_IMAGE_START 0x43000 /* compressed image */ 499#define NETXEN_IMAGE_START 0x43000 /* compressed image */
500#define NETXEN_SECONDARY_START 0x200000 /* backup images */ 500#define NETXEN_SECONDARY_START 0x200000 /* backup images */
501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ 501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
502#define NETXEN_USER_START 0x3E8000 /* Firmare info */ 502#define NETXEN_USER_START 0x3E8000 /* Firmware info */
503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ 503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ 504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
505 505
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa4317611fd6..f221126a5c4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
314#define QLCNIC_BRDCFG_START 0x4000 /* board config */ 314#define QLCNIC_BRDCFG_START 0x4000 /* board config */
315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ 315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ 316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
317#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ 317#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
318 318
319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) 319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) 320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020af2193..c70ab40d8698 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2561 int rc = -EINVAL; 2561 int rc = -EINVAL;
2562 2562
2563 if (!rtl_fw_format_ok(tp, rtl_fw)) { 2563 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2564 netif_err(tp, ifup, dev, "invalid firwmare\n"); 2564 netif_err(tp, ifup, dev, "invalid firmware\n");
2565 goto out; 2565 goto out;
2566 } 2566 }
2567 2567
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
5067 RTL_W8(ChipCmd, CmdReset); 5067 RTL_W8(ChipCmd, CmdReset);
5068 5068
5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); 5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
5070
5071 netdev_reset_queue(tp->dev);
5072} 5070}
5073 5071
5074static void rtl_request_uncached_firmware(struct rtl8169_private *tp) 5072static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7049 u32 status, len; 7047 u32 status, len;
7050 u32 opts[2]; 7048 u32 opts[2];
7051 int frags; 7049 int frags;
7052 bool stop_queue;
7053 7050
7054 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { 7051 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
7055 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 7052 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7090 7087
7091 txd->opts2 = cpu_to_le32(opts[1]); 7088 txd->opts2 = cpu_to_le32(opts[1]);
7092 7089
7093 netdev_sent_queue(dev, skb->len);
7094
7095 skb_tx_timestamp(skb); 7090 skb_tx_timestamp(skb);
7096 7091
7097 /* Force memory writes to complete before releasing descriptor */ 7092 /* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7106 7101
7107 tp->cur_tx += frags + 1; 7102 tp->cur_tx += frags + 1;
7108 7103
7109 stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); 7104 RTL_W8(TxPoll, NPQ);
7110 7105
7111 if (!skb->xmit_more || stop_queue || 7106 mmiowb();
7112 netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
7113 RTL_W8(TxPoll, NPQ);
7114
7115 mmiowb();
7116 }
7117 7107
7118 if (stop_queue) { 7108 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
7119 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 7109 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
7120 * not miss a ring update when it notices a stopped queue. 7110 * not miss a ring update when it notices a stopped queue.
7121 */ 7111 */
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
7198static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) 7188static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7199{ 7189{
7200 unsigned int dirty_tx, tx_left; 7190 unsigned int dirty_tx, tx_left;
7201 unsigned int bytes_compl = 0, pkts_compl = 0;
7202 7191
7203 dirty_tx = tp->dirty_tx; 7192 dirty_tx = tp->dirty_tx;
7204 smp_rmb(); 7193 smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7222 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, 7211 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
7223 tp->TxDescArray + entry); 7212 tp->TxDescArray + entry);
7224 if (status & LastFrag) { 7213 if (status & LastFrag) {
7225 pkts_compl++; 7214 u64_stats_update_begin(&tp->tx_stats.syncp);
7226 bytes_compl += tx_skb->skb->len; 7215 tp->tx_stats.packets++;
7216 tp->tx_stats.bytes += tx_skb->skb->len;
7217 u64_stats_update_end(&tp->tx_stats.syncp);
7227 dev_kfree_skb_any(tx_skb->skb); 7218 dev_kfree_skb_any(tx_skb->skb);
7228 tx_skb->skb = NULL; 7219 tx_skb->skb = NULL;
7229 } 7220 }
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7232 } 7223 }
7233 7224
7234 if (tp->dirty_tx != dirty_tx) { 7225 if (tp->dirty_tx != dirty_tx) {
7235 netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
7236
7237 u64_stats_update_begin(&tp->tx_stats.syncp);
7238 tp->tx_stats.packets += pkts_compl;
7239 tp->tx_stats.bytes += bytes_compl;
7240 u64_stats_update_end(&tp->tx_stats.syncp);
7241
7242 tp->dirty_tx = dirty_tx; 7226 tp->dirty_tx = dirty_tx;
7243 /* Sync with rtl8169_start_xmit: 7227 /* Sync with rtl8169_start_xmit:
7244 * - publish dirty_tx ring index (write barrier) 7228 * - publish dirty_tx ring index (write barrier)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd263997..736d5d1624a1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
508 .tpauser = 1, 508 .tpauser = 1,
509 .hw_swap = 1, 509 .hw_swap = 1,
510 .rmiimode = 1, 510 .rmiimode = 1,
511 .shift_rd0 = 1,
512}; 511};
513 512
514static void sh_eth_set_rate_sh7724(struct net_device *ndev) 513static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
1392 msleep(2); /* max frame time at 10 Mbps < 1250 us */ 1391 msleep(2); /* max frame time at 10 Mbps < 1250 us */
1393 sh_eth_get_stats(ndev); 1392 sh_eth_get_stats(ndev);
1394 sh_eth_reset(ndev); 1393 sh_eth_reset(ndev);
1394
1395 /* Set MAC address again */
1396 update_mac_address(ndev);
1395} 1397}
1396 1398
1397/* free Tx skb function */ 1399/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
1407 txdesc = &mdp->tx_ring[entry]; 1409 txdesc = &mdp->tx_ring[entry];
1408 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1410 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1409 break; 1411 break;
1412 /* TACT bit must be checked before all the following reads */
1413 rmb();
1410 /* Free the original skb. */ 1414 /* Free the original skb. */
1411 if (mdp->tx_skbuff[entry]) { 1415 if (mdp->tx_skbuff[entry]) {
1412 dma_unmap_single(&ndev->dev, txdesc->addr, 1416 dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1444 limit = boguscnt; 1448 limit = boguscnt;
1445 rxdesc = &mdp->rx_ring[entry]; 1449 rxdesc = &mdp->rx_ring[entry];
1446 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1450 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1451 /* RACT bit must be checked before all the following reads */
1452 rmb();
1447 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1453 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1448 pkt_len = rxdesc->frame_length; 1454 pkt_len = rxdesc->frame_length;
1449 1455
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1455 1461
1456 /* In case of almost all GETHER/ETHERs, the Receive Frame State 1462 /* In case of almost all GETHER/ETHERs, the Receive Frame State
1457 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1463 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1458 * bit 0. However, in case of the R8A7740, R8A779x, and 1464 * bit 0. However, in case of the R8A7740 and R7S72100
1459 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the 1465 * the RFS bits are from bit 25 to bit 16. So, the
1460 * driver needs right shifting by 16. 1466 * driver needs right shifting by 16.
1461 */ 1467 */
1462 if (mdp->cd->shift_rd0) 1468 if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1523 skb_checksum_none_assert(skb); 1529 skb_checksum_none_assert(skb);
1524 rxdesc->addr = dma_addr; 1530 rxdesc->addr = dma_addr;
1525 } 1531 }
1532 wmb(); /* RACT bit must be set after all the above writes */
1526 if (entry >= mdp->num_rx_ring - 1) 1533 if (entry >= mdp->num_rx_ring - 1)
1527 rxdesc->status |= 1534 rxdesc->status |=
1528 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1535 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1535 /* If we don't need to check status, don't. -KDU */ 1542 /* If we don't need to check status, don't. -KDU */
1536 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1543 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1537 /* fix the values for the next receiving if RDE is set */ 1544 /* fix the values for the next receiving if RDE is set */
1538 if (intr_status & EESR_RDE) { 1545 if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
1539 u32 count = (sh_eth_read(ndev, RDFAR) - 1546 u32 count = (sh_eth_read(ndev, RDFAR) -
1540 sh_eth_read(ndev, RDLAR)) >> 4; 1547 sh_eth_read(ndev, RDLAR)) >> 4;
1541 1548
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2174 } 2181 }
2175 spin_unlock_irqrestore(&mdp->lock, flags); 2182 spin_unlock_irqrestore(&mdp->lock, flags);
2176 2183
2177 if (skb_padto(skb, ETH_ZLEN)) 2184 if (skb_put_padto(skb, ETH_ZLEN))
2178 return NETDEV_TX_OK; 2185 return NETDEV_TX_OK;
2179 2186
2180 entry = mdp->cur_tx % mdp->num_tx_ring; 2187 entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2192 } 2199 }
2193 txdesc->buffer_length = skb->len; 2200 txdesc->buffer_length = skb->len;
2194 2201
2202 wmb(); /* TACT bit must be set after all the above writes */
2195 if (entry >= mdp->num_tx_ring - 1) 2203 if (entry >= mdp->num_tx_ring - 1)
2196 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2204 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2197 else 2205 else
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34389b6aa67c..9fb6948e14c6 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1257 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); 1257 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1258 1258
1259 if (enable) 1259 if (enable)
1260 val |= 1 << rocker_port->lport; 1260 val |= 1ULL << rocker_port->lport;
1261 else 1261 else
1262 val &= ~(1 << rocker_port->lport); 1262 val &= ~(1ULL << rocker_port->lport);
1263 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); 1263 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1264} 1264}
1265 1265
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
4201 4201
4202 alloc_size = sizeof(struct rocker_port *) * rocker->port_count; 4202 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4203 rocker->ports = kmalloc(alloc_size, GFP_KERNEL); 4203 rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
4204 if (!rocker->ports)
4205 return -ENOMEM;
4204 for (i = 0; i < rocker->port_count; i++) { 4206 for (i = 0; i < rocker->port_count; i++) {
4205 err = rocker_probe_port(rocker, i); 4207 err = rocker_probe_port(rocker, i);
4206 if (err) 4208 if (err)
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127ab352..3449893aea8d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
1070 smc->packets_waiting = 0; 1070 smc->packets_waiting = 0;
1071 1071
1072 smc_reset(dev); 1072 smc_reset(dev);
1073 init_timer(&smc->media); 1073 setup_timer(&smc->media, media_check, (u_long)dev);
1074 smc->media.function = media_check; 1074 mod_timer(&smc->media, jiffies + HZ);
1075 smc->media.data = (u_long) dev;
1076 smc->media.expires = jiffies + HZ;
1077 add_timer(&smc->media);
1078 1075
1079 return 0; 1076 return 0;
1080} /* smc_open */ 1077} /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f95fe09..5d093dc0f5f5 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,11 @@ static const char version[] =
91 91
92#include "smc91x.h" 92#include "smc91x.h"
93 93
94#if defined(CONFIG_ASSABET_NEPONSET)
95#include <mach/assabet.h>
96#include <mach/neponset.h>
97#endif
98
94#ifndef SMC_NOWAIT 99#ifndef SMC_NOWAIT
95# define SMC_NOWAIT 0 100# define SMC_NOWAIT 0
96#endif 101#endif
@@ -2355,8 +2360,9 @@ static int smc_drv_probe(struct platform_device *pdev)
2355 ret = smc_request_attrib(pdev, ndev); 2360 ret = smc_request_attrib(pdev, ndev);
2356 if (ret) 2361 if (ret)
2357 goto out_release_io; 2362 goto out_release_io;
2358#if defined(CONFIG_SA1100_ASSABET) 2363#if defined(CONFIG_ASSABET_NEPONSET)
2359 neponset_ncr_set(NCR_ENET_OSC_EN); 2364 if (machine_is_assabet() && machine_has_neponset())
2365 neponset_ncr_set(NCR_ENET_OSC_EN);
2360#endif 2366#endif
2361 platform_set_drvdata(pdev, ndev); 2367 platform_set_drvdata(pdev, ndev);
2362 ret = smc_enable_device(pdev); 2368 ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf5f677..3a18501d1068 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
39 * Define your architecture specific bus configuration parameters here. 39 * Define your architecture specific bus configuration parameters here.
40 */ 40 */
41 41
42#if defined(CONFIG_ARCH_LUBBOCK) ||\ 42#if defined(CONFIG_ARM)
43 defined(CONFIG_MACH_MAINSTONE) ||\
44 defined(CONFIG_MACH_ZYLONITE) ||\
45 defined(CONFIG_MACH_LITTLETON) ||\
46 defined(CONFIG_MACH_ZYLONITE2) ||\
47 defined(CONFIG_ARCH_VIPER) ||\
48 defined(CONFIG_MACH_STARGATE2) ||\
49 defined(CONFIG_ARCH_VERSATILE)
50 43
51#include <asm/mach-types.h> 44#include <asm/mach-types.h>
52 45
@@ -74,95 +67,8 @@
74/* We actually can't write halfwords properly if not word aligned */ 67/* We actually can't write halfwords properly if not word aligned */
75static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) 68static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
76{ 69{
77 if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { 70 if ((machine_is_mainstone() || machine_is_stargate2() ||
78 unsigned int v = val << 16; 71 machine_is_pxa_idp()) && reg & 2) {
79 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
80 writel(v, ioaddr + (reg & ~2));
81 } else {
82 writew(val, ioaddr + reg);
83 }
84}
85
86#elif defined(CONFIG_SA1100_PLEB)
87/* We can only do 16-bit reads and writes in the static memory space. */
88#define SMC_CAN_USE_8BIT 1
89#define SMC_CAN_USE_16BIT 1
90#define SMC_CAN_USE_32BIT 0
91#define SMC_IO_SHIFT 0
92#define SMC_NOWAIT 1
93
94#define SMC_inb(a, r) readb((a) + (r))
95#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
96#define SMC_inw(a, r) readw((a) + (r))
97#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
98#define SMC_outb(v, a, r) writeb(v, (a) + (r))
99#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
100#define SMC_outw(v, a, r) writew(v, (a) + (r))
101#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
102
103#define SMC_IRQ_FLAGS (-1)
104
105#elif defined(CONFIG_SA1100_ASSABET)
106
107#include <mach/neponset.h>
108
109/* We can only do 8-bit reads and writes in the static memory space. */
110#define SMC_CAN_USE_8BIT 1
111#define SMC_CAN_USE_16BIT 0
112#define SMC_CAN_USE_32BIT 0
113#define SMC_NOWAIT 1
114
115/* The first two address lines aren't connected... */
116#define SMC_IO_SHIFT 2
117
118#define SMC_inb(a, r) readb((a) + (r))
119#define SMC_outb(v, a, r) writeb(v, (a) + (r))
120#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
121#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
122#define SMC_IRQ_FLAGS (-1) /* from resource */
123
124#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
125 defined(CONFIG_MACH_NOMADIK_8815NHK)
126
127#define SMC_CAN_USE_8BIT 0
128#define SMC_CAN_USE_16BIT 1
129#define SMC_CAN_USE_32BIT 0
130#define SMC_IO_SHIFT 0
131#define SMC_NOWAIT 1
132
133#define SMC_inw(a, r) readw((a) + (r))
134#define SMC_outw(v, a, r) writew(v, (a) + (r))
135#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
136#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
137
138#elif defined(CONFIG_ARCH_INNOKOM) || \
139 defined(CONFIG_ARCH_PXA_IDP) || \
140 defined(CONFIG_ARCH_RAMSES) || \
141 defined(CONFIG_ARCH_PCM027)
142
143#define SMC_CAN_USE_8BIT 1
144#define SMC_CAN_USE_16BIT 1
145#define SMC_CAN_USE_32BIT 1
146#define SMC_IO_SHIFT 0
147#define SMC_NOWAIT 1
148#define SMC_USE_PXA_DMA 1
149
150#define SMC_inb(a, r) readb((a) + (r))
151#define SMC_inw(a, r) readw((a) + (r))
152#define SMC_inl(a, r) readl((a) + (r))
153#define SMC_outb(v, a, r) writeb(v, (a) + (r))
154#define SMC_outl(v, a, r) writel(v, (a) + (r))
155#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
156#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
157#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
158#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
159#define SMC_IRQ_FLAGS (-1) /* from resource */
160
161/* We actually can't write halfwords properly if not word aligned */
162static inline void
163SMC_outw(u16 val, void __iomem *ioaddr, int reg)
164{
165 if (reg & 2) {
166 unsigned int v = val << 16; 72 unsigned int v = val << 16;
167 v |= readl(ioaddr + (reg & ~2)) & 0xffff; 73 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
168 writel(v, ioaddr + (reg & ~2)); 74 writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
237#define RPC_LSA_DEFAULT RPC_LED_100_10 143#define RPC_LSA_DEFAULT RPC_LED_100_10
238#define RPC_LSB_DEFAULT RPC_LED_TX_RX 144#define RPC_LSB_DEFAULT RPC_LED_TX_RX
239 145
240#elif defined(CONFIG_ARCH_MSM)
241
242#define SMC_CAN_USE_8BIT 0
243#define SMC_CAN_USE_16BIT 1
244#define SMC_CAN_USE_32BIT 0
245#define SMC_NOWAIT 1
246
247#define SMC_inw(a, r) readw((a) + (r))
248#define SMC_outw(v, a, r) writew(v, (a) + (r))
249#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
250#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
251
252#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
253
254#elif defined(CONFIG_COLDFIRE) 146#elif defined(CONFIG_COLDFIRE)
255 147
256#define SMC_CAN_USE_8BIT 0 148#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3838f1..a0ea84fe6519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
310 spin_lock_irqsave(&priv->lock, flags); 310 spin_lock_irqsave(&priv->lock, flags);
311 if (!priv->eee_active) { 311 if (!priv->eee_active) {
312 priv->eee_active = 1; 312 priv->eee_active = 1;
313 init_timer(&priv->eee_ctrl_timer); 313 setup_timer(&priv->eee_ctrl_timer,
314 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; 314 stmmac_eee_ctrl_timer,
315 priv->eee_ctrl_timer.data = (unsigned long)priv; 315 (unsigned long)priv);
316 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); 316 mod_timer(&priv->eee_ctrl_timer,
317 add_timer(&priv->eee_ctrl_timer); 317 STMMAC_LPI_T(eee_timer));
318 318
319 priv->hw->mac->set_eee_timer(priv->hw, 319 priv->hw->mac->set_eee_timer(priv->hw,
320 STMMAC_DEFAULT_LIT_LS, 320 STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index fb846ebba1d9..f9b42f11950f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
272 struct stmmac_priv *priv = NULL; 272 struct stmmac_priv *priv = NULL;
273 struct plat_stmmacenet_data *plat_dat = NULL; 273 struct plat_stmmacenet_data *plat_dat = NULL;
274 const char *mac = NULL; 274 const char *mac = NULL;
275 int irq, wol_irq, lpi_irq;
276
277 /* Get IRQ information early to have an ability to ask for deferred
278 * probe if needed before we went too far with resource allocation.
279 */
280 irq = platform_get_irq_byname(pdev, "macirq");
281 if (irq < 0) {
282 if (irq != -EPROBE_DEFER) {
283 dev_err(dev,
284 "MAC IRQ configuration information not found\n");
285 }
286 return irq;
287 }
288
289 /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
290 * The external wake up irq can be passed through the platform code
291 * named as "eth_wake_irq"
292 *
293 * In case the wake up interrupt is not passed from the platform
294 * so the driver will continue to use the mac irq (ndev->irq)
295 */
296 wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
297 if (wol_irq < 0) {
298 if (wol_irq == -EPROBE_DEFER)
299 return -EPROBE_DEFER;
300 wol_irq = irq;
301 }
302
303 lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
304 if (lpi_irq == -EPROBE_DEFER)
305 return -EPROBE_DEFER;
275 306
276 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 307 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
277 addr = devm_ioremap_resource(dev, res); 308 addr = devm_ioremap_resource(dev, res);
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
323 return PTR_ERR(priv); 354 return PTR_ERR(priv);
324 } 355 }
325 356
 357 /* Copy IRQ values to priv structure which is now available */
358 priv->dev->irq = irq;
359 priv->wol_irq = wol_irq;
360 priv->lpi_irq = lpi_irq;
361
326 /* Get MAC address if available (DT) */ 362 /* Get MAC address if available (DT) */
327 if (mac) 363 if (mac)
328 memcpy(priv->dev->dev_addr, mac, ETH_ALEN); 364 memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
329 365
330 /* Get the MAC information */
331 priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
332 if (priv->dev->irq < 0) {
333 if (priv->dev->irq != -EPROBE_DEFER) {
334 netdev_err(priv->dev,
335 "MAC IRQ configuration information not found\n");
336 }
337 return priv->dev->irq;
338 }
339
340 /*
341 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
342 * The external wake up irq can be passed through the platform code
343 * named as "eth_wake_irq"
344 *
345 * In case the wake up interrupt is not passed from the platform
346 * so the driver will continue to use the mac irq (ndev->irq)
347 */
348 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
349 if (priv->wol_irq < 0) {
350 if (priv->wol_irq == -EPROBE_DEFER)
351 return -EPROBE_DEFER;
352 priv->wol_irq = priv->dev->irq;
353 }
354
355 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
356 if (priv->lpi_irq == -EPROBE_DEFER)
357 return -EPROBE_DEFER;
358
359 platform_set_drvdata(pdev, priv->dev); 366 platform_set_drvdata(pdev, priv->dev);
360 367
361 pr_debug("STMMAC platform driver registration completed"); 368 pr_debug("STMMAC platform driver registration completed");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f903fb73..0c5842aeb807 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
6989 *flow_type = IP_USER_FLOW; 6989 *flow_type = IP_USER_FLOW;
6990 break; 6990 break;
6991 default: 6991 default:
6992 return 0; 6992 return -EINVAL;
6993 } 6993 }
6994 6994
6995 return 1; 6995 return 0;
6996} 6996}
6997 6997
6998static int niu_ethflow_to_class(int flow_type, u64 *class) 6998static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7199 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7199 TCAM_V4KEY0_CLASS_CODE_SHIFT;
7200 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7200 ret = niu_class_to_ethflow(class, &fsp->flow_type);
7201
7202 if (ret < 0) { 7201 if (ret < 0) {
7203 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", 7202 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7204 parent->index); 7203 parent->index);
7205 ret = -EINVAL;
7206 goto out; 7204 goto out;
7207 } 7205 }
7208 7206
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d2182e..a1bbaf6352ba 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1104 port_mask, ALE_VLAN, slave->port_vlan, 0); 1104 port_mask, ALE_VLAN, slave->port_vlan, 0);
1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr, 1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
1106 priv->host_port, ALE_VLAN, slave->port_vlan); 1106 priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
1107} 1107}
1108 1108
1109static void soft_reset_slave(struct cpsw_slave *slave) 1109static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
2466 return 0; 2466 return 0;
2467} 2467}
2468 2468
2469#ifdef CONFIG_PM_SLEEP
2469static int cpsw_suspend(struct device *dev) 2470static int cpsw_suspend(struct device *dev)
2470{ 2471{
2471 struct platform_device *pdev = to_platform_device(dev); 2472 struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
2518 } 2519 }
2519 return 0; 2520 return 0;
2520} 2521}
2522#endif
2521 2523
2522static const struct dev_pm_ops cpsw_pm_ops = { 2524static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2523 .suspend = cpsw_suspend,
2524 .resume = cpsw_resume,
2525};
2526 2525
2527static const struct of_device_id cpsw_of_mtable[] = { 2526static const struct of_device_id cpsw_of_mtable[] = {
2528 { .compatible = "ti,cpsw", }, 2527 { .compatible = "ti,cpsw", },
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b44b97e..c00084d689f3 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
423 return 0; 423 return 0;
424} 424}
425 425
426#ifdef CONFIG_PM_SLEEP
426static int davinci_mdio_suspend(struct device *dev) 427static int davinci_mdio_suspend(struct device *dev)
427{ 428{
428 struct davinci_mdio_data *data = dev_get_drvdata(dev); 429 struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
464 465
465 return 0; 466 return 0;
466} 467}
468#endif
467 469
468static const struct dev_pm_ops davinci_mdio_pm_ops = { 470static const struct dev_pm_ops davinci_mdio_pm_ops = {
469 .suspend_late = davinci_mdio_suspend, 471 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
470 .resume_early = davinci_mdio_resume,
471}; 472};
472 473
473#if IS_ENABLED(CONFIG_OF) 474#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f7e0f0f7c2e2..9e16a2819d48 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
938 int i; 938 int i;
939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; 939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
940 940
941 if (dev->flags & IFF_ALLMULTI) { 941 if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
942 for (i = 0; i < ETH_ALEN; i++) { 942 for (i = 0; i < ETH_ALEN; i++) {
943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); 943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); 944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index e40fdfccc9c1..27ecc5c4fa26 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
654 } /* else everything is zero */ 654 } /* else everything is zero */
655} 655}
656 656
657/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
658#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
659
657/* Get packet from user space buffer */ 660/* Get packet from user space buffer */
658static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, 661static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
659 struct iov_iter *from, int noblock) 662 struct iov_iter *from, int noblock)
660{ 663{
661 int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); 664 int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
662 struct sk_buff *skb; 665 struct sk_buff *skb;
663 struct macvlan_dev *vlan; 666 struct macvlan_dev *vlan;
664 unsigned long total_len = iov_iter_count(from); 667 unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
722 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); 725 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
723 } 726 }
724 727
725 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, 728 skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
726 linear, noblock, &err); 729 linear, noblock, &err);
727 if (!skb) 730 if (!skb)
728 goto err; 731 goto err;
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54c9010..32efbd48f326 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
92#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" 92#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
93#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" 93#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
94#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" 94#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
95#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
96#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
95 97
96#define XGBE_PHY_SPEEDS 3 98#define XGBE_PHY_SPEEDS 3
97#define XGBE_PHY_SPEED_1000 0 99#define XGBE_PHY_SPEED_1000 0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
177#define SPEED_10000_BLWC 0 179#define SPEED_10000_BLWC 0
178#define SPEED_10000_CDR 0x7 180#define SPEED_10000_CDR 0x7
179#define SPEED_10000_PLL 0x1 181#define SPEED_10000_PLL 0x1
180#define SPEED_10000_PQ 0x1e 182#define SPEED_10000_PQ 0x12
181#define SPEED_10000_RATE 0x0 183#define SPEED_10000_RATE 0x0
182#define SPEED_10000_TXAMP 0xa 184#define SPEED_10000_TXAMP 0xa
183#define SPEED_10000_WORD 0x7 185#define SPEED_10000_WORD 0x7
186#define SPEED_10000_DFE_TAP_CONFIG 0x1
187#define SPEED_10000_DFE_TAP_ENABLE 0x7f
184 188
185#define SPEED_2500_BLWC 1 189#define SPEED_2500_BLWC 1
186#define SPEED_2500_CDR 0x2 190#define SPEED_2500_CDR 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
189#define SPEED_2500_RATE 0x1 193#define SPEED_2500_RATE 0x1
190#define SPEED_2500_TXAMP 0xf 194#define SPEED_2500_TXAMP 0xf
191#define SPEED_2500_WORD 0x1 195#define SPEED_2500_WORD 0x1
196#define SPEED_2500_DFE_TAP_CONFIG 0x3
197#define SPEED_2500_DFE_TAP_ENABLE 0x0
192 198
193#define SPEED_1000_BLWC 1 199#define SPEED_1000_BLWC 1
194#define SPEED_1000_CDR 0x2 200#define SPEED_1000_CDR 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
197#define SPEED_1000_RATE 0x3 203#define SPEED_1000_RATE 0x3
198#define SPEED_1000_TXAMP 0xf 204#define SPEED_1000_TXAMP 0xf
199#define SPEED_1000_WORD 0x1 205#define SPEED_1000_WORD 0x1
206#define SPEED_1000_DFE_TAP_CONFIG 0x3
207#define SPEED_1000_DFE_TAP_ENABLE 0x0
200 208
201/* SerDes RxTx register offsets */ 209/* SerDes RxTx register offsets */
210#define RXTX_REG6 0x0018
202#define RXTX_REG20 0x0050 211#define RXTX_REG20 0x0050
212#define RXTX_REG22 0x0058
203#define RXTX_REG114 0x01c8 213#define RXTX_REG114 0x01c8
214#define RXTX_REG129 0x0204
204 215
205/* SerDes RxTx register entry bit positions and sizes */ 216/* SerDes RxTx register entry bit positions and sizes */
217#define RXTX_REG6_RESETB_RXD_INDEX 8
218#define RXTX_REG6_RESETB_RXD_WIDTH 1
206#define RXTX_REG20_BLWC_ENA_INDEX 2 219#define RXTX_REG20_BLWC_ENA_INDEX 2
207#define RXTX_REG20_BLWC_ENA_WIDTH 1 220#define RXTX_REG20_BLWC_ENA_WIDTH 1
208#define RXTX_REG114_PQ_REG_INDEX 9 221#define RXTX_REG114_PQ_REG_INDEX 9
209#define RXTX_REG114_PQ_REG_WIDTH 7 222#define RXTX_REG114_PQ_REG_WIDTH 7
223#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
224#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
210 225
211/* Bit setting and getting macros 226/* Bit setting and getting macros
212 * The get macro will extract the current bit field value from within 227 * The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
333 SPEED_10000_TXAMP, 348 SPEED_10000_TXAMP,
334}; 349};
335 350
351static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
352 SPEED_1000_DFE_TAP_CONFIG,
353 SPEED_2500_DFE_TAP_CONFIG,
354 SPEED_10000_DFE_TAP_CONFIG,
355};
356
357static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
358 SPEED_1000_DFE_TAP_ENABLE,
359 SPEED_2500_DFE_TAP_ENABLE,
360 SPEED_10000_DFE_TAP_ENABLE,
361};
362
336enum amd_xgbe_phy_an { 363enum amd_xgbe_phy_an {
337 AMD_XGBE_AN_READY = 0, 364 AMD_XGBE_AN_READY = 0,
338 AMD_XGBE_AN_PAGE_RECEIVED, 365 AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
393 u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; 420 u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
394 u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; 421 u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
395 u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; 422 u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
423 u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
424 u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
396 425
397 /* Auto-negotiation state machine support */ 426 /* Auto-negotiation state machine support */
398 struct mutex an_mutex; 427 struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
481 status = XSIR0_IOREAD(priv, SIR0_STATUS); 510 status = XSIR0_IOREAD(priv, SIR0_STATUS);
482 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && 511 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
483 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) 512 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
484 return; 513 goto rx_reset;
485 } 514 }
486 515
487 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", 516 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
488 status); 517 status);
518
519rx_reset:
520 /* Perform Rx reset for the DFE changes */
521 XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
522 XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
489} 523}
490 524
491static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) 525static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
534 priv->serdes_blwc[XGBE_PHY_SPEED_10000]); 568 priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
535 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 569 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
536 priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); 570 priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
571 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
572 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
573 XRXTX_IOWRITE(priv, RXTX_REG22,
574 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
537 575
538 amd_xgbe_phy_serdes_complete_ratechange(phydev); 576 amd_xgbe_phy_serdes_complete_ratechange(phydev);
539 577
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
586 priv->serdes_blwc[XGBE_PHY_SPEED_2500]); 624 priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
587 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 625 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
588 priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); 626 priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
627 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
628 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
629 XRXTX_IOWRITE(priv, RXTX_REG22,
630 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
589 631
590 amd_xgbe_phy_serdes_complete_ratechange(phydev); 632 amd_xgbe_phy_serdes_complete_ratechange(phydev);
591 633
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
638 priv->serdes_blwc[XGBE_PHY_SPEED_1000]); 680 priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
639 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 681 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
640 priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); 682 priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
683 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
684 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
685 XRXTX_IOWRITE(priv, RXTX_REG22,
686 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
641 687
642 amd_xgbe_phy_serdes_complete_ratechange(phydev); 688 amd_xgbe_phy_serdes_complete_ratechange(phydev);
643 689
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
1668 sizeof(priv->serdes_tx_amp)); 1714 sizeof(priv->serdes_tx_amp));
1669 } 1715 }
1670 1716
1717 if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
1718 ret = device_property_read_u32_array(phy_dev,
1719 XGBE_PHY_DFE_CFG_PROPERTY,
1720 priv->serdes_dfe_tap_cfg,
1721 XGBE_PHY_SPEEDS);
1722 if (ret) {
1723 dev_err(dev, "invalid %s property\n",
1724 XGBE_PHY_DFE_CFG_PROPERTY);
1725 goto err_sir1;
1726 }
1727 } else {
1728 memcpy(priv->serdes_dfe_tap_cfg,
1729 amd_xgbe_phy_serdes_dfe_tap_cfg,
1730 sizeof(priv->serdes_dfe_tap_cfg));
1731 }
1732
1733 if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
1734 ret = device_property_read_u32_array(phy_dev,
1735 XGBE_PHY_DFE_ENA_PROPERTY,
1736 priv->serdes_dfe_tap_ena,
1737 XGBE_PHY_SPEEDS);
1738 if (ret) {
1739 dev_err(dev, "invalid %s property\n",
1740 XGBE_PHY_DFE_ENA_PROPERTY);
1741 goto err_sir1;
1742 }
1743 } else {
1744 memcpy(priv->serdes_dfe_tap_ena,
1745 amd_xgbe_phy_serdes_dfe_tap_ena,
1746 sizeof(priv->serdes_dfe_tap_ena));
1747 }
1748
1671 phydev->priv = priv; 1749 phydev->priv = priv;
1672 1750
1673 if (!priv->adev || acpi_disabled) 1751 if (!priv->adev || acpi_disabled)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index cdcac6aa4260..52cd8db2c57d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
236} 236}
237 237
238/** 238/**
239 * phy_check_valid - check if there is a valid PHY setting which matches
240 * speed, duplex, and feature mask
241 * @speed: speed to match
242 * @duplex: duplex to match
243 * @features: A mask of the valid settings
244 *
245 * Description: Returns true if there is a valid setting, false otherwise.
246 */
247static inline bool phy_check_valid(int speed, int duplex, u32 features)
248{
249 unsigned int idx;
250
251 idx = phy_find_valid(phy_find_setting(speed, duplex), features);
252
253 return settings[idx].speed == speed && settings[idx].duplex == duplex &&
254 (settings[idx].setting & features);
255}
256
257/**
239 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex 258 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
240 * @phydev: the target phy_device struct 259 * @phydev: the target phy_device struct
241 * 260 *
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1045 int eee_lp, eee_cap, eee_adv; 1064 int eee_lp, eee_cap, eee_adv;
1046 u32 lp, cap, adv; 1065 u32 lp, cap, adv;
1047 int status; 1066 int status;
1048 unsigned int idx;
1049 1067
1050 /* Read phy status to properly get the right settings */ 1068 /* Read phy status to properly get the right settings */
1051 status = phy_read_status(phydev); 1069 status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1077 1095
1078 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); 1096 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1079 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); 1097 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1080 idx = phy_find_setting(phydev->speed, phydev->duplex); 1098 if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
1081 if (!(lp & adv & settings[idx].setting))
1082 goto eee_exit_err; 1099 goto eee_exit_err;
1083 1100
1084 if (clk_stop_enable) { 1101 if (clk_stop_enable) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 0e62274e884a..7d394846afc2 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
43 43
44static struct team_port *team_port_get_rcu(const struct net_device *dev) 44static struct team_port *team_port_get_rcu(const struct net_device *dev)
45{ 45{
46 struct team_port *port = rcu_dereference(dev->rx_handler_data); 46 return rcu_dereference(dev->rx_handler_data);
47
48 return team_port_exists(dev) ? port : NULL;
49} 47}
50 48
51static struct team_port *team_port_get_rtnl(const struct net_device *dev) 49static struct team_port *team_port_get_rtnl(const struct net_device *dev)
@@ -1732,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
1732 if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) 1730 if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1733 return -EADDRNOTAVAIL; 1731 return -EADDRNOTAVAIL;
1734 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1732 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1735 rcu_read_lock(); 1733 mutex_lock(&team->lock);
1736 list_for_each_entry_rcu(port, &team->port_list, list) 1734 list_for_each_entry(port, &team->port_list, list)
1737 if (team->ops.port_change_dev_addr) 1735 if (team->ops.port_change_dev_addr)
1738 team->ops.port_change_dev_addr(team, port); 1736 team->ops.port_change_dev_addr(team, port);
1739 rcu_read_unlock(); 1737 mutex_unlock(&team->lock);
1740 return 0; 1738 return 0;
1741} 1739}
1742 1740
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3bd9678315ad..7ba8d0885f12 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
161 * Linksys USB200M 161 * Linksys USB200M
162 * Netgear FA120 162 * Netgear FA120
163 * Sitecom LN-029 163 * Sitecom LN-029
164 * Sitecom LN-028
164 * Intellinet USB 2.0 Ethernet 165 * Intellinet USB 2.0 Ethernet
165 * ST Lab USB 2.0 Ethernet 166 * ST Lab USB 2.0 Ethernet
166 * TrendNet TU2-ET100 167 * TrendNet TU2-ET100
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792062a2..1173a24feda3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = {
979 USB_DEVICE (0x0df6, 0x0056), 979 USB_DEVICE (0x0df6, 0x0056),
980 .driver_info = (unsigned long) &ax88178_info, 980 .driver_info = (unsigned long) &ax88178_info,
981}, { 981}, {
982 // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
983 USB_DEVICE (0x0df6, 0x061c),
984 .driver_info = (unsigned long) &ax88178_info,
985}, {
982 // corega FEther USB2-TX 986 // corega FEther USB2-TX
983 USB_DEVICE (0x07aa, 0x0017), 987 USB_DEVICE (0x07aa, 0x0017),
984 .driver_info = (unsigned long) &ax8817x_info, 988 .driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9cdfb3fe9c15..778e91531fac 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
1594 } 1594 }
1595 cprev = cnow; 1595 cprev = cnow;
1596 } 1596 }
1597 current->state = TASK_RUNNING; 1597 __set_current_state(TASK_RUNNING);
1598 remove_wait_queue(&tiocmget->waitq, &wait); 1598 remove_wait_queue(&tiocmget->waitq, &wait);
1599 1599
1600 return ret; 1600 return ret;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 3d18bb0eee85..1bfe0fcaccf5 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
134}, { 134}, {
135 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ 135 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
136 .driver_info = (unsigned long) &prolific_info, 136 .driver_info = (unsigned long) &prolific_info,
137}, {
138 USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
139 * Host-to-Host Cable
140 */
141 .driver_info = (unsigned long) &prolific_info,
137}, 142},
138 143
139 { }, // END 144 { }, // END
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 83c39e2858bf..88d121d43c08 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
806 spin_lock_irqsave(&cosa->lock, flags); 806 spin_lock_irqsave(&cosa->lock, flags);
807 add_wait_queue(&chan->rxwaitq, &wait); 807 add_wait_queue(&chan->rxwaitq, &wait);
808 while (!chan->rx_status) { 808 while (!chan->rx_status) {
809 current->state = TASK_INTERRUPTIBLE; 809 set_current_state(TASK_INTERRUPTIBLE);
810 spin_unlock_irqrestore(&cosa->lock, flags); 810 spin_unlock_irqrestore(&cosa->lock, flags);
811 schedule(); 811 schedule();
812 spin_lock_irqsave(&cosa->lock, flags); 812 spin_lock_irqsave(&cosa->lock, flags);
813 if (signal_pending(current) && chan->rx_status == 0) { 813 if (signal_pending(current) && chan->rx_status == 0) {
814 chan->rx_status = 1; 814 chan->rx_status = 1;
815 remove_wait_queue(&chan->rxwaitq, &wait); 815 remove_wait_queue(&chan->rxwaitq, &wait);
816 current->state = TASK_RUNNING; 816 __set_current_state(TASK_RUNNING);
817 spin_unlock_irqrestore(&cosa->lock, flags); 817 spin_unlock_irqrestore(&cosa->lock, flags);
818 mutex_unlock(&chan->rlock); 818 mutex_unlock(&chan->rlock);
819 return -ERESTARTSYS; 819 return -ERESTARTSYS;
820 } 820 }
821 } 821 }
822 remove_wait_queue(&chan->rxwaitq, &wait); 822 remove_wait_queue(&chan->rxwaitq, &wait);
823 current->state = TASK_RUNNING; 823 __set_current_state(TASK_RUNNING);
824 kbuf = chan->rxdata; 824 kbuf = chan->rxdata;
825 count = chan->rxsize; 825 count = chan->rxsize;
826 spin_unlock_irqrestore(&cosa->lock, flags); 826 spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
890 spin_lock_irqsave(&cosa->lock, flags); 890 spin_lock_irqsave(&cosa->lock, flags);
891 add_wait_queue(&chan->txwaitq, &wait); 891 add_wait_queue(&chan->txwaitq, &wait);
892 while (!chan->tx_status) { 892 while (!chan->tx_status) {
893 current->state = TASK_INTERRUPTIBLE; 893 set_current_state(TASK_INTERRUPTIBLE);
894 spin_unlock_irqrestore(&cosa->lock, flags); 894 spin_unlock_irqrestore(&cosa->lock, flags);
895 schedule(); 895 schedule();
896 spin_lock_irqsave(&cosa->lock, flags); 896 spin_lock_irqsave(&cosa->lock, flags);
897 if (signal_pending(current) && chan->tx_status == 0) { 897 if (signal_pending(current) && chan->tx_status == 0) {
898 chan->tx_status = 1; 898 chan->tx_status = 1;
899 remove_wait_queue(&chan->txwaitq, &wait); 899 remove_wait_queue(&chan->txwaitq, &wait);
900 current->state = TASK_RUNNING; 900 __set_current_state(TASK_RUNNING);
901 chan->tx_status = 1; 901 chan->tx_status = 1;
902 spin_unlock_irqrestore(&cosa->lock, flags); 902 spin_unlock_irqrestore(&cosa->lock, flags);
903 up(&chan->wsem); 903 up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
905 } 905 }
906 } 906 }
907 remove_wait_queue(&chan->txwaitq, &wait); 907 remove_wait_queue(&chan->txwaitq, &wait);
908 current->state = TASK_RUNNING; 908 __set_current_state(TASK_RUNNING);
909 up(&chan->wsem); 909 up(&chan->wsem);
910 spin_unlock_irqrestore(&cosa->lock, flags); 910 spin_unlock_irqrestore(&cosa->lock, flags);
911 kfree(kbuf); 911 kfree(kbuf);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a4c6586a8d2..8908be6dbc48 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
946 goto nla_put_failure; 946 goto nla_put_failure;
947 947
948 genlmsg_end(skb, msg_head); 948 genlmsg_end(skb, msg_head);
949 genlmsg_unicast(&init_net, skb, dst_portid); 949 if (genlmsg_unicast(&init_net, skb, dst_portid))
950 goto err_free_txskb;
950 951
951 /* Enqueue the packet */ 952 /* Enqueue the packet */
952 skb_queue_tail(&data->pending, my_skb); 953 skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
955 return; 956 return;
956 957
957nla_put_failure: 958nla_put_failure:
959 nlmsg_free(skb);
960err_free_txskb:
958 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); 961 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
959 ieee80211_free_txskb(hw, my_skb); 962 ieee80211_free_txskb(hw, my_skb);
960 data->tx_failed++; 963 data->tx_failed++;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f38227afe099..3aa8648080c8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
340 unsigned int num_queues = vif->num_queues; 340 unsigned int num_queues = vif->num_queues;
341 int i; 341 int i;
342 unsigned int queue_index; 342 unsigned int queue_index;
343 struct xenvif_stats *vif_stats;
344 343
345 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { 344 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
346 unsigned long accum = 0; 345 unsigned long accum = 0;
347 for (queue_index = 0; queue_index < num_queues; ++queue_index) { 346 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
348 vif_stats = &vif->queues[queue_index].stats; 347 void *vif_stats = &vif->queues[queue_index].stats;
349 accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); 348 accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
350 } 349 }
351 data[i] = accum; 350 data[i] = accum;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..cab9f5257f57 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
655 unsigned long flags; 655 unsigned long flags;
656 656
657 do { 657 do {
658 int notify;
659
658 spin_lock_irqsave(&queue->response_lock, flags); 660 spin_lock_irqsave(&queue->response_lock, flags);
659 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); 661 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
662 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
660 spin_unlock_irqrestore(&queue->response_lock, flags); 663 spin_unlock_irqrestore(&queue->response_lock, flags);
664 if (notify)
665 notify_remote_via_irq(queue->tx_irq);
666
661 if (cons == end) 667 if (cons == end)
662 break; 668 break;
663 txp = RING_GET_REQUEST(&queue->tx, cons++); 669 txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1343,7 +1349,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
1343{ 1349{
1344 unsigned int offset = skb_headlen(skb); 1350 unsigned int offset = skb_headlen(skb);
1345 skb_frag_t frags[MAX_SKB_FRAGS]; 1351 skb_frag_t frags[MAX_SKB_FRAGS];
1346 int i; 1352 int i, f;
1347 struct ubuf_info *uarg; 1353 struct ubuf_info *uarg;
1348 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; 1354 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1349 1355
@@ -1383,23 +1389,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
1383 frags[i].page_offset = 0; 1389 frags[i].page_offset = 0;
1384 skb_frag_size_set(&frags[i], len); 1390 skb_frag_size_set(&frags[i], len);
1385 } 1391 }
1386 /* swap out with old one */
1387 memcpy(skb_shinfo(skb)->frags,
1388 frags,
1389 i * sizeof(skb_frag_t));
1390 skb_shinfo(skb)->nr_frags = i;
1391 skb->truesize += i * PAGE_SIZE;
1392 1392
1393 /* remove traces of mapped pages and frag_list */ 1393 /* Copied all the bits from the frag list -- free it. */
1394 skb_frag_list_init(skb); 1394 skb_frag_list_init(skb);
1395 xenvif_skb_zerocopy_prepare(queue, nskb);
1396 kfree_skb(nskb);
1397
1398 /* Release all the original (foreign) frags. */
1399 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1400 skb_frag_unref(skb, f);
1395 uarg = skb_shinfo(skb)->destructor_arg; 1401 uarg = skb_shinfo(skb)->destructor_arg;
1396 /* increase inflight counter to offset decrement in callback */ 1402 /* increase inflight counter to offset decrement in callback */
1397 atomic_inc(&queue->inflight_packets); 1403 atomic_inc(&queue->inflight_packets);
1398 uarg->callback(uarg, true); 1404 uarg->callback(uarg, true);
1399 skb_shinfo(skb)->destructor_arg = NULL; 1405 skb_shinfo(skb)->destructor_arg = NULL;
1400 1406
1401 xenvif_skb_zerocopy_prepare(queue, nskb); 1407 /* Fill the skb with the new (local) frags. */
1402 kfree_skb(nskb); 1408 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1409 skb_shinfo(skb)->nr_frags = i;
1410 skb->truesize += i * PAGE_SIZE;
1403 1411
1404 return 0; 1412 return 0;
1405} 1413}
@@ -1649,17 +1657,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1649{ 1657{
1650 struct pending_tx_info *pending_tx_info; 1658 struct pending_tx_info *pending_tx_info;
1651 pending_ring_idx_t index; 1659 pending_ring_idx_t index;
1660 int notify;
1652 unsigned long flags; 1661 unsigned long flags;
1653 1662
1654 pending_tx_info = &queue->pending_tx_info[pending_idx]; 1663 pending_tx_info = &queue->pending_tx_info[pending_idx];
1664
1655 spin_lock_irqsave(&queue->response_lock, flags); 1665 spin_lock_irqsave(&queue->response_lock, flags);
1666
1656 make_tx_response(queue, &pending_tx_info->req, status); 1667 make_tx_response(queue, &pending_tx_info->req, status);
1657 index = pending_index(queue->pending_prod); 1668
 1669 /* Release the pending index before pushing the Tx response so
 1670 * it's available before a new Tx request is pushed by the
1671 * frontend.
1672 */
1673 index = pending_index(queue->pending_prod++);
1658 queue->pending_ring[index] = pending_idx; 1674 queue->pending_ring[index] = pending_idx;
1659 /* TX shouldn't use the index before we give it back here */ 1675
1660 mb(); 1676 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1661 queue->pending_prod++; 1677
1662 spin_unlock_irqrestore(&queue->response_lock, flags); 1678 spin_unlock_irqrestore(&queue->response_lock, flags);
1679
1680 if (notify)
1681 notify_remote_via_irq(queue->tx_irq);
1663} 1682}
1664 1683
1665 1684
@@ -1669,7 +1688,6 @@ static void make_tx_response(struct xenvif_queue *queue,
1669{ 1688{
1670 RING_IDX i = queue->tx.rsp_prod_pvt; 1689 RING_IDX i = queue->tx.rsp_prod_pvt;
1671 struct xen_netif_tx_response *resp; 1690 struct xen_netif_tx_response *resp;
1672 int notify;
1673 1691
1674 resp = RING_GET_RESPONSE(&queue->tx, i); 1692 resp = RING_GET_RESPONSE(&queue->tx, i);
1675 resp->id = txp->id; 1693 resp->id = txp->id;
@@ -1679,9 +1697,6 @@ static void make_tx_response(struct xenvif_queue *queue,
1679 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; 1697 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1680 1698
1681 queue->tx.rsp_prod_pvt = ++i; 1699 queue->tx.rsp_prod_pvt = ++i;
1682 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1683 if (notify)
1684 notify_remote_via_irq(queue->tx_irq);
1685} 1700}
1686 1701
1687static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, 1702static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 38d1c51f58b1..7bcaeec876c0 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -84,8 +84,7 @@ config OF_RESOLVE
84 bool 84 bool
85 85
86config OF_OVERLAY 86config OF_OVERLAY
87 bool 87 bool "Device Tree overlays"
88 depends on OF
89 select OF_DYNAMIC 88 select OF_DYNAMIC
90 select OF_RESOLVE 89 select OF_RESOLVE
91 90
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 0a8aeb8523fe..adb8764861c0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -714,16 +714,17 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
714 const char *path) 714 const char *path)
715{ 715{
716 struct device_node *child; 716 struct device_node *child;
717 int len = strchrnul(path, '/') - path; 717 int len;
718 int term; 718 const char *end;
719 719
720 end = strchr(path, ':');
721 if (!end)
722 end = strchrnul(path, '/');
723
724 len = end - path;
720 if (!len) 725 if (!len)
721 return NULL; 726 return NULL;
722 727
723 term = strchrnul(path, ':') - path;
724 if (term < len)
725 len = term;
726
727 __for_each_child_of_node(parent, child) { 728 __for_each_child_of_node(parent, child) {
728 const char *name = strrchr(child->full_name, '/'); 729 const char *name = strrchr(child->full_name, '/');
729 if (WARN(!name, "malformed device_node %s\n", child->full_name)) 730 if (WARN(!name, "malformed device_node %s\n", child->full_name))
@@ -768,8 +769,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
768 769
769 /* The path could begin with an alias */ 770 /* The path could begin with an alias */
770 if (*path != '/') { 771 if (*path != '/') {
771 char *p = strchrnul(path, '/'); 772 int len;
772 int len = separator ? separator - path : p - path; 773 const char *p = separator;
774
775 if (!p)
776 p = strchrnul(path, '/');
777 len = p - path;
773 778
774 /* of_aliases must not be NULL */ 779 /* of_aliases must not be NULL */
775 if (!of_aliases) 780 if (!of_aliases)
@@ -794,6 +799,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
794 path++; /* Increment past '/' delimiter */ 799 path++; /* Increment past '/' delimiter */
795 np = __of_find_node_by_path(np, path); 800 np = __of_find_node_by_path(np, path);
796 path = strchrnul(path, '/'); 801 path = strchrnul(path, '/');
802 if (separator && separator < path)
803 break;
797 } 804 }
798 raw_spin_unlock_irqrestore(&devtree_lock, flags); 805 raw_spin_unlock_irqrestore(&devtree_lock, flags);
799 return np; 806 return np;
@@ -1886,8 +1893,10 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1886 name = of_get_property(of_chosen, "linux,stdout-path", NULL); 1893 name = of_get_property(of_chosen, "linux,stdout-path", NULL);
1887 if (IS_ENABLED(CONFIG_PPC) && !name) 1894 if (IS_ENABLED(CONFIG_PPC) && !name)
1888 name = of_get_property(of_aliases, "stdout", NULL); 1895 name = of_get_property(of_aliases, "stdout", NULL);
1889 if (name) 1896 if (name) {
1890 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); 1897 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1898 add_preferred_console("stdout-path", 0, NULL);
1899 }
1891 } 1900 }
1892 1901
1893 if (!of_aliases) 1902 if (!of_aliases)
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 352b4f28f82c..dee9270ba547 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -19,6 +19,7 @@
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/idr.h>
22 23
23#include "of_private.h" 24#include "of_private.h"
24 25
@@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
85 struct device_node *target, struct device_node *child) 86 struct device_node *target, struct device_node *child)
86{ 87{
87 const char *cname; 88 const char *cname;
88 struct device_node *tchild, *grandchild; 89 struct device_node *tchild;
89 int ret = 0; 90 int ret = 0;
90 91
91 cname = kbasename(child->full_name); 92 cname = kbasename(child->full_name);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 0cf9a236d438..aba8946cac46 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -92,6 +92,11 @@ static void __init of_selftest_find_node_by_name(void)
92 "option path test failed\n"); 92 "option path test failed\n");
93 of_node_put(np); 93 of_node_put(np);
94 94
95 np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
96 selftest(np && !strcmp("test/option", options),
97 "option path test, subcase #1 failed\n");
98 of_node_put(np);
99
95 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); 100 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
96 selftest(np, "NULL option path test failed\n"); 101 selftest(np, "NULL option path test failed\n");
97 of_node_put(np); 102 of_node_put(np);
@@ -102,6 +107,12 @@ static void __init of_selftest_find_node_by_name(void)
102 "option alias path test failed\n"); 107 "option alias path test failed\n");
103 of_node_put(np); 108 of_node_put(np);
104 109
110 np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
111 &options);
112 selftest(np && !strcmp("test/alias/option", options),
113 "option alias path test, subcase #1 failed\n");
114 of_node_put(np);
115
105 np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); 116 np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
106 selftest(np, "NULL option alias path test failed\n"); 117 selftest(np, "NULL option alias path test failed\n");
107 of_node_put(np); 118 of_node_put(np);
@@ -378,9 +389,9 @@ static void __init of_selftest_property_string(void)
378 rc = of_property_match_string(np, "phandle-list-names", "first"); 389 rc = of_property_match_string(np, "phandle-list-names", "first");
379 selftest(rc == 0, "first expected:0 got:%i\n", rc); 390 selftest(rc == 0, "first expected:0 got:%i\n", rc);
380 rc = of_property_match_string(np, "phandle-list-names", "second"); 391 rc = of_property_match_string(np, "phandle-list-names", "second");
381 selftest(rc == 1, "second expected:0 got:%i\n", rc); 392 selftest(rc == 1, "second expected:1 got:%i\n", rc);
382 rc = of_property_match_string(np, "phandle-list-names", "third"); 393 rc = of_property_match_string(np, "phandle-list-names", "third");
383 selftest(rc == 2, "third expected:0 got:%i\n", rc); 394 selftest(rc == 2, "third expected:2 got:%i\n", rc);
384 rc = of_property_match_string(np, "phandle-list-names", "fourth"); 395 rc = of_property_match_string(np, "phandle-list-names", "fourth");
385 selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); 396 selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
386 rc = of_property_match_string(np, "missing-property", "blah"); 397 rc = of_property_match_string(np, "missing-property", "blah");
@@ -478,7 +489,6 @@ static void __init of_selftest_changeset(void)
478 struct device_node *n1, *n2, *n21, *nremove, *parent, *np; 489 struct device_node *n1, *n2, *n21, *nremove, *parent, *np;
479 struct of_changeset chgset; 490 struct of_changeset chgset;
480 491
481 of_changeset_init(&chgset);
482 n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); 492 n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
483 selftest(n1, "testcase setup failure\n"); 493 selftest(n1, "testcase setup failure\n");
484 n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); 494 n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
@@ -979,7 +989,7 @@ static int of_path_platform_device_exists(const char *path)
979 return pdev != NULL; 989 return pdev != NULL;
980} 990}
981 991
982#if IS_ENABLED(CONFIG_I2C) 992#if IS_BUILTIN(CONFIG_I2C)
983 993
984/* get the i2c client device instantiated at the path */ 994/* get the i2c client device instantiated at the path */
985static struct i2c_client *of_path_to_i2c_client(const char *path) 995static struct i2c_client *of_path_to_i2c_client(const char *path)
@@ -1445,7 +1455,7 @@ static void of_selftest_overlay_11(void)
1445 return; 1455 return;
1446} 1456}
1447 1457
1448#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) 1458#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
1449 1459
1450struct selftest_i2c_bus_data { 1460struct selftest_i2c_bus_data {
1451 struct platform_device *pdev; 1461 struct platform_device *pdev;
@@ -1584,7 +1594,7 @@ static struct i2c_driver selftest_i2c_dev_driver = {
1584 .id_table = selftest_i2c_dev_id, 1594 .id_table = selftest_i2c_dev_id,
1585}; 1595};
1586 1596
1587#if IS_ENABLED(CONFIG_I2C_MUX) 1597#if IS_BUILTIN(CONFIG_I2C_MUX)
1588 1598
1589struct selftest_i2c_mux_data { 1599struct selftest_i2c_mux_data {
1590 int nchans; 1600 int nchans;
@@ -1695,7 +1705,7 @@ static int of_selftest_overlay_i2c_init(void)
1695 "could not register selftest i2c bus driver\n")) 1705 "could not register selftest i2c bus driver\n"))
1696 return ret; 1706 return ret;
1697 1707
1698#if IS_ENABLED(CONFIG_I2C_MUX) 1708#if IS_BUILTIN(CONFIG_I2C_MUX)
1699 ret = i2c_add_driver(&selftest_i2c_mux_driver); 1709 ret = i2c_add_driver(&selftest_i2c_mux_driver);
1700 if (selftest(ret == 0, 1710 if (selftest(ret == 0,
1701 "could not register selftest i2c mux driver\n")) 1711 "could not register selftest i2c mux driver\n"))
@@ -1707,7 +1717,7 @@ static int of_selftest_overlay_i2c_init(void)
1707 1717
1708static void of_selftest_overlay_i2c_cleanup(void) 1718static void of_selftest_overlay_i2c_cleanup(void)
1709{ 1719{
1710#if IS_ENABLED(CONFIG_I2C_MUX) 1720#if IS_BUILTIN(CONFIG_I2C_MUX)
1711 i2c_del_driver(&selftest_i2c_mux_driver); 1721 i2c_del_driver(&selftest_i2c_mux_driver);
1712#endif 1722#endif
1713 platform_driver_unregister(&selftest_i2c_bus_driver); 1723 platform_driver_unregister(&selftest_i2c_bus_driver);
@@ -1814,7 +1824,7 @@ static void __init of_selftest_overlay(void)
1814 of_selftest_overlay_10(); 1824 of_selftest_overlay_10();
1815 of_selftest_overlay_11(); 1825 of_selftest_overlay_11();
1816 1826
1817#if IS_ENABLED(CONFIG_I2C) 1827#if IS_BUILTIN(CONFIG_I2C)
1818 if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) 1828 if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n"))
1819 goto out; 1829 goto out;
1820 1830
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 1ec694a52379..464bf492ee2a 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -80,7 +80,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
80 if (err) 80 if (err)
81 return err; 81 return err;
82 82
83 resource_list_for_each_entry(win, res, list) { 83 resource_list_for_each_entry(win, res) {
84 struct resource *parent, *res = win->res; 84 struct resource *parent, *res = win->res;
85 85
86 switch (resource_type(res)) { 86 switch (resource_type(res)) {
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index aab55474dd0d..ee082c0366ec 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
127 return false; 127 return false;
128} 128}
129 129
130static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, 130static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
131 int offset) 131 int offset)
132{ 132{
133 struct xgene_pcie_port *port = bus->sysdata; 133 struct xgene_pcie_port *port = bus->sysdata;
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
137 return NULL; 137 return NULL;
138 138
139 xgene_pcie_set_rtdid_reg(bus, devfn); 139 xgene_pcie_set_rtdid_reg(bus, devfn);
140 return xgene_pcie_get_cfg_base(bus); 140 return xgene_pcie_get_cfg_base(bus) + offset;
141} 141}
142 142
143static struct pci_ops xgene_pcie_ops = { 143static struct pci_ops xgene_pcie_ops = {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index aa012fb3834b..312f23a8429c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev,
521 struct pci_dev *pdev = to_pci_dev(dev); 521 struct pci_dev *pdev = to_pci_dev(dev);
522 char *driver_override, *old = pdev->driver_override, *cp; 522 char *driver_override, *old = pdev->driver_override, *cp;
523 523
524 if (count > PATH_MAX) 524 /* We need to keep extra room for a newline */
525 if (count >= (PAGE_SIZE - 1))
525 return -EINVAL; 526 return -EINVAL;
526 527
527 driver_override = kstrndup(buf, count, GFP_KERNEL); 528 driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev,
549{ 550{
550 struct pci_dev *pdev = to_pci_dev(dev); 551 struct pci_dev *pdev = to_pci_dev(dev);
551 552
552 return sprintf(buf, "%s\n", pdev->driver_override); 553 return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
553} 554}
554static DEVICE_ATTR_RW(driver_override); 555static DEVICE_ATTR_RW(driver_override);
555 556
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b899947d839d..1245dca79009 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3444,13 +3444,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj,
3444 if (attr == &dev_attr_requested_microamps.attr) 3444 if (attr == &dev_attr_requested_microamps.attr)
3445 return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; 3445 return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
3446 3446
3447 /* all the other attributes exist to support constraints;
3448 * don't show them if there are no constraints, or if the
3449 * relevant supporting methods are missing.
3450 */
3451 if (!rdev->constraints)
3452 return 0;
3453
3454 /* constraints need specific supporting methods */ 3447 /* constraints need specific supporting methods */
3455 if (attr == &dev_attr_min_microvolts.attr || 3448 if (attr == &dev_attr_min_microvolts.attr ||
3456 attr == &dev_attr_max_microvolts.attr) 3449 attr == &dev_attr_max_microvolts.attr)
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index bc6100103f7f..f0489cb9018b 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
152 config.regmap = chip->regmap; 152 config.regmap = chip->regmap;
153 config.of_node = dev->of_node; 153 config.of_node = dev->of_node;
154 154
155 /* Mask all interrupt sources to deassert interrupt line */
156 error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0);
157 if (!error)
158 error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0);
159 if (error) {
160 dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error);
161 return error;
162 }
163
155 rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); 164 rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config);
156 if (IS_ERR(rdev)) { 165 if (IS_ERR(rdev)) {
157 dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); 166 dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 1f93b752a81c..3fd44353cc80 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = {
235 .vsel_mask = RK808_LDO_VSEL_MASK, 235 .vsel_mask = RK808_LDO_VSEL_MASK,
236 .enable_reg = RK808_LDO_EN_REG, 236 .enable_reg = RK808_LDO_EN_REG,
237 .enable_mask = BIT(0), 237 .enable_mask = BIT(0),
238 .enable_time = 400,
238 .owner = THIS_MODULE, 239 .owner = THIS_MODULE,
239 }, { 240 }, {
240 .name = "LDO_REG2", 241 .name = "LDO_REG2",
@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = {
249 .vsel_mask = RK808_LDO_VSEL_MASK, 250 .vsel_mask = RK808_LDO_VSEL_MASK,
250 .enable_reg = RK808_LDO_EN_REG, 251 .enable_reg = RK808_LDO_EN_REG,
251 .enable_mask = BIT(1), 252 .enable_mask = BIT(1),
253 .enable_time = 400,
252 .owner = THIS_MODULE, 254 .owner = THIS_MODULE,
253 }, { 255 }, {
254 .name = "LDO_REG3", 256 .name = "LDO_REG3",
@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = {
263 .vsel_mask = RK808_BUCK4_VSEL_MASK, 265 .vsel_mask = RK808_BUCK4_VSEL_MASK,
264 .enable_reg = RK808_LDO_EN_REG, 266 .enable_reg = RK808_LDO_EN_REG,
265 .enable_mask = BIT(2), 267 .enable_mask = BIT(2),
268 .enable_time = 400,
266 .owner = THIS_MODULE, 269 .owner = THIS_MODULE,
267 }, { 270 }, {
268 .name = "LDO_REG4", 271 .name = "LDO_REG4",
@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = {
277 .vsel_mask = RK808_LDO_VSEL_MASK, 280 .vsel_mask = RK808_LDO_VSEL_MASK,
278 .enable_reg = RK808_LDO_EN_REG, 281 .enable_reg = RK808_LDO_EN_REG,
279 .enable_mask = BIT(3), 282 .enable_mask = BIT(3),
283 .enable_time = 400,
280 .owner = THIS_MODULE, 284 .owner = THIS_MODULE,
281 }, { 285 }, {
282 .name = "LDO_REG5", 286 .name = "LDO_REG5",
@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = {
291 .vsel_mask = RK808_LDO_VSEL_MASK, 295 .vsel_mask = RK808_LDO_VSEL_MASK,
292 .enable_reg = RK808_LDO_EN_REG, 296 .enable_reg = RK808_LDO_EN_REG,
293 .enable_mask = BIT(4), 297 .enable_mask = BIT(4),
298 .enable_time = 400,
294 .owner = THIS_MODULE, 299 .owner = THIS_MODULE,
295 }, { 300 }, {
296 .name = "LDO_REG6", 301 .name = "LDO_REG6",
@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = {
305 .vsel_mask = RK808_LDO_VSEL_MASK, 310 .vsel_mask = RK808_LDO_VSEL_MASK,
306 .enable_reg = RK808_LDO_EN_REG, 311 .enable_reg = RK808_LDO_EN_REG,
307 .enable_mask = BIT(5), 312 .enable_mask = BIT(5),
313 .enable_time = 400,
308 .owner = THIS_MODULE, 314 .owner = THIS_MODULE,
309 }, { 315 }, {
310 .name = "LDO_REG7", 316 .name = "LDO_REG7",
@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = {
319 .vsel_mask = RK808_LDO_VSEL_MASK, 325 .vsel_mask = RK808_LDO_VSEL_MASK,
320 .enable_reg = RK808_LDO_EN_REG, 326 .enable_reg = RK808_LDO_EN_REG,
321 .enable_mask = BIT(6), 327 .enable_mask = BIT(6),
328 .enable_time = 400,
322 .owner = THIS_MODULE, 329 .owner = THIS_MODULE,
323 }, { 330 }, {
324 .name = "LDO_REG8", 331 .name = "LDO_REG8",
@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = {
333 .vsel_mask = RK808_LDO_VSEL_MASK, 340 .vsel_mask = RK808_LDO_VSEL_MASK,
334 .enable_reg = RK808_LDO_EN_REG, 341 .enable_reg = RK808_LDO_EN_REG,
335 .enable_mask = BIT(7), 342 .enable_mask = BIT(7),
343 .enable_time = 400,
336 .owner = THIS_MODULE, 344 .owner = THIS_MODULE,
337 }, { 345 }, {
338 .name = "SWITCH_REG1", 346 .name = "SWITCH_REG1",
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 70a5d94cc766..b4f7744f6751 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -31,6 +31,7 @@
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/of.h> 32#include <linux/of.h>
33#include <linux/of_device.h> 33#include <linux/of_device.h>
34#include <linux/suspend.h>
34#include <linux/uaccess.h> 35#include <linux/uaccess.h>
35 36
36#include "rtc-at91rm9200.h" 37#include "rtc-at91rm9200.h"
@@ -54,6 +55,10 @@ static void __iomem *at91_rtc_regs;
54static int irq; 55static int irq;
55static DEFINE_SPINLOCK(at91_rtc_lock); 56static DEFINE_SPINLOCK(at91_rtc_lock);
56static u32 at91_rtc_shadow_imr; 57static u32 at91_rtc_shadow_imr;
58static bool suspended;
59static DEFINE_SPINLOCK(suspended_lock);
60static unsigned long cached_events;
61static u32 at91_rtc_imr;
57 62
58static void at91_rtc_write_ier(u32 mask) 63static void at91_rtc_write_ier(u32 mask)
59{ 64{
@@ -290,7 +295,9 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
290 struct rtc_device *rtc = platform_get_drvdata(pdev); 295 struct rtc_device *rtc = platform_get_drvdata(pdev);
291 unsigned int rtsr; 296 unsigned int rtsr;
292 unsigned long events = 0; 297 unsigned long events = 0;
298 int ret = IRQ_NONE;
293 299
300 spin_lock(&suspended_lock);
294 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); 301 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
295 if (rtsr) { /* this interrupt is shared! Is it ours? */ 302 if (rtsr) { /* this interrupt is shared! Is it ours? */
296 if (rtsr & AT91_RTC_ALARM) 303 if (rtsr & AT91_RTC_ALARM)
@@ -304,14 +311,22 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
304 311
305 at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */ 312 at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
306 313
307 rtc_update_irq(rtc, 1, events); 314 if (!suspended) {
315 rtc_update_irq(rtc, 1, events);
308 316
309 dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__, 317 dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n",
310 events >> 8, events & 0x000000FF); 318 __func__, events >> 8, events & 0x000000FF);
319 } else {
320 cached_events |= events;
321 at91_rtc_write_idr(at91_rtc_imr);
322 pm_system_wakeup();
323 }
311 324
312 return IRQ_HANDLED; 325 ret = IRQ_HANDLED;
313 } 326 }
314 return IRQ_NONE; /* not handled */ 327 spin_unlock(&suspended_lock);
328
329 return ret;
315} 330}
316 331
317static const struct at91_rtc_config at91rm9200_config = { 332static const struct at91_rtc_config at91rm9200_config = {
@@ -401,8 +416,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
401 AT91_RTC_CALEV); 416 AT91_RTC_CALEV);
402 417
403 ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt, 418 ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt,
404 IRQF_SHARED, 419 IRQF_SHARED | IRQF_COND_SUSPEND,
405 "at91_rtc", pdev); 420 "at91_rtc", pdev);
406 if (ret) { 421 if (ret) {
407 dev_err(&pdev->dev, "IRQ %d already in use.\n", irq); 422 dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
408 return ret; 423 return ret;
@@ -454,8 +469,6 @@ static void at91_rtc_shutdown(struct platform_device *pdev)
454 469
455/* AT91RM9200 RTC Power management control */ 470/* AT91RM9200 RTC Power management control */
456 471
457static u32 at91_rtc_imr;
458
459static int at91_rtc_suspend(struct device *dev) 472static int at91_rtc_suspend(struct device *dev)
460{ 473{
461 /* this IRQ is shared with DBGU and other hardware which isn't 474 /* this IRQ is shared with DBGU and other hardware which isn't
@@ -464,21 +477,42 @@ static int at91_rtc_suspend(struct device *dev)
464 at91_rtc_imr = at91_rtc_read_imr() 477 at91_rtc_imr = at91_rtc_read_imr()
465 & (AT91_RTC_ALARM|AT91_RTC_SECEV); 478 & (AT91_RTC_ALARM|AT91_RTC_SECEV);
466 if (at91_rtc_imr) { 479 if (at91_rtc_imr) {
467 if (device_may_wakeup(dev)) 480 if (device_may_wakeup(dev)) {
481 unsigned long flags;
482
468 enable_irq_wake(irq); 483 enable_irq_wake(irq);
469 else 484
485 spin_lock_irqsave(&suspended_lock, flags);
486 suspended = true;
487 spin_unlock_irqrestore(&suspended_lock, flags);
488 } else {
470 at91_rtc_write_idr(at91_rtc_imr); 489 at91_rtc_write_idr(at91_rtc_imr);
490 }
471 } 491 }
472 return 0; 492 return 0;
473} 493}
474 494
475static int at91_rtc_resume(struct device *dev) 495static int at91_rtc_resume(struct device *dev)
476{ 496{
497 struct rtc_device *rtc = dev_get_drvdata(dev);
498
477 if (at91_rtc_imr) { 499 if (at91_rtc_imr) {
478 if (device_may_wakeup(dev)) 500 if (device_may_wakeup(dev)) {
501 unsigned long flags;
502
503 spin_lock_irqsave(&suspended_lock, flags);
504
505 if (cached_events) {
506 rtc_update_irq(rtc, 1, cached_events);
507 cached_events = 0;
508 }
509
510 suspended = false;
511 spin_unlock_irqrestore(&suspended_lock, flags);
512
479 disable_irq_wake(irq); 513 disable_irq_wake(irq);
480 else 514 }
481 at91_rtc_write_ier(at91_rtc_imr); 515 at91_rtc_write_ier(at91_rtc_imr);
482 } 516 }
483 return 0; 517 return 0;
484} 518}
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 2183fd2750ab..5ccaee32df72 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -23,6 +23,7 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/mfd/syscon.h> 24#include <linux/mfd/syscon.h>
25#include <linux/regmap.h> 25#include <linux/regmap.h>
26#include <linux/suspend.h>
26#include <linux/clk.h> 27#include <linux/clk.h>
27 28
28/* 29/*
@@ -77,6 +78,9 @@ struct sam9_rtc {
77 unsigned int gpbr_offset; 78 unsigned int gpbr_offset;
78 int irq; 79 int irq;
79 struct clk *sclk; 80 struct clk *sclk;
81 bool suspended;
82 unsigned long events;
83 spinlock_t lock;
80}; 84};
81 85
82#define rtt_readl(rtc, field) \ 86#define rtt_readl(rtc, field) \
@@ -271,14 +275,9 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
271 return 0; 275 return 0;
272} 276}
273 277
274/* 278static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc)
275 * IRQ handler for the RTC
276 */
277static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
278{ 279{
279 struct sam9_rtc *rtc = _rtc;
280 u32 sr, mr; 280 u32 sr, mr;
281 unsigned long events = 0;
282 281
283 /* Shared interrupt may be for another device. Note: reading 282 /* Shared interrupt may be for another device. Note: reading
284 * SR clears it, so we must only read it in this irq handler! 283 * SR clears it, so we must only read it in this irq handler!
@@ -290,18 +289,54 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
290 289
291 /* alarm status */ 290 /* alarm status */
292 if (sr & AT91_RTT_ALMS) 291 if (sr & AT91_RTT_ALMS)
293 events |= (RTC_AF | RTC_IRQF); 292 rtc->events |= (RTC_AF | RTC_IRQF);
294 293
295 /* timer update/increment */ 294 /* timer update/increment */
296 if (sr & AT91_RTT_RTTINC) 295 if (sr & AT91_RTT_RTTINC)
297 events |= (RTC_UF | RTC_IRQF); 296 rtc->events |= (RTC_UF | RTC_IRQF);
297
298 return IRQ_HANDLED;
299}
300
301static void at91_rtc_flush_events(struct sam9_rtc *rtc)
302{
303 if (!rtc->events)
304 return;
298 305
299 rtc_update_irq(rtc->rtcdev, 1, events); 306 rtc_update_irq(rtc->rtcdev, 1, rtc->events);
307 rtc->events = 0;
300 308
301 pr_debug("%s: num=%ld, events=0x%02lx\n", __func__, 309 pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
302 events >> 8, events & 0x000000FF); 310 rtc->events >> 8, rtc->events & 0x000000FF);
311}
303 312
304 return IRQ_HANDLED; 313/*
314 * IRQ handler for the RTC
315 */
316static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
317{
318 struct sam9_rtc *rtc = _rtc;
319 int ret;
320
321 spin_lock(&rtc->lock);
322
323 ret = at91_rtc_cache_events(rtc);
324
325 /* We're called in suspended state */
326 if (rtc->suspended) {
327 /* Mask irqs coming from this peripheral */
328 rtt_writel(rtc, MR,
329 rtt_readl(rtc, MR) &
330 ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
331 /* Trigger a system wakeup */
332 pm_system_wakeup();
333 } else {
334 at91_rtc_flush_events(rtc);
335 }
336
337 spin_unlock(&rtc->lock);
338
339 return ret;
305} 340}
306 341
307static const struct rtc_class_ops at91_rtc_ops = { 342static const struct rtc_class_ops at91_rtc_ops = {
@@ -421,7 +456,8 @@ static int at91_rtc_probe(struct platform_device *pdev)
421 456
422 /* register irq handler after we know what name we'll use */ 457 /* register irq handler after we know what name we'll use */
423 ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, 458 ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
424 IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); 459 IRQF_SHARED | IRQF_COND_SUSPEND,
460 dev_name(&rtc->rtcdev->dev), rtc);
425 if (ret) { 461 if (ret) {
426 dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); 462 dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
427 return ret; 463 return ret;
@@ -482,7 +518,12 @@ static int at91_rtc_suspend(struct device *dev)
482 rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); 518 rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
483 if (rtc->imr) { 519 if (rtc->imr) {
484 if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) { 520 if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) {
521 unsigned long flags;
522
485 enable_irq_wake(rtc->irq); 523 enable_irq_wake(rtc->irq);
524 spin_lock_irqsave(&rtc->lock, flags);
525 rtc->suspended = true;
526 spin_unlock_irqrestore(&rtc->lock, flags);
486 /* don't let RTTINC cause wakeups */ 527 /* don't let RTTINC cause wakeups */
487 if (mr & AT91_RTT_RTTINCIEN) 528 if (mr & AT91_RTT_RTTINCIEN)
488 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); 529 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -499,10 +540,18 @@ static int at91_rtc_resume(struct device *dev)
499 u32 mr; 540 u32 mr;
500 541
501 if (rtc->imr) { 542 if (rtc->imr) {
543 unsigned long flags;
544
502 if (device_may_wakeup(dev)) 545 if (device_may_wakeup(dev))
503 disable_irq_wake(rtc->irq); 546 disable_irq_wake(rtc->irq);
504 mr = rtt_readl(rtc, MR); 547 mr = rtt_readl(rtc, MR);
505 rtt_writel(rtc, MR, mr | rtc->imr); 548 rtt_writel(rtc, MR, mr | rtc->imr);
549
550 spin_lock_irqsave(&rtc->lock, flags);
551 rtc->suspended = false;
552 at91_rtc_cache_events(rtc);
553 at91_rtc_flush_events(rtc);
554 spin_unlock_irqrestore(&rtc->lock, flags);
506 } 555 }
507 556
508 return 0; 557 return 0;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4241eeab3386..f4cf6851fae9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = {
849 849
850static struct s3c_rtc_data const s3c6410_rtc_data = { 850static struct s3c_rtc_data const s3c6410_rtc_data = {
851 .max_user_freq = 32768, 851 .max_user_freq = 32768,
852 .needs_src_clk = true,
852 .irq_handler = s3c6410_rtc_irq, 853 .irq_handler = s3c6410_rtc_irq,
853 .set_freq = s3c6410_rtc_setfreq, 854 .set_freq = s3c6410_rtc_setfreq,
854 .enable_tick = s3c6410_rtc_enable_tick, 855 .enable_tick = s3c6410_rtc_enable_tick,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 96128cb009f3..da212813f2d5 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
547 * parse input 547 * parse input
548 */ 548 */
549 num_of_segments = 0; 549 num_of_segments = 0;
550 for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { 550 for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
551 for (j = i; (buf[j] != ':') && 551 for (j = i; (buf[j] != ':') &&
552 (buf[j] != '\0') && 552 (buf[j] != '\0') &&
553 (buf[j] != '\n') && 553 (buf[j] != '\n') &&
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 09db45296eed..7497ddde2dd6 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq)
92 add = 0; 92 add = 0;
93 continue; 93 continue;
94 } 94 }
95 for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { 95 for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
96 if (clusters_intersect(req, iter->request[pos]) && 96 if (clusters_intersect(req, iter->request[pos]) &&
97 (rq_data_dir(req) == WRITE || 97 (rq_data_dir(req) == WRITE ||
98 rq_data_dir(iter->request[pos]) == WRITE)) { 98 rq_data_dir(iter->request[pos]) == WRITE)) {
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 62b58d38ce2e..60de66252fa2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
500 struct sas_discovery_event *ev = to_sas_discovery_event(work); 500 struct sas_discovery_event *ev = to_sas_discovery_event(work);
501 struct asd_sas_port *port = ev->port; 501 struct asd_sas_port *port = ev->port;
502 struct sas_ha_struct *ha = port->ha; 502 struct sas_ha_struct *ha = port->ha;
503 struct domain_device *ddev = port->port_dev;
503 504
504 /* prevent revalidation from finding sata links in recovery */ 505 /* prevent revalidation from finding sata links in recovery */
505 mutex_lock(&ha->disco_mutex); 506 mutex_lock(&ha->disco_mutex);
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
514 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, 515 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
515 task_pid_nr(current)); 516 task_pid_nr(current));
516 517
517 if (port->port_dev) 518 if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
518 res = sas_ex_revalidate_domain(port->port_dev); 519 ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
520 res = sas_ex_revalidate_domain(ddev);
519 521
520 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", 522 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
521 port->id, task_pid_nr(current), res); 523 port->id, task_pid_nr(current), res);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 9af7841f2e8c..06de34001c66 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
764 (unsigned long long)xfer->rx_dma); 764 (unsigned long long)xfer->rx_dma);
765 } 765 }
766 766
767 /* REVISIT: We're waiting for ENDRX before we start the next 767 /* REVISIT: We're waiting for RXBUFF before we start the next
768 * transfer because we need to handle some difficult timing 768 * transfer because we need to handle some difficult timing
769 * issues otherwise. If we wait for ENDTX in one transfer and 769 * issues otherwise. If we wait for TXBUFE in one transfer and
770 * then starts waiting for ENDRX in the next, it's difficult 770 * then starts waiting for RXBUFF in the next, it's difficult
771 * to tell the difference between the ENDRX interrupt we're 771 * to tell the difference between the RXBUFF interrupt we're
772 * actually waiting for and the ENDRX interrupt of the 772 * actually waiting for and the RXBUFF interrupt of the
773 * previous transfer. 773 * previous transfer.
774 * 774 *
775 * It should be doable, though. Just not now... 775 * It should be doable, though. Just not now...
776 */ 776 */
777 spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); 777 spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
778 spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); 778 spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
779} 779}
780 780
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index a0197fd4e95c..3ce39d10fafb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
139 1, 139 1,
140 DMA_MEM_TO_DEV, 140 DMA_MEM_TO_DEV,
141 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 141 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
142 if (!txdesc)
143 return NULL;
144
142 txdesc->callback = dw_spi_dma_tx_done; 145 txdesc->callback = dw_spi_dma_tx_done;
143 txdesc->callback_param = dws; 146 txdesc->callback_param = dws;
144 147
@@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
184 1, 187 1,
185 DMA_DEV_TO_MEM, 188 DMA_DEV_TO_MEM,
186 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 189 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
190 if (!rxdesc)
191 return NULL;
192
187 rxdesc->callback = dw_spi_dma_rx_done; 193 rxdesc->callback = dw_spi_dma_rx_done;
188 rxdesc->callback_param = dws; 194 rxdesc->callback_param = dws;
189 195
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 5ba331047cbe..6d331e0db331 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -36,13 +36,13 @@ struct spi_pci_desc {
36 36
37static struct spi_pci_desc spi_pci_mid_desc_1 = { 37static struct spi_pci_desc spi_pci_mid_desc_1 = {
38 .setup = dw_spi_mid_init, 38 .setup = dw_spi_mid_init,
39 .num_cs = 32, 39 .num_cs = 5,
40 .bus_num = 0, 40 .bus_num = 0,
41}; 41};
42 42
43static struct spi_pci_desc spi_pci_mid_desc_2 = { 43static struct spi_pci_desc spi_pci_mid_desc_2 = {
44 .setup = dw_spi_mid_init, 44 .setup = dw_spi_mid_init,
45 .num_cs = 4, 45 .num_cs = 2,
46 .bus_num = 1, 46 .bus_num = 1,
47}; 47};
48 48
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 5a97a62b298a..4847afba89f4 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
621 if (!dws->fifo_len) { 621 if (!dws->fifo_len) {
622 u32 fifo; 622 u32 fifo;
623 623
624 for (fifo = 2; fifo <= 256; fifo++) { 624 for (fifo = 1; fifo < 256; fifo++) {
625 dw_writew(dws, DW_SPI_TXFLTR, fifo); 625 dw_writew(dws, DW_SPI_TXFLTR, fifo);
626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) 626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
627 break; 627 break;
628 } 628 }
629 dw_writew(dws, DW_SPI_TXFLTR, 0); 629 dw_writew(dws, DW_SPI_TXFLTR, 0);
630 630
631 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; 631 dws->fifo_len = (fifo == 1) ? 0 : fifo;
632 dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); 632 dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
633 } 633 }
634} 634}
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index c01567d53581..e649bc7d4c08 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master,
459 unsigned long flags; 459 unsigned long flags;
460 int ret; 460 int ret;
461 461
462 if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
463 dev_err(spfi->dev,
464 "Transfer length (%d) is greater than the max supported (%d)",
465 xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
466 return -EINVAL;
467 }
468
462 /* 469 /*
463 * Stop all DMA and reset the controller if the previous transaction 470 * Stop all DMA and reset the controller if the previous transaction
464 * timed-out and never completed it's DMA. 471 * timed-out and never completed it's DMA.
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 89ca162801da..ee513a85296b 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022)
534 pl022->cur_msg = NULL; 534 pl022->cur_msg = NULL;
535 pl022->cur_transfer = NULL; 535 pl022->cur_transfer = NULL;
536 pl022->cur_chip = NULL; 536 pl022->cur_chip = NULL;
537 spi_finalize_current_message(pl022->master);
538 537
539 /* disable the SPI/SSP operation */ 538 /* disable the SPI/SSP operation */
540 writew((readw(SSP_CR1(pl022->virtbase)) & 539 writew((readw(SSP_CR1(pl022->virtbase)) &
541 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); 540 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
542 541
542 spi_finalize_current_message(pl022->master);
543} 543}
544 544
545/** 545/**
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 884a716e50cb..5c0616870358 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -101,6 +101,7 @@ struct ti_qspi {
101#define QSPI_FLEN(n) ((n - 1) << 0) 101#define QSPI_FLEN(n) ((n - 1) << 0)
102 102
103/* STATUS REGISTER */ 103/* STATUS REGISTER */
104#define BUSY 0x01
104#define WC 0x02 105#define WC 0x02
105 106
106/* INTERRUPT REGISTER */ 107/* INTERRUPT REGISTER */
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
199 ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); 200 ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
200} 201}
201 202
203static inline u32 qspi_is_busy(struct ti_qspi *qspi)
204{
205 u32 stat;
206 unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
207
208 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
209 while ((stat & BUSY) && time_after(timeout, jiffies)) {
210 cpu_relax();
211 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
212 }
213
214 WARN(stat & BUSY, "qspi busy\n");
215 return stat & BUSY;
216}
217
202static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) 218static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
203{ 219{
204 int wlen, count; 220 int wlen, count;
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
211 wlen = t->bits_per_word >> 3; /* in bytes */ 227 wlen = t->bits_per_word >> 3; /* in bytes */
212 228
213 while (count) { 229 while (count) {
230 if (qspi_is_busy(qspi))
231 return -EBUSY;
232
214 switch (wlen) { 233 switch (wlen) {
215 case 1: 234 case 1:
216 dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", 235 dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
266 285
267 while (count) { 286 while (count) {
268 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); 287 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
288 if (qspi_is_busy(qspi))
289 return -EBUSY;
290
269 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 291 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
270 if (!wait_for_completion_timeout(&qspi->transfer_complete, 292 if (!wait_for_completion_timeout(&qspi->transfer_complete,
271 QSPI_COMPLETION_TIMEOUT)) { 293 QSPI_COMPLETION_TIMEOUT)) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 9800c01e6fb9..3f72451d2de0 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -426,7 +426,6 @@ static int pci171x_ai_insn_read(struct comedi_device *dev,
426 unsigned int *data) 426 unsigned int *data)
427{ 427{
428 struct pci1710_private *devpriv = dev->private; 428 struct pci1710_private *devpriv = dev->private;
429 unsigned int chan = CR_CHAN(insn->chanspec);
430 int ret = 0; 429 int ret = 0;
431 int i; 430 int i;
432 431
@@ -447,7 +446,7 @@ static int pci171x_ai_insn_read(struct comedi_device *dev,
447 if (ret) 446 if (ret)
448 break; 447 break;
449 448
450 ret = pci171x_ai_read_sample(dev, s, chan, &val); 449 ret = pci171x_ai_read_sample(dev, s, 0, &val);
451 if (ret) 450 if (ret)
452 break; 451 break;
453 452
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
index dbdea71d6b95..e856f01ca077 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.c
+++ b/drivers/staging/comedi/drivers/comedi_isadma.c
@@ -91,9 +91,10 @@ unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
91 stalled++; 91 stalled++;
92 if (stalled > 10) 92 if (stalled > 10)
93 break; 93 break;
94 } else {
95 residue = new_residue;
96 stalled = 0;
94 } 97 }
95 residue = new_residue;
96 stalled = 0;
97 } 98 }
98 return residue; 99 return residue;
99} 100}
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index e37118321a27..a0906685e27f 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -103,11 +103,6 @@ enum vmk80xx_model {
103 VMK8061_MODEL 103 VMK8061_MODEL
104}; 104};
105 105
106struct firmware_version {
107 unsigned char ic3_vers[32]; /* USB-Controller */
108 unsigned char ic6_vers[32]; /* CPU */
109};
110
111static const struct comedi_lrange vmk8061_range = { 106static const struct comedi_lrange vmk8061_range = {
112 2, { 107 2, {
113 UNI_RANGE(5), 108 UNI_RANGE(5),
@@ -156,68 +151,12 @@ static const struct vmk80xx_board vmk80xx_boardinfo[] = {
156struct vmk80xx_private { 151struct vmk80xx_private {
157 struct usb_endpoint_descriptor *ep_rx; 152 struct usb_endpoint_descriptor *ep_rx;
158 struct usb_endpoint_descriptor *ep_tx; 153 struct usb_endpoint_descriptor *ep_tx;
159 struct firmware_version fw;
160 struct semaphore limit_sem; 154 struct semaphore limit_sem;
161 unsigned char *usb_rx_buf; 155 unsigned char *usb_rx_buf;
162 unsigned char *usb_tx_buf; 156 unsigned char *usb_tx_buf;
163 enum vmk80xx_model model; 157 enum vmk80xx_model model;
164}; 158};
165 159
166static int vmk80xx_check_data_link(struct comedi_device *dev)
167{
168 struct vmk80xx_private *devpriv = dev->private;
169 struct usb_device *usb = comedi_to_usb_dev(dev);
170 unsigned int tx_pipe;
171 unsigned int rx_pipe;
172 unsigned char tx[1];
173 unsigned char rx[2];
174
175 tx_pipe = usb_sndbulkpipe(usb, 0x01);
176 rx_pipe = usb_rcvbulkpipe(usb, 0x81);
177
178 tx[0] = VMK8061_CMD_RD_PWR_STAT;
179
180 /*
181 * Check that IC6 (PIC16F871) is powered and
182 * running and the data link between IC3 and
183 * IC6 is working properly
184 */
185 usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
186 usb_bulk_msg(usb, rx_pipe, rx, 2, NULL, HZ * 10);
187
188 return (int)rx[1];
189}
190
191static void vmk80xx_read_eeprom(struct comedi_device *dev, int flag)
192{
193 struct vmk80xx_private *devpriv = dev->private;
194 struct usb_device *usb = comedi_to_usb_dev(dev);
195 unsigned int tx_pipe;
196 unsigned int rx_pipe;
197 unsigned char tx[1];
198 unsigned char rx[64];
199 int cnt;
200
201 tx_pipe = usb_sndbulkpipe(usb, 0x01);
202 rx_pipe = usb_rcvbulkpipe(usb, 0x81);
203
204 tx[0] = VMK8061_CMD_RD_VERSION;
205
206 /*
207 * Read the firmware version info of IC3 and
208 * IC6 from the internal EEPROM of the IC
209 */
210 usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
211 usb_bulk_msg(usb, rx_pipe, rx, 64, &cnt, HZ * 10);
212
213 rx[cnt] = '\0';
214
215 if (flag & IC3_VERSION)
216 strncpy(devpriv->fw.ic3_vers, rx + 1, 24);
217 else /* IC6_VERSION */
218 strncpy(devpriv->fw.ic6_vers, rx + 25, 24);
219}
220
221static void vmk80xx_do_bulk_msg(struct comedi_device *dev) 160static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
222{ 161{
223 struct vmk80xx_private *devpriv = dev->private; 162 struct vmk80xx_private *devpriv = dev->private;
@@ -878,16 +817,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
878 817
879 usb_set_intfdata(intf, devpriv); 818 usb_set_intfdata(intf, devpriv);
880 819
881 if (devpriv->model == VMK8061_MODEL) {
882 vmk80xx_read_eeprom(dev, IC3_VERSION);
883 dev_info(&intf->dev, "%s\n", devpriv->fw.ic3_vers);
884
885 if (vmk80xx_check_data_link(dev)) {
886 vmk80xx_read_eeprom(dev, IC6_VERSION);
887 dev_info(&intf->dev, "%s\n", devpriv->fw.ic6_vers);
888 }
889 }
890
891 if (devpriv->model == VMK8055_MODEL) 820 if (devpriv->model == VMK8055_MODEL)
892 vmk80xx_reset_device(dev); 821 vmk80xx_reset_device(dev);
893 822
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index d9d6fad7cb00..816174388f13 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -214,11 +214,17 @@ struct mxs_lradc {
214 unsigned long is_divided; 214 unsigned long is_divided;
215 215
216 /* 216 /*
217 * Touchscreen LRADC channels receives a private slot in the CTRL4 217 * When the touchscreen is enabled, we give it two private virtual
218 * register, the slot #7. Therefore only 7 slots instead of 8 in the 218 * channels: #6 and #7. This means that only 6 virtual channels (instead
219 * CTRL4 register can be mapped to LRADC channels when using the 219 * of 8) will be available for buffered capture.
220 * touchscreen. 220 */
221 * 221#define TOUCHSCREEN_VCHANNEL1 7
222#define TOUCHSCREEN_VCHANNEL2 6
223#define BUFFER_VCHANS_LIMITED 0x3f
224#define BUFFER_VCHANS_ALL 0xff
225 u8 buffer_vchans;
226
227 /*
222 * Furthermore, certain LRADC channels are shared between touchscreen 228 * Furthermore, certain LRADC channels are shared between touchscreen
223 * and/or touch-buttons and generic LRADC block. Therefore when using 229 * and/or touch-buttons and generic LRADC block. Therefore when using
224 * either of these, these channels are not available for the regular 230 * either of these, these channels are not available for the regular
@@ -342,6 +348,9 @@ struct mxs_lradc {
342#define LRADC_CTRL4 0x140 348#define LRADC_CTRL4 0x140
343#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) 349#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
344#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) 350#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
351#define LRADC_CTRL4_LRADCSELECT(n, x) \
352 (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
353 LRADC_CTRL4_LRADCSELECT_MASK(n))
345 354
346#define LRADC_RESOLUTION 12 355#define LRADC_RESOLUTION 12
347#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) 356#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
@@ -416,6 +425,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
416 LRADC_STATUS_TOUCH_DETECT_RAW); 425 LRADC_STATUS_TOUCH_DETECT_RAW);
417} 426}
418 427
428static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
429 unsigned ch)
430{
431 mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
432 LRADC_CTRL4);
433 mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
434}
435
419static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) 436static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
420{ 437{
421 /* 438 /*
@@ -450,12 +467,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
450 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), 467 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
451 LRADC_DELAY(3)); 468 LRADC_DELAY(3));
452 469
453 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | 470 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
454 LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
455 LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
456 471
457 /* wake us again, when the complete conversion is done */
458 mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
459 /* 472 /*
460 * after changing the touchscreen plates setting 473 * after changing the touchscreen plates setting
461 * the signals need some initial time to settle. Start the 474 * the signals need some initial time to settle. Start the
@@ -509,12 +522,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
509 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), 522 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
510 LRADC_DELAY(3)); 523 LRADC_DELAY(3));
511 524
512 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | 525 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
513 LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
514 LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
515 526
516 /* wake us again, when the conversions are done */
517 mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
518 /* 527 /*
519 * after changing the touchscreen plates setting 528 * after changing the touchscreen plates setting
520 * the signals need some initial time to settle. Start the 529 * the signals need some initial time to settle. Start the
@@ -580,36 +589,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
580#define TS_CH_XM 4 589#define TS_CH_XM 4
581#define TS_CH_YM 5 590#define TS_CH_YM 5
582 591
583static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
584{
585 u32 reg;
586 int val;
587
588 reg = readl(lradc->base + LRADC_CTRL1);
589
590 /* only channels 3 to 5 are of interest here */
591 if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
592 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
593 LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
594 val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
595 } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
596 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
597 LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
598 val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
599 } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
600 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
601 LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
602 val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
603 } else {
604 return -EIO;
605 }
606
607 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
608 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
609
610 return val;
611}
612
613/* 592/*
614 * YP(open)--+-------------+ 593 * YP(open)--+-------------+
615 * | |--+ 594 * | |--+
@@ -653,7 +632,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
653 mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); 632 mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
654 633
655 lradc->cur_plate = LRADC_SAMPLE_X; 634 lradc->cur_plate = LRADC_SAMPLE_X;
656 mxs_lradc_setup_ts_channel(lradc, TS_CH_YP); 635 mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
636 mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
657} 637}
658 638
659/* 639/*
@@ -674,7 +654,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
674 mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); 654 mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
675 655
676 lradc->cur_plate = LRADC_SAMPLE_Y; 656 lradc->cur_plate = LRADC_SAMPLE_Y;
677 mxs_lradc_setup_ts_channel(lradc, TS_CH_XM); 657 mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
658 mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
678} 659}
679 660
680/* 661/*
@@ -695,7 +676,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
695 mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); 676 mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
696 677
697 lradc->cur_plate = LRADC_SAMPLE_PRESSURE; 678 lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
698 mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); 679 mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
680 mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
681 mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
682 TOUCHSCREEN_VCHANNEL1);
699} 683}
700 684
701static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) 685static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
@@ -708,6 +692,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
708 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); 692 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
709} 693}
710 694
695static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
696{
697 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
698 LRADC_CTRL1);
699 mxs_lradc_reg_set(lradc,
700 LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
701 /*
702 * start with the Y-pos, because it uses nearly the same plate
703 * settings like the touch detection
704 */
705 mxs_lradc_prepare_y_pos(lradc);
706}
707
711static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) 708static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
712{ 709{
713 input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); 710 input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
@@ -725,10 +722,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
725 * start a dummy conversion to burn time to settle the signals 722 * start a dummy conversion to burn time to settle the signals
726 * note: we are not interested in the conversion's value 723 * note: we are not interested in the conversion's value
727 */ 724 */
728 mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5)); 725 mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
729 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); 726 mxs_lradc_reg_clear(lradc,
730 mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1); 727 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
731 mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) | 728 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
729 mxs_lradc_reg_wrt(lradc,
730 LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
732 LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ 731 LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
733 LRADC_DELAY(2)); 732 LRADC_DELAY(2));
734} 733}
@@ -760,59 +759,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
760 759
761 /* if it is released, wait for the next touch via IRQ */ 760 /* if it is released, wait for the next touch via IRQ */
762 lradc->cur_plate = LRADC_TOUCH; 761 lradc->cur_plate = LRADC_TOUCH;
763 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); 762 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
763 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
764 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
765 LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
766 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
764 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); 767 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
765} 768}
766 769
767/* touchscreen's state machine */ 770/* touchscreen's state machine */
768static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) 771static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
769{ 772{
770 int val;
771
772 switch (lradc->cur_plate) { 773 switch (lradc->cur_plate) {
773 case LRADC_TOUCH: 774 case LRADC_TOUCH:
774 /* 775 if (mxs_lradc_check_touch_event(lradc))
775 * start with the Y-pos, because it uses nearly the same plate 776 mxs_lradc_start_touch_event(lradc);
776 * settings like the touch detection
777 */
778 if (mxs_lradc_check_touch_event(lradc)) {
779 mxs_lradc_reg_clear(lradc,
780 LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
781 LRADC_CTRL1);
782 mxs_lradc_prepare_y_pos(lradc);
783 }
784 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, 777 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
785 LRADC_CTRL1); 778 LRADC_CTRL1);
786 return; 779 return;
787 780
788 case LRADC_SAMPLE_Y: 781 case LRADC_SAMPLE_Y:
789 val = mxs_lradc_read_ts_channel(lradc); 782 lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
790 if (val < 0) { 783 TOUCHSCREEN_VCHANNEL1);
791 mxs_lradc_enable_touch_detection(lradc); /* re-start */
792 return;
793 }
794 lradc->ts_y_pos = val;
795 mxs_lradc_prepare_x_pos(lradc); 784 mxs_lradc_prepare_x_pos(lradc);
796 return; 785 return;
797 786
798 case LRADC_SAMPLE_X: 787 case LRADC_SAMPLE_X:
799 val = mxs_lradc_read_ts_channel(lradc); 788 lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
800 if (val < 0) { 789 TOUCHSCREEN_VCHANNEL1);
801 mxs_lradc_enable_touch_detection(lradc); /* re-start */
802 return;
803 }
804 lradc->ts_x_pos = val;
805 mxs_lradc_prepare_pressure(lradc); 790 mxs_lradc_prepare_pressure(lradc);
806 return; 791 return;
807 792
808 case LRADC_SAMPLE_PRESSURE: 793 case LRADC_SAMPLE_PRESSURE:
809 lradc->ts_pressure = 794 lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
810 mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); 795 TOUCHSCREEN_VCHANNEL2,
796 TOUCHSCREEN_VCHANNEL1);
811 mxs_lradc_complete_touch_event(lradc); 797 mxs_lradc_complete_touch_event(lradc);
812 return; 798 return;
813 799
814 case LRADC_SAMPLE_VALID: 800 case LRADC_SAMPLE_VALID:
815 val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
816 mxs_lradc_finish_touch_event(lradc, 1); 801 mxs_lradc_finish_touch_event(lradc, 1);
817 break; 802 break;
818 } 803 }
@@ -844,9 +829,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
844 * used if doing raw sampling. 829 * used if doing raw sampling.
845 */ 830 */
846 if (lradc->soc == IMX28_LRADC) 831 if (lradc->soc == IMX28_LRADC)
847 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, 832 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
848 LRADC_CTRL1); 833 LRADC_CTRL1);
849 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); 834 mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
850 835
851 /* Enable / disable the divider per requirement */ 836 /* Enable / disable the divider per requirement */
852 if (test_bit(chan, &lradc->is_divided)) 837 if (test_bit(chan, &lradc->is_divided))
@@ -1090,9 +1075,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
1090{ 1075{
1091 /* stop all interrupts from firing */ 1076 /* stop all interrupts from firing */
1092 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | 1077 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
1093 LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) | 1078 LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
1094 LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5), 1079 LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
1095 LRADC_CTRL1);
1096 1080
1097 /* Power-down touchscreen touch-detect circuitry. */ 1081 /* Power-down touchscreen touch-detect circuitry. */
1098 mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); 1082 mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
@@ -1158,26 +1142,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
1158 struct iio_dev *iio = data; 1142 struct iio_dev *iio = data;
1159 struct mxs_lradc *lradc = iio_priv(iio); 1143 struct mxs_lradc *lradc = iio_priv(iio);
1160 unsigned long reg = readl(lradc->base + LRADC_CTRL1); 1144 unsigned long reg = readl(lradc->base + LRADC_CTRL1);
1145 uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
1161 const uint32_t ts_irq_mask = 1146 const uint32_t ts_irq_mask =
1162 LRADC_CTRL1_TOUCH_DETECT_IRQ | 1147 LRADC_CTRL1_TOUCH_DETECT_IRQ |
1163 LRADC_CTRL1_LRADC_IRQ(2) | 1148 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
1164 LRADC_CTRL1_LRADC_IRQ(3) | 1149 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
1165 LRADC_CTRL1_LRADC_IRQ(4) |
1166 LRADC_CTRL1_LRADC_IRQ(5);
1167 1150
1168 if (!(reg & mxs_lradc_irq_mask(lradc))) 1151 if (!(reg & mxs_lradc_irq_mask(lradc)))
1169 return IRQ_NONE; 1152 return IRQ_NONE;
1170 1153
1171 if (lradc->use_touchscreen && (reg & ts_irq_mask)) 1154 if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
1172 mxs_lradc_handle_touch(lradc); 1155 mxs_lradc_handle_touch(lradc);
1173 1156
1174 if (iio_buffer_enabled(iio)) 1157 /* Make sure we don't clear the next conversion's interrupt. */
1175 iio_trigger_poll(iio->trig); 1158 clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
1176 else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) 1159 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
1160 }
1161
1162 if (iio_buffer_enabled(iio)) {
1163 if (reg & lradc->buffer_vchans)
1164 iio_trigger_poll(iio->trig);
1165 } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
1177 complete(&lradc->completion); 1166 complete(&lradc->completion);
1167 }
1178 1168
1179 mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), 1169 mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
1180 LRADC_CTRL1);
1181 1170
1182 return IRQ_HANDLED; 1171 return IRQ_HANDLED;
1183} 1172}
@@ -1289,9 +1278,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
1289 } 1278 }
1290 1279
1291 if (lradc->soc == IMX28_LRADC) 1280 if (lradc->soc == IMX28_LRADC)
1292 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, 1281 mxs_lradc_reg_clear(lradc,
1293 LRADC_CTRL1); 1282 lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
1294 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); 1283 LRADC_CTRL1);
1284 mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
1295 1285
1296 for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { 1286 for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
1297 ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); 1287 ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
@@ -1324,10 +1314,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
1324 mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | 1314 mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
1325 LRADC_DELAY_KICK, LRADC_DELAY(0)); 1315 LRADC_DELAY_KICK, LRADC_DELAY(0));
1326 1316
1327 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); 1317 mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
1328 if (lradc->soc == IMX28_LRADC) 1318 if (lradc->soc == IMX28_LRADC)
1329 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, 1319 mxs_lradc_reg_clear(lradc,
1330 LRADC_CTRL1); 1320 lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
1321 LRADC_CTRL1);
1331 1322
1332 kfree(lradc->buffer); 1323 kfree(lradc->buffer);
1333 mutex_unlock(&lradc->lock); 1324 mutex_unlock(&lradc->lock);
@@ -1353,7 +1344,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
1353 if (lradc->use_touchbutton) 1344 if (lradc->use_touchbutton)
1354 rsvd_chans++; 1345 rsvd_chans++;
1355 if (lradc->use_touchscreen) 1346 if (lradc->use_touchscreen)
1356 rsvd_chans++; 1347 rsvd_chans += 2;
1357 1348
1358 /* Test for attempts to map channels with special mode of operation. */ 1349 /* Test for attempts to map channels with special mode of operation. */
1359 if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) 1350 if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
@@ -1413,6 +1404,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
1413 .channel = 8, 1404 .channel = 8,
1414 .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, 1405 .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
1415 }, 1406 },
1407 /* Hidden channel to keep indexes */
1408 {
1409 .type = IIO_TEMP,
1410 .indexed = 1,
1411 .scan_index = -1,
1412 .channel = 9,
1413 },
1416 MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ 1414 MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */
1417 MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ 1415 MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */
1418 MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ 1416 MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */
@@ -1583,6 +1581,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
1583 1581
1584 touch_ret = mxs_lradc_probe_touchscreen(lradc, node); 1582 touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
1585 1583
1584 if (touch_ret == 0)
1585 lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
1586 else
1587 lradc->buffer_vchans = BUFFER_VCHANS_ALL;
1588
1586 /* Grab all IRQ sources */ 1589 /* Grab all IRQ sources */
1587 for (i = 0; i < of_cfg->irq_count; i++) { 1590 for (i = 0; i < of_cfg->irq_count; i++) {
1588 lradc->irq[i] = platform_get_irq(pdev, i); 1591 lradc->irq[i] = platform_get_irq(pdev, i);
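With the touchscreen enabled, two LRADC slots are reserved as virtual touch channels, so the buffered-capture path above restricts itself to the remaining slots via buffer_vchans. A minimal sketch of that selection; the mask values here are an assumption for illustration only, the real definitions live earlier in this patch:

	/* Assumed values, for illustration only. */
	#define BUFFER_VCHANS_ALL	0xff	/* no touchscreen: all virtual channels usable */
	#define BUFFER_VCHANS_LIMITED	0x3f	/* touchscreen active: top two slots reserved */

	lradc->buffer_vchans = (touch_ret == 0) ? BUFFER_VCHANS_LIMITED
						: BUFFER_VCHANS_ALL;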
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 017d2f8379b7..c17893b4918c 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -18,6 +18,7 @@
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/bitops.h>
21 22
22#include <linux/iio/iio.h> 23#include <linux/iio/iio.h>
23#include <linux/iio/sysfs.h> 24#include <linux/iio/sysfs.h>
@@ -68,7 +69,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev,
68 break; 69 break;
69 case IIO_ANGL_VEL: 70 case IIO_ANGL_VEL:
70 vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); 71 vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
71 vel = (vel << 4) >> 4; 72 vel = sign_extend32(vel, 11);
72 *val = vel; 73 *val = vel;
73 break; 74 break;
74 default: 75 default:
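sign_extend32(vel, 11) states the intent of the old shift-up/shift-down pair directly: the velocity is a 12-bit two's-complement field whose sign bit is bit 11. A standalone sketch of the equivalence (plain C, mirroring the kernel helper's definition):

	#include <assert.h>
	#include <stdint.h>

	/* Sign-extend the low (index + 1) bits of value. */
	static int32_t sign_extend32(uint32_t value, int index)
	{
		uint8_t shift = 31 - index;

		return (int32_t)(value << shift) >> shift;
	}

	int main(void)
	{
		assert(sign_extend32(0xFFF, 11) == -1);		/* 12-bit all-ones is -1 */
		assert(sign_extend32(0x7FF, 11) == 2047);	/* positive values unchanged */
		return 0;
	}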
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index f88b08877025..1e25133d35e2 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -208,7 +208,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
208 trip_cnt, GFP_KERNEL); 208 trip_cnt, GFP_KERNEL);
209 if (!int34x_thermal_zone->aux_trips) { 209 if (!int34x_thermal_zone->aux_trips) {
210 ret = -ENOMEM; 210 ret = -ENOMEM;
211 goto free_mem; 211 goto err_trip_alloc;
212 } 212 }
213 trip_mask = BIT(trip_cnt) - 1; 213 trip_mask = BIT(trip_cnt) - 1;
214 int34x_thermal_zone->aux_trip_nr = trip_cnt; 214 int34x_thermal_zone->aux_trip_nr = trip_cnt;
@@ -248,14 +248,15 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
248 0, 0); 248 0, 0);
249 if (IS_ERR(int34x_thermal_zone->zone)) { 249 if (IS_ERR(int34x_thermal_zone->zone)) {
250 ret = PTR_ERR(int34x_thermal_zone->zone); 250 ret = PTR_ERR(int34x_thermal_zone->zone);
251 goto free_lpat; 251 goto err_thermal_zone;
252 } 252 }
253 253
254 return int34x_thermal_zone; 254 return int34x_thermal_zone;
255 255
256free_lpat: 256err_thermal_zone:
257 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); 257 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
258free_mem: 258 kfree(int34x_thermal_zone->aux_trips);
259err_trip_alloc:
259 kfree(int34x_thermal_zone); 260 kfree(int34x_thermal_zone);
260 return ERR_PTR(ret); 261 return ERR_PTR(ret);
261} 262}
@@ -266,6 +267,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
266{ 267{
267 thermal_zone_device_unregister(int34x_thermal_zone->zone); 268 thermal_zone_device_unregister(int34x_thermal_zone->zone);
268 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); 269 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
270 kfree(int34x_thermal_zone->aux_trips);
269 kfree(int34x_thermal_zone); 271 kfree(int34x_thermal_zone);
270} 272}
271EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); 273EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
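The relabelled error path unwinds in strict reverse order of allocation and, together with the kfree() added to the remove path, no longer leaks aux_trips. A generic sketch of the pattern with hypothetical helpers (not this driver's code):

	struct foo *foo_create(int n)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
		int ret;

		if (!f)
			return ERR_PTR(-ENOMEM);

		f->trips = kcalloc(n, sizeof(*f->trips), GFP_KERNEL);
		if (!f->trips) {
			ret = -ENOMEM;
			goto err_trip_alloc;
		}

		f->zone = zone_register(f);	/* hypothetical */
		if (IS_ERR(f->zone)) {
			ret = PTR_ERR(f->zone);
			goto err_zone;
		}
		return f;

	err_zone:
		kfree(f->trips);
	err_trip_alloc:
		kfree(f);
		return ERR_PTR(ret);
	}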
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 1fc54ab911d2..1d30b0975651 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -682,6 +682,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
682 682
683 if (on) { 683 if (on) {
684 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); 684 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
685 con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
685 interrupt_en = 686 interrupt_en =
686 (of_thermal_is_trip_valid(tz, 7) 687 (of_thermal_is_trip_valid(tz, 7)
687 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | 688 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
@@ -704,9 +705,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
704 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; 705 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
705 } else { 706 } else {
706 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); 707 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
708 con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
707 interrupt_en = 0; /* Disable all interrupts */ 709 interrupt_en = 0; /* Disable all interrupts */
708 } 710 }
709 con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
710 711
711 writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); 712 writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
712 writel(con, data->base + EXYNOS_TMU_REG_CONTROL); 713 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 48491d1a81d6..174d3bcf8bd7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -899,6 +899,22 @@ thermal_cooling_device_trip_point_show(struct device *dev,
899 return sprintf(buf, "%d\n", instance->trip); 899 return sprintf(buf, "%d\n", instance->trip);
900} 900}
901 901
902static struct attribute *cooling_device_attrs[] = {
903 &dev_attr_cdev_type.attr,
904 &dev_attr_max_state.attr,
905 &dev_attr_cur_state.attr,
906 NULL,
907};
908
909static const struct attribute_group cooling_device_attr_group = {
910 .attrs = cooling_device_attrs,
911};
912
913static const struct attribute_group *cooling_device_attr_groups[] = {
914 &cooling_device_attr_group,
915 NULL,
916};
917
902/* Device management */ 918/* Device management */
903 919
904/** 920/**
@@ -1130,6 +1146,7 @@ __thermal_cooling_device_register(struct device_node *np,
1130 cdev->ops = ops; 1146 cdev->ops = ops;
1131 cdev->updated = false; 1147 cdev->updated = false;
1132 cdev->device.class = &thermal_class; 1148 cdev->device.class = &thermal_class;
1149 cdev->device.groups = cooling_device_attr_groups;
1133 cdev->devdata = devdata; 1150 cdev->devdata = devdata;
1134 dev_set_name(&cdev->device, "cooling_device%d", cdev->id); 1151 dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
1135 result = device_register(&cdev->device); 1152 result = device_register(&cdev->device);
@@ -1139,21 +1156,6 @@ __thermal_cooling_device_register(struct device_node *np,
1139 return ERR_PTR(result); 1156 return ERR_PTR(result);
1140 } 1157 }
1141 1158
1142 /* sys I/F */
1143 if (type) {
1144 result = device_create_file(&cdev->device, &dev_attr_cdev_type);
1145 if (result)
1146 goto unregister;
1147 }
1148
1149 result = device_create_file(&cdev->device, &dev_attr_max_state);
1150 if (result)
1151 goto unregister;
1152
1153 result = device_create_file(&cdev->device, &dev_attr_cur_state);
1154 if (result)
1155 goto unregister;
1156
1157 /* Add 'this' new cdev to the global cdev list */ 1159 /* Add 'this' new cdev to the global cdev list */
1158 mutex_lock(&thermal_list_lock); 1160 mutex_lock(&thermal_list_lock);
1159 list_add(&cdev->node, &thermal_cdev_list); 1161 list_add(&cdev->node, &thermal_cdev_list);
@@ -1163,11 +1165,6 @@ __thermal_cooling_device_register(struct device_node *np,
1163 bind_cdev(cdev); 1165 bind_cdev(cdev);
1164 1166
1165 return cdev; 1167 return cdev;
1166
1167unregister:
1168 release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
1169 device_unregister(&cdev->device);
1170 return ERR_PTR(result);
1171} 1168}
1172 1169
1173/** 1170/**
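Attaching the attributes through dev->groups before device_register() lets the driver core create the sysfs files atomically with the device and remove them on unregister, which is what makes the device_create_file() calls and their partial-failure unwind above redundant. A minimal sketch with a hypothetical read-only attribute:

	static ssize_t state_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "0\n");	/* placeholder value */
	}
	static DEVICE_ATTR_RO(state);

	static struct attribute *cdev_attrs[] = {
		&dev_attr_state.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(cdev);			/* provides cdev_groups */

	/* at registration time: */
	cdev->device.groups = cdev_groups;
	result = device_register(&cdev->device);	/* files appear with the device */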
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index d7b198c400c7..ce24182f8514 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -210,18 +210,6 @@ bfin_jc_chars_in_buffer(struct tty_struct *tty)
210 return circ_cnt(&bfin_jc_write_buf); 210 return circ_cnt(&bfin_jc_write_buf);
211} 211}
212 212
213static void
214bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
215{
216 unsigned long expire = jiffies + timeout;
217 while (!circ_empty(&bfin_jc_write_buf)) {
218 if (signal_pending(current))
219 break;
220 if (time_after(jiffies, expire))
221 break;
222 }
223}
224
225static const struct tty_operations bfin_jc_ops = { 213static const struct tty_operations bfin_jc_ops = {
226 .open = bfin_jc_open, 214 .open = bfin_jc_open,
227 .close = bfin_jc_close, 215 .close = bfin_jc_close,
@@ -230,7 +218,6 @@ static const struct tty_operations bfin_jc_ops = {
230 .flush_chars = bfin_jc_flush_chars, 218 .flush_chars = bfin_jc_flush_chars,
231 .write_room = bfin_jc_write_room, 219 .write_room = bfin_jc_write_room,
232 .chars_in_buffer = bfin_jc_chars_in_buffer, 220 .chars_in_buffer = bfin_jc_chars_in_buffer,
233 .wait_until_sent = bfin_jc_wait_until_sent,
234}; 221};
235 222
236static int __init bfin_jc_init(void) 223static int __init bfin_jc_init(void)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e3b9570a1eff..deae122c9c4b 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2138,8 +2138,8 @@ int serial8250_do_startup(struct uart_port *port)
2138 /* 2138 /*
2139 * Clear the interrupt registers. 2139 * Clear the interrupt registers.
2140 */ 2140 */
2141 if (serial_port_in(port, UART_LSR) & UART_LSR_DR) 2141 serial_port_in(port, UART_LSR);
2142 serial_port_in(port, UART_RX); 2142 serial_port_in(port, UART_RX);
2143 serial_port_in(port, UART_IIR); 2143 serial_port_in(port, UART_IIR);
2144 serial_port_in(port, UART_MSR); 2144 serial_port_in(port, UART_MSR);
2145 2145
@@ -2300,8 +2300,8 @@ dont_test_tx_en:
2300 * saved flags to avoid getting false values from polling 2300 * saved flags to avoid getting false values from polling
2301 * routines or the previous session. 2301 * routines or the previous session.
2302 */ 2302 */
2303 if (serial_port_in(port, UART_LSR) & UART_LSR_DR) 2303 serial_port_in(port, UART_LSR);
2304 serial_port_in(port, UART_RX); 2304 serial_port_in(port, UART_RX);
2305 serial_port_in(port, UART_IIR); 2305 serial_port_in(port, UART_IIR);
2306 serial_port_in(port, UART_MSR); 2306 serial_port_in(port, UART_MSR);
2307 up->lsr_saved_flags = 0; 2307 up->lsr_saved_flags = 0;
@@ -2394,8 +2394,7 @@ void serial8250_do_shutdown(struct uart_port *port)
2394 * Read data port to reset things, and then unlink from 2394 * Read data port to reset things, and then unlink from
2395 * the IRQ chain. 2395 * the IRQ chain.
2396 */ 2396 */
2397 if (serial_port_in(port, UART_LSR) & UART_LSR_DR) 2397 serial_port_in(port, UART_RX);
2398 serial_port_in(port, UART_RX);
2399 serial8250_rpm_put(up); 2398 serial8250_rpm_put(up);
2400 2399
2401 del_timer_sync(&up->timer); 2400 del_timer_sync(&up->timer);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index e60116235836..2ab229ddee38 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -59,6 +59,8 @@ struct dw8250_data {
59 u8 usr_reg; 59 u8 usr_reg;
60 int last_mcr; 60 int last_mcr;
61 int line; 61 int line;
62 int msr_mask_on;
63 int msr_mask_off;
62 struct clk *clk; 64 struct clk *clk;
63 struct clk *pclk; 65 struct clk *pclk;
64 struct reset_control *rst; 66 struct reset_control *rst;
@@ -81,6 +83,12 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
81 value &= ~UART_MSR_DCTS; 83 value &= ~UART_MSR_DCTS;
82 } 84 }
83 85
86 /* Override any modem control signals if needed */
87 if (offset == UART_MSR) {
88 value |= d->msr_mask_on;
89 value &= ~d->msr_mask_off;
90 }
91
84 return value; 92 return value;
85} 93}
86 94
@@ -334,6 +342,30 @@ static int dw8250_probe_of(struct uart_port *p,
334 if (id >= 0) 342 if (id >= 0)
335 p->line = id; 343 p->line = id;
336 344
345 if (of_property_read_bool(np, "dcd-override")) {
346 /* Always report DCD as active */
347 data->msr_mask_on |= UART_MSR_DCD;
348 data->msr_mask_off |= UART_MSR_DDCD;
349 }
350
351 if (of_property_read_bool(np, "dsr-override")) {
352 /* Always report DSR as active */
353 data->msr_mask_on |= UART_MSR_DSR;
354 data->msr_mask_off |= UART_MSR_DDSR;
355 }
356
357 if (of_property_read_bool(np, "cts-override")) {
 358		/* Always report CTS as active */
 359		data->msr_mask_on |= UART_MSR_CTS;
 360		data->msr_mask_off |= UART_MSR_DCTS;
361 }
362
363 if (of_property_read_bool(np, "ri-override")) {
364 /* Always report Ring indicator as inactive */
365 data->msr_mask_off |= UART_MSR_RI;
366 data->msr_mask_off |= UART_MSR_TERI;
367 }
368
337 /* clock got configured through clk api, all done */ 369 /* clock got configured through clk api, all done */
338 if (p->uartclk) 370 if (p->uartclk)
339 return 0; 371 return 0;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index daf2c82984e9..892eb32cdef4 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -69,7 +69,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
69 "Please send the output of lspci -vv, this\n" 69 "Please send the output of lspci -vv, this\n"
70 "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" 70 "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
71 "manufacturer and name of serial board or\n" 71 "manufacturer and name of serial board or\n"
72 "modem board to rmk+serial@arm.linux.org.uk.\n", 72 "modem board to <linux-serial@vger.kernel.org>.\n",
73 pci_name(dev), str, dev->vendor, dev->device, 73 pci_name(dev), str, dev->vendor, dev->device,
74 dev->subsystem_vendor, dev->subsystem_device); 74 dev->subsystem_vendor, dev->subsystem_device);
75} 75}
@@ -1989,13 +1989,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1989 }, 1989 },
1990 { 1990 {
1991 .vendor = PCI_VENDOR_ID_INTEL, 1991 .vendor = PCI_VENDOR_ID_INTEL,
1992 .device = PCI_DEVICE_ID_INTEL_QRK_UART,
1993 .subvendor = PCI_ANY_ID,
1994 .subdevice = PCI_ANY_ID,
1995 .setup = pci_default_setup,
1996 },
1997 {
1998 .vendor = PCI_VENDOR_ID_INTEL,
1999 .device = PCI_DEVICE_ID_INTEL_BSW_UART1, 1992 .device = PCI_DEVICE_ID_INTEL_BSW_UART1,
2000 .subvendor = PCI_ANY_ID, 1993 .subvendor = PCI_ANY_ID,
2001 .subdevice = PCI_ANY_ID, 1994 .subdevice = PCI_ANY_ID,
@@ -2201,13 +2194,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2201 */ 2194 */
2202 { 2195 {
2203 .vendor = PCI_VENDOR_ID_PLX, 2196 .vendor = PCI_VENDOR_ID_PLX,
2204 .device = PCI_DEVICE_ID_PLX_9030,
2205 .subvendor = PCI_SUBVENDOR_ID_PERLE,
2206 .subdevice = PCI_ANY_ID,
2207 .setup = pci_default_setup,
2208 },
2209 {
2210 .vendor = PCI_VENDOR_ID_PLX,
2211 .device = PCI_DEVICE_ID_PLX_9050, 2197 .device = PCI_DEVICE_ID_PLX_9050,
2212 .subvendor = PCI_SUBVENDOR_ID_EXSYS, 2198 .subvendor = PCI_SUBVENDOR_ID_EXSYS,
2213 .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055, 2199 .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055,
@@ -5415,10 +5401,6 @@ static struct pci_device_id serial_pci_tbl[] = {
5415 PCI_ANY_ID, PCI_ANY_ID, 5401 PCI_ANY_ID, PCI_ANY_ID,
5416 0, 0, pbn_b0_bt_2_115200 }, 5402 0, 0, pbn_b0_bt_2_115200 },
5417 5403
5418 { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S,
5419 PCI_ANY_ID, PCI_ANY_ID,
5420 0, 0, pbn_b0_bt_2_115200 },
5421
5422 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, 5404 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
5423 PCI_ANY_ID, PCI_ANY_ID, 5405 PCI_ANY_ID, PCI_ANY_ID,
5424 0, 0, pbn_wch384_4 }, 5406 0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 846552bff67d..4e959c43f680 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -47,6 +47,7 @@
47#include <linux/gpio/consumer.h> 47#include <linux/gpio/consumer.h>
48#include <linux/err.h> 48#include <linux/err.h>
49#include <linux/irq.h> 49#include <linux/irq.h>
50#include <linux/suspend.h>
50 51
51#include <asm/io.h> 52#include <asm/io.h>
52#include <asm/ioctls.h> 53#include <asm/ioctls.h>
@@ -173,6 +174,12 @@ struct atmel_uart_port {
173 bool ms_irq_enabled; 174 bool ms_irq_enabled;
174 bool is_usart; /* usart or uart */ 175 bool is_usart; /* usart or uart */
175 struct timer_list uart_timer; /* uart timer */ 176 struct timer_list uart_timer; /* uart timer */
177
178 bool suspended;
179 unsigned int pending;
180 unsigned int pending_status;
181 spinlock_t lock_suspended;
182
176 int (*prepare_rx)(struct uart_port *port); 183 int (*prepare_rx)(struct uart_port *port);
177 int (*prepare_tx)(struct uart_port *port); 184 int (*prepare_tx)(struct uart_port *port);
178 void (*schedule_rx)(struct uart_port *port); 185 void (*schedule_rx)(struct uart_port *port);
@@ -1179,12 +1186,15 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1179{ 1186{
1180 struct uart_port *port = dev_id; 1187 struct uart_port *port = dev_id;
1181 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1188 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1182 unsigned int status, pending, pass_counter = 0; 1189 unsigned int status, pending, mask, pass_counter = 0;
1183 bool gpio_handled = false; 1190 bool gpio_handled = false;
1184 1191
1192 spin_lock(&atmel_port->lock_suspended);
1193
1185 do { 1194 do {
1186 status = atmel_get_lines_status(port); 1195 status = atmel_get_lines_status(port);
1187 pending = status & UART_GET_IMR(port); 1196 mask = UART_GET_IMR(port);
1197 pending = status & mask;
1188 if (!gpio_handled) { 1198 if (!gpio_handled) {
1189 /* 1199 /*
1190 * Dealing with GPIO interrupt 1200 * Dealing with GPIO interrupt
@@ -1206,11 +1216,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1206 if (!pending) 1216 if (!pending)
1207 break; 1217 break;
1208 1218
1219 if (atmel_port->suspended) {
1220 atmel_port->pending |= pending;
1221 atmel_port->pending_status = status;
1222 UART_PUT_IDR(port, mask);
1223 pm_system_wakeup();
1224 break;
1225 }
1226
1209 atmel_handle_receive(port, pending); 1227 atmel_handle_receive(port, pending);
1210 atmel_handle_status(port, pending, status); 1228 atmel_handle_status(port, pending, status);
1211 atmel_handle_transmit(port, pending); 1229 atmel_handle_transmit(port, pending);
1212 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 1230 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1213 1231
1232 spin_unlock(&atmel_port->lock_suspended);
1233
1214 return pass_counter ? IRQ_HANDLED : IRQ_NONE; 1234 return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1215} 1235}
1216 1236
@@ -1742,7 +1762,8 @@ static int atmel_startup(struct uart_port *port)
1742 /* 1762 /*
1743 * Allocate the IRQ 1763 * Allocate the IRQ
1744 */ 1764 */
1745 retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, 1765 retval = request_irq(port->irq, atmel_interrupt,
1766 IRQF_SHARED | IRQF_COND_SUSPEND,
1746 tty ? tty->name : "atmel_serial", port); 1767 tty ? tty->name : "atmel_serial", port);
1747 if (retval) { 1768 if (retval) {
1748 dev_err(port->dev, "atmel_startup - Can't get irq\n"); 1769 dev_err(port->dev, "atmel_startup - Can't get irq\n");
@@ -2513,8 +2534,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
2513 2534
2514 /* we can not wake up if we're running on slow clock */ 2535 /* we can not wake up if we're running on slow clock */
2515 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); 2536 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2516 if (atmel_serial_clk_will_stop()) 2537 if (atmel_serial_clk_will_stop()) {
2538 unsigned long flags;
2539
2540 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2541 atmel_port->suspended = true;
2542 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2517 device_set_wakeup_enable(&pdev->dev, 0); 2543 device_set_wakeup_enable(&pdev->dev, 0);
2544 }
2518 2545
2519 uart_suspend_port(&atmel_uart, port); 2546 uart_suspend_port(&atmel_uart, port);
2520 2547
@@ -2525,6 +2552,18 @@ static int atmel_serial_resume(struct platform_device *pdev)
2525{ 2552{
2526 struct uart_port *port = platform_get_drvdata(pdev); 2553 struct uart_port *port = platform_get_drvdata(pdev);
2527 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2554 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2555 unsigned long flags;
2556
2557 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2558 if (atmel_port->pending) {
2559 atmel_handle_receive(port, atmel_port->pending);
2560 atmel_handle_status(port, atmel_port->pending,
2561 atmel_port->pending_status);
2562 atmel_handle_transmit(port, atmel_port->pending);
2563 atmel_port->pending = 0;
2564 }
2565 atmel_port->suspended = false;
2566 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2528 2567
2529 uart_resume_port(&atmel_uart, port); 2568 uart_resume_port(&atmel_uart, port);
2530 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); 2569 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
@@ -2593,6 +2632,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
2593 port->backup_imr = 0; 2632 port->backup_imr = 0;
2594 port->uart.line = ret; 2633 port->uart.line = ret;
2595 2634
2635 spin_lock_init(&port->lock_suspended);
2636
2596 ret = atmel_init_gpios(port, &pdev->dev); 2637 ret = atmel_init_gpios(port, &pdev->dev);
2597 if (ret < 0) 2638 if (ret < 0)
2598 dev_err(&pdev->dev, "%s", 2639 dev_err(&pdev->dev, "%s",
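IRQF_COND_SUSPEND tells the core that this shared handler may legitimately run while the device is suspended; under lock_suspended the handler then only latches what fired and signals a wakeup, and the latched status is replayed from resume(). The defer-and-replay shape, in outline (simplified, with hypothetical helpers):

	static irqreturn_t uart_irq(int irq, void *dev_id)
	{
		struct port_priv *p = dev_id;			/* hypothetical */
		unsigned int pending = read_pending_irqs(p);	/* hypothetical */

		spin_lock(&p->lock_suspended);
		if (p->suspended) {
			p->pending |= pending;	/* replayed from resume() */
			pm_system_wakeup();	/* report a wakeup event */
		} else {
			handle_irqs(p, pending);	/* hypothetical */
		}
		spin_unlock(&p->lock_suspended);

		return pending ? IRQ_HANDLED : IRQ_NONE;
	}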
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 7ff61e24a195..33fb94f78967 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -133,10 +133,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
133 if (of_find_property(np, "no-loopback-test", NULL)) 133 if (of_find_property(np, "no-loopback-test", NULL))
134 port->flags |= UPF_SKIP_TEST; 134 port->flags |= UPF_SKIP_TEST;
135 135
136 ret = of_alias_get_id(np, "serial");
137 if (ret >= 0)
138 port->line = ret;
139
140 port->dev = &ofdev->dev; 136 port->dev = &ofdev->dev;
141 137
142 switch (type) { 138 switch (type) {
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 594b63331ef4..bca975f5093b 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -293,8 +293,10 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
293 293
294 ims = serial_in(port, SPRD_IMSR); 294 ims = serial_in(port, SPRD_IMSR);
295 295
296 if (!ims) 296 if (!ims) {
297 spin_unlock(&port->lock);
297 return IRQ_NONE; 298 return IRQ_NONE;
299 }
298 300
299 serial_out(port, SPRD_ICLR, ~0); 301 serial_out(port, SPRD_ICLR, ~0);
300 302
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 51f066aa375e..2bb4dfc02873 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(start_tty);
1028/* We limit tty time update visibility to every 8 seconds or so. */ 1028/* We limit tty time update visibility to every 8 seconds or so. */
1029static void tty_update_time(struct timespec *time) 1029static void tty_update_time(struct timespec *time)
1030{ 1030{
1031 unsigned long sec = get_seconds() & ~7; 1031 unsigned long sec = get_seconds();
1032 if ((long)(sec - time->tv_sec) > 0) 1032 if (abs(sec - time->tv_sec) & ~7)
1033 time->tv_sec = sec; 1033 time->tv_sec = sec;
1034} 1034}
1035 1035
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index a5cf253b2544..632fc8152061 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
217#endif 217#endif
218 if (!timeout) 218 if (!timeout)
219 timeout = MAX_SCHEDULE_TIMEOUT; 219 timeout = MAX_SCHEDULE_TIMEOUT;
220 if (wait_event_interruptible_timeout(tty->write_wait, 220
221 !tty_chars_in_buffer(tty), timeout) >= 0) { 221 timeout = wait_event_interruptible_timeout(tty->write_wait,
222 if (tty->ops->wait_until_sent) 222 !tty_chars_in_buffer(tty), timeout);
223 tty->ops->wait_until_sent(tty, timeout); 223 if (timeout <= 0)
224 } 224 return;
225
226 if (timeout == MAX_SCHEDULE_TIMEOUT)
227 timeout = 0;
228
229 if (tty->ops->wait_until_sent)
230 tty->ops->wait_until_sent(tty, timeout);
225} 231}
226EXPORT_SYMBOL(tty_wait_until_sent); 232EXPORT_SYMBOL(tty_wait_until_sent);
227 233
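The rework relies on the return-value convention of wait_event_interruptible_timeout(): the remaining jiffies when the condition became true, 0 when the timeout ran out first, or -ERESTARTSYS when a signal arrived; an infinite wait is then mapped back to 0 because the wait_until_sent() hook treats 0 as "no timeout". A small sketch of that convention (wq and done are placeholders):

	long remaining = wait_event_interruptible_timeout(wq, done, HZ);

	if (remaining < 0)
		pr_debug("interrupted by a signal\n");
	else if (remaining == 0)
		pr_debug("timed out after one second\n");
	else
		pr_debug("condition met with %ld jiffies to spare\n", remaining);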
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e78720b59d67..683617714e7c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1650,6 +1650,8 @@ static int acm_reset_resume(struct usb_interface *intf)
1650 1650
1651static const struct usb_device_id acm_ids[] = { 1651static const struct usb_device_id acm_ids[] = {
1652 /* quirky and broken devices */ 1652 /* quirky and broken devices */
1653 { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
1654 .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
1653 { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ 1655 { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
1654 .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ 1656 .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
1655 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ 1657 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 66abdbcfbfa5..11635537c052 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb)
501 as->status = urb->status; 501 as->status = urb->status;
502 signr = as->signr; 502 signr = as->signr;
503 if (signr) { 503 if (signr) {
504 memset(&sinfo, 0, sizeof(sinfo));
504 sinfo.si_signo = as->signr; 505 sinfo.si_signo = as->signr;
505 sinfo.si_errno = as->status; 506 sinfo.si_errno = as->status;
506 sinfo.si_code = SI_ASYNCIO; 507 sinfo.si_code = SI_ASYNCIO;
@@ -2382,6 +2383,7 @@ static void usbdev_remove(struct usb_device *udev)
2382 wake_up_all(&ps->wait); 2383 wake_up_all(&ps->wait);
2383 list_del_init(&ps->list); 2384 list_del_init(&ps->list);
2384 if (ps->discsignr) { 2385 if (ps->discsignr) {
2386 memset(&sinfo, 0, sizeof(sinfo));
2385 sinfo.si_signo = ps->discsignr; 2387 sinfo.si_signo = ps->discsignr;
2386 sinfo.si_errno = EPIPE; 2388 sinfo.si_errno = EPIPE;
2387 sinfo.si_code = SI_ASYNCIO; 2389 sinfo.si_code = SI_ASYNCIO;
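struct siginfo is a union with internal padding, so any byte not explicitly written here would otherwise reach user space as stale kernel stack when the signal is delivered; zeroing the whole structure first closes that information leak. The pattern in isolation (signr and err are placeholders):

	struct siginfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));	/* no uninitialized padding escapes */
	sinfo.si_signo = signr;
	sinfo.si_errno = err;
	sinfo.si_code  = SI_ASYNCIO;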
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 172d64e585b6..52e0c4e5e48e 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -205,6 +205,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
205 omap->irq0_offset, value); 205 omap->irq0_offset, value);
206} 206}
207 207
208static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
209{
210 dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
211 omap->irqmisc_offset, value);
212}
213
214static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
215{
216 dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
217 omap->irq0_offset, value);
218}
219
208static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, 220static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
209 enum omap_dwc3_vbus_id_status status) 221 enum omap_dwc3_vbus_id_status status)
210{ 222{
@@ -345,9 +357,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
345 357
346static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) 358static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
347{ 359{
360 u32 reg;
361
348 /* disable all IRQs */ 362 /* disable all IRQs */
349 dwc3_omap_write_irqmisc_set(omap, 0x00); 363 reg = USBOTGSS_IRQO_COREIRQ_ST;
350 dwc3_omap_write_irq0_set(omap, 0x00); 364 dwc3_omap_write_irq0_clr(omap, reg);
365
366 reg = (USBOTGSS_IRQMISC_OEVT |
367 USBOTGSS_IRQMISC_DRVVBUS_RISE |
368 USBOTGSS_IRQMISC_CHRGVBUS_RISE |
369 USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
370 USBOTGSS_IRQMISC_IDPULLUP_RISE |
371 USBOTGSS_IRQMISC_DRVVBUS_FALL |
372 USBOTGSS_IRQMISC_CHRGVBUS_FALL |
373 USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
374 USBOTGSS_IRQMISC_IDPULLUP_FALL);
375
376 dwc3_omap_write_irqmisc_clr(omap, reg);
351} 377}
352 378
353static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); 379static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
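On these set/clear register pairs a write only affects the bits that are set in the value written, so the old code's write of 0x00 to the *_SET registers disabled nothing; masking interrupts has to go through the matching *_CLR register with the full bit mask. The idiom, with placeholder register offsets:

	/* Placeholder offsets; each write touches only the bits set in 'mask'. */
	writel(mask, base + IRQENABLE_SET);	/* enable the bits in mask */
	writel(mask, base + IRQENABLE_CLR);	/* disable the bits in mask */
	writel(0,    base + IRQENABLE_SET);	/* no effect at all */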
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 75648145dc1b..c42765b3a060 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1161,7 +1161,6 @@ static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
1161 if (desc->opts_mutex) 1161 if (desc->opts_mutex)
1162 mutex_lock(desc->opts_mutex); 1162 mutex_lock(desc->opts_mutex);
1163 memcpy(desc->ext_compat_id, page, l); 1163 memcpy(desc->ext_compat_id, page, l);
1164 desc->ext_compat_id[l] = '\0';
1165 1164
1166 if (desc->opts_mutex) 1165 if (desc->opts_mutex)
1167 mutex_unlock(desc->opts_mutex); 1166 mutex_unlock(desc->opts_mutex);
@@ -1192,7 +1191,6 @@ static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
1192 if (desc->opts_mutex) 1191 if (desc->opts_mutex)
1193 mutex_lock(desc->opts_mutex); 1192 mutex_lock(desc->opts_mutex);
1194 memcpy(desc->ext_compat_id + 8, page, l); 1193 memcpy(desc->ext_compat_id + 8, page, l);
1195 desc->ext_compat_id[l + 8] = '\0';
1196 1194
1197 if (desc->opts_mutex) 1195 if (desc->opts_mutex)
1198 mutex_unlock(desc->opts_mutex); 1196 mutex_unlock(desc->opts_mutex);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index af98b096af2f..175c9956cbe3 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -144,10 +144,9 @@ struct ffs_io_data {
144 bool read; 144 bool read;
145 145
146 struct kiocb *kiocb; 146 struct kiocb *kiocb;
147 const struct iovec *iovec; 147 struct iov_iter data;
148 unsigned long nr_segs; 148 const void *to_free;
149 char __user *buf; 149 char *buf;
150 size_t len;
151 150
152 struct mm_struct *mm; 151 struct mm_struct *mm;
153 struct work_struct work; 152 struct work_struct work;
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
649 io_data->req->actual; 648 io_data->req->actual;
650 649
651 if (io_data->read && ret > 0) { 650 if (io_data->read && ret > 0) {
652 int i;
653 size_t pos = 0;
654
655 /*
656 * Since req->length may be bigger than io_data->len (after
657 * being rounded up to maxpacketsize), we may end up with more
658 * data then user space has space for.
659 */
660 ret = min_t(int, ret, io_data->len);
661
662 use_mm(io_data->mm); 651 use_mm(io_data->mm);
663 for (i = 0; i < io_data->nr_segs; i++) { 652 ret = copy_to_iter(io_data->buf, ret, &io_data->data);
664 size_t len = min_t(size_t, ret - pos, 653 if (iov_iter_count(&io_data->data))
665 io_data->iovec[i].iov_len); 654 ret = -EFAULT;
666 if (!len)
667 break;
668 if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
669 &io_data->buf[pos], len))) {
670 ret = -EFAULT;
671 break;
672 }
673 pos += len;
674 }
675 unuse_mm(io_data->mm); 655 unuse_mm(io_data->mm);
676 } 656 }
677 657
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
684 664
685 io_data->kiocb->private = NULL; 665 io_data->kiocb->private = NULL;
686 if (io_data->read) 666 if (io_data->read)
687 kfree(io_data->iovec); 667 kfree(io_data->to_free);
688 kfree(io_data->buf); 668 kfree(io_data->buf);
689 kfree(io_data); 669 kfree(io_data);
690} 670}
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
743 * before the waiting completes, so do not assign to 'gadget' earlier 723 * before the waiting completes, so do not assign to 'gadget' earlier
744 */ 724 */
745 struct usb_gadget *gadget = epfile->ffs->gadget; 725 struct usb_gadget *gadget = epfile->ffs->gadget;
726 size_t copied;
746 727
747 spin_lock_irq(&epfile->ffs->eps_lock); 728 spin_lock_irq(&epfile->ffs->eps_lock);
748 /* In the meantime, endpoint got disabled or changed. */ 729 /* In the meantime, endpoint got disabled or changed. */
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
750 spin_unlock_irq(&epfile->ffs->eps_lock); 731 spin_unlock_irq(&epfile->ffs->eps_lock);
751 return -ESHUTDOWN; 732 return -ESHUTDOWN;
752 } 733 }
734 data_len = iov_iter_count(&io_data->data);
753 /* 735 /*
754 * Controller may require buffer size to be aligned to 736 * Controller may require buffer size to be aligned to
755 * maxpacketsize of an out endpoint. 737 * maxpacketsize of an out endpoint.
756 */ 738 */
757 data_len = io_data->read ? 739 if (io_data->read)
758 usb_ep_align_maybe(gadget, ep->ep, io_data->len) : 740 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
759 io_data->len;
760 spin_unlock_irq(&epfile->ffs->eps_lock); 741 spin_unlock_irq(&epfile->ffs->eps_lock);
761 742
762 data = kmalloc(data_len, GFP_KERNEL); 743 data = kmalloc(data_len, GFP_KERNEL);
763 if (unlikely(!data)) 744 if (unlikely(!data))
764 return -ENOMEM; 745 return -ENOMEM;
765 if (io_data->aio && !io_data->read) { 746 if (!io_data->read) {
766 int i; 747 copied = copy_from_iter(data, data_len, &io_data->data);
767 size_t pos = 0; 748 if (copied != data_len) {
768 for (i = 0; i < io_data->nr_segs; i++) {
769 if (unlikely(copy_from_user(&data[pos],
770 io_data->iovec[i].iov_base,
771 io_data->iovec[i].iov_len))) {
772 ret = -EFAULT;
773 goto error;
774 }
775 pos += io_data->iovec[i].iov_len;
776 }
777 } else {
778 if (!io_data->read &&
779 unlikely(__copy_from_user(data, io_data->buf,
780 io_data->len))) {
781 ret = -EFAULT; 749 ret = -EFAULT;
782 goto error; 750 goto error;
783 } 751 }
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
876 */ 844 */
877 ret = ep->status; 845 ret = ep->status;
878 if (io_data->read && ret > 0) { 846 if (io_data->read && ret > 0) {
879 ret = min_t(size_t, ret, io_data->len); 847 ret = copy_to_iter(data, ret, &io_data->data);
880 848 if (unlikely(iov_iter_count(&io_data->data)))
881 if (unlikely(copy_to_user(io_data->buf,
882 data, ret)))
883 ret = -EFAULT; 849 ret = -EFAULT;
884 } 850 }
885 } 851 }
@@ -898,37 +864,6 @@ error:
898 return ret; 864 return ret;
899} 865}
900 866
901static ssize_t
902ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
903 loff_t *ptr)
904{
905 struct ffs_io_data io_data;
906
907 ENTER();
908
909 io_data.aio = false;
910 io_data.read = false;
911 io_data.buf = (char * __user)buf;
912 io_data.len = len;
913
914 return ffs_epfile_io(file, &io_data);
915}
916
917static ssize_t
918ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
919{
920 struct ffs_io_data io_data;
921
922 ENTER();
923
924 io_data.aio = false;
925 io_data.read = true;
926 io_data.buf = buf;
927 io_data.len = len;
928
929 return ffs_epfile_io(file, &io_data);
930}
931
932static int 867static int
933ffs_epfile_open(struct inode *inode, struct file *file) 868ffs_epfile_open(struct inode *inode, struct file *file)
934{ 869{
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
965 return value; 900 return value;
966} 901}
967 902
968static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, 903static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
969 const struct iovec *iovec,
970 unsigned long nr_segs, loff_t loff)
971{ 904{
972 struct ffs_io_data *io_data; 905 struct ffs_io_data io_data, *p = &io_data;
906 ssize_t res;
973 907
974 ENTER(); 908 ENTER();
975 909
976 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); 910 if (!is_sync_kiocb(kiocb)) {
977 if (unlikely(!io_data)) 911 p = kmalloc(sizeof(io_data), GFP_KERNEL);
978 return -ENOMEM; 912 if (unlikely(!p))
913 return -ENOMEM;
914 p->aio = true;
915 } else {
916 p->aio = false;
917 }
979 918
980 io_data->aio = true; 919 p->read = false;
981 io_data->read = false; 920 p->kiocb = kiocb;
982 io_data->kiocb = kiocb; 921 p->data = *from;
983 io_data->iovec = iovec; 922 p->mm = current->mm;
984 io_data->nr_segs = nr_segs;
985 io_data->len = kiocb->ki_nbytes;
986 io_data->mm = current->mm;
987 923
988 kiocb->private = io_data; 924 kiocb->private = p;
989 925
990 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 926 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
991 927
992 return ffs_epfile_io(kiocb->ki_filp, io_data); 928 res = ffs_epfile_io(kiocb->ki_filp, p);
929 if (res == -EIOCBQUEUED)
930 return res;
931 if (p->aio)
932 kfree(p);
933 else
934 *from = p->data;
935 return res;
993} 936}
994 937
995static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, 938static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
996 const struct iovec *iovec,
997 unsigned long nr_segs, loff_t loff)
998{ 939{
999 struct ffs_io_data *io_data; 940 struct ffs_io_data io_data, *p = &io_data;
1000 struct iovec *iovec_copy; 941 ssize_t res;
1001 942
1002 ENTER(); 943 ENTER();
1003 944
1004 iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); 945 if (!is_sync_kiocb(kiocb)) {
1005 if (unlikely(!iovec_copy)) 946 p = kmalloc(sizeof(io_data), GFP_KERNEL);
1006 return -ENOMEM; 947 if (unlikely(!p))
1007 948 return -ENOMEM;
1008 memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); 949 p->aio = true;
1009 950 } else {
1010 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); 951 p->aio = false;
1011 if (unlikely(!io_data)) {
1012 kfree(iovec_copy);
1013 return -ENOMEM;
1014 } 952 }
1015 953
1016 io_data->aio = true; 954 p->read = true;
1017 io_data->read = true; 955 p->kiocb = kiocb;
1018 io_data->kiocb = kiocb; 956 if (p->aio) {
1019 io_data->iovec = iovec_copy; 957 p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
1020 io_data->nr_segs = nr_segs; 958 if (!p->to_free) {
1021 io_data->len = kiocb->ki_nbytes; 959 kfree(p);
1022 io_data->mm = current->mm; 960 return -ENOMEM;
961 }
962 } else {
963 p->data = *to;
964 p->to_free = NULL;
965 }
966 p->mm = current->mm;
1023 967
1024 kiocb->private = io_data; 968 kiocb->private = p;
1025 969
1026 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 970 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
1027 971
1028 return ffs_epfile_io(kiocb->ki_filp, io_data); 972 res = ffs_epfile_io(kiocb->ki_filp, p);
973 if (res == -EIOCBQUEUED)
974 return res;
975
976 if (p->aio) {
977 kfree(p->to_free);
978 kfree(p);
979 } else {
980 *to = p->data;
981 }
982 return res;
1029} 983}
1030 984
1031static int 985static int
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = {
1105 .llseek = no_llseek, 1059 .llseek = no_llseek,
1106 1060
1107 .open = ffs_epfile_open, 1061 .open = ffs_epfile_open,
1108 .write = ffs_epfile_write, 1062 .write = new_sync_write,
1109 .read = ffs_epfile_read, 1063 .read = new_sync_read,
1110 .aio_write = ffs_epfile_aio_write, 1064 .write_iter = ffs_epfile_write_iter,
1111 .aio_read = ffs_epfile_aio_read, 1065 .read_iter = ffs_epfile_read_iter,
1112 .release = ffs_epfile_release, 1066 .release = ffs_epfile_release,
1113 .unlocked_ioctl = ffs_epfile_ioctl, 1067 .unlocked_ioctl = ffs_epfile_ioctl,
1114}; 1068};
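For asynchronous reads the iov_iter handed to read_iter() lives on the submitter's stack and is gone by the time the completion worker copies data back, so the aio path keeps its own copy of the iterator with dup_iter(); the synchronous path simply borrows the caller's. In outline, using the names from the function above:

	if (p->aio) {
		/* Request completes later in a worker: duplicate the iovec
		 * array backing the iterator and free it on completion. */
		p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
		if (!p->to_free) {
			kfree(p);
			return -ENOMEM;
		}
	} else {
		p->data = *to;		/* caller blocks; its iterator stays valid */
		p->to_free = NULL;
	}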
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 426d69a9c018..a2612fb79eff 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -569,7 +569,7 @@ fail:
569 return status; 569 return status;
570} 570}
571 571
572const struct file_operations f_hidg_fops = { 572static const struct file_operations f_hidg_fops = {
573 .owner = THIS_MODULE, 573 .owner = THIS_MODULE,
574 .open = f_hidg_open, 574 .open = f_hidg_open,
575 .release = f_hidg_release, 575 .release = f_hidg_release,
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index c89e96cfa3e4..c0c3ef272714 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -417,7 +417,10 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
417 return -EINVAL; 417 return -EINVAL;
418 418
419 spin_lock(&port->lock); 419 spin_lock(&port->lock);
420 __pn_reset(f); 420
421 if (fp->in_ep->driver_data)
422 __pn_reset(f);
423
421 if (alt == 1) { 424 if (alt == 1) {
422 int i; 425 int i;
423 426
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index e07c50ced64d..e3dae47baef3 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -344,7 +344,7 @@ static struct usb_endpoint_descriptor ss_int_source_desc = {
344 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), 344 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
345}; 345};
346 346
347struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = { 347static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
348 .bLength = USB_DT_SS_EP_COMP_SIZE, 348 .bLength = USB_DT_SS_EP_COMP_SIZE,
349 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 349 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
350 350
@@ -362,7 +362,7 @@ static struct usb_endpoint_descriptor ss_int_sink_desc = {
362 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), 362 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
363}; 363};
364 364
365struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = { 365static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
366 .bLength = USB_DT_SS_EP_COMP_SIZE, 366 .bLength = USB_DT_SS_EP_COMP_SIZE,
367 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 367 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
368 368
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 33e16658e5cf..6d3eb8b00a48 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -54,7 +54,7 @@
54#define UNFLW_CTRL 8 54#define UNFLW_CTRL 8
55#define OVFLW_CTRL 10 55#define OVFLW_CTRL 10
56 56
57const char *uac2_name = "snd_uac2"; 57static const char *uac2_name = "snd_uac2";
58 58
59struct uac2_req { 59struct uac2_req {
60 struct uac2_rtd_params *pp; /* parent param */ 60 struct uac2_rtd_params *pp; /* parent param */
@@ -634,7 +634,7 @@ static struct usb_interface_descriptor std_ac_if_desc = {
634}; 634};
635 635
636/* Clock source for IN traffic */ 636/* Clock source for IN traffic */
637struct uac_clock_source_descriptor in_clk_src_desc = { 637static struct uac_clock_source_descriptor in_clk_src_desc = {
638 .bLength = sizeof in_clk_src_desc, 638 .bLength = sizeof in_clk_src_desc,
639 .bDescriptorType = USB_DT_CS_INTERFACE, 639 .bDescriptorType = USB_DT_CS_INTERFACE,
640 640
@@ -646,7 +646,7 @@ struct uac_clock_source_descriptor in_clk_src_desc = {
646}; 646};
647 647
648/* Clock source for OUT traffic */ 648/* Clock source for OUT traffic */
649struct uac_clock_source_descriptor out_clk_src_desc = { 649static struct uac_clock_source_descriptor out_clk_src_desc = {
650 .bLength = sizeof out_clk_src_desc, 650 .bLength = sizeof out_clk_src_desc,
651 .bDescriptorType = USB_DT_CS_INTERFACE, 651 .bDescriptorType = USB_DT_CS_INTERFACE,
652 652
@@ -658,7 +658,7 @@ struct uac_clock_source_descriptor out_clk_src_desc = {
658}; 658};
659 659
660/* Input Terminal for USB_OUT */ 660/* Input Terminal for USB_OUT */
661struct uac2_input_terminal_descriptor usb_out_it_desc = { 661static struct uac2_input_terminal_descriptor usb_out_it_desc = {
662 .bLength = sizeof usb_out_it_desc, 662 .bLength = sizeof usb_out_it_desc,
663 .bDescriptorType = USB_DT_CS_INTERFACE, 663 .bDescriptorType = USB_DT_CS_INTERFACE,
664 664
@@ -672,7 +672,7 @@ struct uac2_input_terminal_descriptor usb_out_it_desc = {
672}; 672};
673 673
674/* Input Terminal for I/O-In */ 674/* Input Terminal for I/O-In */
675struct uac2_input_terminal_descriptor io_in_it_desc = { 675static struct uac2_input_terminal_descriptor io_in_it_desc = {
676 .bLength = sizeof io_in_it_desc, 676 .bLength = sizeof io_in_it_desc,
677 .bDescriptorType = USB_DT_CS_INTERFACE, 677 .bDescriptorType = USB_DT_CS_INTERFACE,
678 678
@@ -686,7 +686,7 @@ struct uac2_input_terminal_descriptor io_in_it_desc = {
686}; 686};
687 687
 688/* Output Terminal for USB_IN */ 688/* Output Terminal for USB_IN */
689struct uac2_output_terminal_descriptor usb_in_ot_desc = { 689static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
690 .bLength = sizeof usb_in_ot_desc, 690 .bLength = sizeof usb_in_ot_desc,
691 .bDescriptorType = USB_DT_CS_INTERFACE, 691 .bDescriptorType = USB_DT_CS_INTERFACE,
692 692
@@ -700,7 +700,7 @@ struct uac2_output_terminal_descriptor usb_in_ot_desc = {
700}; 700};
701 701
 702/* Output Terminal for I/O-Out */ 702/* Output Terminal for I/O-Out */
703struct uac2_output_terminal_descriptor io_out_ot_desc = { 703static struct uac2_output_terminal_descriptor io_out_ot_desc = {
704 .bLength = sizeof io_out_ot_desc, 704 .bLength = sizeof io_out_ot_desc,
705 .bDescriptorType = USB_DT_CS_INTERFACE, 705 .bDescriptorType = USB_DT_CS_INTERFACE,
706 706
@@ -713,7 +713,7 @@ struct uac2_output_terminal_descriptor io_out_ot_desc = {
713 .bmControls = (CONTROL_RDWR << COPY_CTRL), 713 .bmControls = (CONTROL_RDWR << COPY_CTRL),
714}; 714};
715 715
716struct uac2_ac_header_descriptor ac_hdr_desc = { 716static struct uac2_ac_header_descriptor ac_hdr_desc = {
717 .bLength = sizeof ac_hdr_desc, 717 .bLength = sizeof ac_hdr_desc,
718 .bDescriptorType = USB_DT_CS_INTERFACE, 718 .bDescriptorType = USB_DT_CS_INTERFACE,
719 719
@@ -751,7 +751,7 @@ static struct usb_interface_descriptor std_as_out_if1_desc = {
751}; 751};
752 752
 753/* Audio Stream OUT Interface Desc */ 753/* Audio Stream OUT Interface Desc */
754struct uac2_as_header_descriptor as_out_hdr_desc = { 754static struct uac2_as_header_descriptor as_out_hdr_desc = {
755 .bLength = sizeof as_out_hdr_desc, 755 .bLength = sizeof as_out_hdr_desc,
756 .bDescriptorType = USB_DT_CS_INTERFACE, 756 .bDescriptorType = USB_DT_CS_INTERFACE,
757 757
@@ -764,7 +764,7 @@ struct uac2_as_header_descriptor as_out_hdr_desc = {
764}; 764};
765 765
766/* Audio USB_OUT Format */ 766/* Audio USB_OUT Format */
767struct uac2_format_type_i_descriptor as_out_fmt1_desc = { 767static struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
768 .bLength = sizeof as_out_fmt1_desc, 768 .bLength = sizeof as_out_fmt1_desc,
769 .bDescriptorType = USB_DT_CS_INTERFACE, 769 .bDescriptorType = USB_DT_CS_INTERFACE,
770 .bDescriptorSubtype = UAC_FORMAT_TYPE, 770 .bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -772,7 +772,7 @@ struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
772}; 772};
773 773
774/* STD AS ISO OUT Endpoint */ 774/* STD AS ISO OUT Endpoint */
775struct usb_endpoint_descriptor fs_epout_desc = { 775static struct usb_endpoint_descriptor fs_epout_desc = {
776 .bLength = USB_DT_ENDPOINT_SIZE, 776 .bLength = USB_DT_ENDPOINT_SIZE,
777 .bDescriptorType = USB_DT_ENDPOINT, 777 .bDescriptorType = USB_DT_ENDPOINT,
778 778
@@ -782,7 +782,7 @@ struct usb_endpoint_descriptor fs_epout_desc = {
782 .bInterval = 1, 782 .bInterval = 1,
783}; 783};
784 784
785struct usb_endpoint_descriptor hs_epout_desc = { 785static struct usb_endpoint_descriptor hs_epout_desc = {
786 .bLength = USB_DT_ENDPOINT_SIZE, 786 .bLength = USB_DT_ENDPOINT_SIZE,
787 .bDescriptorType = USB_DT_ENDPOINT, 787 .bDescriptorType = USB_DT_ENDPOINT,
788 788
@@ -828,7 +828,7 @@ static struct usb_interface_descriptor std_as_in_if1_desc = {
828}; 828};
829 829
 830/* Audio Stream IN Interface Desc */ 830/* Audio Stream IN Interface Desc */
831struct uac2_as_header_descriptor as_in_hdr_desc = { 831static struct uac2_as_header_descriptor as_in_hdr_desc = {
832 .bLength = sizeof as_in_hdr_desc, 832 .bLength = sizeof as_in_hdr_desc,
833 .bDescriptorType = USB_DT_CS_INTERFACE, 833 .bDescriptorType = USB_DT_CS_INTERFACE,
834 834
@@ -841,7 +841,7 @@ struct uac2_as_header_descriptor as_in_hdr_desc = {
841}; 841};
842 842
843/* Audio USB_IN Format */ 843/* Audio USB_IN Format */
844struct uac2_format_type_i_descriptor as_in_fmt1_desc = { 844static struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
845 .bLength = sizeof as_in_fmt1_desc, 845 .bLength = sizeof as_in_fmt1_desc,
846 .bDescriptorType = USB_DT_CS_INTERFACE, 846 .bDescriptorType = USB_DT_CS_INTERFACE,
847 .bDescriptorSubtype = UAC_FORMAT_TYPE, 847 .bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -849,7 +849,7 @@ struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
849}; 849};
850 850
851/* STD AS ISO IN Endpoint */ 851/* STD AS ISO IN Endpoint */
852struct usb_endpoint_descriptor fs_epin_desc = { 852static struct usb_endpoint_descriptor fs_epin_desc = {
853 .bLength = USB_DT_ENDPOINT_SIZE, 853 .bLength = USB_DT_ENDPOINT_SIZE,
854 .bDescriptorType = USB_DT_ENDPOINT, 854 .bDescriptorType = USB_DT_ENDPOINT,
855 855
@@ -859,7 +859,7 @@ struct usb_endpoint_descriptor fs_epin_desc = {
859 .bInterval = 1, 859 .bInterval = 1,
860}; 860};
861 861
862struct usb_endpoint_descriptor hs_epin_desc = { 862static struct usb_endpoint_descriptor hs_epin_desc = {
863 .bLength = USB_DT_ENDPOINT_SIZE, 863 .bLength = USB_DT_ENDPOINT_SIZE,
864 .bDescriptorType = USB_DT_ENDPOINT, 864 .bDescriptorType = USB_DT_ENDPOINT,
865 865
@@ -1563,7 +1563,7 @@ static void afunc_unbind(struct usb_configuration *c, struct usb_function *f)
1563 agdev->out_ep->driver_data = NULL; 1563 agdev->out_ep->driver_data = NULL;
1564} 1564}
1565 1565
1566struct usb_function *afunc_alloc(struct usb_function_instance *fi) 1566static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
1567{ 1567{
1568 struct audio_dev *agdev; 1568 struct audio_dev *agdev;
1569 struct f_uac2_opts *opts; 1569 struct f_uac2_opts *opts;
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 5aad7fededa5..8b818fd027b3 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -27,6 +27,7 @@
27#include "uvc.h" 27#include "uvc.h"
28#include "uvc_queue.h" 28#include "uvc_queue.h"
29#include "uvc_video.h" 29#include "uvc_video.h"
30#include "uvc_v4l2.h"
30 31
31/* -------------------------------------------------------------------------- 32/* --------------------------------------------------------------------------
32 * Requests handling 33 * Requests handling
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 9cb86bc1a9a5..50a5e637ca35 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -21,6 +21,7 @@
21 21
22#include "uvc.h" 22#include "uvc.h"
23#include "uvc_queue.h" 23#include "uvc_queue.h"
24#include "uvc_video.h"
24 25
25/* -------------------------------------------------------------------------- 26/* --------------------------------------------------------------------------
26 * Video codecs 27 * Video codecs
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 06acfa55864a..b01b88e1b716 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -133,7 +133,9 @@ struct gfs_configuration {
133 struct usb_configuration c; 133 struct usb_configuration c;
134 int (*eth)(struct usb_configuration *c); 134 int (*eth)(struct usb_configuration *c);
135 int num; 135 int num;
136} gfs_configurations[] = { 136};
137
138static struct gfs_configuration gfs_configurations[] = {
137#ifdef CONFIG_USB_FUNCTIONFS_RNDIS 139#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
138 { 140 {
139 .eth = bind_rndis_config, 141 .eth = bind_rndis_config,
@@ -278,7 +280,7 @@ static void *functionfs_acquire_dev(struct ffs_dev *dev)
278 if (!try_module_get(THIS_MODULE)) 280 if (!try_module_get(THIS_MODULE))
279 return ERR_PTR(-ENOENT); 281 return ERR_PTR(-ENOENT);
280 282
281 return 0; 283 return NULL;
282} 284}
283 285
284static void functionfs_release_dev(struct ffs_dev *dev) 286static void functionfs_release_dev(struct ffs_dev *dev)
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index db49ec4c748e..200f9a584064 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC);
74MODULE_AUTHOR ("David Brownell"); 74MODULE_AUTHOR ("David Brownell");
75MODULE_LICENSE ("GPL"); 75MODULE_LICENSE ("GPL");
76 76
77static int ep_open(struct inode *, struct file *);
78
77 79
78/*----------------------------------------------------------------------*/ 80/*----------------------------------------------------------------------*/
79 81
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req)
283 * still need dev->lock to use epdata->ep. 285 * still need dev->lock to use epdata->ep.
284 */ 286 */
285static int 287static int
286get_ready_ep (unsigned f_flags, struct ep_data *epdata) 288get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
287{ 289{
288 int val; 290 int val;
289 291
290 if (f_flags & O_NONBLOCK) { 292 if (f_flags & O_NONBLOCK) {
291 if (!mutex_trylock(&epdata->lock)) 293 if (!mutex_trylock(&epdata->lock))
292 goto nonblock; 294 goto nonblock;
293 if (epdata->state != STATE_EP_ENABLED) { 295 if (epdata->state != STATE_EP_ENABLED &&
296 (!is_write || epdata->state != STATE_EP_READY)) {
294 mutex_unlock(&epdata->lock); 297 mutex_unlock(&epdata->lock);
295nonblock: 298nonblock:
296 val = -EAGAIN; 299 val = -EAGAIN;
@@ -305,18 +308,20 @@ nonblock:
305 308
306 switch (epdata->state) { 309 switch (epdata->state) {
307 case STATE_EP_ENABLED: 310 case STATE_EP_ENABLED:
311 return 0;
312 case STATE_EP_READY: /* not configured yet */
313 if (is_write)
314 return 0;
315 // FALLTHRU
316 case STATE_EP_UNBOUND: /* clean disconnect */
308 break; 317 break;
309 // case STATE_EP_DISABLED: /* "can't happen" */ 318 // case STATE_EP_DISABLED: /* "can't happen" */
310 // case STATE_EP_READY: /* "can't happen" */
311 default: /* error! */ 319 default: /* error! */
312 pr_debug ("%s: ep %p not available, state %d\n", 320 pr_debug ("%s: ep %p not available, state %d\n",
313 shortname, epdata, epdata->state); 321 shortname, epdata, epdata->state);
314 // FALLTHROUGH
315 case STATE_EP_UNBOUND: /* clean disconnect */
316 val = -ENODEV;
317 mutex_unlock(&epdata->lock);
318 } 322 }
319 return val; 323 mutex_unlock(&epdata->lock);
324 return -ENODEV;
320} 325}
321 326
322static ssize_t 327static ssize_t
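The is_write argument added to get_ready_ep() is what allows a write() on an endpoint file that is still in STATE_EP_READY, i.e. before its descriptors have been written, so that ep_write_iter() further down can route that first write into ep_config(). A stand-alone model of the new availability rule (state names copied from the file, a literal -19 standing in for -ENODEV):

#include <stdbool.h>
#include <stdio.h>

enum ep_state { STATE_EP_READY, STATE_EP_ENABLED, STATE_EP_UNBOUND };

/* 0: usable, -19 (-ENODEV): not usable in this state */
static int ep_available(enum ep_state state, bool is_write)
{
	switch (state) {
	case STATE_EP_ENABLED:
		return 0;			/* configured: reads and writes ok */
	case STATE_EP_READY:
		return is_write ? 0 : -19;	/* only writes (the config blob) */
	default:
		return -19;			/* unbound or unexpected */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       ep_available(STATE_EP_READY, true),	/*   0 */
	       ep_available(STATE_EP_READY, false),	/* -19 */
	       ep_available(STATE_EP_UNBOUND, true));	/* -19 */
	return 0;
}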
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
363 return value; 368 return value;
364} 369}
365 370
366
367/* handle a synchronous OUT bulk/intr/iso transfer */
368static ssize_t
369ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
370{
371 struct ep_data *data = fd->private_data;
372 void *kbuf;
373 ssize_t value;
374
375 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
376 return value;
377
378 /* halt any endpoint by doing a "wrong direction" i/o call */
379 if (usb_endpoint_dir_in(&data->desc)) {
380 if (usb_endpoint_xfer_isoc(&data->desc)) {
381 mutex_unlock(&data->lock);
382 return -EINVAL;
383 }
384 DBG (data->dev, "%s halt\n", data->name);
385 spin_lock_irq (&data->dev->lock);
386 if (likely (data->ep != NULL))
387 usb_ep_set_halt (data->ep);
388 spin_unlock_irq (&data->dev->lock);
389 mutex_unlock(&data->lock);
390 return -EBADMSG;
391 }
392
393 /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
394
395 value = -ENOMEM;
396 kbuf = kmalloc (len, GFP_KERNEL);
397 if (unlikely (!kbuf))
398 goto free1;
399
400 value = ep_io (data, kbuf, len);
401 VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
402 data->name, len, (int) value);
403 if (value >= 0 && copy_to_user (buf, kbuf, value))
404 value = -EFAULT;
405
406free1:
407 mutex_unlock(&data->lock);
408 kfree (kbuf);
409 return value;
410}
411
412/* handle a synchronous IN bulk/intr/iso transfer */
413static ssize_t
414ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
415{
416 struct ep_data *data = fd->private_data;
417 void *kbuf;
418 ssize_t value;
419
420 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
421 return value;
422
423 /* halt any endpoint by doing a "wrong direction" i/o call */
424 if (!usb_endpoint_dir_in(&data->desc)) {
425 if (usb_endpoint_xfer_isoc(&data->desc)) {
426 mutex_unlock(&data->lock);
427 return -EINVAL;
428 }
429 DBG (data->dev, "%s halt\n", data->name);
430 spin_lock_irq (&data->dev->lock);
431 if (likely (data->ep != NULL))
432 usb_ep_set_halt (data->ep);
433 spin_unlock_irq (&data->dev->lock);
434 mutex_unlock(&data->lock);
435 return -EBADMSG;
436 }
437
438 /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
439
440 value = -ENOMEM;
441 kbuf = memdup_user(buf, len);
442 if (IS_ERR(kbuf)) {
443 value = PTR_ERR(kbuf);
444 kbuf = NULL;
445 goto free1;
446 }
447
448 value = ep_io (data, kbuf, len);
449 VDEBUG (data->dev, "%s write %zu IN, status %d\n",
450 data->name, len, (int) value);
451free1:
452 mutex_unlock(&data->lock);
453 kfree (kbuf);
454 return value;
455}
456
457static int 371static int
458ep_release (struct inode *inode, struct file *fd) 372ep_release (struct inode *inode, struct file *fd)
459{ 373{
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
481 struct ep_data *data = fd->private_data; 395 struct ep_data *data = fd->private_data;
482 int status; 396 int status;
483 397
484 if ((status = get_ready_ep (fd->f_flags, data)) < 0) 398 if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
485 return status; 399 return status;
486 400
487 spin_lock_irq (&data->dev->lock); 401 spin_lock_irq (&data->dev->lock);
@@ -517,8 +431,8 @@ struct kiocb_priv {
517 struct mm_struct *mm; 431 struct mm_struct *mm;
518 struct work_struct work; 432 struct work_struct work;
519 void *buf; 433 void *buf;
520 const struct iovec *iv; 434 struct iov_iter to;
521 unsigned long nr_segs; 435 const void *to_free;
522 unsigned actual; 436 unsigned actual;
523}; 437};
524 438
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb)
541 return value; 455 return value;
542} 456}
543 457
544static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
545{
546 ssize_t len, total;
547 void *to_copy;
548 int i;
549
550 /* copy stuff into user buffers */
551 total = priv->actual;
552 len = 0;
553 to_copy = priv->buf;
554 for (i=0; i < priv->nr_segs; i++) {
555 ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
556
557 if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
558 if (len == 0)
559 len = -EFAULT;
560 break;
561 }
562
563 total -= this;
564 len += this;
565 to_copy += this;
566 if (total == 0)
567 break;
568 }
569
570 return len;
571}
572
573static void ep_user_copy_worker(struct work_struct *work) 458static void ep_user_copy_worker(struct work_struct *work)
574{ 459{
575 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); 460 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work)
578 size_t ret; 463 size_t ret;
579 464
580 use_mm(mm); 465 use_mm(mm);
581 ret = ep_copy_to_user(priv); 466 ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
582 unuse_mm(mm); 467 unuse_mm(mm);
468 if (!ret)
469 ret = -EFAULT;
583 470
584 /* completing the iocb can drop the ctx and mm, don't touch mm after */ 471 /* completing the iocb can drop the ctx and mm, don't touch mm after */
585 aio_complete(iocb, ret, ret); 472 aio_complete(iocb, ret, ret);
586 473
587 kfree(priv->buf); 474 kfree(priv->buf);
475 kfree(priv->to_free);
588 kfree(priv); 476 kfree(priv);
589} 477}
590 478
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
603 * don't need to copy anything to userspace, so we can 491 * don't need to copy anything to userspace, so we can
604 * complete the aio request immediately. 492 * complete the aio request immediately.
605 */ 493 */
606 if (priv->iv == NULL || unlikely(req->actual == 0)) { 494 if (priv->to_free == NULL || unlikely(req->actual == 0)) {
607 kfree(req->buf); 495 kfree(req->buf);
496 kfree(priv->to_free);
608 kfree(priv); 497 kfree(priv);
609 iocb->private = NULL; 498 iocb->private = NULL;
610 /* aio_complete() reports bytes-transferred _and_ faults */ 499 /* aio_complete() reports bytes-transferred _and_ faults */
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
618 507
619 priv->buf = req->buf; 508 priv->buf = req->buf;
620 priv->actual = req->actual; 509 priv->actual = req->actual;
510 INIT_WORK(&priv->work, ep_user_copy_worker);
621 schedule_work(&priv->work); 511 schedule_work(&priv->work);
622 } 512 }
623 spin_unlock(&epdata->dev->lock); 513 spin_unlock(&epdata->dev->lock);
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
626 put_ep(epdata); 516 put_ep(epdata);
627} 517}
628 518
629static ssize_t 519static ssize_t ep_aio(struct kiocb *iocb,
630ep_aio_rwtail( 520 struct kiocb_priv *priv,
631 struct kiocb *iocb, 521 struct ep_data *epdata,
632 char *buf, 522 char *buf,
633 size_t len, 523 size_t len)
634 struct ep_data *epdata,
635 const struct iovec *iv,
636 unsigned long nr_segs
637)
638{ 524{
639 struct kiocb_priv *priv; 525 struct usb_request *req;
640 struct usb_request *req; 526 ssize_t value;
641 ssize_t value;
642 527
643 priv = kmalloc(sizeof *priv, GFP_KERNEL);
644 if (!priv) {
645 value = -ENOMEM;
646fail:
647 kfree(buf);
648 return value;
649 }
650 iocb->private = priv; 528 iocb->private = priv;
651 priv->iocb = iocb; 529 priv->iocb = iocb;
652 priv->iv = iv;
653 priv->nr_segs = nr_segs;
654 INIT_WORK(&priv->work, ep_user_copy_worker);
655
656 value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
657 if (unlikely(value < 0)) {
658 kfree(priv);
659 goto fail;
660 }
661 530
662 kiocb_set_cancel_fn(iocb, ep_aio_cancel); 531 kiocb_set_cancel_fn(iocb, ep_aio_cancel);
663 get_ep(epdata); 532 get_ep(epdata);
@@ -669,75 +538,154 @@ fail:
669 * allocate or submit those if the host disconnected. 538 * allocate or submit those if the host disconnected.
670 */ 539 */
671 spin_lock_irq(&epdata->dev->lock); 540 spin_lock_irq(&epdata->dev->lock);
672 if (likely(epdata->ep)) { 541 value = -ENODEV;
673 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); 542 if (unlikely(epdata->ep))
674 if (likely(req)) { 543 goto fail;
675 priv->req = req;
676 req->buf = buf;
677 req->length = len;
678 req->complete = ep_aio_complete;
679 req->context = iocb;
680 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
681 if (unlikely(0 != value))
682 usb_ep_free_request(epdata->ep, req);
683 } else
684 value = -EAGAIN;
685 } else
686 value = -ENODEV;
687 spin_unlock_irq(&epdata->dev->lock);
688 544
689 mutex_unlock(&epdata->lock); 545 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
546 value = -ENOMEM;
547 if (unlikely(!req))
548 goto fail;
690 549
691 if (unlikely(value)) { 550 priv->req = req;
692 kfree(priv); 551 req->buf = buf;
693 put_ep(epdata); 552 req->length = len;
694 } else 553 req->complete = ep_aio_complete;
695 value = -EIOCBQUEUED; 554 req->context = iocb;
555 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
556 if (unlikely(0 != value)) {
557 usb_ep_free_request(epdata->ep, req);
558 goto fail;
559 }
560 spin_unlock_irq(&epdata->dev->lock);
561 return -EIOCBQUEUED;
562
563fail:
564 spin_unlock_irq(&epdata->dev->lock);
565 kfree(priv->to_free);
566 kfree(priv);
567 put_ep(epdata);
696 return value; 568 return value;
697} 569}
698 570
699static ssize_t 571static ssize_t
700ep_aio_read(struct kiocb *iocb, const struct iovec *iov, 572ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
701 unsigned long nr_segs, loff_t o)
702{ 573{
703 struct ep_data *epdata = iocb->ki_filp->private_data; 574 struct file *file = iocb->ki_filp;
704 char *buf; 575 struct ep_data *epdata = file->private_data;
576 size_t len = iov_iter_count(to);
577 ssize_t value;
578 char *buf;
705 579
706 if (unlikely(usb_endpoint_dir_in(&epdata->desc))) 580 if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
707 return -EINVAL; 581 return value;
708 582
709 buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); 583 /* halt any endpoint by doing a "wrong direction" i/o call */
710 if (unlikely(!buf)) 584 if (usb_endpoint_dir_in(&epdata->desc)) {
711 return -ENOMEM; 585 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
586 !is_sync_kiocb(iocb)) {
587 mutex_unlock(&epdata->lock);
588 return -EINVAL;
589 }
590 DBG (epdata->dev, "%s halt\n", epdata->name);
591 spin_lock_irq(&epdata->dev->lock);
592 if (likely(epdata->ep != NULL))
593 usb_ep_set_halt(epdata->ep);
594 spin_unlock_irq(&epdata->dev->lock);
595 mutex_unlock(&epdata->lock);
596 return -EBADMSG;
597 }
712 598
713 return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); 599 buf = kmalloc(len, GFP_KERNEL);
600 if (unlikely(!buf)) {
601 mutex_unlock(&epdata->lock);
602 return -ENOMEM;
603 }
604 if (is_sync_kiocb(iocb)) {
605 value = ep_io(epdata, buf, len);
606 if (value >= 0 && copy_to_iter(buf, value, to))
607 value = -EFAULT;
608 } else {
609 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
610 value = -ENOMEM;
611 if (!priv)
612 goto fail;
613 priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
614 if (!priv->to_free) {
615 kfree(priv);
616 goto fail;
617 }
618 value = ep_aio(iocb, priv, epdata, buf, len);
619 if (value == -EIOCBQUEUED)
620 buf = NULL;
621 }
622fail:
623 kfree(buf);
624 mutex_unlock(&epdata->lock);
625 return value;
714} 626}
715 627
628static ssize_t ep_config(struct ep_data *, const char *, size_t);
629
716static ssize_t 630static ssize_t
717ep_aio_write(struct kiocb *iocb, const struct iovec *iov, 631ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
718 unsigned long nr_segs, loff_t o)
719{ 632{
720 struct ep_data *epdata = iocb->ki_filp->private_data; 633 struct file *file = iocb->ki_filp;
721 char *buf; 634 struct ep_data *epdata = file->private_data;
722 size_t len = 0; 635 size_t len = iov_iter_count(from);
723 int i = 0; 636 bool configured;
637 ssize_t value;
638 char *buf;
639
640 if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
641 return value;
724 642
725 if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) 643 configured = epdata->state == STATE_EP_ENABLED;
726 return -EINVAL;
727 644
728 buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); 645 /* halt any endpoint by doing a "wrong direction" i/o call */
729 if (unlikely(!buf)) 646 if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
647 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
648 !is_sync_kiocb(iocb)) {
649 mutex_unlock(&epdata->lock);
650 return -EINVAL;
651 }
652 DBG (epdata->dev, "%s halt\n", epdata->name);
653 spin_lock_irq(&epdata->dev->lock);
654 if (likely(epdata->ep != NULL))
655 usb_ep_set_halt(epdata->ep);
656 spin_unlock_irq(&epdata->dev->lock);
657 mutex_unlock(&epdata->lock);
658 return -EBADMSG;
659 }
660
661 buf = kmalloc(len, GFP_KERNEL);
662 if (unlikely(!buf)) {
663 mutex_unlock(&epdata->lock);
730 return -ENOMEM; 664 return -ENOMEM;
665 }
731 666
732 for (i=0; i < nr_segs; i++) { 667 if (unlikely(copy_from_iter(buf, len, from) != len)) {
733 if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, 668 value = -EFAULT;
734 iov[i].iov_len) != 0)) { 669 goto out;
735 kfree(buf); 670 }
736 return -EFAULT; 671
672 if (unlikely(!configured)) {
673 value = ep_config(epdata, buf, len);
674 } else if (is_sync_kiocb(iocb)) {
675 value = ep_io(epdata, buf, len);
676 } else {
677 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
678 value = -ENOMEM;
679 if (priv) {
680 value = ep_aio(iocb, priv, epdata, buf, len);
681 if (value == -EIOCBQUEUED)
682 buf = NULL;
737 } 683 }
738 len += iov[i].iov_len;
739 } 684 }
740 return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); 685out:
686 kfree(buf);
687 mutex_unlock(&epdata->lock);
688 return value;
741} 689}
742 690
743/*----------------------------------------------------------------------*/ 691/*----------------------------------------------------------------------*/
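The rewritten I/O path above keeps no iovec bookkeeping of its own: synchronous callers do a single copy_to_iter()/copy_from_iter() in place, while asynchronous callers stash a private copy of the iterator with dup_iter() (priv->to_free) so the completion worker can copy into the caller's buffers long after the submitting syscall has returned -EIOCBQUEUED. The userspace model below illustrates only that duplicate-then-complete-later idea; the struct and function names in it are invented, not gadgetfs API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* An async completion cannot rely on the submitter's iovec array staying
 * around, so it keeps a private copy and frees it once the data is out
 * (the kfree(priv->to_free) in the hunk above plays the same role). */
struct pending_read {
	struct iovec *iov;	/* private copy, freed on completion */
	int iovcnt;
};

static struct pending_read *submit(const struct iovec *iov, int iovcnt)
{
	struct pending_read *p = malloc(sizeof(*p));

	/* allocation error checks elided for brevity */
	p->iov = malloc(iovcnt * sizeof(*iov));
	memcpy(p->iov, iov, iovcnt * sizeof(*iov));
	p->iovcnt = iovcnt;
	return p;		/* "completion" runs later, out of line */
}

static void complete(struct pending_read *p, const char *data, size_t len)
{
	size_t off = 0;

	for (int i = 0; i < p->iovcnt && off < len; i++) {
		size_t n = p->iov[i].iov_len < len - off ?
			   p->iov[i].iov_len : len - off;

		memcpy(p->iov[i].iov_base, data + off, n);
		off += n;
	}
	free(p->iov);
	free(p);
}

int main(void)
{
	char a[4] = "", b[8] = "";
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct pending_read *p = submit(iov, 2);

	complete(p, "hello world", 11);	/* copies happen after submit() returned */
	printf("%.4s|%.7s\n", a, b);	/* hell|o world */
	return 0;
}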
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
745/* used after endpoint configuration */ 693/* used after endpoint configuration */
746static const struct file_operations ep_io_operations = { 694static const struct file_operations ep_io_operations = {
747 .owner = THIS_MODULE, 695 .owner = THIS_MODULE,
748 .llseek = no_llseek,
749 696
750 .read = ep_read, 697 .open = ep_open,
751 .write = ep_write,
752 .unlocked_ioctl = ep_ioctl,
753 .release = ep_release, 698 .release = ep_release,
754 699 .llseek = no_llseek,
755 .aio_read = ep_aio_read, 700 .read = new_sync_read,
756 .aio_write = ep_aio_write, 701 .write = new_sync_write,
702 .unlocked_ioctl = ep_ioctl,
703 .read_iter = ep_read_iter,
704 .write_iter = ep_write_iter,
757}; 705};
758 706
759/* ENDPOINT INITIALIZATION 707/* ENDPOINT INITIALIZATION
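The single ep_io_operations table replaces the old config/io pair: .open and .release live here now, plain read()/write() are bridged through new_sync_read()/new_sync_write(), and the real work sits in .read_iter/.write_iter. That bridge pairing was the stock idiom of this kernel generation; later kernels fold the fallback into the VFS. A minimal sketch of the same wiring for a hypothetical driver with its own my_read_iter()/my_write_iter():

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= new_sync_read,	/* wraps the user buffer in an iov_iter */
	.write		= new_sync_write,
	.read_iter	= my_read_iter,		/* hypothetical iter-based handlers */
	.write_iter	= my_write_iter,
};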
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = {
770 * speed descriptor, then optional high speed descriptor. 718 * speed descriptor, then optional high speed descriptor.
771 */ 719 */
772static ssize_t 720static ssize_t
773ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) 721ep_config (struct ep_data *data, const char *buf, size_t len)
774{ 722{
775 struct ep_data *data = fd->private_data;
776 struct usb_ep *ep; 723 struct usb_ep *ep;
777 u32 tag; 724 u32 tag;
778 int value, length = len; 725 int value, length = len;
779 726
780 value = mutex_lock_interruptible(&data->lock);
781 if (value < 0)
782 return value;
783
784 if (data->state != STATE_EP_READY) { 727 if (data->state != STATE_EP_READY) {
785 value = -EL2HLT; 728 value = -EL2HLT;
786 goto fail; 729 goto fail;
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
791 goto fail0; 734 goto fail0;
792 735
793 /* we might need to change message format someday */ 736 /* we might need to change message format someday */
794 if (copy_from_user (&tag, buf, 4)) { 737 memcpy(&tag, buf, 4);
795 goto fail1;
796 }
797 if (tag != 1) { 738 if (tag != 1) {
798 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); 739 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
799 goto fail0; 740 goto fail0;
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
806 */ 747 */
807 748
808 /* full/low speed descriptor, then high speed */ 749 /* full/low speed descriptor, then high speed */
809 if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { 750 memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
810 goto fail1;
811 }
812 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE 751 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
813 || data->desc.bDescriptorType != USB_DT_ENDPOINT) 752 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
814 goto fail0; 753 goto fail0;
815 if (len != USB_DT_ENDPOINT_SIZE) { 754 if (len != USB_DT_ENDPOINT_SIZE) {
816 if (len != 2 * USB_DT_ENDPOINT_SIZE) 755 if (len != 2 * USB_DT_ENDPOINT_SIZE)
817 goto fail0; 756 goto fail0;
818 if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, 757 memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
819 USB_DT_ENDPOINT_SIZE)) { 758 USB_DT_ENDPOINT_SIZE);
820 goto fail1;
821 }
822 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE 759 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
823 || data->hs_desc.bDescriptorType 760 || data->hs_desc.bDescriptorType
824 != USB_DT_ENDPOINT) { 761 != USB_DT_ENDPOINT) {
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
840 case USB_SPEED_LOW: 777 case USB_SPEED_LOW:
841 case USB_SPEED_FULL: 778 case USB_SPEED_FULL:
842 ep->desc = &data->desc; 779 ep->desc = &data->desc;
843 value = usb_ep_enable(ep);
844 if (value == 0)
845 data->state = STATE_EP_ENABLED;
846 break; 780 break;
847 case USB_SPEED_HIGH: 781 case USB_SPEED_HIGH:
848 /* fails if caller didn't provide that descriptor... */ 782 /* fails if caller didn't provide that descriptor... */
849 ep->desc = &data->hs_desc; 783 ep->desc = &data->hs_desc;
850 value = usb_ep_enable(ep);
851 if (value == 0)
852 data->state = STATE_EP_ENABLED;
853 break; 784 break;
854 default: 785 default:
855 DBG(data->dev, "unconnected, %s init abandoned\n", 786 DBG(data->dev, "unconnected, %s init abandoned\n",
856 data->name); 787 data->name);
857 value = -EINVAL; 788 value = -EINVAL;
789 goto gone;
858 } 790 }
791 value = usb_ep_enable(ep);
859 if (value == 0) { 792 if (value == 0) {
860 fd->f_op = &ep_io_operations; 793 data->state = STATE_EP_ENABLED;
861 value = length; 794 value = length;
862 } 795 }
863gone: 796gone:
@@ -867,14 +800,10 @@ fail:
867 data->desc.bDescriptorType = 0; 800 data->desc.bDescriptorType = 0;
868 data->hs_desc.bDescriptorType = 0; 801 data->hs_desc.bDescriptorType = 0;
869 } 802 }
870 mutex_unlock(&data->lock);
871 return value; 803 return value;
872fail0: 804fail0:
873 value = -EINVAL; 805 value = -EINVAL;
874 goto fail; 806 goto fail;
875fail1:
876 value = -EFAULT;
877 goto fail;
878} 807}
879 808
880static int 809static int
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd)
902 return value; 831 return value;
903} 832}
904 833
905/* used before endpoint configuration */
906static const struct file_operations ep_config_operations = {
907 .llseek = no_llseek,
908
909 .open = ep_open,
910 .write = ep_config,
911 .release = ep_release,
912};
913
914/*----------------------------------------------------------------------*/ 834/*----------------------------------------------------------------------*/
915 835
916/* EP0 IMPLEMENTATION can be partly in userspace. 836/* EP0 IMPLEMENTATION can be partly in userspace.
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
989 enum ep0_state state; 909 enum ep0_state state;
990 910
991 spin_lock_irq (&dev->lock); 911 spin_lock_irq (&dev->lock);
912 if (dev->state <= STATE_DEV_OPENED) {
913 retval = -EINVAL;
914 goto done;
915 }
992 916
993 /* report fd mode change before acting on it */ 917 /* report fd mode change before acting on it */
994 if (dev->setup_abort) { 918 if (dev->setup_abort) {
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1187 struct dev_data *dev = fd->private_data; 1111 struct dev_data *dev = fd->private_data;
1188 ssize_t retval = -ESRCH; 1112 ssize_t retval = -ESRCH;
1189 1113
1190 spin_lock_irq (&dev->lock);
1191
1192 /* report fd mode change before acting on it */ 1114 /* report fd mode change before acting on it */
1193 if (dev->setup_abort) { 1115 if (dev->setup_abort) {
1194 dev->setup_abort = 0; 1116 dev->setup_abort = 0;
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1234 } else 1156 } else
1235 DBG (dev, "fail %s, state %d\n", __func__, dev->state); 1157 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1236 1158
1237 spin_unlock_irq (&dev->lock);
1238 return retval; 1159 return retval;
1239} 1160}
1240 1161
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait)
1281 struct dev_data *dev = fd->private_data; 1202 struct dev_data *dev = fd->private_data;
1282 int mask = 0; 1203 int mask = 0;
1283 1204
1205 if (dev->state <= STATE_DEV_OPENED)
1206 return DEFAULT_POLLMASK;
1207
1284 poll_wait(fd, &dev->wait, wait); 1208 poll_wait(fd, &dev->wait, wait);
1285 1209
1286 spin_lock_irq (&dev->lock); 1210 spin_lock_irq (&dev->lock);
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1316 return ret; 1240 return ret;
1317} 1241}
1318 1242
1319/* used after device configuration */
1320static const struct file_operations ep0_io_operations = {
1321 .owner = THIS_MODULE,
1322 .llseek = no_llseek,
1323
1324 .read = ep0_read,
1325 .write = ep0_write,
1326 .fasync = ep0_fasync,
1327 .poll = ep0_poll,
1328 .unlocked_ioctl = dev_ioctl,
1329 .release = dev_release,
1330};
1331
1332/*----------------------------------------------------------------------*/ 1243/*----------------------------------------------------------------------*/
1333 1244
1334/* The in-kernel gadget driver handles most ep0 issues, in particular 1245/* The in-kernel gadget driver handles most ep0 issues, in particular
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev)
1650 goto enomem1; 1561 goto enomem1;
1651 1562
1652 data->dentry = gadgetfs_create_file (dev->sb, data->name, 1563 data->dentry = gadgetfs_create_file (dev->sb, data->name,
1653 data, &ep_config_operations); 1564 data, &ep_io_operations);
1654 if (!data->dentry) 1565 if (!data->dentry)
1655 goto enomem2; 1566 goto enomem2;
1656 list_add_tail (&data->epfiles, &dev->epfiles); 1567 list_add_tail (&data->epfiles, &dev->epfiles);
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1852 u32 tag; 1763 u32 tag;
1853 char *kbuf; 1764 char *kbuf;
1854 1765
1766 spin_lock_irq(&dev->lock);
1767 if (dev->state > STATE_DEV_OPENED) {
1768 value = ep0_write(fd, buf, len, ptr);
1769 spin_unlock_irq(&dev->lock);
1770 return value;
1771 }
1772 spin_unlock_irq(&dev->lock);
1773
1855 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) 1774 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
1856 return -EINVAL; 1775 return -EINVAL;
1857 1776
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1925 * on, they can work ... except in cleanup paths that 1844 * on, they can work ... except in cleanup paths that
1926 * kick in after the ep0 descriptor is closed. 1845 * kick in after the ep0 descriptor is closed.
1927 */ 1846 */
1928 fd->f_op = &ep0_io_operations;
1929 value = len; 1847 value = len;
1930 } 1848 }
1931 return value; 1849 return value;
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd)
1956 return value; 1874 return value;
1957} 1875}
1958 1876
1959static const struct file_operations dev_init_operations = { 1877static const struct file_operations ep0_operations = {
1960 .llseek = no_llseek, 1878 .llseek = no_llseek,
1961 1879
1962 .open = dev_open, 1880 .open = dev_open,
1881 .read = ep0_read,
1963 .write = dev_config, 1882 .write = dev_config,
1964 .fasync = ep0_fasync, 1883 .fasync = ep0_fasync,
1884 .poll = ep0_poll,
1965 .unlocked_ioctl = dev_ioctl, 1885 .unlocked_ioctl = dev_ioctl,
1966 .release = dev_release, 1886 .release = dev_release,
1967}; 1887};
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2077 goto Enomem; 1997 goto Enomem;
2078 1998
2079 dev->sb = sb; 1999 dev->sb = sb;
2080 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); 2000 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2081 if (!dev->dentry) { 2001 if (!dev->dentry) {
2082 put_dev(dev); 2002 put_dev(dev);
2083 goto Enomem; 2003 goto Enomem;
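The ep0 side follows the same theme as the endpoint files: instead of swapping fd->f_op from an init table to an io table once the descriptors are accepted (the removed "fd->f_op = &ep0_io_operations" assignment), a single ep0_operations table is installed at mount time and each handler branches on dev->state, with dev_config() handing writes to ep0_write() once the state has moved past STATE_DEV_OPENED and ep0_read()/ep0_poll() bailing out before that point. Checking state is safe where changing ->f_op underneath an already-open file is not. A compact model of the dispatch idea, with made-up state names standing in for the real enum:

#include <stdio.h>

enum dev_state { DEV_DISABLED, DEV_OPENED, DEV_CONFIGURED };

/* One write() entry point; behaviour depends on device state rather than on
 * which file_operations table happens to be installed at the moment. */
static const char *dev_write(enum dev_state state)
{
	if (state > DEV_OPENED)
		return "ep0_write: run the control-traffic path";
	return "dev_config: parse the descriptor blob";
}

int main(void)
{
	printf("%s\n", dev_write(DEV_OPENED));		/* still configuring */
	printf("%s\n", dev_write(DEV_CONFIGURED));	/* configured, use ep0 */
	return 0;
}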
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7f76c8a12f89..fd53c9ebd662 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -37,6 +37,9 @@
37 37
38#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 38#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
39#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 39#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
40#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
41#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
42#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
40 43
41static const char hcd_name[] = "xhci_hcd"; 44static const char hcd_name[] = "xhci_hcd";
42 45
@@ -133,6 +136,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
133 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { 136 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
134 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 137 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
135 } 138 }
139 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
140 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
141 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
142 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
143 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
144 }
136 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 145 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
137 pdev->device == PCI_DEVICE_ID_EJ168) { 146 pdev->device == PCI_DEVICE_ID_EJ168) {
138 xhci->quirks |= XHCI_RESET_ON_RESUME; 147 xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -159,6 +168,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
159 "QUIRK: Resetting on resume"); 168 "QUIRK: Resetting on resume");
160} 169}
161 170
171/*
172 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
173 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
174 */
175static void xhci_pme_quirk(struct xhci_hcd *xhci)
176{
177 u32 val;
178 void __iomem *reg;
179
180 reg = (void __iomem *) xhci->cap_regs + 0x80a4;
181 val = readl(reg);
182 writel(val | BIT(28), reg);
183 readl(reg);
184}
185
162/* called during probe() after chip reset completes */ 186/* called during probe() after chip reset completes */
163static int xhci_pci_setup(struct usb_hcd *hcd) 187static int xhci_pci_setup(struct usb_hcd *hcd)
164{ 188{
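xhci_pme_quirk() is a read-modify-write of a write-1-to-clear status bit in a vendor-specific register, followed by a read-back so the posted MMIO write is flushed before suspend/resume continues. The same shape as a hedged kernel-style sketch; REG_OFF and FLAG_BIT are placeholders for whatever the hardware defines (0x80a4 and bit 28 in the hunk above):

/* Sketch: clear a write-1-to-clear flag in a memory-mapped register. */
static void clear_w1c_flag(void __iomem *base)
{
	void __iomem *reg = base + REG_OFF;
	u32 val = readl(reg);

	writel(val | BIT(FLAG_BIT), reg);	/* writing 1 clears the flag */
	readl(reg);				/* read back to flush the posted write */
}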
@@ -283,6 +307,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
283 if (xhci->quirks & XHCI_COMP_MODE_QUIRK) 307 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
284 pdev->no_d3cold = true; 308 pdev->no_d3cold = true;
285 309
310 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
311 xhci_pme_quirk(xhci);
312
286 return xhci_suspend(xhci, do_wakeup); 313 return xhci_suspend(xhci, do_wakeup);
287} 314}
288 315
@@ -313,6 +340,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
313 if (pdev->vendor == PCI_VENDOR_ID_INTEL) 340 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
314 usb_enable_intel_xhci_ports(pdev); 341 usb_enable_intel_xhci_ports(pdev);
315 342
343 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
344 xhci_pme_quirk(xhci);
345
316 retval = xhci_resume(xhci, hibernated); 346 retval = xhci_resume(xhci, hibernated);
317 return retval; 347 return retval;
318} 348}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 08d402b15482..0e11d61408ff 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -83,16 +83,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
83 if (irq < 0) 83 if (irq < 0)
84 return -ENODEV; 84 return -ENODEV;
85 85
86
87 if (of_device_is_compatible(pdev->dev.of_node,
88 "marvell,armada-375-xhci") ||
89 of_device_is_compatible(pdev->dev.of_node,
90 "marvell,armada-380-xhci")) {
91 ret = xhci_mvebu_mbus_init_quirk(pdev);
92 if (ret)
93 return ret;
94 }
95
96 /* Initialize dma_mask and coherent_dma_mask to 32-bits */ 86 /* Initialize dma_mask and coherent_dma_mask to 32-bits */
97 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 87 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
98 if (ret) 88 if (ret)
@@ -127,6 +117,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
127 goto put_hcd; 117 goto put_hcd;
128 } 118 }
129 119
120 if (of_device_is_compatible(pdev->dev.of_node,
121 "marvell,armada-375-xhci") ||
122 of_device_is_compatible(pdev->dev.of_node,
123 "marvell,armada-380-xhci")) {
124 ret = xhci_mvebu_mbus_init_quirk(pdev);
125 if (ret)
126 goto disable_clk;
127 }
128
130 ret = usb_add_hcd(hcd, irq, IRQF_SHARED); 129 ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
131 if (ret) 130 if (ret)
132 goto disable_clk; 131 goto disable_clk;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 88da8d629820..5fb66db89e05 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1729 if (!command) 1729 if (!command)
1730 return; 1730 return;
1731 1731
1732 ep->ep_state |= EP_HALTED; 1732 ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED;
1733 ep->stopped_stream = stream_id; 1733 ep->stopped_stream = stream_id;
1734 1734
1735 xhci_queue_reset_ep(xhci, command, slot_id, ep_index); 1735 xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
@@ -1946,7 +1946,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1946 if (event_trb != ep_ring->dequeue) { 1946 if (event_trb != ep_ring->dequeue) {
1947 /* The event was for the status stage */ 1947 /* The event was for the status stage */
1948 if (event_trb == td->last_trb) { 1948 if (event_trb == td->last_trb) {
1949 if (td->urb->actual_length != 0) { 1949 if (td->urb_length_set) {
1950 /* Don't overwrite a previously set error code 1950 /* Don't overwrite a previously set error code
1951 */ 1951 */
1952 if ((*status == -EINPROGRESS || *status == 0) && 1952 if ((*status == -EINPROGRESS || *status == 0) &&
@@ -1960,7 +1960,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1960 td->urb->transfer_buffer_length; 1960 td->urb->transfer_buffer_length;
1961 } 1961 }
1962 } else { 1962 } else {
1963 /* Maybe the event was for the data stage? */ 1963 /*
1964 * Maybe the event was for the data stage? If so, update
1965 * already the actual_length of the URB and flag it as
1966 * set, so that it is not overwritten in the event for
1967 * the last TRB.
1968 */
1969 td->urb_length_set = true;
1964 td->urb->actual_length = 1970 td->urb->actual_length =
1965 td->urb->transfer_buffer_length - 1971 td->urb->transfer_buffer_length -
1966 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 1972 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
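The urb_length_set flag closes a gap in control-transfer accounting: a data stage that happens to move 0 bytes leaves actual_length at 0, which the status-stage handler used to read as "no data-stage event seen" and then report the full transfer_buffer_length. Recording that the data stage already set the length makes the zero stick. A tiny userspace model of the two events:

#include <stdbool.h>
#include <stdio.h>

struct td { unsigned expected, actual; bool length_set; };

/* Data-stage event: record how much really moved and remember that we did. */
static void data_stage(struct td *td, unsigned residue)
{
	td->actual = td->expected - residue;
	td->length_set = true;
}

/* Status-stage event: only fill in the length if no data stage reported it. */
static void status_stage(struct td *td)
{
	if (!td->length_set)
		td->actual = td->expected;
}

int main(void)
{
	struct td td = { .expected = 512 };

	data_stage(&td, 512);		/* short transfer: 0 bytes actually moved */
	status_stage(&td);		/* must NOT turn that back into 512 */
	printf("%u\n", td.actual);	/* 0 */
	return 0;
}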
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ec8ac1674854..b06d1a53652d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1338,6 +1338,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1338 goto exit; 1338 goto exit;
1339 } 1339 }
1340 1340
1341 /* Reject urb if endpoint is in soft reset, queue must stay empty */
1342 if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) {
1343 xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n");
1344 ret = -EINVAL;
1345 }
1346
1341 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) 1347 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1342 size = urb->number_of_packets; 1348 size = urb->number_of_packets;
1343 else 1349 else
@@ -2948,23 +2954,36 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2948 } 2954 }
2949} 2955}
2950 2956
2951/* Called when clearing halted device. The core should have sent the control 2957/* Called after clearing a halted device. USB core should have sent the control
2952 * message to clear the device halt condition. The host side of the halt should 2958 * message to clear the device halt condition. The host side of the halt should
2953 * already be cleared with a reset endpoint command issued when the STALL tx 2959 * already be cleared with a reset endpoint command issued immediately when the
2954 * event was received. 2960 * STALL tx event was received.
2955 *
2956 * Context: in_interrupt
2957 */ 2961 */
2958 2962
2959void xhci_endpoint_reset(struct usb_hcd *hcd, 2963void xhci_endpoint_reset(struct usb_hcd *hcd,
2960 struct usb_host_endpoint *ep) 2964 struct usb_host_endpoint *ep)
2961{ 2965{
2962 struct xhci_hcd *xhci; 2966 struct xhci_hcd *xhci;
2967 struct usb_device *udev;
2968 struct xhci_virt_device *virt_dev;
2969 struct xhci_virt_ep *virt_ep;
2970 struct xhci_input_control_ctx *ctrl_ctx;
2971 struct xhci_command *command;
2972 unsigned int ep_index, ep_state;
2973 unsigned long flags;
2974 u32 ep_flag;
2963 2975
2964 xhci = hcd_to_xhci(hcd); 2976 xhci = hcd_to_xhci(hcd);
2977 udev = (struct usb_device *) ep->hcpriv;
2978 if (!ep->hcpriv)
2979 return;
2980 virt_dev = xhci->devs[udev->slot_id];
2981 ep_index = xhci_get_endpoint_index(&ep->desc);
2982 virt_ep = &virt_dev->eps[ep_index];
2983 ep_state = virt_ep->ep_state;
2965 2984
2966 /* 2985 /*
2967 * We might need to implement the config ep cmd in xhci 4.8.1 note: 2986 * Implement the config ep command in xhci 4.6.8 additional note:
2968 * The Reset Endpoint Command may only be issued to endpoints in the 2987 * The Reset Endpoint Command may only be issued to endpoints in the
2969 * Halted state. If software wishes reset the Data Toggle or Sequence 2988 * Halted state. If software wishes reset the Data Toggle or Sequence
2970 * Number of an endpoint that isn't in the Halted state, then software 2989 * Number of an endpoint that isn't in the Halted state, then software
@@ -2972,9 +2991,72 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
2972 * for the target endpoint. that is in the Stopped state. 2991 * for the target endpoint. that is in the Stopped state.
2973 */ 2992 */
2974 2993
2975 /* For now just print debug to follow the situation */ 2994 if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) {
2976 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", 2995 virt_ep->ep_state &= ~EP_RECENTLY_HALTED;
2977 ep->desc.bEndpointAddress); 2996 xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n");
2997 return;
2998 }
2999
3000 /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */
3001 if (usb_endpoint_xfer_control(&ep->desc) ||
3002 usb_endpoint_xfer_isoc(&ep->desc))
3003 return;
3004
3005 ep_flag = xhci_get_endpoint_flag(&ep->desc);
3006
3007 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3008 return;
3009
3010 command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT);
3011 if (!command) {
3012 xhci_err(xhci, "Could not allocate xHCI command structure.\n");
3013 return;
3014 }
3015
3016 spin_lock_irqsave(&xhci->lock, flags);
3017
3018 /* block ringing ep doorbell */
3019 virt_ep->ep_state |= EP_CONFIG_PENDING;
3020
3021 /*
3022 * Make sure endpoint ring is empty before resetting the toggle/seq.
 3023 * Driver is required to synchronously cancel all transfer requests.
3024 *
3025 * xhci 4.6.6 says we can issue a configure endpoint command on a
3026 * running endpoint ring as long as it's idle (queue empty)
3027 */
3028
3029 if (!list_empty(&virt_ep->ring->td_list)) {
3030 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3031 spin_unlock_irqrestore(&xhci->lock, flags);
3032 goto cleanup;
3033 }
3034
3035 xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n",
3036 udev->slot_id, ep_index);
3037
3038 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3039 if (!ctrl_ctx) {
3040 xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n",
3041 virt_dev, virt_dev->in_ctx);
3042 spin_unlock_irqrestore(&xhci->lock, flags);
3043 goto cleanup;
3044 }
3045 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3046 virt_dev->out_ctx, ctrl_ctx,
3047 ep_flag, ep_flag);
3048 xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index);
3049
3050 xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma,
3051 udev->slot_id, false);
3052 xhci_ring_cmd_db(xhci);
3053 spin_unlock_irqrestore(&xhci->lock, flags);
3054
3055 wait_for_completion(command->completion);
3056
3057cleanup:
3058 virt_ep->ep_state &= ~EP_CONFIG_PENDING;
3059 xhci_free_command(xhci, command);
2978} 3060}
2979 3061
2980static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 3062static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 974514762a14..265ab1771d24 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1,3 +1,4 @@
1
1/* 2/*
2 * xHCI host controller driver 3 * xHCI host controller driver
3 * 4 *
@@ -88,9 +89,10 @@ struct xhci_cap_regs {
88#define HCS_IST(p) (((p) >> 0) & 0xf) 89#define HCS_IST(p) (((p) >> 0) & 0xf)
89/* bits 4:7, max number of Event Ring segments */ 90/* bits 4:7, max number of Event Ring segments */
90#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) 91#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
92/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
91/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ 93/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
92/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ 94/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
93#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) 95#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
94 96
95/* HCSPARAMS3 - hcs_params3 - bitmasks */ 97/* HCSPARAMS3 - hcs_params3 - bitmasks */
96/* bits 0:7, Max U1 to U0 latency for the roothub ports */ 98/* bits 0:7, Max U1 to U0 latency for the roothub ports */
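The new HCS_MAX_SCRATCHPAD() assembles the scratchpad-buffer count from both HCSPARAMS2 fields: bits 21:25 carry the high five bits and bits 27:31 the low five, so controllers can report up to 1023 buffers instead of 31. A worked example of the arithmetic, runnable in userspace:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the new macro: hi bits 21:25 land at result positions
 * 5..9, lo bits 27:31 at result positions 0..4. */
static unsigned max_scratchpad(uint32_t hcs_params2)
{
	return ((hcs_params2 >> 16) & 0x3e0) | ((hcs_params2 >> 27) & 0x1f);
}

int main(void)
{
	/* hypothetical register value: hi field = 2, lo field = 3 -> 2*32 + 3 */
	uint32_t p = (2u << 21) | (3u << 27);

	printf("%u\n", max_scratchpad(p));	/* 67 */
	return 0;
}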
@@ -863,6 +865,8 @@ struct xhci_virt_ep {
863#define EP_HAS_STREAMS (1 << 4) 865#define EP_HAS_STREAMS (1 << 4)
864/* Transitioning the endpoint to not using streams, don't enqueue URBs */ 866/* Transitioning the endpoint to not using streams, don't enqueue URBs */
865#define EP_GETTING_NO_STREAMS (1 << 5) 867#define EP_GETTING_NO_STREAMS (1 << 5)
868#define EP_RECENTLY_HALTED (1 << 6)
869#define EP_CONFIG_PENDING (1 << 7)
866 /* ---- Related to URB cancellation ---- */ 870 /* ---- Related to URB cancellation ---- */
867 struct list_head cancelled_td_list; 871 struct list_head cancelled_td_list;
868 struct xhci_td *stopped_td; 872 struct xhci_td *stopped_td;
@@ -1288,6 +1292,8 @@ struct xhci_td {
1288 struct xhci_segment *start_seg; 1292 struct xhci_segment *start_seg;
1289 union xhci_trb *first_trb; 1293 union xhci_trb *first_trb;
1290 union xhci_trb *last_trb; 1294 union xhci_trb *last_trb;
1295 /* actual_length of the URB has already been set */
1296 bool urb_length_set;
1291}; 1297};
1292 1298
1293/* xHCI command default timeout value */ 1299/* xHCI command default timeout value */
@@ -1560,6 +1566,7 @@ struct xhci_hcd {
1560#define XHCI_SPURIOUS_WAKEUP (1 << 18) 1566#define XHCI_SPURIOUS_WAKEUP (1 << 18)
1561/* For controllers with a broken beyond repair streams implementation */ 1567/* For controllers with a broken beyond repair streams implementation */
1562#define XHCI_BROKEN_STREAMS (1 << 19) 1568#define XHCI_BROKEN_STREAMS (1 << 19)
1569#define XHCI_PME_STUCK_QUIRK (1 << 20)
1563 unsigned int num_active_eps; 1570 unsigned int num_active_eps;
1564 unsigned int limit_active_eps; 1571 unsigned int limit_active_eps;
1565 /* There are two roothubs to keep track of bus suspend info for */ 1572 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index eba9b82e2d70..3cb98b1d5d29 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1274,7 +1274,7 @@ static void errata2_function(unsigned long data)
1274 for (slot = 0; slot < 32; slot++) 1274 for (slot = 0; slot < 32; slot++)
1275 if (priv->atl_slots[slot].qh && time_after(jiffies, 1275 if (priv->atl_slots[slot].qh && time_after(jiffies,
1276 priv->atl_slots[slot].timestamp + 1276 priv->atl_slots[slot].timestamp +
1277 SLOT_TIMEOUT * HZ / 1000)) { 1277 msecs_to_jiffies(SLOT_TIMEOUT))) {
1278 ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); 1278 ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
1279 if (!FROM_DW0_VALID(ptd.dw0) && 1279 if (!FROM_DW0_VALID(ptd.dw0) &&
1280 !FROM_DW3_ACTIVE(ptd.dw3)) 1280 !FROM_DW3_ACTIVE(ptd.dw3))
@@ -1286,7 +1286,7 @@ static void errata2_function(unsigned long data)
1286 1286
1287 spin_unlock_irqrestore(&priv->lock, spinflags); 1287 spin_unlock_irqrestore(&priv->lock, spinflags);
1288 1288
1289 errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000; 1289 errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
1290 add_timer(&errata2_timer); 1290 add_timer(&errata2_timer);
1291} 1291}
1292 1292
@@ -1336,7 +1336,7 @@ static int isp1760_run(struct usb_hcd *hcd)
1336 return retval; 1336 return retval;
1337 1337
1338 setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd); 1338 setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd);
1339 errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000; 1339 errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
1340 add_timer(&errata2_timer); 1340 add_timer(&errata2_timer);
1341 1341
1342 chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG); 1342 chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
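The isp1760 timer math moves from open-coded "msec * HZ / 1000" to msecs_to_jiffies(). For these particular constants the two are likely identical, but the helper rounds up and never turns a small nonzero delay into a zero-jiffy timeout on low-HZ configs, which is the failure mode the open-coded form allows. A quick userspace illustration (msecs_to_jiffies() itself is kernel-only, so the rounded-up behaviour is re-created here):

#include <stdio.h>

/* Rounded-up conversion, modelling what msecs_to_jiffies() guarantees. */
static unsigned long to_jiffies(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000;
}

int main(void)
{
	unsigned int hz = 100;	/* a common low-HZ config */

	/* open-coded form: 5 * 100 / 1000 == 0 -> timer could fire immediately */
	printf("open-coded: %u\n", 5 * hz / 1000);
	/* helper-style rounding never collapses a nonzero delay to zero */
	printf("rounded up: %lu\n", to_jiffies(5, hz));	/* 1 */
	return 0;
}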
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index e6f4cbfeed97..067920f2d570 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1969,10 +1969,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1969 goto fail0; 1969 goto fail0;
1970 } 1970 }
1971 1971
1972 pm_runtime_use_autosuspend(musb->controller);
1973 pm_runtime_set_autosuspend_delay(musb->controller, 200);
1974 pm_runtime_enable(musb->controller);
1975
1976 spin_lock_init(&musb->lock); 1972 spin_lock_init(&musb->lock);
1977 musb->board_set_power = plat->set_power; 1973 musb->board_set_power = plat->set_power;
1978 musb->min_power = plat->min_power; 1974 musb->min_power = plat->min_power;
@@ -1991,6 +1987,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1991 musb_readl = musb_default_readl; 1987 musb_readl = musb_default_readl;
1992 musb_writel = musb_default_writel; 1988 musb_writel = musb_default_writel;
1993 1989
1990 /* We need musb_read/write functions initialized for PM */
1991 pm_runtime_use_autosuspend(musb->controller);
1992 pm_runtime_set_autosuspend_delay(musb->controller, 200);
1993 pm_runtime_irq_safe(musb->controller);
1994 pm_runtime_enable(musb->controller);
1995
1994 /* The musb_platform_init() call: 1996 /* The musb_platform_init() call:
1995 * - adjusts musb->mregs 1997 * - adjusts musb->mregs
1996 * - sets the musb->isr 1998 * - sets the musb->isr
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 53bd0e71d19f..a900c9877195 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -457,12 +457,27 @@ static int dsps_musb_init(struct musb *musb)
457 if (IS_ERR(musb->xceiv)) 457 if (IS_ERR(musb->xceiv))
458 return PTR_ERR(musb->xceiv); 458 return PTR_ERR(musb->xceiv);
459 459
460 musb->phy = devm_phy_get(dev->parent, "usb2-phy");
461
460 /* Returns zero if e.g. not clocked */ 462 /* Returns zero if e.g. not clocked */
461 rev = dsps_readl(reg_base, wrp->revision); 463 rev = dsps_readl(reg_base, wrp->revision);
462 if (!rev) 464 if (!rev)
463 return -ENODEV; 465 return -ENODEV;
464 466
465 usb_phy_init(musb->xceiv); 467 usb_phy_init(musb->xceiv);
468 if (IS_ERR(musb->phy)) {
469 musb->phy = NULL;
470 } else {
471 ret = phy_init(musb->phy);
472 if (ret < 0)
473 return ret;
474 ret = phy_power_on(musb->phy);
475 if (ret) {
476 phy_exit(musb->phy);
477 return ret;
478 }
479 }
480
466 setup_timer(&glue->timer, otg_timer, (unsigned long) musb); 481 setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
467 482
468 /* Reset the musb */ 483 /* Reset the musb */
@@ -502,6 +517,8 @@ static int dsps_musb_exit(struct musb *musb)
502 517
503 del_timer_sync(&glue->timer); 518 del_timer_sync(&glue->timer);
504 usb_phy_shutdown(musb->xceiv); 519 usb_phy_shutdown(musb->xceiv);
520 phy_power_off(musb->phy);
521 phy_exit(musb->phy);
505 debugfs_remove_recursive(glue->dbgfs_root); 522 debugfs_remove_recursive(glue->dbgfs_root);
506 523
507 return 0; 524 return 0;
@@ -610,7 +627,7 @@ static int dsps_musb_reset(struct musb *musb)
610 struct device *dev = musb->controller; 627 struct device *dev = musb->controller;
611 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 628 struct dsps_glue *glue = dev_get_drvdata(dev->parent);
612 const struct dsps_musb_wrapper *wrp = glue->wrp; 629 const struct dsps_musb_wrapper *wrp = glue->wrp;
613 int session_restart = 0; 630 int session_restart = 0, error;
614 631
615 if (glue->sw_babble_enabled) 632 if (glue->sw_babble_enabled)
616 session_restart = sw_babble_control(musb); 633 session_restart = sw_babble_control(musb);
@@ -624,8 +641,14 @@ static int dsps_musb_reset(struct musb *musb)
624 dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset)); 641 dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset));
625 usleep_range(100, 200); 642 usleep_range(100, 200);
626 usb_phy_shutdown(musb->xceiv); 643 usb_phy_shutdown(musb->xceiv);
644 error = phy_power_off(musb->phy);
645 if (error)
646 dev_err(dev, "phy shutdown failed: %i\n", error);
627 usleep_range(100, 200); 647 usleep_range(100, 200);
628 usb_phy_init(musb->xceiv); 648 usb_phy_init(musb->xceiv);
649 error = phy_power_on(musb->phy);
650 if (error)
651 dev_err(dev, "phy powerup failed: %i\n", error);
629 session_restart = 1; 652 session_restart = 1;
630 } 653 }
631 654
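dsps_musb_init() now also picks up a generic PHY ("usb2-phy") alongside the legacy usb_phy and treats it as optional: a failed devm_phy_get() simply leaves musb->phy NULL, otherwise the PHY is brought up with phy_init() + phy_power_on() and torn down in reverse order at exit and across the babble-recovery reset. The generic PHY calls accept a NULL phy and return 0, which is what keeps the optional handling tidy. A hedged sketch of the pattern; the device pointer, consumer name and output parameter are placeholders:

/* Sketch: optional generic PHY bring-up, as used in the hunks above. */
static int bring_up_optional_phy(struct device *dev, struct phy **out)
{
	struct phy *phy = devm_phy_get(dev, "usb2-phy");
	int ret;

	if (IS_ERR(phy)) {
		*out = NULL;		/* no PHY described: carry on without one */
		return 0;
	}

	ret = phy_init(phy);
	if (ret)
		return ret;
	ret = phy_power_on(phy);
	if (ret) {
		phy_exit(phy);
		return ret;
	}
	*out = phy;
	return 0;
}
/* teardown is phy_power_off(phy); phy_exit(phy); both accept a NULL phy */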
@@ -687,7 +710,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
687 struct musb_hdrc_config *config; 710 struct musb_hdrc_config *config;
688 struct platform_device *musb; 711 struct platform_device *musb;
689 struct device_node *dn = parent->dev.of_node; 712 struct device_node *dn = parent->dev.of_node;
690 int ret; 713 int ret, val;
691 714
692 memset(resources, 0, sizeof(resources)); 715 memset(resources, 0, sizeof(resources));
693 res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc"); 716 res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc");
@@ -739,7 +762,10 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
739 pdata.mode = get_musb_port_mode(dev); 762 pdata.mode = get_musb_port_mode(dev);
740 /* DT keeps this entry in mA, musb expects it as per USB spec */ 763 /* DT keeps this entry in mA, musb expects it as per USB spec */
741 pdata.power = get_int_prop(dn, "mentor,power") / 2; 764 pdata.power = get_int_prop(dn, "mentor,power") / 2;
742 config->multipoint = of_property_read_bool(dn, "mentor,multipoint"); 765
766 ret = of_property_read_u32(dn, "mentor,multipoint", &val);
767 if (!ret && val)
768 config->multipoint = true;
743 769
744 ret = platform_device_add_data(musb, &pdata, sizeof(pdata)); 770 ret = platform_device_add_data(musb, &pdata, sizeof(pdata));
745 if (ret) { 771 if (ret) {
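The multipoint change is about how the property is written in existing device trees: "mentor,multipoint = <1>" is an integer property, and of_property_read_bool() only tests for presence, so a board that explicitly sets it to <0> would still be treated as multipoint-capable. Reading the u32 and testing its value honours the explicit zero. The same check isolated into a helper (np is whichever node the caller holds):

static bool dt_multipoint(struct device_node *np)
{
	u32 val;

	/* of_property_read_bool() is true whenever the property exists,
	 * even as <0>; read the value and only trust a nonzero setting. */
	return !of_property_read_u32(np, "mentor,multipoint", &val) && val;
}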
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 883a9adfdfff..c3d5fc9dfb5b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2613,7 +2613,7 @@ static const struct hc_driver musb_hc_driver = {
2613 .description = "musb-hcd", 2613 .description = "musb-hcd",
2614 .product_desc = "MUSB HDRC host driver", 2614 .product_desc = "MUSB HDRC host driver",
2615 .hcd_priv_size = sizeof(struct musb *), 2615 .hcd_priv_size = sizeof(struct musb *),
2616 .flags = HCD_USB2 | HCD_MEMORY, 2616 .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
2617 2617
2618 /* not using irq handler or reset hooks from usbcore, since 2618 /* not using irq handler or reset hooks from usbcore, since
2619 * those must be shared with peripheral code for OTG configs 2619 * those must be shared with peripheral code for OTG configs
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 763649eb4987..cc752d8c7773 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -516,7 +516,7 @@ static int omap2430_probe(struct platform_device *pdev)
516 struct omap2430_glue *glue; 516 struct omap2430_glue *glue;
517 struct device_node *np = pdev->dev.of_node; 517 struct device_node *np = pdev->dev.of_node;
518 struct musb_hdrc_config *config; 518 struct musb_hdrc_config *config;
519 int ret = -ENOMEM; 519 int ret = -ENOMEM, val;
520 520
521 glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); 521 glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
522 if (!glue) 522 if (!glue)
@@ -559,7 +559,10 @@ static int omap2430_probe(struct platform_device *pdev)
559 of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); 559 of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
560 of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); 560 of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
561 of_property_read_u32(np, "power", (u32 *)&pdata->power); 561 of_property_read_u32(np, "power", (u32 *)&pdata->power);
562 config->multipoint = of_property_read_bool(np, "multipoint"); 562
563 ret = of_property_read_u32(np, "multipoint", &val);
564 if (!ret && val)
565 config->multipoint = true;
563 566
564 pdata->board_data = data; 567 pdata->board_data = data;
565 pdata->config = config; 568 pdata->config = config;
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index de83b9d0cd5c..ebc99ee076ce 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -6,6 +6,7 @@ config USB_RENESAS_USBHS
6 tristate 'Renesas USBHS controller' 6 tristate 'Renesas USBHS controller'
7 depends on USB_GADGET 7 depends on USB_GADGET
8 depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST 8 depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
9 depends on EXTCON || !EXTCON # if EXTCON=m, USBHS cannot be built-in
9 default n 10 default n
10 help 11 help
11 Renesas USBHS is a discrete USB host and peripheral controller chip 12 Renesas USBHS is a discrete USB host and peripheral controller chip
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 9374bd2aba20..8936a83c96cd 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -38,56 +38,51 @@ static int usb_serial_device_match(struct device *dev,
38 return 0; 38 return 0;
39} 39}
40 40
41static ssize_t port_number_show(struct device *dev,
42 struct device_attribute *attr, char *buf)
43{
44 struct usb_serial_port *port = to_usb_serial_port(dev);
45
46 return sprintf(buf, "%d\n", port->port_number);
47}
48static DEVICE_ATTR_RO(port_number);
49
50static int usb_serial_device_probe(struct device *dev) 41static int usb_serial_device_probe(struct device *dev)
51{ 42{
52 struct usb_serial_driver *driver; 43 struct usb_serial_driver *driver;
53 struct usb_serial_port *port; 44 struct usb_serial_port *port;
45 struct device *tty_dev;
54 int retval = 0; 46 int retval = 0;
55 int minor; 47 int minor;
56 48
57 port = to_usb_serial_port(dev); 49 port = to_usb_serial_port(dev);
58 if (!port) { 50 if (!port)
59 retval = -ENODEV; 51 return -ENODEV;
60 goto exit;
61 }
62 52
63 /* make sure suspend/resume doesn't race against port_probe */ 53 /* make sure suspend/resume doesn't race against port_probe */
64 retval = usb_autopm_get_interface(port->serial->interface); 54 retval = usb_autopm_get_interface(port->serial->interface);
65 if (retval) 55 if (retval)
66 goto exit; 56 return retval;
67 57
68 driver = port->serial->type; 58 driver = port->serial->type;
69 if (driver->port_probe) { 59 if (driver->port_probe) {
70 retval = driver->port_probe(port); 60 retval = driver->port_probe(port);
71 if (retval) 61 if (retval)
72 goto exit_with_autopm; 62 goto err_autopm_put;
73 } 63 }
74 64
75 retval = device_create_file(dev, &dev_attr_port_number); 65 minor = port->minor;
76 if (retval) { 66 tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
77 if (driver->port_remove) 67 if (IS_ERR(tty_dev)) {
78 retval = driver->port_remove(port); 68 retval = PTR_ERR(tty_dev);
79 goto exit_with_autopm; 69 goto err_port_remove;
80 } 70 }
81 71
82 minor = port->minor; 72 usb_autopm_put_interface(port->serial->interface);
83 tty_register_device(usb_serial_tty_driver, minor, dev); 73
84 dev_info(&port->serial->dev->dev, 74 dev_info(&port->serial->dev->dev,
85 "%s converter now attached to ttyUSB%d\n", 75 "%s converter now attached to ttyUSB%d\n",
86 driver->description, minor); 76 driver->description, minor);
87 77
88exit_with_autopm: 78 return 0;
79
80err_port_remove:
81 if (driver->port_remove)
82 driver->port_remove(port);
83err_autopm_put:
89 usb_autopm_put_interface(port->serial->interface); 84 usb_autopm_put_interface(port->serial->interface);
90exit: 85
91 return retval; 86 return retval;
92} 87}
93 88
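The reworked usb_serial_device_probe() checks the result of tty_register_device(), which reports failure as an ERR_PTR rather than NULL, and unwinds through labelled gotos in reverse order of setup (undo port_probe, then drop the autopm reference). The manual create/remove of the port_number attribute disappears from probe/remove here, presumably because it is registered elsewhere as a default attribute of the port device. A small userspace model of the unwind ordering, with made-up step names:

#include <stdio.h>

/* Model of the goto-unwind idiom: each error label undoes only the steps
 * that already succeeded, in reverse order. Step names are invented. */
static int  get_resource(void)  { puts("get resource");  return 0; }
static void put_resource(void)  { puts("put resource");  }
static int  register_node(void) { puts("register node"); return -1; }	/* pretend failure */

static int probe(void)
{
	int ret;

	ret = get_resource();
	if (ret)
		return ret;

	ret = register_node();
	if (ret)
		goto err_put_resource;

	return 0;

err_put_resource:
	put_resource();
	return ret;
}

int main(void)
{
	printf("probe -> %d\n", probe());
	return 0;
}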
@@ -114,8 +109,6 @@ static int usb_serial_device_remove(struct device *dev)
114 minor = port->minor; 109 minor = port->minor;
115 tty_unregister_device(usb_serial_tty_driver, minor); 110 tty_unregister_device(usb_serial_tty_driver, minor);
116 111
117 device_remove_file(&port->dev, &dev_attr_port_number);
118
119 driver = port->serial->type; 112 driver = port->serial->type;
120 if (driver->port_remove) 113 if (driver->port_remove)
121 retval = driver->port_remove(port); 114 retval = driver->port_remove(port);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2d72aa3564a3..ede4f5fcfadd 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -84,6 +84,10 @@ struct ch341_private {
84 u8 line_status; /* active status of modem control inputs */ 84 u8 line_status; /* active status of modem control inputs */
85}; 85};
86 86
87static void ch341_set_termios(struct tty_struct *tty,
88 struct usb_serial_port *port,
89 struct ktermios *old_termios);
90
87static int ch341_control_out(struct usb_device *dev, u8 request, 91static int ch341_control_out(struct usb_device *dev, u8 request,
88 u16 value, u16 index) 92 u16 value, u16 index)
89{ 93{
@@ -309,19 +313,12 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
309 struct ch341_private *priv = usb_get_serial_port_data(port); 313 struct ch341_private *priv = usb_get_serial_port_data(port);
310 int r; 314 int r;
311 315
312 priv->baud_rate = DEFAULT_BAUD_RATE;
313
314 r = ch341_configure(serial->dev, priv); 316 r = ch341_configure(serial->dev, priv);
315 if (r) 317 if (r)
316 goto out; 318 goto out;
317 319
318 r = ch341_set_handshake(serial->dev, priv->line_control); 320 if (tty)
319 if (r) 321 ch341_set_termios(tty, port, NULL);
320 goto out;
321
322 r = ch341_set_baudrate(serial->dev, priv);
323 if (r)
324 goto out;
325 322
326 dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__); 323 dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__);
327 r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 324 r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 29fa1c3d0089..3806e7014199 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -14,6 +14,7 @@
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/tty.h> 19#include <linux/tty.h>
19#include <linux/console.h> 20#include <linux/console.h>
@@ -144,6 +145,7 @@ static int usb_console_setup(struct console *co, char *options)
144 init_ldsem(&tty->ldisc_sem); 145 init_ldsem(&tty->ldisc_sem);
145 INIT_LIST_HEAD(&tty->tty_files); 146 INIT_LIST_HEAD(&tty->tty_files);
146 kref_get(&tty->driver->kref); 147 kref_get(&tty->driver->kref);
148 __module_get(tty->driver->owner);
147 tty->ops = &usb_console_fake_tty_ops; 149 tty->ops = &usb_console_fake_tty_ops;
148 if (tty_init_termios(tty)) { 150 if (tty_init_termios(tty)) {
149 retval = -ENOMEM; 151 retval = -ENOMEM;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f40c856ff758..84ce2d74894c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = {
147 { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ 147 { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
148 { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ 148 { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
149 { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ 149 { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
150 { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
151 { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
150 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ 152 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
151 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ 153 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
152 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ 154 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1ebb351b9e9a..3086dec0ef53 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -799,6 +799,8 @@ static const struct usb_device_id id_table_combined[] = {
799 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, 799 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
800 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, 800 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
801 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, 801 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
802 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
803 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
802 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), 804 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
803 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 805 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
804 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), 806 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -978,6 +980,23 @@ static const struct usb_device_id id_table_combined[] = {
978 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, 980 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
979 /* GE Healthcare devices */ 981 /* GE Healthcare devices */
980 { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, 982 { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
983 /* Active Research (Actisense) devices */
984 { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
985 { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
986 { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
987 { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
988 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
989 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
990 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
991 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
992 { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
993 { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
994 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
995 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
996 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
997 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
998 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
999 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
981 { } /* Terminating entry */ 1000 { } /* Terminating entry */
982}; 1001};
983 1002
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e52409c9be99..56b1b55c4751 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,9 @@
38 38
39#define FTDI_LUMEL_PD12_PID 0x6002 39#define FTDI_LUMEL_PD12_PID 0x6002
40 40
41/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
42#define CYBER_CORTEX_AV_PID 0x8698
43
41/* 44/*
42 * Marvell OpenRD Base, Client 45 * Marvell OpenRD Base, Client
43 * http://www.open-rd.org 46 * http://www.open-rd.org
@@ -1438,3 +1441,23 @@
1438 */ 1441 */
1439#define GE_HEALTHCARE_VID 0x1901 1442#define GE_HEALTHCARE_VID 0x1901
1440#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 1443#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
1444
1445/*
1446 * Active Research (Actisense) devices
1447 */
1448#define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */
1449#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
1450#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
1451#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
1452#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
1453#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
1454#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
1455#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
1456#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
1457#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
1458#define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */
1459#define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */
1460#define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */
1461#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */
1462#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
1463#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ccf1df7c4b80..54e170dd3dad 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
258 * character or at least one jiffy. 258 * character or at least one jiffy.
259 */ 259 */
260 period = max_t(unsigned long, (10 * HZ / bps), 1); 260 period = max_t(unsigned long, (10 * HZ / bps), 1);
261 period = min_t(unsigned long, period, timeout); 261 if (timeout)
262 period = min_t(unsigned long, period, timeout);
262 263
263 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", 264 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
264 __func__, jiffies_to_msecs(timeout), 265 __func__, jiffies_to_msecs(timeout),
@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
268 schedule_timeout_interruptible(period); 269 schedule_timeout_interruptible(period);
269 if (signal_pending(current)) 270 if (signal_pending(current))
270 break; 271 break;
271 if (time_after(jiffies, expire)) 272 if (timeout && time_after(jiffies, expire))
272 break; 273 break;
273 } 274 }
274} 275}
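
The two added guards make a zero timeout mean "wait indefinitely": the polling period is no longer clamped down to zero and the expiry check is skipped. Below is a user-space sketch of the same bounded-poll logic, assuming a hypothetical drained() callback that stands in for the driver's buffer check; it is illustrative only, not the driver code.

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/* Poll until drained() reports the FIFO is empty, or until timeout_ms
 * elapses.  A timeout of 0 means "no limit", mirroring the change above. */
static void wait_until_sent(bool (*drained)(void), long timeout_ms, long period_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);

	/* Never sleep longer than the remaining budget (only when one is set). */
	if (timeout_ms && period_ms > timeout_ms)
		period_ms = timeout_ms;

	while (!drained()) {
		usleep(period_ms * 1000);

		if (!timeout_ms)
			continue;	/* zero timeout: keep polling forever */

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
			break;
	}
}

static bool always_drained(void) { return true; }

int main(void)
{
	wait_until_sent(always_drained, 0, 10);	/* returns immediately */
	return 0;
}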
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index ab1d690274ae..460a40669967 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
1284 } 1284 }
1285 1285
1286 /* Initial port termios */ 1286 /* Initial port termios */
1287 mxuport_set_termios(tty, port, NULL); 1287 if (tty)
1288 mxuport_set_termios(tty, port, NULL);
1288 1289
1289 /* 1290 /*
1290 * TODO: use RQ_VENDOR_GET_MSR, once we know what it 1291 * TODO: use RQ_VENDOR_GET_MSR, once we know what it
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 0f872e6b2c87..829604d11f3f 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -132,6 +132,7 @@ MODULE_DEVICE_TABLE(usb, id_table);
132#define UART_OVERRUN_ERROR 0x40 132#define UART_OVERRUN_ERROR 0x40
133#define UART_CTS 0x80 133#define UART_CTS 0x80
134 134
135static void pl2303_set_break(struct usb_serial_port *port, bool enable);
135 136
136enum pl2303_type { 137enum pl2303_type {
137 TYPE_01, /* Type 0 and 1 (difference unknown) */ 138 TYPE_01, /* Type 0 and 1 (difference unknown) */
@@ -615,6 +616,7 @@ static void pl2303_close(struct usb_serial_port *port)
615{ 616{
616 usb_serial_generic_close(port); 617 usb_serial_generic_close(port);
617 usb_kill_urb(port->interrupt_in_urb); 618 usb_kill_urb(port->interrupt_in_urb);
619 pl2303_set_break(port, false);
618} 620}
619 621
620static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) 622static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -741,17 +743,16 @@ static int pl2303_ioctl(struct tty_struct *tty,
741 return -ENOIOCTLCMD; 743 return -ENOIOCTLCMD;
742} 744}
743 745
744static void pl2303_break_ctl(struct tty_struct *tty, int break_state) 746static void pl2303_set_break(struct usb_serial_port *port, bool enable)
745{ 747{
746 struct usb_serial_port *port = tty->driver_data;
747 struct usb_serial *serial = port->serial; 748 struct usb_serial *serial = port->serial;
748 u16 state; 749 u16 state;
749 int result; 750 int result;
750 751
751 if (break_state == 0) 752 if (enable)
752 state = BREAK_OFF;
753 else
754 state = BREAK_ON; 753 state = BREAK_ON;
754 else
755 state = BREAK_OFF;
755 756
756 dev_dbg(&port->dev, "%s - turning break %s\n", __func__, 757 dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
757 state == BREAK_OFF ? "off" : "on"); 758 state == BREAK_OFF ? "off" : "on");
@@ -763,6 +764,13 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
763 dev_err(&port->dev, "error sending break = %d\n", result); 764 dev_err(&port->dev, "error sending break = %d\n", result);
764} 765}
765 766
767static void pl2303_break_ctl(struct tty_struct *tty, int state)
768{
769 struct usb_serial_port *port = tty->driver_data;
770
771 pl2303_set_break(port, state);
772}
773
766static void pl2303_update_line_status(struct usb_serial_port *port, 774static void pl2303_update_line_status(struct usb_serial_port *port,
767 unsigned char *data, 775 unsigned char *data,
768 unsigned int actual_length) 776 unsigned int actual_length)
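
With break handling factored into pl2303_set_break(), the close path above can now drop any break condition that was left asserted when the port is shut down. A hedged user-space example of driving break through the standard tty ioctls follows; the device node name is an assumption and the usleep() hold time is arbitrary.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);	/* hypothetical node */

	if (fd < 0)
		return 1;

	ioctl(fd, TIOCSBRK, 0);		/* assert break */
	usleep(250000);			/* hold it for 250 ms */
	ioctl(fd, TIOCCBRK, 0);		/* clear break */

	/* Even if TIOCCBRK were skipped, the patched driver now clears the
	 * break condition itself when the port is closed. */
	close(fd);
	return 0;
}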
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 475723c006f9..529066bbc7e8 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -687,6 +687,21 @@ static void serial_port_dtr_rts(struct tty_port *port, int on)
687 drv->dtr_rts(p, on); 687 drv->dtr_rts(p, on);
688} 688}
689 689
690static ssize_t port_number_show(struct device *dev,
691 struct device_attribute *attr, char *buf)
692{
693 struct usb_serial_port *port = to_usb_serial_port(dev);
694
695 return sprintf(buf, "%u\n", port->port_number);
696}
697static DEVICE_ATTR_RO(port_number);
698
699static struct attribute *usb_serial_port_attrs[] = {
700 &dev_attr_port_number.attr,
701 NULL
702};
703ATTRIBUTE_GROUPS(usb_serial_port);
704
690static const struct tty_port_operations serial_port_ops = { 705static const struct tty_port_operations serial_port_ops = {
691 .carrier_raised = serial_port_carrier_raised, 706 .carrier_raised = serial_port_carrier_raised,
692 .dtr_rts = serial_port_dtr_rts, 707 .dtr_rts = serial_port_dtr_rts,
@@ -902,6 +917,7 @@ static int usb_serial_probe(struct usb_interface *interface,
902 port->dev.driver = NULL; 917 port->dev.driver = NULL;
903 port->dev.bus = &usb_serial_bus_type; 918 port->dev.bus = &usb_serial_bus_type;
904 port->dev.release = &usb_serial_port_release; 919 port->dev.release = &usb_serial_port_release;
920 port->dev.groups = usb_serial_port_groups;
905 device_initialize(&port->dev); 921 device_initialize(&port->dev);
906 } 922 }
907 923
@@ -940,8 +956,9 @@ static int usb_serial_probe(struct usb_interface *interface,
940 port = serial->port[i]; 956 port = serial->port[i];
941 if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) 957 if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
942 goto probe_error; 958 goto probe_error;
943 buffer_size = max_t(int, serial->type->bulk_out_size, 959 buffer_size = serial->type->bulk_out_size;
944 usb_endpoint_maxp(endpoint)); 960 if (!buffer_size)
961 buffer_size = usb_endpoint_maxp(endpoint);
945 port->bulk_out_size = buffer_size; 962 port->bulk_out_size = buffer_size;
946 port->bulk_out_endpointAddress = endpoint->bEndpointAddress; 963 port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
947 964
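
Hooking port_number up through port->dev.groups means the attribute exists before the port device is announced to user space, instead of being created and removed by hand around registration. Once a port is bound, the number can be read straight from sysfs; a small sketch follows, where the exact sysfs path is an example and depends on the device name.

#include <stdio.h>

int main(void)
{
	/* Example path only; substitute the actual ttyUSBx device name. */
	FILE *f = fopen("/sys/bus/usb-serial/devices/ttyUSB0/port_number", "r");
	unsigned int port;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &port) == 1)
		printf("port_number = %u\n", port);
	fclose(f);
	return 0;
}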
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index dbc00e56c7f5..82570425fdfe 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
113 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 113 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
114 US_FL_NO_ATA_1X), 114 US_FL_NO_ATA_1X),
115 115
116/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
117UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
118 "JMicron",
119 "JMS539",
120 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
121 US_FL_NO_REPORT_OPCODES),
122
116/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ 123/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
117UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, 124UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
118 "JMicron", 125 "JMicron",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index d468d02179f4..5600c33fcadb 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -889,6 +889,12 @@ static void usb_stor_scan_dwork(struct work_struct *work)
889 !(us->fflags & US_FL_SCM_MULT_TARG)) { 889 !(us->fflags & US_FL_SCM_MULT_TARG)) {
890 mutex_lock(&us->dev_mutex); 890 mutex_lock(&us->dev_mutex);
891 us->max_lun = usb_stor_Bulk_max_lun(us); 891 us->max_lun = usb_stor_Bulk_max_lun(us);
892 /*
893 * Allow proper scanning of devices that present more than 8 LUNs,
894 * while not affecting other devices that may need the previous behavior.
895 */
896 if (us->max_lun >= 8)
897 us_to_host(us)->max_lun = us->max_lun+1;
892 mutex_unlock(&us->dev_mutex); 898 mutex_unlock(&us->dev_mutex);
893 } 899 }
894 scsi_scan_host(us_to_host(us)); 900 scsi_scan_host(us_to_host(us));
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index f88bfdf5b6a0..2027a27546ef 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
868 func = vfio_pci_set_err_trigger; 868 func = vfio_pci_set_err_trigger;
869 break; 869 break;
870 } 870 }
871 break;
871 case VFIO_PCI_REQ_IRQ_INDEX: 872 case VFIO_PCI_REQ_IRQ_INDEX:
872 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { 873 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
873 case VFIO_IRQ_SET_ACTION_TRIGGER: 874 case VFIO_IRQ_SET_ACTION_TRIGGER:
874 func = vfio_pci_set_req_trigger; 875 func = vfio_pci_set_req_trigger;
875 break; 876 break;
876 } 877 }
878 break;
877 } 879 }
878 880
879 if (!func) 881 if (!func)
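
The two added break statements stop the VFIO_PCI_ERR_IRQ_INDEX and VFIO_PCI_REQ_IRQ_INDEX cases from falling through into the following case and selecting the wrong handler. A minimal stand-alone illustration of that fall-through hazard; all names in it are invented for the example.

#include <stdio.h>

enum irq_index { ERR_IRQ_INDEX, REQ_IRQ_INDEX };

static const char *pick_handler(enum irq_index idx)
{
	const char *func = NULL;

	switch (idx) {
	case ERR_IRQ_INDEX:
		func = "set_err_trigger";
		break;	/* without this, ERR falls through and is overwritten */
	case REQ_IRQ_INDEX:
		func = "set_req_trigger";
		break;
	}
	return func;
}

int main(void)
{
	printf("%s\n", pick_handler(ERR_IRQ_INDEX));	/* set_err_trigger */
	return 0;
}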
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index afa06d28725d..2bbfc25e582c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
591 * TODO: support TSO. 591 * TODO: support TSO.
592 */ 592 */
593 iov_iter_advance(&msg.msg_iter, vhost_hlen); 593 iov_iter_advance(&msg.msg_iter, vhost_hlen);
594 } else {
595 /* It'll come from socket; we'll need to patch
596 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
597 */
598 iov_iter_advance(&fixup, sizeof(hdr));
599 } 594 }
600 err = sock->ops->recvmsg(NULL, sock, &msg, 595 err = sock->ops->recvmsg(NULL, sock, &msg,
601 sock_len, MSG_DONTWAIT | MSG_TRUNC); 596 sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
609 continue; 604 continue;
610 } 605 }
611 /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ 606 /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
612 if (unlikely(vhost_hlen) && 607 if (unlikely(vhost_hlen)) {
613 copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { 608 if (copy_to_iter(&hdr, sizeof(hdr),
614 vq_err(vq, "Unable to write vnet_hdr at addr %p\n", 609 &fixup) != sizeof(hdr)) {
615 vq->iov->iov_base); 610 vq_err(vq, "Unable to write vnet_hdr "
616 break; 611 "at addr %p\n", vq->iov->iov_base);
612 break;
613 }
614 } else {
615 /* Header came from socket; we'll need to patch
616 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF is set.
617 */
618 iov_iter_advance(&fixup, sizeof(hdr));
617 } 619 }
618 /* TODO: Should check and handle checksum. */ 620 /* TODO: Should check and handle checksum. */
619 621
620 num_buffers = cpu_to_vhost16(vq, headcount); 622 num_buffers = cpu_to_vhost16(vq, headcount);
621 if (likely(mergeable) && 623 if (likely(mergeable) &&
622 copy_to_iter(&num_buffers, 2, &fixup) != 2) { 624 copy_to_iter(&num_buffers, sizeof num_buffers,
625 &fixup) != sizeof num_buffers) {
623 vq_err(vq, "Failed num_buffers write"); 626 vq_err(vq, "Failed num_buffers write");
624 vhost_discard_vq_desc(vq, headcount); 627 vhost_discard_vq_desc(vq, headcount);
625 break; 628 break;
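
Besides restructuring the header fixup, the hunk sizes the num_buffers copy with sizeof instead of the literal 2, so the write stays correct if the field's type ever changes. A tiny illustrative sketch; copy_to_buf() here is a stand-in for copy_to_iter(), not a kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_iter(): copies len bytes and reports how many. */
static size_t copy_to_buf(const void *src, size_t len, uint8_t *dst)
{
	memcpy(dst, src, len);
	return len;
}

int main(void)
{
	uint16_t num_buffers = 3;	/* same width as the vhost field today */
	uint8_t fixup[8];

	/* Sizing the copy from the object, not a literal 2, keeps this right
	 * even if num_buffers is widened in a later revision. */
	if (copy_to_buf(&num_buffers, sizeof num_buffers, fixup) != sizeof num_buffers)
		fprintf(stderr, "Failed num_buffers write\n");
	return 0;
}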
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 32c0b6b28097..9362424c2340 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint,
599 599
600 len = clcdfb_snprintf_mode(NULL, 0, mode); 600 len = clcdfb_snprintf_mode(NULL, 0, mode);
601 name = devm_kzalloc(dev, len + 1, GFP_KERNEL); 601 name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
602 if (!name)
603 return -ENOMEM;
604
602 clcdfb_snprintf_mode(name, len + 1, mode); 605 clcdfb_snprintf_mode(name, len + 1, mode);
603 mode->name = name; 606 mode->name = name;
604 607
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 95338593ebf4..868facdec638 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
624 int num = 0, i, first = 1; 624 int num = 0, i, first = 1;
625 int ver, rev; 625 int ver, rev;
626 626
627 ver = edid[EDID_STRUCT_VERSION];
628 rev = edid[EDID_STRUCT_REVISION];
629
630 mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); 627 mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
631 if (mode == NULL) 628 if (mode == NULL)
632 return NULL; 629 return NULL;
@@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
637 return NULL; 634 return NULL;
638 } 635 }
639 636
637 ver = edid[EDID_STRUCT_VERSION];
638 rev = edid[EDID_STRUCT_REVISION];
639
640 *dbsize = 0; 640 *dbsize = 0;
641 641
642 DPRINTK(" Detailed Timings\n"); 642 DPRINTK(" Detailed Timings\n");
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c
index 5a2095a98ed8..12186557a9d4 100644
--- a/drivers/video/fbdev/omap2/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c
@@ -28,44 +28,22 @@
28#include <video/omapdss.h> 28#include <video/omapdss.h>
29#include "dss.h" 29#include "dss.h"
30 30
31static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) 31static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
32{ 32{
33 struct omap_dss_device *dssdev = NULL;
34
35 for_each_dss_dev(dssdev) {
36 if (dssdev->dev == dev) {
37 omap_dss_put_device(dssdev);
38 return dssdev;
39 }
40 }
41
42 return NULL;
43}
44
45static ssize_t display_name_show(struct device *dev,
46 struct device_attribute *attr, char *buf)
47{
48 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
49
50 return snprintf(buf, PAGE_SIZE, "%s\n", 33 return snprintf(buf, PAGE_SIZE, "%s\n",
51 dssdev->name ? 34 dssdev->name ?
52 dssdev->name : ""); 35 dssdev->name : "");
53} 36}
54 37
55static ssize_t display_enabled_show(struct device *dev, 38static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf)
56 struct device_attribute *attr, char *buf)
57{ 39{
58 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
59
60 return snprintf(buf, PAGE_SIZE, "%d\n", 40 return snprintf(buf, PAGE_SIZE, "%d\n",
61 omapdss_device_is_enabled(dssdev)); 41 omapdss_device_is_enabled(dssdev));
62} 42}
63 43
64static ssize_t display_enabled_store(struct device *dev, 44static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
65 struct device_attribute *attr,
66 const char *buf, size_t size) 45 const char *buf, size_t size)
67{ 46{
68 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
69 int r; 47 int r;
70 bool enable; 48 bool enable;
71 49
@@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev,
90 return size; 68 return size;
91} 69}
92 70
93static ssize_t display_tear_show(struct device *dev, 71static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf)
94 struct device_attribute *attr, char *buf)
95{ 72{
96 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
97 return snprintf(buf, PAGE_SIZE, "%d\n", 73 return snprintf(buf, PAGE_SIZE, "%d\n",
98 dssdev->driver->get_te ? 74 dssdev->driver->get_te ?
99 dssdev->driver->get_te(dssdev) : 0); 75 dssdev->driver->get_te(dssdev) : 0);
100} 76}
101 77
102static ssize_t display_tear_store(struct device *dev, 78static ssize_t display_tear_store(struct omap_dss_device *dssdev,
103 struct device_attribute *attr, const char *buf, size_t size) 79 const char *buf, size_t size)
104{ 80{
105 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
106 int r; 81 int r;
107 bool te; 82 bool te;
108 83
@@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev,
120 return size; 95 return size;
121} 96}
122 97
123static ssize_t display_timings_show(struct device *dev, 98static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf)
124 struct device_attribute *attr, char *buf)
125{ 99{
126 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
127 struct omap_video_timings t; 100 struct omap_video_timings t;
128 101
129 if (!dssdev->driver->get_timings) 102 if (!dssdev->driver->get_timings)
@@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev,
137 t.y_res, t.vfp, t.vbp, t.vsw); 110 t.y_res, t.vfp, t.vbp, t.vsw);
138} 111}
139 112
140static ssize_t display_timings_store(struct device *dev, 113static ssize_t display_timings_store(struct omap_dss_device *dssdev,
141 struct device_attribute *attr, const char *buf, size_t size) 114 const char *buf, size_t size)
142{ 115{
143 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
144 struct omap_video_timings t = dssdev->panel.timings; 116 struct omap_video_timings t = dssdev->panel.timings;
145 int r, found; 117 int r, found;
146 118
@@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev,
176 return size; 148 return size;
177} 149}
178 150
179static ssize_t display_rotate_show(struct device *dev, 151static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf)
180 struct device_attribute *attr, char *buf)
181{ 152{
182 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
183 int rotate; 153 int rotate;
184 if (!dssdev->driver->get_rotate) 154 if (!dssdev->driver->get_rotate)
185 return -ENOENT; 155 return -ENOENT;
@@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev,
187 return snprintf(buf, PAGE_SIZE, "%u\n", rotate); 157 return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
188} 158}
189 159
190static ssize_t display_rotate_store(struct device *dev, 160static ssize_t display_rotate_store(struct omap_dss_device *dssdev,
191 struct device_attribute *attr, const char *buf, size_t size) 161 const char *buf, size_t size)
192{ 162{
193 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
194 int rot, r; 163 int rot, r;
195 164
196 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) 165 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
@@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev,
207 return size; 176 return size;
208} 177}
209 178
210static ssize_t display_mirror_show(struct device *dev, 179static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf)
211 struct device_attribute *attr, char *buf)
212{ 180{
213 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
214 int mirror; 181 int mirror;
215 if (!dssdev->driver->get_mirror) 182 if (!dssdev->driver->get_mirror)
216 return -ENOENT; 183 return -ENOENT;
@@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev,
218 return snprintf(buf, PAGE_SIZE, "%u\n", mirror); 185 return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
219} 186}
220 187
221static ssize_t display_mirror_store(struct device *dev, 188static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
222 struct device_attribute *attr, const char *buf, size_t size) 189 const char *buf, size_t size)
223{ 190{
224 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
225 int r; 191 int r;
226 bool mirror; 192 bool mirror;
227 193
@@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev,
239 return size; 205 return size;
240} 206}
241 207
242static ssize_t display_wss_show(struct device *dev, 208static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf)
243 struct device_attribute *attr, char *buf)
244{ 209{
245 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
246 unsigned int wss; 210 unsigned int wss;
247 211
248 if (!dssdev->driver->get_wss) 212 if (!dssdev->driver->get_wss)
@@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev,
253 return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); 217 return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
254} 218}
255 219
256static ssize_t display_wss_store(struct device *dev, 220static ssize_t display_wss_store(struct omap_dss_device *dssdev,
257 struct device_attribute *attr, const char *buf, size_t size) 221 const char *buf, size_t size)
258{ 222{
259 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
260 u32 wss; 223 u32 wss;
261 int r; 224 int r;
262 225
@@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev,
277 return size; 240 return size;
278} 241}
279 242
280static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); 243struct display_attribute {
281static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, 244 struct attribute attr;
245 ssize_t (*show)(struct omap_dss_device *, char *);
246 ssize_t (*store)(struct omap_dss_device *, const char *, size_t);
247};
248
249#define DISPLAY_ATTR(_name, _mode, _show, _store) \
250 struct display_attribute display_attr_##_name = \
251 __ATTR(_name, _mode, _show, _store)
252
253static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL);
254static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL);
255static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
282 display_enabled_show, display_enabled_store); 256 display_enabled_show, display_enabled_store);
283static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, 257static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR,
284 display_tear_show, display_tear_store); 258 display_tear_show, display_tear_store);
285static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, 259static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR,
286 display_timings_show, display_timings_store); 260 display_timings_show, display_timings_store);
287static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, 261static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR,
288 display_rotate_show, display_rotate_store); 262 display_rotate_show, display_rotate_store);
289static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, 263static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR,
290 display_mirror_show, display_mirror_store); 264 display_mirror_show, display_mirror_store);
291static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, 265static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR,
292 display_wss_show, display_wss_store); 266 display_wss_show, display_wss_store);
293 267
294static const struct attribute *display_sysfs_attrs[] = { 268static struct attribute *display_sysfs_attrs[] = {
295 &dev_attr_display_name.attr, 269 &display_attr_name.attr,
296 &dev_attr_enabled.attr, 270 &display_attr_display_name.attr,
297 &dev_attr_tear_elim.attr, 271 &display_attr_enabled.attr,
298 &dev_attr_timings.attr, 272 &display_attr_tear_elim.attr,
299 &dev_attr_rotate.attr, 273 &display_attr_timings.attr,
300 &dev_attr_mirror.attr, 274 &display_attr_rotate.attr,
301 &dev_attr_wss.attr, 275 &display_attr_mirror.attr,
276 &display_attr_wss.attr,
302 NULL 277 NULL
303}; 278};
304 279
280static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
281 char *buf)
282{
283 struct omap_dss_device *dssdev;
284 struct display_attribute *display_attr;
285
286 dssdev = container_of(kobj, struct omap_dss_device, kobj);
287 display_attr = container_of(attr, struct display_attribute, attr);
288
289 if (!display_attr->show)
290 return -ENOENT;
291
292 return display_attr->show(dssdev, buf);
293}
294
295static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr,
296 const char *buf, size_t size)
297{
298 struct omap_dss_device *dssdev;
299 struct display_attribute *display_attr;
300
301 dssdev = container_of(kobj, struct omap_dss_device, kobj);
302 display_attr = container_of(attr, struct display_attribute, attr);
303
304 if (!display_attr->store)
305 return -ENOENT;
306
307 return display_attr->store(dssdev, buf, size);
308}
309
310static const struct sysfs_ops display_sysfs_ops = {
311 .show = display_attr_show,
312 .store = display_attr_store,
313};
314
315static struct kobj_type display_ktype = {
316 .sysfs_ops = &display_sysfs_ops,
317 .default_attrs = display_sysfs_attrs,
318};
319
305int display_init_sysfs(struct platform_device *pdev) 320int display_init_sysfs(struct platform_device *pdev)
306{ 321{
307 struct omap_dss_device *dssdev = NULL; 322 struct omap_dss_device *dssdev = NULL;
308 int r; 323 int r;
309 324
310 for_each_dss_dev(dssdev) { 325 for_each_dss_dev(dssdev) {
311 struct kobject *kobj = &dssdev->dev->kobj; 326 r = kobject_init_and_add(&dssdev->kobj, &display_ktype,
312 327 &pdev->dev.kobj, dssdev->alias);
313 r = sysfs_create_files(kobj, display_sysfs_attrs);
314 if (r) { 328 if (r) {
315 DSSERR("failed to create sysfs files\n"); 329 DSSERR("failed to create sysfs files\n");
316 goto err; 330 omap_dss_put_device(dssdev);
317 }
318
319 r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias);
320 if (r) {
321 sysfs_remove_files(kobj, display_sysfs_attrs);
322
323 DSSERR("failed to create sysfs display link\n");
324 goto err; 331 goto err;
325 } 332 }
326 } 333 }
@@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev)
338 struct omap_dss_device *dssdev = NULL; 345 struct omap_dss_device *dssdev = NULL;
339 346
340 for_each_dss_dev(dssdev) { 347 for_each_dss_dev(dssdev) {
341 sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); 348 if (kobject_name(&dssdev->kobj) == NULL)
342 sysfs_remove_files(&dssdev->dev->kobj, 349 continue;
343 display_sysfs_attrs); 350
351 kobject_del(&dssdev->kobj);
352 kobject_put(&dssdev->kobj);
353
354 memset(&dssdev->kobj, 0, sizeof(dssdev->kobj));
344 } 355 }
345} 356}
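
The sysfs rewrite embeds a kobject in each omap_dss_device and routes show/store through a private sysfs_ops pair, recovering both the device and the typed attribute with container_of() instead of walking the device list. The following is a condensed user-space sketch of that dispatch pattern; the struct layouts are simplified stand-ins, not the kernel types.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject { const char *dummy; };		/* stand-in for the kernel type */
struct attribute { const char *name; };

struct display {
	const char *name;
	struct kobject kobj;			/* embedded, like dssdev->kobj above */
};

struct display_attribute {
	struct attribute attr;
	int (*show)(struct display *d, char *buf, size_t len);
};

static int name_show(struct display *d, char *buf, size_t len)
{
	return snprintf(buf, len, "%s\n", d->name);
}

static struct display_attribute display_attr_name = {
	.attr = { .name = "name" },
	.show = name_show,
};

/* Generic dispatcher: recover the object and the typed attribute with
 * container_of(), mirroring display_attr_show() in the patch above. */
static int display_show(struct kobject *kobj, struct attribute *attr,
			char *buf, size_t len)
{
	struct display *d = container_of(kobj, struct display, kobj);
	struct display_attribute *da =
		container_of(attr, struct display_attribute, attr);

	if (!da->show)
		return -1;
	return da->show(d, buf, len);
}

int main(void)
{
	struct display d = { .name = "lcd0" };
	char buf[32];

	display_show(&d.kobj, &display_attr_name.attr, buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}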
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 6df940528fd2..1443b3c391de 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -208,7 +208,8 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
208 208
209 if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) { 209 if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
210 err = request_irq(wdt->irq, wdt_interrupt, 210 err = request_irq(wdt->irq, wdt_interrupt,
211 IRQF_SHARED | IRQF_IRQPOLL, 211 IRQF_SHARED | IRQF_IRQPOLL |
212 IRQF_NO_SUSPEND,
212 pdev->name, wdt); 213 pdev->name, wdt);
213 if (err) 214 if (err)
214 return err; 215 return err;
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b4bca2d4a7e5..70fba973a107 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq)
526 pirq_query_unmask(irq); 526 pirq_query_unmask(irq);
527 527
528 rc = set_evtchn_to_irq(evtchn, irq); 528 rc = set_evtchn_to_irq(evtchn, irq);
529 if (rc != 0) { 529 if (rc)
530 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", 530 goto err;
531 irq, rc); 531
532 xen_evtchn_close(evtchn);
533 return 0;
534 }
535 bind_evtchn_to_cpu(evtchn, 0); 532 bind_evtchn_to_cpu(evtchn, 0);
536 info->evtchn = evtchn; 533 info->evtchn = evtchn;
537 534
535 rc = xen_evtchn_port_setup(info);
536 if (rc)
537 goto err;
538
538out: 539out:
539 unmask_evtchn(evtchn); 540 unmask_evtchn(evtchn);
540 eoi_pirq(irq_get_irq_data(irq)); 541 eoi_pirq(irq_get_irq_data(irq));
541 542
542 return 0; 543 return 0;
544
545err:
546 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
547 xen_evtchn_close(evtchn);
548 return 0;
543} 549}
544 550
545static unsigned int startup_pirq(struct irq_data *data) 551static unsigned int startup_pirq(struct irq_data *data)
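
Folding both failure cases behind the single err: label keeps the cleanup in one place, so a failed set_evtchn_to_irq() and a failed xen_evtchn_port_setup() report the error and close the event channel identically. A generic sketch of that goto-cleanup idiom, with invented resources:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a = NULL, *b = NULL;
	int rc = 0;

	a = malloc(64);
	if (!a) {
		rc = -1;
		goto err;
	}

	b = malloc(64);
	if (!b) {
		rc = -2;
		goto err;	/* every failure takes the same cleanup path */
	}

	free(b);
	free(a);
	return 0;

err:
	fprintf(stderr, "setup failed (%d)\n", rc);
	free(b);		/* free(NULL) is a no-op, so this stays safe */
	free(a);
	return rc;
}

int main(void)
{
	return setup() ? 1 : 0;
}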
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 46ae0f9f02ad..75fe3d466515 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,7 +16,7 @@
16#include "conf_space.h" 16#include "conf_space.h"
17#include "conf_space_quirks.h" 17#include "conf_space_quirks.h"
18 18
19static bool permissive; 19bool permissive;
20module_param(permissive, bool, 0644); 20module_param(permissive, bool, 0644);
21 21
22/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, 22/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index e56c934ad137..2e1d73d1d5d0 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -64,6 +64,8 @@ struct config_field_entry {
64 void *data; 64 void *data;
65}; 65};
66 66
67extern bool permissive;
68
67#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) 69#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
68 70
69/* Add fields to a device - the add_fields macro expects to get a pointer to 71/* Add fields to a device - the add_fields macro expects to get a pointer to
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index c5ee82587e8c..2d7369391472 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -11,6 +11,10 @@
11#include "pciback.h" 11#include "pciback.h"
12#include "conf_space.h" 12#include "conf_space.h"
13 13
14struct pci_cmd_info {
15 u16 val;
16};
17
14struct pci_bar_info { 18struct pci_bar_info {
15 u32 val; 19 u32 val;
16 u32 len_val; 20 u32 len_val;
@@ -20,22 +24,36 @@ struct pci_bar_info {
20#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) 24#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
21#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) 25#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
22 26
23static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) 27/* Bits guests are allowed to control in permissive mode. */
28#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
29 PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
30 PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
31
32static void *command_init(struct pci_dev *dev, int offset)
24{ 33{
25 int i; 34 struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
26 int ret; 35 int err;
27 36
28 ret = xen_pcibk_read_config_word(dev, offset, value, data); 37 if (!cmd)
29 if (!pci_is_enabled(dev)) 38 return ERR_PTR(-ENOMEM);
30 return ret; 39
31 40 err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
32 for (i = 0; i < PCI_ROM_RESOURCE; i++) { 41 if (err) {
33 if (dev->resource[i].flags & IORESOURCE_IO) 42 kfree(cmd);
34 *value |= PCI_COMMAND_IO; 43 return ERR_PTR(err);
35 if (dev->resource[i].flags & IORESOURCE_MEM)
36 *value |= PCI_COMMAND_MEMORY;
37 } 44 }
38 45
46 return cmd;
47}
48
49static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
50{
51 int ret = pci_read_config_word(dev, offset, value);
52 const struct pci_cmd_info *cmd = data;
53
54 *value &= PCI_COMMAND_GUEST;
55 *value |= cmd->val & ~PCI_COMMAND_GUEST;
56
39 return ret; 57 return ret;
40} 58}
41 59
@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
43{ 61{
44 struct xen_pcibk_dev_data *dev_data; 62 struct xen_pcibk_dev_data *dev_data;
45 int err; 63 int err;
64 u16 val;
65 struct pci_cmd_info *cmd = data;
46 66
47 dev_data = pci_get_drvdata(dev); 67 dev_data = pci_get_drvdata(dev);
48 if (!pci_is_enabled(dev) && is_enable_cmd(value)) { 68 if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
83 } 103 }
84 } 104 }
85 105
106 cmd->val = value;
107
108 if (!permissive && (!dev_data || !dev_data->permissive))
109 return 0;
110
111 /* Only allow the guest to control certain bits. */
112 err = pci_read_config_word(dev, offset, &val);
113 if (err || val == value)
114 return err;
115
116 value &= PCI_COMMAND_GUEST;
117 value |= val & ~PCI_COMMAND_GUEST;
118
86 return pci_write_config_word(dev, offset, value); 119 return pci_write_config_word(dev, offset, value);
87} 120}
88 121
@@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
282 { 315 {
283 .offset = PCI_COMMAND, 316 .offset = PCI_COMMAND,
284 .size = 2, 317 .size = 2,
318 .init = command_init,
319 .release = bar_release,
285 .u.w.read = command_read, 320 .u.w.read = command_read,
286 .u.w.write = command_write, 321 .u.w.write = command_write,
287 }, 322 },
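
The command register is now filtered symmetrically around PCI_COMMAND_GUEST: reads hand back the hardware value for the guest-controllable bits and the cached guest-written value for the rest, while permissive writes let the guest change only the bits inside the mask. A stand-alone illustration of that merge; the PCI_COMMAND_* values are the standard PCI bit definitions, everything else is example code.

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND_MASTER	0x0004
#define PCI_COMMAND_SPECIAL	0x0008
#define PCI_COMMAND_INVALIDATE	0x0010
#define PCI_COMMAND_VGA_PALETTE	0x0020
#define PCI_COMMAND_WAIT	0x0080
#define PCI_COMMAND_FAST_BACK	0x0200

#define PCI_COMMAND_GUEST	(PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL | \
				 PCI_COMMAND_INVALIDATE | PCI_COMMAND_VGA_PALETTE | \
				 PCI_COMMAND_WAIT | PCI_COMMAND_FAST_BACK)

/* What the hardware register is allowed to become on a permissive write. */
static uint16_t filter_write(uint16_t guest_value, uint16_t hw_value)
{
	return (guest_value & PCI_COMMAND_GUEST) | (hw_value & ~PCI_COMMAND_GUEST);
}

/* What the guest sees when it reads the register back. */
static uint16_t filter_read(uint16_t hw_value, uint16_t last_guest_value)
{
	return (hw_value & PCI_COMMAND_GUEST) | (last_guest_value & ~PCI_COMMAND_GUEST);
}

int main(void)
{
	uint16_t hw = 0x0147, guest = 0xffff;

	printf("write -> 0x%04x\n", filter_write(guest, hw));
	printf("read  -> 0x%04x\n", filter_read(hw, guest));
	return 0;
}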
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 993642199326..6d67f32e648d 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1645,14 +1645,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1645 1645
1646 parent_nritems = btrfs_header_nritems(parent); 1646 parent_nritems = btrfs_header_nritems(parent);
1647 blocksize = root->nodesize; 1647 blocksize = root->nodesize;
1648 end_slot = parent_nritems; 1648 end_slot = parent_nritems - 1;
1649 1649
1650 if (parent_nritems == 1) 1650 if (parent_nritems <= 1)
1651 return 0; 1651 return 0;
1652 1652
1653 btrfs_set_lock_blocking(parent); 1653 btrfs_set_lock_blocking(parent);
1654 1654
1655 for (i = start_slot; i < end_slot; i++) { 1655 for (i = start_slot; i <= end_slot; i++) {
1656 int close = 1; 1656 int close = 1;
1657 1657
1658 btrfs_node_key(parent, &disk_key, i); 1658 btrfs_node_key(parent, &disk_key, i);
@@ -1669,7 +1669,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1669 other = btrfs_node_blockptr(parent, i - 1); 1669 other = btrfs_node_blockptr(parent, i - 1);
1670 close = close_blocks(blocknr, other, blocksize); 1670 close = close_blocks(blocknr, other, blocksize);
1671 } 1671 }
1672 if (!close && i < end_slot - 2) { 1672 if (!close && i < end_slot) {
1673 other = btrfs_node_blockptr(parent, i + 1); 1673 other = btrfs_node_blockptr(parent, i + 1);
1674 close = close_blocks(blocknr, other, blocksize); 1674 close = close_blocks(blocknr, other, blocksize);
1675 } 1675 }
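
The bound fix turns end_slot into an inclusive index (parent_nritems - 1), walks the loop with i <= end_slot, and tightens the look-ahead so the right-neighbour comparison never reads past the last slot. The same inclusive-bound pattern in miniature, with an invented array:

#include <stdio.h>

int main(void)
{
	int slots[] = { 10, 11, 12, 13 };
	int nritems = sizeof(slots) / sizeof(slots[0]);
	int end_slot = nritems - 1;	/* inclusive index of the last slot */

	for (int i = 0; i <= end_slot; i++) {
		printf("slot %d -> %d", i, slots[i]);

		/* Peek at the right neighbour only while one actually exists. */
		if (i < end_slot)
			printf(" (next %d)", slots[i + 1]);
		printf("\n");
	}
	return 0;
}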
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 571f402d3fc4..6f080451fcb1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3208,6 +3208,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3208 return 0; 3208 return 0;
3209 } 3209 }
3210 3210
3211 if (trans->aborted)
3212 return 0;
3211again: 3213again:
3212 inode = lookup_free_space_inode(root, block_group, path); 3214 inode = lookup_free_space_inode(root, block_group, path);
3213 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 3215 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
@@ -3243,6 +3245,20 @@ again:
3243 */ 3245 */
3244 BTRFS_I(inode)->generation = 0; 3246 BTRFS_I(inode)->generation = 0;
3245 ret = btrfs_update_inode(trans, root, inode); 3247 ret = btrfs_update_inode(trans, root, inode);
3248 if (ret) {
3249 /*
3250 * So theoretically we could recover from this, simply set the
3251 * super cache generation to 0 so we know to invalidate the
3252 * cache, but then we'd have to keep track of the block groups
3253 * that fail this way so we know we _have_ to reset this cache
3254 * before the next commit or risk reading stale cache. So to
 3255 * limit our exposure to horrible edge cases let's just abort the
 3256 * transaction; this only happens in really bad situations
3257 * anyway.
3258 */
3259 btrfs_abort_transaction(trans, root, ret);
3260 goto out_put;
3261 }
3246 WARN_ON(ret); 3262 WARN_ON(ret);
3247 3263
3248 if (i_size_read(inode) > 0) { 3264 if (i_size_read(inode) > 0) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index b78bbbac900d..30982bbd31c3 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1811,22 +1811,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1811 mutex_unlock(&inode->i_mutex); 1811 mutex_unlock(&inode->i_mutex);
1812 1812
1813 /* 1813 /*
1814 * we want to make sure fsync finds this change
1815 * but we haven't joined a transaction running right now.
1816 *
1817 * Later on, someone is sure to update the inode and get the
1818 * real transid recorded.
1819 *
1820 * We set last_trans now to the fs_info generation + 1,
1821 * this will either be one more than the running transaction
1822 * or the generation used for the next transaction if there isn't
1823 * one running right now.
1824 *
1825 * We also have to set last_sub_trans to the current log transid, 1814 * We also have to set last_sub_trans to the current log transid,
1826 * otherwise subsequent syncs to a file that's been synced in this 1815 * otherwise subsequent syncs to a file that's been synced in this
1827 * transaction will appear to have already occured. 1816 * transaction will appear to have already occured.
1828 */ 1817 */
1829 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1830 BTRFS_I(inode)->last_sub_trans = root->log_transid; 1818 BTRFS_I(inode)->last_sub_trans = root->log_transid;
1831 if (num_written > 0) { 1819 if (num_written > 0) {
1832 err = generic_write_sync(file, pos, num_written); 1820 err = generic_write_sync(file, pos, num_written);
@@ -1959,25 +1947,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1959 atomic_inc(&root->log_batch); 1947 atomic_inc(&root->log_batch);
1960 1948
1961 /* 1949 /*
1962 * check the transaction that last modified this inode 1950 * If the last transaction that changed this file was before the current
1963 * and see if its already been committed 1951 * transaction and we have the full sync flag set in our inode, we can
1964 */ 1952 * bail out now without any syncing.
1965 if (!BTRFS_I(inode)->last_trans) { 1953 *
1966 mutex_unlock(&inode->i_mutex); 1954 * Note that we can't bail out if the full sync flag isn't set. This is
1967 goto out; 1955 * because when the full sync flag is set we start all ordered extents
1968 } 1956 * and wait for them to fully complete - when they complete they update
1969 1957 * the inode's last_trans field through:
1970 /* 1958 *
1971 * if the last transaction that changed this file was before 1959 * btrfs_finish_ordered_io() ->
1972 * the current transaction, we can bail out now without any 1960 * btrfs_update_inode_fallback() ->
1973 * syncing 1961 * btrfs_update_inode() ->
1962 * btrfs_set_inode_last_trans()
1963 *
1964 * So we are sure that last_trans is up to date and can do this check to
1965 * bail out safely. For the fast path, when the full sync flag is not
1966 * set in our inode, we can not do it because we start only our ordered
1967 * extents and don't wait for them to complete (that is when
1968 * btrfs_finish_ordered_io runs), so here at this point their last_trans
 1969 * value might be less than or equal to fs_info->last_trans_committed,
1970 * and setting a speculative last_trans for an inode when a buffered
1971 * write is made (such as fs_info->generation + 1 for example) would not
1972 * be reliable since after setting the value and before fsync is called
1973 * any number of transactions can start and commit (transaction kthread
1974 * commits the current transaction periodically), and a transaction
1975 * commit does not start nor waits for ordered extents to complete.
1974 */ 1976 */
1975 smp_mb(); 1977 smp_mb();
1976 if (btrfs_inode_in_log(inode, root->fs_info->generation) || 1978 if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1977 BTRFS_I(inode)->last_trans <= 1979 (full_sync && BTRFS_I(inode)->last_trans <=
1978 root->fs_info->last_trans_committed) { 1980 root->fs_info->last_trans_committed)) {
1979 BTRFS_I(inode)->last_trans = 0;
1980
1981 /* 1981 /*
1982 * We'v had everything committed since the last time we were 1982 * We'v had everything committed since the last time we were
1983 * modified so clear this flag in case it was set for whatever 1983 * modified so clear this flag in case it was set for whatever
@@ -2275,6 +2275,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2275 bool same_page; 2275 bool same_page;
2276 bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES); 2276 bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
2277 u64 ino_size; 2277 u64 ino_size;
2278 bool truncated_page = false;
2279 bool updated_inode = false;
2278 2280
2279 ret = btrfs_wait_ordered_range(inode, offset, len); 2281 ret = btrfs_wait_ordered_range(inode, offset, len);
2280 if (ret) 2282 if (ret)
@@ -2306,13 +2308,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2306 * entire page. 2308 * entire page.
2307 */ 2309 */
2308 if (same_page && len < PAGE_CACHE_SIZE) { 2310 if (same_page && len < PAGE_CACHE_SIZE) {
2309 if (offset < ino_size) 2311 if (offset < ino_size) {
2312 truncated_page = true;
2310 ret = btrfs_truncate_page(inode, offset, len, 0); 2313 ret = btrfs_truncate_page(inode, offset, len, 0);
2314 } else {
2315 ret = 0;
2316 }
2311 goto out_only_mutex; 2317 goto out_only_mutex;
2312 } 2318 }
2313 2319
2314 /* zero back part of the first page */ 2320 /* zero back part of the first page */
2315 if (offset < ino_size) { 2321 if (offset < ino_size) {
2322 truncated_page = true;
2316 ret = btrfs_truncate_page(inode, offset, 0, 0); 2323 ret = btrfs_truncate_page(inode, offset, 0, 0);
2317 if (ret) { 2324 if (ret) {
2318 mutex_unlock(&inode->i_mutex); 2325 mutex_unlock(&inode->i_mutex);
@@ -2348,6 +2355,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2348 if (!ret) { 2355 if (!ret) {
2349 /* zero the front end of the last page */ 2356 /* zero the front end of the last page */
2350 if (tail_start + tail_len < ino_size) { 2357 if (tail_start + tail_len < ino_size) {
2358 truncated_page = true;
2351 ret = btrfs_truncate_page(inode, 2359 ret = btrfs_truncate_page(inode,
2352 tail_start + tail_len, 0, 1); 2360 tail_start + tail_len, 0, 1);
2353 if (ret) 2361 if (ret)
@@ -2357,8 +2365,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2357 } 2365 }
2358 2366
2359 if (lockend < lockstart) { 2367 if (lockend < lockstart) {
2360 mutex_unlock(&inode->i_mutex); 2368 ret = 0;
2361 return 0; 2369 goto out_only_mutex;
2362 } 2370 }
2363 2371
2364 while (1) { 2372 while (1) {
@@ -2506,6 +2514,7 @@ out_trans:
2506 2514
2507 trans->block_rsv = &root->fs_info->trans_block_rsv; 2515 trans->block_rsv = &root->fs_info->trans_block_rsv;
2508 ret = btrfs_update_inode(trans, root, inode); 2516 ret = btrfs_update_inode(trans, root, inode);
2517 updated_inode = true;
2509 btrfs_end_transaction(trans, root); 2518 btrfs_end_transaction(trans, root);
2510 btrfs_btree_balance_dirty(root); 2519 btrfs_btree_balance_dirty(root);
2511out_free: 2520out_free:
@@ -2515,6 +2524,22 @@ out:
2515 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2524 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2516 &cached_state, GFP_NOFS); 2525 &cached_state, GFP_NOFS);
2517out_only_mutex: 2526out_only_mutex:
2527 if (!updated_inode && truncated_page && !ret && !err) {
2528 /*
2529 * If we only end up zeroing part of a page, we still need to
2530 * update the inode item, so that all the time fields are
2531 * updated as well as the necessary btrfs inode in memory fields
2532 * for detecting, at fsync time, if the inode isn't yet in the
2533 * log tree or it's there but not up to date.
2534 */
2535 trans = btrfs_start_transaction(root, 1);
2536 if (IS_ERR(trans)) {
2537 err = PTR_ERR(trans);
2538 } else {
2539 err = btrfs_update_inode(trans, root, inode);
2540 ret = btrfs_end_transaction(trans, root);
2541 }
2542 }
2518 mutex_unlock(&inode->i_mutex); 2543 mutex_unlock(&inode->i_mutex);
2519 if (ret && !err) 2544 if (ret && !err)
2520 err = ret; 2545 err = ret;
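
Condensed, the new bail-out in btrfs_sync_file() only trusts last_trans when the inode also carries the full-sync flag, because only then have all ordered extents completed and refreshed it. A compact restatement of that predicate as plain C; the struct below is illustrative and merely mirrors the fields named in the comment above.

#include <stdbool.h>
#include <stdio.h>

struct inode_state {
	bool in_log_for_current_gen;		/* btrfs_inode_in_log(...) */
	bool full_sync;				/* full sync flag set on the inode */
	unsigned long long last_trans;
	unsigned long long last_trans_committed;
};

/* true -> nothing new to log, fsync can return without further work */
static bool fsync_can_bail(const struct inode_state *s)
{
	if (s->in_log_for_current_gen)
		return true;
	/* Without full_sync, last_trans may be stale and cannot be trusted. */
	return s->full_sync && s->last_trans <= s->last_trans_committed;
}

int main(void)
{
	struct inode_state s = { false, true, 5, 7 };

	printf("bail = %d\n", fsync_can_bail(&s));
	return 0;
}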
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a85c23dfcddb..da828cf5e8f8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7285,7 +7285,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7285 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7285 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7286 em->block_start != EXTENT_MAP_HOLE)) { 7286 em->block_start != EXTENT_MAP_HOLE)) {
7287 int type; 7287 int type;
7288 int ret;
7289 u64 block_start, orig_start, orig_block_len, ram_bytes; 7288 u64 block_start, orig_start, orig_block_len, ram_bytes;
7290 7289
7291 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7290 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 534544e08f76..157cc54fc634 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -452,9 +452,7 @@ void btrfs_get_logged_extents(struct inode *inode,
452 continue; 452 continue;
453 if (entry_end(ordered) <= start) 453 if (entry_end(ordered) <= start)
454 break; 454 break;
455 if (!list_empty(&ordered->log_list)) 455 if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
456 continue;
457 if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
458 continue; 456 continue;
459 list_add(&ordered->log_list, logged_list); 457 list_add(&ordered->log_list, logged_list);
460 atomic_inc(&ordered->refs); 458 atomic_inc(&ordered->refs);
@@ -511,8 +509,7 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
511 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, 509 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
512 &ordered->flags)); 510 &ordered->flags));
513 511
514 if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) 512 list_add_tail(&ordered->trans_list, &trans->ordered);
515 list_add_tail(&ordered->trans_list, &trans->ordered);
516 spin_lock_irq(&log->log_extents_lock[index]); 513 spin_lock_irq(&log->log_extents_lock[index]);
517 } 514 }
518 spin_unlock_irq(&log->log_extents_lock[index]); 515 spin_unlock_irq(&log->log_extents_lock[index]);
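
Replacing the separate list_empty() and test_bit() checks with one test_and_set_bit() makes claiming an ordered extent for logging atomic, so two concurrent loggers can no longer both see the flag clear and queue the same extent twice. A stand-alone sketch of the difference, using a GCC/Clang atomic builtin in place of the kernel's test_and_set_bit():

#include <stdbool.h>
#include <stdio.h>

static unsigned long flags;

#define ORDERED_LOGGED 0

/* Racy: another thread may set the bit between the test and the set. */
static bool claim_racy(void)
{
	if (flags & (1UL << ORDERED_LOGGED))
		return false;
	flags |= 1UL << ORDERED_LOGGED;
	return true;
}

/* Atomic: exactly one caller observes the bit as previously clear. */
static bool claim_atomic(void)
{
	unsigned long old = __atomic_fetch_or(&flags, 1UL << ORDERED_LOGGED,
					      __ATOMIC_SEQ_CST);
	return !(old & (1UL << ORDERED_LOGGED));
}

int main(void)
{
	printf("first claim:  %d\n", claim_atomic());	/* 1 */
	printf("second claim: %d\n", claim_atomic());	/* 0 */
	(void)claim_racy;
	return 0;
}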
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index fe5857223515..d6033f540cc7 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -230,6 +230,7 @@ struct pending_dir_move {
230 u64 parent_ino; 230 u64 parent_ino;
231 u64 ino; 231 u64 ino;
232 u64 gen; 232 u64 gen;
233 bool is_orphan;
233 struct list_head update_refs; 234 struct list_head update_refs;
234}; 235};
235 236
@@ -2984,7 +2985,8 @@ static int add_pending_dir_move(struct send_ctx *sctx,
2984 u64 ino_gen, 2985 u64 ino_gen,
2985 u64 parent_ino, 2986 u64 parent_ino,
2986 struct list_head *new_refs, 2987 struct list_head *new_refs,
2987 struct list_head *deleted_refs) 2988 struct list_head *deleted_refs,
2989 const bool is_orphan)
2988{ 2990{
2989 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 2991 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
2990 struct rb_node *parent = NULL; 2992 struct rb_node *parent = NULL;
@@ -2999,6 +3001,7 @@ static int add_pending_dir_move(struct send_ctx *sctx,
2999 pm->parent_ino = parent_ino; 3001 pm->parent_ino = parent_ino;
3000 pm->ino = ino; 3002 pm->ino = ino;
3001 pm->gen = ino_gen; 3003 pm->gen = ino_gen;
3004 pm->is_orphan = is_orphan;
3002 INIT_LIST_HEAD(&pm->list); 3005 INIT_LIST_HEAD(&pm->list);
3003 INIT_LIST_HEAD(&pm->update_refs); 3006 INIT_LIST_HEAD(&pm->update_refs);
3004 RB_CLEAR_NODE(&pm->node); 3007 RB_CLEAR_NODE(&pm->node);
@@ -3131,16 +3134,20 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3131 rmdir_ino = dm->rmdir_ino; 3134 rmdir_ino = dm->rmdir_ino;
3132 free_waiting_dir_move(sctx, dm); 3135 free_waiting_dir_move(sctx, dm);
3133 3136
3134 ret = get_first_ref(sctx->parent_root, pm->ino, 3137 if (pm->is_orphan) {
3135 &parent_ino, &parent_gen, name); 3138 ret = gen_unique_name(sctx, pm->ino,
3136 if (ret < 0) 3139 pm->gen, from_path);
3137 goto out; 3140 } else {
3138 3141 ret = get_first_ref(sctx->parent_root, pm->ino,
3139 ret = get_cur_path(sctx, parent_ino, parent_gen, 3142 &parent_ino, &parent_gen, name);
3140 from_path); 3143 if (ret < 0)
3141 if (ret < 0) 3144 goto out;
3142 goto out; 3145 ret = get_cur_path(sctx, parent_ino, parent_gen,
3143 ret = fs_path_add_path(from_path, name); 3146 from_path);
3147 if (ret < 0)
3148 goto out;
3149 ret = fs_path_add_path(from_path, name);
3150 }
3144 if (ret < 0) 3151 if (ret < 0)
3145 goto out; 3152 goto out;
3146 3153
@@ -3150,7 +3157,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3150 LIST_HEAD(deleted_refs); 3157 LIST_HEAD(deleted_refs);
3151 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); 3158 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3152 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, 3159 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3153 &pm->update_refs, &deleted_refs); 3160 &pm->update_refs, &deleted_refs,
3161 pm->is_orphan);
3154 if (ret < 0) 3162 if (ret < 0)
3155 goto out; 3163 goto out;
3156 if (rmdir_ino) { 3164 if (rmdir_ino) {
@@ -3283,6 +3291,127 @@ out:
3283 return ret; 3291 return ret;
3284} 3292}
3285 3293
3294/*
3295 * We might need to delay a directory rename even when no ancestor directory
3296 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3297 * renamed. This happens when we rename a directory to the old name (the name
3298 * in the parent root) of some other unrelated directory that got its rename
3299 * delayed due to some ancestor with higher number that got renamed.
3300 *
3301 * Example:
3302 *
3303 * Parent snapshot:
3304 * . (ino 256)
3305 * |---- a/ (ino 257)
3306 * | |---- file (ino 260)
3307 * |
3308 * |---- b/ (ino 258)
3309 * |---- c/ (ino 259)
3310 *
3311 * Send snapshot:
3312 * . (ino 256)
3313 * |---- a/ (ino 258)
3314 * |---- x/ (ino 259)
3315 * |---- y/ (ino 257)
3316 * |----- file (ino 260)
3317 *
3318 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3319 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3320 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3321 * must issue is:
3322 *
3323 * 1 - rename 259 from 'c' to 'x'
3324 * 2 - rename 257 from 'a' to 'x/y'
3325 * 3 - rename 258 from 'b' to 'a'
3326 *
3327 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3328 * be done right away and < 0 on error.
3329 */
3330static int wait_for_dest_dir_move(struct send_ctx *sctx,
3331 struct recorded_ref *parent_ref,
3332 const bool is_orphan)
3333{
3334 struct btrfs_path *path;
3335 struct btrfs_key key;
3336 struct btrfs_key di_key;
3337 struct btrfs_dir_item *di;
3338 u64 left_gen;
3339 u64 right_gen;
3340 int ret = 0;
3341
3342 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3343 return 0;
3344
3345 path = alloc_path_for_send();
3346 if (!path)
3347 return -ENOMEM;
3348
3349 key.objectid = parent_ref->dir;
3350 key.type = BTRFS_DIR_ITEM_KEY;
3351 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3352
3353 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3354 if (ret < 0) {
3355 goto out;
3356 } else if (ret > 0) {
3357 ret = 0;
3358 goto out;
3359 }
3360
3361 di = btrfs_match_dir_item_name(sctx->parent_root, path,
3362 parent_ref->name, parent_ref->name_len);
3363 if (!di) {
3364 ret = 0;
3365 goto out;
3366 }
3367 /*
3368 * di_key.objectid has the number of the inode that has a dentry in the
3369 * parent directory with the same name that sctx->cur_ino is being
3370 * renamed to. We need to check if that inode is in the send root as
 3371 * well and if it is currently marked as an inode with a pending rename.
 3372 * If it is, we need to delay the rename of sctx->cur_ino as well, so
3373 * that it happens after that other inode is renamed.
3374 */
3375 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3376 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3377 ret = 0;
3378 goto out;
3379 }
3380
3381 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3382 &left_gen, NULL, NULL, NULL, NULL);
3383 if (ret < 0)
3384 goto out;
3385 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3386 &right_gen, NULL, NULL, NULL, NULL);
3387 if (ret < 0) {
3388 if (ret == -ENOENT)
3389 ret = 0;
3390 goto out;
3391 }
3392
3393 /* Different inode, no need to delay the rename of sctx->cur_ino */
3394 if (right_gen != left_gen) {
3395 ret = 0;
3396 goto out;
3397 }
3398
3399 if (is_waiting_for_move(sctx, di_key.objectid)) {
3400 ret = add_pending_dir_move(sctx,
3401 sctx->cur_ino,
3402 sctx->cur_inode_gen,
3403 di_key.objectid,
3404 &sctx->new_refs,
3405 &sctx->deleted_refs,
3406 is_orphan);
3407 if (!ret)
3408 ret = 1;
3409 }
3410out:
3411 btrfs_free_path(path);
3412 return ret;
3413}
3414
3286static int wait_for_parent_move(struct send_ctx *sctx, 3415static int wait_for_parent_move(struct send_ctx *sctx,
3287 struct recorded_ref *parent_ref) 3416 struct recorded_ref *parent_ref)
3288{ 3417{
@@ -3349,7 +3478,8 @@ out:
3349 sctx->cur_inode_gen, 3478 sctx->cur_inode_gen,
3350 ino, 3479 ino,
3351 &sctx->new_refs, 3480 &sctx->new_refs,
3352 &sctx->deleted_refs); 3481 &sctx->deleted_refs,
3482 false);
3353 if (!ret) 3483 if (!ret)
3354 ret = 1; 3484 ret = 1;
3355 } 3485 }
@@ -3372,6 +3502,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3372 int did_overwrite = 0; 3502 int did_overwrite = 0;
3373 int is_orphan = 0; 3503 int is_orphan = 0;
3374 u64 last_dir_ino_rm = 0; 3504 u64 last_dir_ino_rm = 0;
3505 bool can_rename = true;
3375 3506
3376verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); 3507verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3377 3508
@@ -3490,12 +3621,22 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3490 } 3621 }
3491 } 3622 }
3492 3623
3624 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
3625 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
3626 if (ret < 0)
3627 goto out;
3628 if (ret == 1) {
3629 can_rename = false;
3630 *pending_move = 1;
3631 }
3632 }
3633
3493 /* 3634 /*
3494 * link/move the ref to the new place. If we have an orphan 3635 * link/move the ref to the new place. If we have an orphan
3495 * inode, move it and update valid_path. If not, link or move 3636 * inode, move it and update valid_path. If not, link or move
3496 * it depending on the inode mode. 3637 * it depending on the inode mode.
3497 */ 3638 */
3498 if (is_orphan) { 3639 if (is_orphan && can_rename) {
3499 ret = send_rename(sctx, valid_path, cur->full_path); 3640 ret = send_rename(sctx, valid_path, cur->full_path);
3500 if (ret < 0) 3641 if (ret < 0)
3501 goto out; 3642 goto out;
@@ -3503,7 +3644,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3503 ret = fs_path_copy(valid_path, cur->full_path); 3644 ret = fs_path_copy(valid_path, cur->full_path);
3504 if (ret < 0) 3645 if (ret < 0)
3505 goto out; 3646 goto out;
3506 } else { 3647 } else if (can_rename) {
3507 if (S_ISDIR(sctx->cur_inode_mode)) { 3648 if (S_ISDIR(sctx->cur_inode_mode)) {
3508 /* 3649 /*
3509 * Dirs can't be linked, so move it. For moved 3650 * Dirs can't be linked, so move it. For moved
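
Note: the ordering constraint that wait_for_dest_dir_move() and wait_for_parent_move() enforce in the send.c hunks above is easier to see in isolation. The sketch below is a hypothetical userspace model, not kernel code; none of these helper names exist in fs/btrfs/send.c. It reuses the inode numbers from the comment's example (257, 258, 259) and delays a rename while either its destination name is still occupied by a pending move or its destination directory has not been created by a pending move yet, which reproduces the 259, 257, 258 order the send stream must emit.

#include <stdio.h>
#include <string.h>

struct mv { unsigned ino; const char *from; const char *to; int done; };

static struct mv moves[] = {
	{ 258, "b", "a",   0 },
	{ 257, "a", "x/y", 0 },
	{ 259, "c", "x",   0 },
};
#define NMOVES (sizeof(moves) / sizeof(moves[0]))

static int must_wait(const struct mv *m)
{
	char parent[32] = "";
	const char *slash = strrchr(m->to, '/');

	if (slash) {
		size_t n = (size_t)(slash - m->to);

		if (n >= sizeof(parent))
			n = sizeof(parent) - 1;
		memcpy(parent, m->to, n);
		parent[n] = '\0';
	}

	for (size_t i = 0; i < NMOVES; i++) {
		const struct mv *o = &moves[i];

		if (o == m || o->done)
			continue;
		/* wait_for_dest_dir_move() case: our destination name is
		 * still held by a directory not yet renamed away */
		if (strcmp(o->from, m->to) == 0)
			return 1;
		/* wait_for_parent_move() case: our destination lives under a
		 * directory that only exists once another rename has run */
		if (slash && strcmp(o->to, parent) == 0)
			return 1;
	}
	return 0;
}

int main(void)
{
	size_t done = 0;

	while (done < NMOVES) {
		size_t before = done;

		for (size_t i = 0; i < NMOVES; i++) {
			struct mv *m = &moves[i];

			if (m->done || must_wait(m))
				continue;
			printf("rename %u from '%s' to '%s'\n",
			       m->ino, m->from, m->to);
			m->done = 1;
			done++;
		}
		if (done == before) {
			fprintf(stderr, "rename cycle detected\n");
			return 1;
		}
	}
	return 0;
}

Running the model prints the renames in the order 259, 257, 258, matching steps 1 to 3 in the comment above.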
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 7e80f32550a6..88e51aded6bd 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1052,9 +1052,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
1052 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1052 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1053 if (ret) 1053 if (ret)
1054 return ret; 1054 return ret;
1055 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1056 if (ret)
1057 return ret;
1058 } 1055 }
1059 1056
1060 return 0; 1057 return 0;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9a37f8b39bae..c5b8ba37f88e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1012,7 +1012,7 @@ again:
1012 base = btrfs_item_ptr_offset(leaf, path->slots[0]); 1012 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1013 1013
1014 while (cur_offset < item_size) { 1014 while (cur_offset < item_size) {
1015 extref = (struct btrfs_inode_extref *)base + cur_offset; 1015 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1016 1016
1017 victim_name_len = btrfs_inode_extref_name_len(leaf, extref); 1017 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1018 1018
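
Note: the one-character tree-log.c change above fixes a pointer-arithmetic precedence bug. The cast binds tighter than the addition, so the old expression advanced the pointer by cur_offset elements (scaled by the struct size) instead of cur_offset bytes. A minimal userspace illustration follows; struct item and the offsets are made up, only the precedence behaviour is the point.

#include <stdio.h>

struct item { int a; int b; };	/* stand-in for struct btrfs_inode_extref */

int main(void)
{
	char buf[256] = { 0 };
	unsigned long base = (unsigned long)buf;
	unsigned long cur_offset = 4;	/* intended as a byte offset */

	/* Old form: the cast happens first, so the '+ cur_offset' is scaled
	 * by sizeof(struct item) and lands far past the intended byte. */
	struct item *wrong = (struct item *)base + cur_offset;

	/* Fixed form: add the byte offset first, then cast the sum. */
	struct item *right = (struct item *)(base + cur_offset);

	printf("sizeof(struct item) = %zu\n", sizeof(struct item));
	printf("buggy pointer lands %ld bytes into buf\n",
	       (long)((char *)wrong - buf));
	printf("fixed pointer lands %ld bytes into buf\n",
	       (long)((char *)right - buf));
	return 0;
}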
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 47b19465f0dc..883b93623bc5 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -111,6 +111,8 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
111 name, name_len, -1); 111 name, name_len, -1);
112 if (!di && (flags & XATTR_REPLACE)) 112 if (!di && (flags & XATTR_REPLACE))
113 ret = -ENODATA; 113 ret = -ENODATA;
114 else if (IS_ERR(di))
115 ret = PTR_ERR(di);
114 else if (di) 116 else if (di)
115 ret = btrfs_delete_one_dir_name(trans, root, path, di); 117 ret = btrfs_delete_one_dir_name(trans, root, path, di);
116 goto out; 118 goto out;
@@ -127,10 +129,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
127 ASSERT(mutex_is_locked(&inode->i_mutex)); 129 ASSERT(mutex_is_locked(&inode->i_mutex));
128 di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), 130 di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
129 name, name_len, 0); 131 name, name_len, 0);
130 if (!di) { 132 if (!di)
131 ret = -ENODATA; 133 ret = -ENODATA;
134 else if (IS_ERR(di))
135 ret = PTR_ERR(di);
136 if (ret)
132 goto out; 137 goto out;
133 }
134 btrfs_release_path(path); 138 btrfs_release_path(path);
135 di = NULL; 139 di = NULL;
136 } 140 }
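
Note: the do_setxattr() hunks above exist because btrfs_lookup_xattr() can return three distinct things: a valid dir item, NULL when nothing matched, or an ERR_PTR() on lookup failure, and the old code only handled the first two. A small userspace sketch of the same convention follows; lookup_thing() and its callers are invented for illustration, only the ERR_PTR encoding mirrors the kernel's.

#include <errno.h>
#include <stdio.h>

/* Minimal userspace copies of the kernel ERR_PTR helpers. */
#define MAX_ERRNO	4095
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int sentinel;

/* Hypothetical lookup: >0 means found, 0 means not found, <0 means error. */
static void *lookup_thing(int what)
{
	if (what > 0)
		return &sentinel;
	if (what == 0)
		return NULL;
	return ERR_PTR(-EIO);
}

static int handle(int what, int replace_only)
{
	void *di = lookup_thing(what);
	int ret = 0;

	/* Mirror the fixed flow: treat "not found" and "lookup failed"
	 * separately before ever dereferencing the pointer. */
	if (!di && replace_only)
		ret = -ENODATA;
	else if (IS_ERR(di))
		ret = PTR_ERR(di);
	else if (di)
		printf("found: would delete/replace the existing entry\n");
	return ret;
}

int main(void)
{
	printf("found     -> %d\n", handle(1, 1));
	printf("not found -> %d\n", handle(0, 1));
	printf("error     -> %d\n", handle(-1, 1));
	return 0;
}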
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 90d1882b306f..5ba029e627cc 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -124,7 +124,7 @@ ecryptfs_get_key_payload_data(struct key *key)
124} 124}
125 125
126#define ECRYPTFS_MAX_KEYSET_SIZE 1024 126#define ECRYPTFS_MAX_KEYSET_SIZE 1024
127#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32 127#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 31
128#define ECRYPTFS_MAX_NUM_ENC_KEYS 64 128#define ECRYPTFS_MAX_NUM_ENC_KEYS 64
129#define ECRYPTFS_MAX_IV_BYTES 16 /* 128 bits */ 129#define ECRYPTFS_MAX_IV_BYTES 16 /* 128 bits */
130#define ECRYPTFS_SALT_BYTES 2 130#define ECRYPTFS_SALT_BYTES 2
@@ -237,7 +237,7 @@ struct ecryptfs_crypt_stat {
237 struct crypto_ablkcipher *tfm; 237 struct crypto_ablkcipher *tfm;
238 struct crypto_hash *hash_tfm; /* Crypto context for generating 238 struct crypto_hash *hash_tfm; /* Crypto context for generating
239 * the initialization vectors */ 239 * the initialization vectors */
240 unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE]; 240 unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
241 unsigned char key[ECRYPTFS_MAX_KEY_BYTES]; 241 unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
242 unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES]; 242 unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
243 struct list_head keysig_list; 243 struct list_head keysig_list;
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index b07731e68c0b..fd39bad6f1bd 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -303,9 +303,22 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
303 struct file *lower_file = ecryptfs_file_to_lower(file); 303 struct file *lower_file = ecryptfs_file_to_lower(file);
304 long rc = -ENOTTY; 304 long rc = -ENOTTY;
305 305
306 if (lower_file->f_op->unlocked_ioctl) 306 if (!lower_file->f_op->unlocked_ioctl)
307 return rc;
308
309 switch (cmd) {
310 case FITRIM:
311 case FS_IOC_GETFLAGS:
312 case FS_IOC_SETFLAGS:
313 case FS_IOC_GETVERSION:
314 case FS_IOC_SETVERSION:
307 rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg); 315 rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
308 return rc; 316 fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
317
318 return rc;
319 default:
320 return rc;
321 }
309} 322}
310 323
311#ifdef CONFIG_COMPAT 324#ifdef CONFIG_COMPAT
@@ -315,9 +328,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
315 struct file *lower_file = ecryptfs_file_to_lower(file); 328 struct file *lower_file = ecryptfs_file_to_lower(file);
316 long rc = -ENOIOCTLCMD; 329 long rc = -ENOIOCTLCMD;
317 330
318 if (lower_file->f_op->compat_ioctl) 331 if (!lower_file->f_op->compat_ioctl)
332 return rc;
333
334 switch (cmd) {
335 case FITRIM:
336 case FS_IOC32_GETFLAGS:
337 case FS_IOC32_SETFLAGS:
338 case FS_IOC32_GETVERSION:
339 case FS_IOC32_SETVERSION:
319 rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg); 340 rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
320 return rc; 341 fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
342
343 return rc;
344 default:
345 return rc;
346 }
321} 347}
322#endif 348#endif
323 349
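
Note: both eCryptfs ioctl hunks above move from "forward anything" to a whitelist of commands that are forwarded to the lower file, with the lower inode's attributes copied back afterwards. The sketch below is a hedged userspace model of that shape; the command constants are stand-ins, not the real FITRIM and FS_IOC_* values.

#include <errno.h>
#include <stdio.h>

/* Stand-in command numbers; the real whitelist above is FITRIM plus the
 * FS_IOC_{GET,SET}{FLAGS,VERSION} pairs. */
enum { CMD_TRIM = 1, CMD_GETFLAGS, CMD_SETFLAGS, CMD_DRIVER_PRIVATE = 100 };

/* Shape of the new ecryptfs_unlocked_ioctl(): only a known-safe set of
 * commands reaches the lower file, everything else keeps -ENOTTY. */
static long forward_ioctl(unsigned int cmd)
{
	long rc = -ENOTTY;

	switch (cmd) {
	case CMD_TRIM:
	case CMD_GETFLAGS:
	case CMD_SETFLAGS:
		rc = 0;		/* pretend lower->unlocked_ioctl() succeeded */
		/* the real code also copies the lower inode's attributes
		 * back up to the stacked inode at this point */
		return rc;
	default:
		return rc;
	}
}

int main(void)
{
	printf("whitelisted command -> %ld\n", forward_ioctl(CMD_GETFLAGS));
	printf("arbitrary command   -> %ld\n", forward_ioctl(CMD_DRIVER_PRIVATE));
	return 0;
}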
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 917bd5c9776a..6bd67e2011f0 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -891,7 +891,7 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
891 struct blkcipher_desc desc; 891 struct blkcipher_desc desc;
892 char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1]; 892 char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
893 char iv[ECRYPTFS_MAX_IV_BYTES]; 893 char iv[ECRYPTFS_MAX_IV_BYTES];
894 char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE]; 894 char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
895}; 895};
896 896
897/** 897/**
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 1895d60f4122..c095d3264259 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -407,7 +407,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
407 if (!cipher_name_set) { 407 if (!cipher_name_set) {
408 int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); 408 int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);
409 409
410 BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); 410 BUG_ON(cipher_name_len > ECRYPTFS_MAX_CIPHER_NAME_SIZE);
411 strcpy(mount_crypt_stat->global_default_cipher_name, 411 strcpy(mount_crypt_stat->global_default_cipher_name,
412 ECRYPTFS_DEFAULT_CIPHER); 412 ECRYPTFS_DEFAULT_CIPHER);
413 } 413 }
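
Note: the remaining eCryptfs changes all follow from one sizing decision. ECRYPTFS_MAX_CIPHER_NAME_SIZE now counts only the name characters, so every buffer that stores a cipher name gains "+ 1" for the trailing NUL and the BUG_ON comparison relaxes from ">=" to ">". A small self-contained illustration of that convention follows; the names are made up, not the eCryptfs ones.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Maximum number of name characters, excluding the trailing NUL. */
#define MAX_NAME_SIZE	3

struct holder {
	/* "+ 1" leaves room for the terminator, so copying a maximal-length
	 * name does not overflow the buffer. */
	char name[MAX_NAME_SIZE + 1];
};

int main(void)
{
	struct holder h;
	const char *def = "aes";	/* stand-in for a default cipher name */

	/* With the "+ 1" buffer, a name of exactly MAX_NAME_SIZE characters
	 * is legal, so the sanity check is '>' rather than '>='. */
	assert(strlen(def) <= MAX_NAME_SIZE);
	strcpy(h.name, def);
	printf("stored '%s' in a %zu-byte buffer\n", h.name, sizeof(h.name));
	return 0;
}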
diff --git a/fs/locks.c b/fs/locks.c
index 365c82e1b3a9..528fedfda15e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1665,7 +1665,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1665 } 1665 }
1666 1666
1667 if (my_fl != NULL) { 1667 if (my_fl != NULL) {
1668 error = lease->fl_lmops->lm_change(my_fl, arg, &dispose); 1668 lease = my_fl;
1669 error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1669 if (error) 1670 if (error)
1670 goto out; 1671 goto out;
1671 goto out_setup; 1672 goto out_setup;
@@ -1727,7 +1728,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
1727 break; 1728 break;
1728 } 1729 }
1729 } 1730 }
1730 trace_generic_delete_lease(inode, fl); 1731 trace_generic_delete_lease(inode, victim);
1731 if (victim) 1732 if (victim)
1732 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); 1733 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1733 spin_unlock(&ctx->flc_lock); 1734 spin_unlock(&ctx->flc_lock);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index f9f4845db989..19874151e95c 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -433,7 +433,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
433 433
434static bool nfs_client_init_is_complete(const struct nfs_client *clp) 434static bool nfs_client_init_is_complete(const struct nfs_client *clp)
435{ 435{
436 return clp->cl_cons_state != NFS_CS_INITING; 436 return clp->cl_cons_state <= NFS_CS_READY;
437} 437}
438 438
439int nfs_wait_client_init_complete(const struct nfs_client *clp) 439int nfs_wait_client_init_complete(const struct nfs_client *clp)
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index a1f0685b42ff..a6ad68865880 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -181,8 +181,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
181 clear_bit(NFS_DELEGATION_NEED_RECLAIM, 181 clear_bit(NFS_DELEGATION_NEED_RECLAIM,
182 &delegation->flags); 182 &delegation->flags);
183 spin_unlock(&delegation->lock); 183 spin_unlock(&delegation->lock);
184 put_rpccred(oldcred);
185 rcu_read_unlock(); 184 rcu_read_unlock();
185 put_rpccred(oldcred);
186 trace_nfs4_reclaim_delegation(inode, res->delegation_type); 186 trace_nfs4_reclaim_delegation(inode, res->delegation_type);
187 } else { 187 } else {
188 /* We appear to have raced with a delegation return. */ 188 /* We appear to have raced with a delegation return. */
@@ -370,7 +370,10 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
370 delegation = NULL; 370 delegation = NULL;
371 goto out; 371 goto out;
372 } 372 }
373 freeme = nfs_detach_delegation_locked(nfsi, 373 if (test_and_set_bit(NFS_DELEGATION_RETURNING,
374 &old_delegation->flags))
375 goto out;
376 freeme = nfs_detach_delegation_locked(nfsi,
374 old_delegation, clp); 377 old_delegation, clp);
375 if (freeme == NULL) 378 if (freeme == NULL)
376 goto out; 379 goto out;
@@ -433,6 +436,8 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
433{ 436{
434 bool ret = false; 437 bool ret = false;
435 438
439 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
440 goto out;
436 if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) 441 if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
437 ret = true; 442 ret = true;
438 if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) { 443 if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
@@ -444,6 +449,7 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
444 ret = true; 449 ret = true;
445 spin_unlock(&delegation->lock); 450 spin_unlock(&delegation->lock);
446 } 451 }
452out:
447 return ret; 453 return ret;
448} 454}
449 455
@@ -471,14 +477,20 @@ restart:
471 super_list) { 477 super_list) {
472 if (!nfs_delegation_need_return(delegation)) 478 if (!nfs_delegation_need_return(delegation))
473 continue; 479 continue;
474 inode = nfs_delegation_grab_inode(delegation); 480 if (!nfs_sb_active(server->super))
475 if (inode == NULL)
476 continue; 481 continue;
482 inode = nfs_delegation_grab_inode(delegation);
483 if (inode == NULL) {
484 rcu_read_unlock();
485 nfs_sb_deactive(server->super);
486 goto restart;
487 }
477 delegation = nfs_start_delegation_return_locked(NFS_I(inode)); 488 delegation = nfs_start_delegation_return_locked(NFS_I(inode));
478 rcu_read_unlock(); 489 rcu_read_unlock();
479 490
480 err = nfs_end_delegation_return(inode, delegation, 0); 491 err = nfs_end_delegation_return(inode, delegation, 0);
481 iput(inode); 492 iput(inode);
493 nfs_sb_deactive(server->super);
482 if (!err) 494 if (!err)
483 goto restart; 495 goto restart;
484 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); 496 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
@@ -809,19 +821,30 @@ restart:
809 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 821 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
810 list_for_each_entry_rcu(delegation, &server->delegations, 822 list_for_each_entry_rcu(delegation, &server->delegations,
811 super_list) { 823 super_list) {
824 if (test_bit(NFS_DELEGATION_RETURNING,
825 &delegation->flags))
826 continue;
812 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, 827 if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
813 &delegation->flags) == 0) 828 &delegation->flags) == 0)
814 continue; 829 continue;
815 inode = nfs_delegation_grab_inode(delegation); 830 if (!nfs_sb_active(server->super))
816 if (inode == NULL)
817 continue; 831 continue;
818 delegation = nfs_detach_delegation(NFS_I(inode), 832 inode = nfs_delegation_grab_inode(delegation);
819 delegation, server); 833 if (inode == NULL) {
834 rcu_read_unlock();
835 nfs_sb_deactive(server->super);
836 goto restart;
837 }
838 delegation = nfs_start_delegation_return_locked(NFS_I(inode));
820 rcu_read_unlock(); 839 rcu_read_unlock();
821 840 if (delegation != NULL) {
822 if (delegation != NULL) 841 delegation = nfs_detach_delegation(NFS_I(inode),
823 nfs_free_delegation(delegation); 842 delegation, server);
843 if (delegation != NULL)
844 nfs_free_delegation(delegation);
845 }
824 iput(inode); 846 iput(inode);
847 nfs_sb_deactive(server->super);
825 goto restart; 848 goto restart;
826 } 849 }
827 } 850 }
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 9b0c55cb2a2e..c19e16f0b2d0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -408,14 +408,22 @@ static int xdr_decode(nfs_readdir_descriptor_t *desc,
408 return 0; 408 return 0;
409} 409}
410 410
411/* Match file and dirent using either filehandle or fileid
412 * Note: caller is responsible for checking the fsid
413 */
411static 414static
412int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) 415int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
413{ 416{
417 struct nfs_inode *nfsi;
418
414 if (dentry->d_inode == NULL) 419 if (dentry->d_inode == NULL)
415 goto different; 420 goto different;
416 if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0) 421
417 goto different; 422 nfsi = NFS_I(dentry->d_inode);
418 return 1; 423 if (entry->fattr->fileid == nfsi->fileid)
424 return 1;
425 if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
426 return 1;
419different: 427different:
420 return 0; 428 return 0;
421} 429}
@@ -469,6 +477,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
469 struct inode *inode; 477 struct inode *inode;
470 int status; 478 int status;
471 479
480 if (!(entry->fattr->valid & NFS_ATTR_FATTR_FILEID))
481 return;
482 if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
483 return;
472 if (filename.name[0] == '.') { 484 if (filename.name[0] == '.') {
473 if (filename.len == 1) 485 if (filename.len == 1)
474 return; 486 return;
@@ -479,6 +491,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
479 491
480 dentry = d_lookup(parent, &filename); 492 dentry = d_lookup(parent, &filename);
481 if (dentry != NULL) { 493 if (dentry != NULL) {
494 /* Is there a mountpoint here? If so, just exit */
495 if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
496 &entry->fattr->fsid))
497 goto out;
482 if (nfs_same_file(dentry, entry)) { 498 if (nfs_same_file(dentry, entry)) {
483 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 499 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
484 status = nfs_refresh_inode(dentry->d_inode, entry->fattr); 500 status = nfs_refresh_inode(dentry->d_inode, entry->fattr);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 94712fc781fa..e679d24c39d3 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -178,7 +178,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
178 iocb->ki_filp, 178 iocb->ki_filp,
179 iov_iter_count(to), (unsigned long) iocb->ki_pos); 179 iov_iter_count(to), (unsigned long) iocb->ki_pos);
180 180
181 result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); 181 result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping);
182 if (!result) { 182 if (!result) {
183 result = generic_file_read_iter(iocb, to); 183 result = generic_file_read_iter(iocb, to);
184 if (result > 0) 184 if (result > 0)
@@ -199,7 +199,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
199 dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n", 199 dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
200 filp, (unsigned long) count, (unsigned long long) *ppos); 200 filp, (unsigned long) count, (unsigned long long) *ppos);
201 201
202 res = nfs_revalidate_mapping(inode, filp->f_mapping); 202 res = nfs_revalidate_mapping_protected(inode, filp->f_mapping);
203 if (!res) { 203 if (!res) {
204 res = generic_file_splice_read(filp, ppos, pipe, count, flags); 204 res = generic_file_splice_read(filp, ppos, pipe, count, flags);
205 if (res > 0) 205 if (res > 0)
@@ -372,6 +372,10 @@ start:
372 nfs_wait_bit_killable, TASK_KILLABLE); 372 nfs_wait_bit_killable, TASK_KILLABLE);
373 if (ret) 373 if (ret)
374 return ret; 374 return ret;
375 /*
376 * Wait for O_DIRECT to complete
377 */
378 nfs_inode_dio_wait(mapping->host);
375 379
376 page = grab_cache_page_write_begin(mapping, index, flags); 380 page = grab_cache_page_write_begin(mapping, index, flags);
377 if (!page) 381 if (!page)
@@ -619,6 +623,9 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
619 /* make sure the cache has finished storing the page */ 623 /* make sure the cache has finished storing the page */
620 nfs_fscache_wait_on_page_write(NFS_I(inode), page); 624 nfs_fscache_wait_on_page_write(NFS_I(inode), page);
621 625
626 wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
627 nfs_wait_bit_killable, TASK_KILLABLE);
628
622 lock_page(page); 629 lock_page(page);
623 mapping = page_file_mapping(page); 630 mapping = page_file_mapping(page);
624 if (mapping != inode->i_mapping) 631 if (mapping != inode->i_mapping)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 83107be3dd01..d42dff6d5e98 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -556,6 +556,7 @@ EXPORT_SYMBOL_GPL(nfs_setattr);
556 * This is a copy of the common vmtruncate, but with the locking 556 * This is a copy of the common vmtruncate, but with the locking
557 * corrected to take into account the fact that NFS requires 557 * corrected to take into account the fact that NFS requires
558 * inode->i_size to be updated under the inode->i_lock. 558 * inode->i_size to be updated under the inode->i_lock.
559 * Note: must be called with inode->i_lock held!
559 */ 560 */
560static int nfs_vmtruncate(struct inode * inode, loff_t offset) 561static int nfs_vmtruncate(struct inode * inode, loff_t offset)
561{ 562{
@@ -565,14 +566,14 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
565 if (err) 566 if (err)
566 goto out; 567 goto out;
567 568
568 spin_lock(&inode->i_lock);
569 i_size_write(inode, offset); 569 i_size_write(inode, offset);
570 /* Optimisation */ 570 /* Optimisation */
571 if (offset == 0) 571 if (offset == 0)
572 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA; 572 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
573 spin_unlock(&inode->i_lock);
574 573
574 spin_unlock(&inode->i_lock);
575 truncate_pagecache(inode, offset); 575 truncate_pagecache(inode, offset);
576 spin_lock(&inode->i_lock);
576out: 577out:
577 return err; 578 return err;
578} 579}
@@ -585,10 +586,15 @@ out:
585 * Note: we do this in the *proc.c in order to ensure that 586 * Note: we do this in the *proc.c in order to ensure that
586 * it works for things like exclusive creates too. 587 * it works for things like exclusive creates too.
587 */ 588 */
588void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr) 589void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
590 struct nfs_fattr *fattr)
589{ 591{
592 /* Barrier: bump the attribute generation count. */
593 nfs_fattr_set_barrier(fattr);
594
595 spin_lock(&inode->i_lock);
596 NFS_I(inode)->attr_gencount = fattr->gencount;
590 if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) { 597 if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
591 spin_lock(&inode->i_lock);
592 if ((attr->ia_valid & ATTR_MODE) != 0) { 598 if ((attr->ia_valid & ATTR_MODE) != 0) {
593 int mode = attr->ia_mode & S_IALLUGO; 599 int mode = attr->ia_mode & S_IALLUGO;
594 mode |= inode->i_mode & ~S_IALLUGO; 600 mode |= inode->i_mode & ~S_IALLUGO;
@@ -600,12 +606,13 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
600 inode->i_gid = attr->ia_gid; 606 inode->i_gid = attr->ia_gid;
601 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS 607 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
602 | NFS_INO_INVALID_ACL); 608 | NFS_INO_INVALID_ACL);
603 spin_unlock(&inode->i_lock);
604 } 609 }
605 if ((attr->ia_valid & ATTR_SIZE) != 0) { 610 if ((attr->ia_valid & ATTR_SIZE) != 0) {
606 nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); 611 nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
607 nfs_vmtruncate(inode, attr->ia_size); 612 nfs_vmtruncate(inode, attr->ia_size);
608 } 613 }
614 nfs_update_inode(inode, fattr);
615 spin_unlock(&inode->i_lock);
609} 616}
610EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); 617EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
611 618
@@ -1028,6 +1035,7 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
1028 1035
1029 if (mapping->nrpages != 0) { 1036 if (mapping->nrpages != 0) {
1030 if (S_ISREG(inode->i_mode)) { 1037 if (S_ISREG(inode->i_mode)) {
1038 unmap_mapping_range(mapping, 0, 0, 0);
1031 ret = nfs_sync_mapping(mapping); 1039 ret = nfs_sync_mapping(mapping);
1032 if (ret < 0) 1040 if (ret < 0)
1033 return ret; 1041 return ret;
@@ -1060,11 +1068,14 @@ static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
1060} 1068}
1061 1069
1062/** 1070/**
1063 * nfs_revalidate_mapping - Revalidate the pagecache 1071 * __nfs_revalidate_mapping - Revalidate the pagecache
1064 * @inode - pointer to host inode 1072 * @inode - pointer to host inode
1065 * @mapping - pointer to mapping 1073 * @mapping - pointer to mapping
1074 * @may_lock - take inode->i_mutex?
1066 */ 1075 */
1067int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) 1076static int __nfs_revalidate_mapping(struct inode *inode,
1077 struct address_space *mapping,
1078 bool may_lock)
1068{ 1079{
1069 struct nfs_inode *nfsi = NFS_I(inode); 1080 struct nfs_inode *nfsi = NFS_I(inode);
1070 unsigned long *bitlock = &nfsi->flags; 1081 unsigned long *bitlock = &nfsi->flags;
@@ -1113,7 +1124,12 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
1113 nfsi->cache_validity &= ~NFS_INO_INVALID_DATA; 1124 nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
1114 spin_unlock(&inode->i_lock); 1125 spin_unlock(&inode->i_lock);
1115 trace_nfs_invalidate_mapping_enter(inode); 1126 trace_nfs_invalidate_mapping_enter(inode);
1116 ret = nfs_invalidate_mapping(inode, mapping); 1127 if (may_lock) {
1128 mutex_lock(&inode->i_mutex);
1129 ret = nfs_invalidate_mapping(inode, mapping);
1130 mutex_unlock(&inode->i_mutex);
1131 } else
1132 ret = nfs_invalidate_mapping(inode, mapping);
1117 trace_nfs_invalidate_mapping_exit(inode, ret); 1133 trace_nfs_invalidate_mapping_exit(inode, ret);
1118 1134
1119 clear_bit_unlock(NFS_INO_INVALIDATING, bitlock); 1135 clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
@@ -1123,6 +1139,29 @@ out:
1123 return ret; 1139 return ret;
1124} 1140}
1125 1141
1142/**
1143 * nfs_revalidate_mapping - Revalidate the pagecache
1144 * @inode - pointer to host inode
1145 * @mapping - pointer to mapping
1146 */
1147int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
1148{
1149 return __nfs_revalidate_mapping(inode, mapping, false);
1150}
1151
1152/**
1153 * nfs_revalidate_mapping_protected - Revalidate the pagecache
1154 * @inode - pointer to host inode
1155 * @mapping - pointer to mapping
1156 *
1157 * Differs from nfs_revalidate_mapping() in that it grabs the inode->i_mutex
1158 * while invalidating the mapping.
1159 */
1160int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping)
1161{
1162 return __nfs_revalidate_mapping(inode, mapping, true);
1163}
1164
1126static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) 1165static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1127{ 1166{
1128 struct nfs_inode *nfsi = NFS_I(inode); 1167 struct nfs_inode *nfsi = NFS_I(inode);
@@ -1231,13 +1270,6 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
1231 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0; 1270 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
1232} 1271}
1233 1272
1234static int nfs_size_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
1235{
1236 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1237 return 0;
1238 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
1239}
1240
1241static atomic_long_t nfs_attr_generation_counter; 1273static atomic_long_t nfs_attr_generation_counter;
1242 1274
1243static unsigned long nfs_read_attr_generation_counter(void) 1275static unsigned long nfs_read_attr_generation_counter(void)
@@ -1249,6 +1281,7 @@ unsigned long nfs_inc_attr_generation_counter(void)
1249{ 1281{
1250 return atomic_long_inc_return(&nfs_attr_generation_counter); 1282 return atomic_long_inc_return(&nfs_attr_generation_counter);
1251} 1283}
1284EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
1252 1285
1253void nfs_fattr_init(struct nfs_fattr *fattr) 1286void nfs_fattr_init(struct nfs_fattr *fattr)
1254{ 1287{
@@ -1260,6 +1293,22 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
1260} 1293}
1261EXPORT_SYMBOL_GPL(nfs_fattr_init); 1294EXPORT_SYMBOL_GPL(nfs_fattr_init);
1262 1295
1296/**
1297 * nfs_fattr_set_barrier
1298 * @fattr: attributes
1299 *
1300 * Used to set a barrier after an attribute was updated. This
1301 * barrier ensures that older attributes from RPC calls that may
1302 * have raced with our update cannot clobber these new values.
1303 * Note that you are still responsible for ensuring that other
1304 * operations which change the attribute on the server do not
1305 * collide.
1306 */
1307void nfs_fattr_set_barrier(struct nfs_fattr *fattr)
1308{
1309 fattr->gencount = nfs_inc_attr_generation_counter();
1310}
1311
1263struct nfs_fattr *nfs_alloc_fattr(void) 1312struct nfs_fattr *nfs_alloc_fattr(void)
1264{ 1313{
1265 struct nfs_fattr *fattr; 1314 struct nfs_fattr *fattr;
@@ -1370,7 +1419,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n
1370 1419
1371 return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || 1420 return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
1372 nfs_ctime_need_update(inode, fattr) || 1421 nfs_ctime_need_update(inode, fattr) ||
1373 nfs_size_need_update(inode, fattr) ||
1374 ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); 1422 ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
1375} 1423}
1376 1424
@@ -1460,6 +1508,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1460 int status; 1508 int status;
1461 1509
1462 spin_lock(&inode->i_lock); 1510 spin_lock(&inode->i_lock);
1511 nfs_fattr_set_barrier(fattr);
1463 status = nfs_post_op_update_inode_locked(inode, fattr); 1512 status = nfs_post_op_update_inode_locked(inode, fattr);
1464 spin_unlock(&inode->i_lock); 1513 spin_unlock(&inode->i_lock);
1465 1514
@@ -1468,7 +1517,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1468EXPORT_SYMBOL_GPL(nfs_post_op_update_inode); 1517EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
1469 1518
1470/** 1519/**
1471 * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache 1520 * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache
1472 * @inode - pointer to inode 1521 * @inode - pointer to inode
1473 * @fattr - updated attributes 1522 * @fattr - updated attributes
1474 * 1523 *
@@ -1478,11 +1527,10 @@ EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
1478 * 1527 *
1479 * This function is mainly designed to be used by the ->write_done() functions. 1528 * This function is mainly designed to be used by the ->write_done() functions.
1480 */ 1529 */
1481int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr) 1530int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr)
1482{ 1531{
1483 int status; 1532 int status;
1484 1533
1485 spin_lock(&inode->i_lock);
1486 /* Don't do a WCC update if these attributes are already stale */ 1534 /* Don't do a WCC update if these attributes are already stale */
1487 if ((fattr->valid & NFS_ATTR_FATTR) == 0 || 1535 if ((fattr->valid & NFS_ATTR_FATTR) == 0 ||
1488 !nfs_inode_attrs_need_update(inode, fattr)) { 1536 !nfs_inode_attrs_need_update(inode, fattr)) {
@@ -1514,6 +1562,27 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
1514 } 1562 }
1515out_noforce: 1563out_noforce:
1516 status = nfs_post_op_update_inode_locked(inode, fattr); 1564 status = nfs_post_op_update_inode_locked(inode, fattr);
1565 return status;
1566}
1567
1568/**
1569 * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
1570 * @inode - pointer to inode
1571 * @fattr - updated attributes
1572 *
1573 * After an operation that has changed the inode metadata, mark the
1574 * attribute cache as being invalid, then try to update it. Fake up
1575 * weak cache consistency data, if none exist.
1576 *
1577 * This function is mainly designed to be used by the ->write_done() functions.
1578 */
1579int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
1580{
1581 int status;
1582
1583 spin_lock(&inode->i_lock);
1584 nfs_fattr_set_barrier(fattr);
1585 status = nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
1517 spin_unlock(&inode->i_lock); 1586 spin_unlock(&inode->i_lock);
1518 return status; 1587 return status;
1519} 1588}
@@ -1715,6 +1784,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1715 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); 1784 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
1716 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1785 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
1717 nfsi->attrtimeo_timestamp = now; 1786 nfsi->attrtimeo_timestamp = now;
1787 /* Set barrier to be more recent than all outstanding updates */
1718 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 1788 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1719 } else { 1789 } else {
1720 if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) { 1790 if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
@@ -1722,6 +1792,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1722 nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode); 1792 nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
1723 nfsi->attrtimeo_timestamp = now; 1793 nfsi->attrtimeo_timestamp = now;
1724 } 1794 }
1795 /* Set the barrier to be more recent than this fattr */
1796 if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
1797 nfsi->attr_gencount = fattr->gencount;
1725 } 1798 }
1726 invalid &= ~NFS_INO_INVALID_ATTR; 1799 invalid &= ~NFS_INO_INVALID_ATTR;
1727 /* Don't invalidate the data if we were to blame */ 1800 /* Don't invalidate the data if we were to blame */
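
Note: several of the fs/nfs/inode.c hunks above revolve around one idea: every fattr carries a generation count, nfs_fattr_set_barrier() bumps it past anything already in flight, and "newer than" checks are done with a signed difference so the counter may wrap. The sketch below shows only that comparison; the variable names are invented and the subtraction is written in the unsigned-then-reinterpret form, a wrap-safe variant of the casts used in the diff.

#include <stdio.h>

/* Wrap-safe "is a newer than b" check, in the spirit of attr_gencount:
 * the difference is computed in unsigned arithmetic and the result is
 * reinterpreted as signed, so it stays correct across wrap-around. */
static int gen_after(unsigned long a, unsigned long b)
{
	return (long)(a - b) > 0;
}

int main(void)
{
	unsigned long inode_gen = (unsigned long)-2;	/* about to wrap */
	unsigned long fattr_gen = inode_gen + 3;	/* bumped past the wrap */

	printf("fattr newer than cached copy? %d\n",
	       gen_after(fattr_gen, inode_gen));
	printf("stale fattr rejected?         %d\n",
	       !gen_after(inode_gen, fattr_gen));
	return 0;
}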
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b802fb3a2d99..9e6475bc5ba2 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -459,6 +459,7 @@ void nfs_mark_request_commit(struct nfs_page *req,
459 struct nfs_commit_info *cinfo, 459 struct nfs_commit_info *cinfo,
460 u32 ds_commit_idx); 460 u32 ds_commit_idx);
461int nfs_write_need_commit(struct nfs_pgio_header *); 461int nfs_write_need_commit(struct nfs_pgio_header *);
462void nfs_writeback_update_inode(struct nfs_pgio_header *hdr);
462int nfs_generic_commit_list(struct inode *inode, struct list_head *head, 463int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
463 int how, struct nfs_commit_info *cinfo); 464 int how, struct nfs_commit_info *cinfo);
464void nfs_retry_commit(struct list_head *page_list, 465void nfs_retry_commit(struct list_head *page_list,
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 78e557c3ab87..1f11d2533ee4 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -138,7 +138,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
138 nfs_fattr_init(fattr); 138 nfs_fattr_init(fattr);
139 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); 139 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
140 if (status == 0) 140 if (status == 0)
141 nfs_setattr_update_inode(inode, sattr); 141 nfs_setattr_update_inode(inode, sattr, fattr);
142 dprintk("NFS reply setattr: %d\n", status); 142 dprintk("NFS reply setattr: %d\n", status);
143 return status; 143 return status;
144} 144}
@@ -834,7 +834,7 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
834 if (nfs3_async_handle_jukebox(task, inode)) 834 if (nfs3_async_handle_jukebox(task, inode))
835 return -EAGAIN; 835 return -EAGAIN;
836 if (task->tk_status >= 0) 836 if (task->tk_status >= 0)
837 nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr); 837 nfs_writeback_update_inode(hdr);
838 return 0; 838 return 0;
839} 839}
840 840
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 2a932fdc57cb..53852a4bd88b 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1987,6 +1987,11 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
1987 if (entry->fattr->valid & NFS_ATTR_FATTR_V3) 1987 if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
1988 entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); 1988 entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
1989 1989
1990 if (entry->fattr->fileid != entry->ino) {
1991 entry->fattr->mounted_on_fileid = entry->ino;
1992 entry->fattr->valid |= NFS_ATTR_FATTR_MOUNTED_ON_FILEID;
1993 }
1994
1990 /* In fact, a post_op_fh3: */ 1995 /* In fact, a post_op_fh3: */
1991 p = xdr_inline_decode(xdr, 4); 1996 p = xdr_inline_decode(xdr, 4);
1992 if (unlikely(p == NULL)) 1997 if (unlikely(p == NULL))
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 8646af9b11d2..86d6214ea022 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -621,6 +621,9 @@ int nfs41_walk_client_list(struct nfs_client *new,
621 spin_lock(&nn->nfs_client_lock); 621 spin_lock(&nn->nfs_client_lock);
622 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { 622 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
623 623
624 if (pos == new)
625 goto found;
626
624 if (pos->rpc_ops != new->rpc_ops) 627 if (pos->rpc_ops != new->rpc_ops)
625 continue; 628 continue;
626 629
@@ -639,10 +642,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
639 prev = pos; 642 prev = pos;
640 643
641 status = nfs_wait_client_init_complete(pos); 644 status = nfs_wait_client_init_complete(pos);
642 if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
643 nfs4_schedule_lease_recovery(pos);
644 status = nfs4_wait_clnt_recover(pos);
645 }
646 spin_lock(&nn->nfs_client_lock); 645 spin_lock(&nn->nfs_client_lock);
647 if (status < 0) 646 if (status < 0)
648 break; 647 break;
@@ -668,7 +667,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
668 */ 667 */
669 if (!nfs4_match_client_owner_id(pos, new)) 668 if (!nfs4_match_client_owner_id(pos, new))
670 continue; 669 continue;
671 670found:
672 atomic_inc(&pos->cl_count); 671 atomic_inc(&pos->cl_count);
673 *result = pos; 672 *result = pos;
674 status = 0; 673 status = 0;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 88180ac5ea0e..627f37c44456 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -901,6 +901,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
901 if (!cinfo->atomic || cinfo->before != dir->i_version) 901 if (!cinfo->atomic || cinfo->before != dir->i_version)
902 nfs_force_lookup_revalidate(dir); 902 nfs_force_lookup_revalidate(dir);
903 dir->i_version = cinfo->after; 903 dir->i_version = cinfo->after;
904 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
904 nfs_fscache_invalidate(dir); 905 nfs_fscache_invalidate(dir);
905 spin_unlock(&dir->i_lock); 906 spin_unlock(&dir->i_lock);
906} 907}
@@ -1552,6 +1553,9 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod
1552 1553
1553 opendata->o_arg.open_flags = 0; 1554 opendata->o_arg.open_flags = 0;
1554 opendata->o_arg.fmode = fmode; 1555 opendata->o_arg.fmode = fmode;
1556 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1557 NFS_SB(opendata->dentry->d_sb),
1558 fmode, 0);
1555 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 1559 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1556 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 1560 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1557 nfs4_init_opendata_res(opendata); 1561 nfs4_init_opendata_res(opendata);
@@ -2413,8 +2417,8 @@ static int _nfs4_do_open(struct inode *dir,
2413 opendata->o_res.f_attr, sattr, 2417 opendata->o_res.f_attr, sattr,
2414 state, label, olabel); 2418 state, label, olabel);
2415 if (status == 0) { 2419 if (status == 0) {
2416 nfs_setattr_update_inode(state->inode, sattr); 2420 nfs_setattr_update_inode(state->inode, sattr,
2417 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); 2421 opendata->o_res.f_attr);
2418 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2422 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2419 } 2423 }
2420 } 2424 }
@@ -2651,7 +2655,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
2651 case -NFS4ERR_BAD_STATEID: 2655 case -NFS4ERR_BAD_STATEID:
2652 case -NFS4ERR_EXPIRED: 2656 case -NFS4ERR_EXPIRED:
2653 if (!nfs4_stateid_match(&calldata->arg.stateid, 2657 if (!nfs4_stateid_match(&calldata->arg.stateid,
2654 &state->stateid)) { 2658 &state->open_stateid)) {
2655 rpc_restart_call_prepare(task); 2659 rpc_restart_call_prepare(task);
2656 goto out_release; 2660 goto out_release;
2657 } 2661 }
@@ -2687,7 +2691,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
2687 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2691 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2688 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2692 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2689 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2693 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2690 nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid); 2694 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2691 /* Calculate the change in open mode */ 2695 /* Calculate the change in open mode */
2692 calldata->arg.fmode = 0; 2696 calldata->arg.fmode = 0;
2693 if (state->n_rdwr == 0) { 2697 if (state->n_rdwr == 0) {
@@ -3288,7 +3292,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3288 3292
3289 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3293 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3290 if (status == 0) { 3294 if (status == 0) {
3291 nfs_setattr_update_inode(inode, sattr); 3295 nfs_setattr_update_inode(inode, sattr, fattr);
3292 nfs_setsecurity(inode, fattr, label); 3296 nfs_setsecurity(inode, fattr, label);
3293 } 3297 }
3294 nfs4_label_free(label); 3298 nfs4_label_free(label);
@@ -4234,7 +4238,7 @@ static int nfs4_write_done_cb(struct rpc_task *task,
4234 } 4238 }
4235 if (task->tk_status >= 0) { 4239 if (task->tk_status >= 0) {
4236 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4240 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4237 nfs_post_op_update_inode_force_wcc(inode, &hdr->fattr); 4241 nfs_writeback_update_inode(hdr);
4238 } 4242 }
4239 return 0; 4243 return 0;
4240} 4244}
@@ -6893,9 +6897,13 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6893 6897
6894 if (status == 0) { 6898 if (status == 0) {
6895 clp->cl_clientid = res.clientid; 6899 clp->cl_clientid = res.clientid;
6896 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); 6900 clp->cl_exchange_flags = res.flags;
6897 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) 6901 /* Client ID is not confirmed */
6902 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
6903 clear_bit(NFS4_SESSION_ESTABLISHED,
6904 &clp->cl_session->session_state);
6898 clp->cl_seqid = res.seqid; 6905 clp->cl_seqid = res.seqid;
6906 }
6899 6907
6900 kfree(clp->cl_serverowner); 6908 kfree(clp->cl_serverowner);
6901 clp->cl_serverowner = res.server_owner; 6909 clp->cl_serverowner = res.server_owner;
@@ -7227,6 +7235,9 @@ static void nfs4_update_session(struct nfs4_session *session,
7227 struct nfs41_create_session_res *res) 7235 struct nfs41_create_session_res *res)
7228{ 7236{
7229 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7237 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7238 /* Mark client id and session as being confirmed */
7239 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7240 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7230 session->flags = res->flags; 7241 session->flags = res->flags;
7231 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7242 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7232 if (res->flags & SESSION4_BACK_CHAN) 7243 if (res->flags & SESSION4_BACK_CHAN)
@@ -7322,8 +7333,8 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
7322 dprintk("--> nfs4_proc_destroy_session\n"); 7333 dprintk("--> nfs4_proc_destroy_session\n");
7323 7334
7324 /* session is still being setup */ 7335 /* session is still being setup */
7325 if (session->clp->cl_cons_state != NFS_CS_READY) 7336 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7326 return status; 7337 return 0;
7327 7338
7328 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7339 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7329 trace_nfs4_destroy_session(session->clp, status); 7340 trace_nfs4_destroy_session(session->clp, status);
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index fc46c7455898..e3ea2c5324d6 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -70,6 +70,7 @@ struct nfs4_session {
70 70
71enum nfs4_session_state { 71enum nfs4_session_state {
72 NFS4_SESSION_INITING, 72 NFS4_SESSION_INITING,
73 NFS4_SESSION_ESTABLISHED,
73}; 74};
74 75
75extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, 76extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5ad908e9ce9c..f95e3b58bbc3 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -346,9 +346,23 @@ int nfs41_discover_server_trunking(struct nfs_client *clp,
346 status = nfs4_proc_exchange_id(clp, cred); 346 status = nfs4_proc_exchange_id(clp, cred);
347 if (status != NFS4_OK) 347 if (status != NFS4_OK)
348 return status; 348 return status;
349 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
350 349
351 return nfs41_walk_client_list(clp, result, cred); 350 status = nfs41_walk_client_list(clp, result, cred);
351 if (status < 0)
352 return status;
353 if (clp != *result)
354 return 0;
355
356 /* Purge state if the client id was established in a prior instance */
357 if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R)
358 set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
359 else
360 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
361 nfs4_schedule_state_manager(clp);
362 status = nfs_wait_client_init_complete(clp);
363 if (status < 0)
364 nfs_put_client(clp);
365 return status;
352} 366}
353 367
354#endif /* CONFIG_NFS_V4_1 */ 368#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b09cc23d6f43..c63189acd052 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -139,7 +139,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
139 nfs_fattr_init(fattr); 139 nfs_fattr_init(fattr);
140 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); 140 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
141 if (status == 0) 141 if (status == 0)
142 nfs_setattr_update_inode(inode, sattr); 142 nfs_setattr_update_inode(inode, sattr, fattr);
143 dprintk("NFS reply setattr: %d\n", status); 143 dprintk("NFS reply setattr: %d\n", status);
144 return status; 144 return status;
145} 145}
@@ -609,10 +609,8 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
609 609
610static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 610static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
611{ 611{
612 struct inode *inode = hdr->inode;
613
614 if (task->tk_status >= 0) 612 if (task->tk_status >= 0)
615 nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr); 613 nfs_writeback_update_inode(hdr);
616 return 0; 614 return 0;
617} 615}
618 616
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 595d81e354d1..849ed784d6ac 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1377,6 +1377,36 @@ static int nfs_should_remove_suid(const struct inode *inode)
1377 return 0; 1377 return 0;
1378} 1378}
1379 1379
1380static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1381 struct nfs_fattr *fattr)
1382{
1383 struct nfs_pgio_args *argp = &hdr->args;
1384 struct nfs_pgio_res *resp = &hdr->res;
1385
1386 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1387 return;
1388 if (argp->offset + resp->count != fattr->size)
1389 return;
1390 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
1391 return;
1392 /* Set attribute barrier */
1393 nfs_fattr_set_barrier(fattr);
1394}
1395
1396void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1397{
1398 struct nfs_fattr *fattr = hdr->res.fattr;
1399 struct inode *inode = hdr->inode;
1400
1401 if (fattr == NULL)
1402 return;
1403 spin_lock(&inode->i_lock);
1404 nfs_writeback_check_extend(hdr, fattr);
1405 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
1406 spin_unlock(&inode->i_lock);
1407}
1408EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
1409
1380/* 1410/*
1381 * This function is called when the WRITE call is complete. 1411 * This function is called when the WRITE call is complete.
1382 */ 1412 */
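
Note: nfs_writeback_check_extend() above only sets the attribute barrier when the server-reported size is exactly the end of the write that was just acknowledged and the file is not already larger locally, which is what replaces the removed nfs_size_need_update() heuristic. The arithmetic is small enough to show on its own; this is a hypothetical standalone check, not the kernel function.

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when a completed write of 'count' bytes at 'offset' is what
 * extended the file to 'server_size', i.e. the new size is safe to trust
 * and worth an attribute barrier (mirrors nfs_writeback_check_extend()). */
static int write_extended_file(uint64_t offset, uint32_t count,
			       uint64_t server_size, uint64_t local_isize)
{
	if (offset + count != server_size)
		return 0;	/* some other writer changed the size */
	if (server_size < local_isize)
		return 0;	/* we already know about a larger size */
	return 1;
}

int main(void)
{
	/* A 4 KiB write at offset 8 KiB grows an 8 KiB file to 12 KiB. */
	printf("extends: %d\n", write_extended_file(8192, 4096, 12288, 8192));
	/* Same write, but the server reports 16 KiB: someone else wrote too. */
	printf("extends: %d\n", write_extended_file(8192, 4096, 16384, 8192));
	return 0;
}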
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6b2a09f793f..d2f2c37dc2db 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1638,7 +1638,7 @@ __destroy_client(struct nfs4_client *clp)
1638 nfs4_put_stid(&dp->dl_stid); 1638 nfs4_put_stid(&dp->dl_stid);
1639 } 1639 }
1640 while (!list_empty(&clp->cl_revoked)) { 1640 while (!list_empty(&clp->cl_revoked)) {
1641 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1641 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
1642 list_del_init(&dp->dl_recall_lru); 1642 list_del_init(&dp->dl_recall_lru);
1643 nfs4_put_stid(&dp->dl_stid); 1643 nfs4_put_stid(&dp->dl_stid);
1644 } 1644 }
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 469086b9f99b..0c3f303baf32 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1907 struct the_nilfs *nilfs) 1907 struct the_nilfs *nilfs)
1908{ 1908{
1909 struct nilfs_inode_info *ii, *n; 1909 struct nilfs_inode_info *ii, *n;
1910 int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
1910 int defer_iput = false; 1911 int defer_iput = false;
1911 1912
1912 spin_lock(&nilfs->ns_inode_lock); 1913 spin_lock(&nilfs->ns_inode_lock);
@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1919 brelse(ii->i_bh); 1920 brelse(ii->i_bh);
1920 ii->i_bh = NULL; 1921 ii->i_bh = NULL;
1921 list_del_init(&ii->i_dirty); 1922 list_del_init(&ii->i_dirty);
1922 if (!ii->vfs_inode.i_nlink) { 1923 if (!ii->vfs_inode.i_nlink || during_mount) {
1923 /* 1924 /*
1924 * Defer calling iput() to avoid a deadlock 1925 * Defer calling iput() to avoid deadlocks if
1925 * over I_SYNC flag for inodes with i_nlink == 0 1926 * i_nlink == 0 or mount is not yet finished.
1926 */ 1927 */
1927 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); 1928 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
1928 defer_iput = true; 1929 defer_iput = true;
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 9a66ff79ff27..d2f97ecca6a5 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -143,7 +143,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
143 !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) 143 !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
144 return false; 144 return false;
145 145
146 if (event_mask & marks_mask & ~marks_ignored_mask) 146 if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
147 ~marks_ignored_mask)
147 return true; 148 return true;
148 149
149 return false; 150 return false;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 8490c64d34fe..460c6c37e683 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb)
502 502
503static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) 503static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb)
504{ 504{
505 if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) 505 if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
506 return 1; 506 return 1;
507 return 0; 507 return 0;
508} 508}
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 20e37a3ed26f..db64ce2d4667 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -102,11 +102,11 @@
102 | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ 102 | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \
103 | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ 103 | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \
104 | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ 104 | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \
105 | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO) 105 | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \
106 | OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
106#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ 107#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \
107 | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ 108 | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \
108 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \ 109 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)
109 | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
110 110
111/* 111/*
112 * Heartbeat-only devices are missing journals and other files. The 112 * Heartbeat-only devices are missing journals and other files. The
@@ -179,6 +179,11 @@
179#define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 179#define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000
180 180
181/* 181/*
182 * Append Direct IO support
183 */
184#define OCFS2_FEATURE_INCOMPAT_APPEND_DIO 0x8000
185
186/*
182 * backup superblock flag is used to indicate that this volume 187 * backup superblock flag is used to indicate that this volume
183 * has backup superblocks. 188 * has backup superblocks.
184 */ 189 */
@@ -200,10 +205,6 @@
200#define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 205#define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002
201#define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 206#define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004
202 207
203/*
204 * Append Direct IO support
205 */
206#define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008
207 208
208/* The byte offset of the first backup block will be 1G. 209/* The byte offset of the first backup block will be 1G.
209 * The following will be 4G, 16G, 64G, 256G and 1T. 210 * The following will be 4G, 16G, 64G, 256G and 1T.
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index a24addfdfcec..0de6290df4da 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -68,8 +68,8 @@ struct drm_mm_node {
68 unsigned scanned_preceeds_hole : 1; 68 unsigned scanned_preceeds_hole : 1;
69 unsigned allocated : 1; 69 unsigned allocated : 1;
70 unsigned long color; 70 unsigned long color;
71 unsigned long start; 71 u64 start;
72 unsigned long size; 72 u64 size;
73 struct drm_mm *mm; 73 struct drm_mm *mm;
74}; 74};
75 75
@@ -82,16 +82,16 @@ struct drm_mm {
82 unsigned int scan_check_range : 1; 82 unsigned int scan_check_range : 1;
83 unsigned scan_alignment; 83 unsigned scan_alignment;
84 unsigned long scan_color; 84 unsigned long scan_color;
85 unsigned long scan_size; 85 u64 scan_size;
86 unsigned long scan_hit_start; 86 u64 scan_hit_start;
87 unsigned long scan_hit_end; 87 u64 scan_hit_end;
88 unsigned scanned_blocks; 88 unsigned scanned_blocks;
89 unsigned long scan_start; 89 u64 scan_start;
90 unsigned long scan_end; 90 u64 scan_end;
91 struct drm_mm_node *prev_scanned_node; 91 struct drm_mm_node *prev_scanned_node;
92 92
93 void (*color_adjust)(struct drm_mm_node *node, unsigned long color, 93 void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
94 unsigned long *start, unsigned long *end); 94 u64 *start, u64 *end);
95}; 95};
96 96
97/** 97/**
@@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
124 return mm->hole_stack.next; 124 return mm->hole_stack.next;
125} 125}
126 126
127static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node) 127static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
128{ 128{
129 return hole_node->start + hole_node->size; 129 return hole_node->start + hole_node->size;
130} 130}
@@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no
140 * Returns: 140 * Returns:
141 * Start of the subsequent hole. 141 * Start of the subsequent hole.
142 */ 142 */
143static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) 143static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
144{ 144{
145 BUG_ON(!hole_node->hole_follows); 145 BUG_ON(!hole_node->hole_follows);
146 return __drm_mm_hole_node_start(hole_node); 146 return __drm_mm_hole_node_start(hole_node);
147} 147}
148 148
149static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node) 149static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
150{ 150{
151 return list_entry(hole_node->node_list.next, 151 return list_entry(hole_node->node_list.next,
152 struct drm_mm_node, node_list)->start; 152 struct drm_mm_node, node_list)->start;
@@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node
163 * Returns: 163 * Returns:
164 * End of the subsequent hole. 164 * End of the subsequent hole.
165 */ 165 */
166static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) 166static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
167{ 167{
168 return __drm_mm_hole_node_end(hole_node); 168 return __drm_mm_hole_node_end(hole_node);
169} 169}
@@ -222,7 +222,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
222 222
223int drm_mm_insert_node_generic(struct drm_mm *mm, 223int drm_mm_insert_node_generic(struct drm_mm *mm,
224 struct drm_mm_node *node, 224 struct drm_mm_node *node,
225 unsigned long size, 225 u64 size,
226 unsigned alignment, 226 unsigned alignment,
227 unsigned long color, 227 unsigned long color,
228 enum drm_mm_search_flags sflags, 228 enum drm_mm_search_flags sflags,
@@ -245,7 +245,7 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
245 */ 245 */
246static inline int drm_mm_insert_node(struct drm_mm *mm, 246static inline int drm_mm_insert_node(struct drm_mm *mm,
247 struct drm_mm_node *node, 247 struct drm_mm_node *node,
248 unsigned long size, 248 u64 size,
249 unsigned alignment, 249 unsigned alignment,
250 enum drm_mm_search_flags flags) 250 enum drm_mm_search_flags flags)
251{ 251{
@@ -255,11 +255,11 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
255 255
256int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, 256int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
257 struct drm_mm_node *node, 257 struct drm_mm_node *node,
258 unsigned long size, 258 u64 size,
259 unsigned alignment, 259 unsigned alignment,
260 unsigned long color, 260 unsigned long color,
261 unsigned long start, 261 u64 start,
262 unsigned long end, 262 u64 end,
263 enum drm_mm_search_flags sflags, 263 enum drm_mm_search_flags sflags,
264 enum drm_mm_allocator_flags aflags); 264 enum drm_mm_allocator_flags aflags);
265/** 265/**
@@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
282 */ 282 */
283static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, 283static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
284 struct drm_mm_node *node, 284 struct drm_mm_node *node,
285 unsigned long size, 285 u64 size,
286 unsigned alignment, 286 unsigned alignment,
287 unsigned long start, 287 u64 start,
288 unsigned long end, 288 u64 end,
289 enum drm_mm_search_flags flags) 289 enum drm_mm_search_flags flags)
290{ 290{
291 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 291 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
@@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
296void drm_mm_remove_node(struct drm_mm_node *node); 296void drm_mm_remove_node(struct drm_mm_node *node);
297void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); 297void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
298void drm_mm_init(struct drm_mm *mm, 298void drm_mm_init(struct drm_mm *mm,
299 unsigned long start, 299 u64 start,
300 unsigned long size); 300 u64 size);
301void drm_mm_takedown(struct drm_mm *mm); 301void drm_mm_takedown(struct drm_mm *mm);
302bool drm_mm_clean(struct drm_mm *mm); 302bool drm_mm_clean(struct drm_mm *mm);
303 303
304void drm_mm_init_scan(struct drm_mm *mm, 304void drm_mm_init_scan(struct drm_mm *mm,
305 unsigned long size, 305 u64 size,
306 unsigned alignment, 306 unsigned alignment,
307 unsigned long color); 307 unsigned long color);
308void drm_mm_init_scan_with_range(struct drm_mm *mm, 308void drm_mm_init_scan_with_range(struct drm_mm *mm,
309 unsigned long size, 309 u64 size,
310 unsigned alignment, 310 unsigned alignment,
311 unsigned long color, 311 unsigned long color,
312 unsigned long start, 312 u64 start,
313 unsigned long end); 313 u64 end);
314bool drm_mm_scan_add_block(struct drm_mm_node *node); 314bool drm_mm_scan_add_block(struct drm_mm_node *node);
315bool drm_mm_scan_remove_block(struct drm_mm_node *node); 315bool drm_mm_scan_remove_block(struct drm_mm_node *node);
316 316
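A short usage sketch of the widened interface, not taken from the patch: vram_mm and the numeric bounds are hypothetical, and only the drm_mm_insert_node_in_range() signature above is from the header. With u64 sizes and offsets, a region beyond 4 GiB can be managed even on 32-bit hosts where unsigned long is 32 bits:

	struct drm_mm_node node = {};
	u64 start = 0x100000000ULL;	/* beyond 4 GiB */
	u64 end   = 0x140000000ULL;
	int ret;

	ret = drm_mm_insert_node_in_range(vram_mm, &node, 4096, 0,
					  start, end, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		return ret;
	/* node.start is now a u64 GPU offset */
	drm_mm_remove_node(&node);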
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0ccf7f267ff9..c768ddfbe53c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -249,7 +249,7 @@ struct ttm_buffer_object {
249 * either of these locks held. 249 * either of these locks held.
250 */ 250 */
251 251
252 unsigned long offset; 252 uint64_t offset; /* GPU address space is independent of CPU word size */
253 uint32_t cur_placement; 253 uint32_t cur_placement;
254 254
255 struct sg_table *sg; 255 struct sg_table *sg;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 142d752fc450..813042cede57 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -277,7 +277,7 @@ struct ttm_mem_type_manager {
277 bool has_type; 277 bool has_type;
278 bool use_type; 278 bool use_type;
279 uint32_t flags; 279 uint32_t flags;
280 unsigned long gpu_offset; 280 uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
281 uint64_t size; 281 uint64_t size;
282 uint32_t available_caching; 282 uint32_t available_caching;
283 uint32_t default_caching; 283 uint32_t default_caching;
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h
index 2fbc804e1a45..226f77246a70 100644
--- a/include/dt-bindings/pinctrl/am33xx.h
+++ b/include/dt-bindings/pinctrl/am33xx.h
@@ -13,7 +13,8 @@
13 13
14#define PULL_DISABLE (1 << 3) 14#define PULL_DISABLE (1 << 3)
15#define INPUT_EN (1 << 5) 15#define INPUT_EN (1 << 5)
16#define SLEWCTRL_FAST (1 << 6) 16#define SLEWCTRL_SLOW (1 << 6)
17#define SLEWCTRL_FAST 0
17 18
18/* update macro depending on INPUT_EN and PULL_ENA */ 19/* update macro depending on INPUT_EN and PULL_ENA */
19#undef PIN_OUTPUT 20#undef PIN_OUTPUT
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index 9c2e4f82381e..5f4d01898c9c 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -18,7 +18,8 @@
18#define PULL_DISABLE (1 << 16) 18#define PULL_DISABLE (1 << 16)
19#define PULL_UP (1 << 17) 19#define PULL_UP (1 << 17)
20#define INPUT_EN (1 << 18) 20#define INPUT_EN (1 << 18)
21#define SLEWCTRL_FAST (1 << 19) 21#define SLEWCTRL_SLOW (1 << 19)
22#define SLEWCTRL_FAST 0
22#define DS0_PULL_UP_DOWN_EN (1 << 27) 23#define DS0_PULL_UP_DOWN_EN (1 << 27)
23 24
24#define PIN_OUTPUT (PULL_DISABLE) 25#define PIN_OUTPUT (PULL_DISABLE)
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index 79d6edf446d5..521ec1f2e6bc 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -24,16 +24,22 @@
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/types.h> 25#include <linux/types.h>
26 26
27#include <asm/arm-cci.h>
28
27struct device_node; 29struct device_node;
28 30
29#ifdef CONFIG_ARM_CCI 31#ifdef CONFIG_ARM_CCI
30extern bool cci_probed(void); 32extern bool cci_probed(void);
33#else
34static inline bool cci_probed(void) { return false; }
35#endif
36
37#ifdef CONFIG_ARM_CCI400_PORT_CTRL
31extern int cci_ace_get_port(struct device_node *dn); 38extern int cci_ace_get_port(struct device_node *dn);
32extern int cci_disable_port_by_cpu(u64 mpidr); 39extern int cci_disable_port_by_cpu(u64 mpidr);
33extern int __cci_control_port_by_device(struct device_node *dn, bool enable); 40extern int __cci_control_port_by_device(struct device_node *dn, bool enable);
34extern int __cci_control_port_by_index(u32 port, bool enable); 41extern int __cci_control_port_by_index(u32 port, bool enable);
35#else 42#else
36static inline bool cci_probed(void) { return false; }
37static inline int cci_ace_get_port(struct device_node *dn) 43static inline int cci_ace_get_port(struct device_node *dn)
38{ 44{
39 return -ENODEV; 45 return -ENODEV;
@@ -49,6 +55,7 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
49 return -ENODEV; 55 return -ENODEV;
50} 56}
51#endif 57#endif
58
52#define cci_disable_port_by_device(dev) \ 59#define cci_disable_port_by_device(dev) \
53 __cci_control_port_by_device(dev, false) 60 __cci_control_port_by_device(dev, false)
54#define cci_enable_port_by_device(dev) \ 61#define cci_enable_port_by_device(dev) \
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 8381bbfbc308..68c16a6bedb3 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees);
125 */ 125 */
126int clk_get_phase(struct clk *clk); 126int clk_get_phase(struct clk *clk);
127 127
128/**
129 * clk_is_match - check if two clk's point to the same hardware clock
130 * @p: clk compared against q
131 * @q: clk compared against p
132 *
133 * Returns true if the two struct clk pointers both point to the same hardware
134 * clock node. Put differently, returns true if struct clk *p and struct clk *q
135 * share the same struct clk_core object.
136 *
137 * Returns false otherwise. Note that two NULL clks are treated as matching.
138 */
139bool clk_is_match(const struct clk *p, const struct clk *q);
140
128#else 141#else
129 142
130static inline long clk_get_accuracy(struct clk *clk) 143static inline long clk_get_accuracy(struct clk *clk)
@@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk)
142 return -ENOTSUPP; 155 return -ENOTSUPP;
143} 156}
144 157
158static inline bool clk_is_match(const struct clk *p, const struct clk *q)
159{
160 return p == q;
161}
162
145#endif 163#endif
146 164
147/** 165/**
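A brief sketch of the new helper in use (not from the patch); dev, "fck" and "ick" are hypothetical, while clk_is_match() and devm_clk_get() are the real interfaces. When the common clk framework is not built in, the stub above falls back to plain pointer comparison:

	struct clk *a = devm_clk_get(dev, "fck");
	struct clk *b = devm_clk_get(dev, "ick");

	if (!IS_ERR(a) && !IS_ERR(b) && clk_is_match(a, b))
		dev_info(dev, "fck and ick share the same hardware clock\n");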
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index f551a9299ac9..306178d7309f 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -126,6 +126,8 @@ struct cpuidle_driver {
126 126
127#ifdef CONFIG_CPU_IDLE 127#ifdef CONFIG_CPU_IDLE
128extern void disable_cpuidle(void); 128extern void disable_cpuidle(void);
129extern bool cpuidle_not_available(struct cpuidle_driver *drv,
130 struct cpuidle_device *dev);
129 131
130extern int cpuidle_select(struct cpuidle_driver *drv, 132extern int cpuidle_select(struct cpuidle_driver *drv,
131 struct cpuidle_device *dev); 133 struct cpuidle_device *dev);
@@ -150,11 +152,17 @@ extern void cpuidle_resume(void);
150extern int cpuidle_enable_device(struct cpuidle_device *dev); 152extern int cpuidle_enable_device(struct cpuidle_device *dev);
151extern void cpuidle_disable_device(struct cpuidle_device *dev); 153extern void cpuidle_disable_device(struct cpuidle_device *dev);
152extern int cpuidle_play_dead(void); 154extern int cpuidle_play_dead(void);
153extern void cpuidle_enter_freeze(void); 155extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
156 struct cpuidle_device *dev);
157extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
158 struct cpuidle_device *dev);
154 159
155extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); 160extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
156#else 161#else
157static inline void disable_cpuidle(void) { } 162static inline void disable_cpuidle(void) { }
163static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
164 struct cpuidle_device *dev)
165{return true; }
158static inline int cpuidle_select(struct cpuidle_driver *drv, 166static inline int cpuidle_select(struct cpuidle_driver *drv,
159 struct cpuidle_device *dev) 167 struct cpuidle_device *dev)
160{return -ENODEV; } 168{return -ENODEV; }
@@ -183,7 +191,12 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
183{return -ENODEV; } 191{return -ENODEV; }
184static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } 192static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
185static inline int cpuidle_play_dead(void) {return -ENODEV; } 193static inline int cpuidle_play_dead(void) {return -ENODEV; }
186static inline void cpuidle_enter_freeze(void) { } 194static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
195 struct cpuidle_device *dev)
196{return -ENODEV; }
197static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
198 struct cpuidle_device *dev)
199{return -ENODEV; }
187static inline struct cpuidle_driver *cpuidle_get_cpu_driver( 200static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
188 struct cpuidle_device *dev) {return NULL; } 201 struct cpuidle_device *dev) {return NULL; }
189#endif 202#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index d9b05b5bf8c7..2e88580194f0 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -52,11 +52,17 @@
52 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. 52 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
53 * Used by threaded interrupts which need to keep the 53 * Used by threaded interrupts which need to keep the
54 * irq line disabled until the threaded handler has been run. 54 * irq line disabled until the threaded handler has been run.
55 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend 55 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
56 * that this interrupt will wake the system from a suspended
57 * state. See Documentation/power/suspend-and-interrupts.txt
56 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set 58 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
57 * IRQF_NO_THREAD - Interrupt cannot be threaded 59 * IRQF_NO_THREAD - Interrupt cannot be threaded
58 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device 60 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
59 * resume time. 61 * resume time.
62 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
63 * interrupt handler after suspending interrupts. For system
64 * wakeup devices users need to implement wakeup detection in
65 * their interrupt handlers.
60 */ 66 */
61#define IRQF_DISABLED 0x00000020 67#define IRQF_DISABLED 0x00000020
62#define IRQF_SHARED 0x00000080 68#define IRQF_SHARED 0x00000080
@@ -70,6 +76,7 @@
70#define IRQF_FORCE_RESUME 0x00008000 76#define IRQF_FORCE_RESUME 0x00008000
71#define IRQF_NO_THREAD 0x00010000 77#define IRQF_NO_THREAD 0x00010000
72#define IRQF_EARLY_RESUME 0x00020000 78#define IRQF_EARLY_RESUME 0x00020000
79#define IRQF_COND_SUSPEND 0x00040000
73 80
74#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) 81#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
75 82
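A hypothetical sketch of the new flag in use; my_handler, my_dev and the device name are made up, while request_irq() and the flag semantics come from this hunk and from the kernel/irq/manage.c change further down, which rejects IRQF_COND_SUSPEND without IRQF_SHARED or combined with IRQF_NO_SUSPEND:

	ret = request_irq(irq, my_handler,
			  IRQF_SHARED | IRQF_COND_SUSPEND,
			  "my-device", my_dev);
	if (ret)
		return ret;
	/*
	 * my_handler() may now run after interrupts have been suspended on a
	 * line shared with a NO_SUSPEND user, so it has to do its own system
	 * wakeup detection if the device is a wakeup source.
	 */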
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 800544bc7bfd..781974afff9f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -166,6 +166,11 @@
166 166
167#define GITS_TRANSLATER 0x10040 167#define GITS_TRANSLATER 0x10040
168 168
169#define GITS_CTLR_ENABLE (1U << 0)
170#define GITS_CTLR_QUIESCENT (1U << 31)
171
172#define GITS_TYPER_DEVBITS_SHIFT 13
173#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
169#define GITS_TYPER_PTA (1UL << 19) 174#define GITS_TYPER_PTA (1UL << 19)
170 175
171#define GITS_CBASER_VALID (1UL << 63) 176#define GITS_CBASER_VALID (1UL << 63)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index faf433af425e..dd1109fb241e 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -78,6 +78,7 @@ struct irq_desc {
78#ifdef CONFIG_PM_SLEEP 78#ifdef CONFIG_PM_SLEEP
79 unsigned int nr_actions; 79 unsigned int nr_actions;
80 unsigned int no_suspend_depth; 80 unsigned int no_suspend_depth;
81 unsigned int cond_suspend_depth;
81 unsigned int force_resume_depth; 82 unsigned int force_resume_depth;
82#endif 83#endif
83#ifdef CONFIG_PROC_FS 84#ifdef CONFIG_PROC_FS
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 72ba725ddf9c..5bb074431eb0 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -5,6 +5,7 @@
5 5
6struct kmem_cache; 6struct kmem_cache;
7struct page; 7struct page;
8struct vm_struct;
8 9
9#ifdef CONFIG_KASAN 10#ifdef CONFIG_KASAN
10 11
@@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size);
49void kasan_slab_alloc(struct kmem_cache *s, void *object); 50void kasan_slab_alloc(struct kmem_cache *s, void *object);
50void kasan_slab_free(struct kmem_cache *s, void *object); 51void kasan_slab_free(struct kmem_cache *s, void *object);
51 52
52#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
53
54int kasan_module_alloc(void *addr, size_t size); 53int kasan_module_alloc(void *addr, size_t size);
55void kasan_module_free(void *addr); 54void kasan_free_shadow(const struct vm_struct *vm);
56 55
57#else /* CONFIG_KASAN */ 56#else /* CONFIG_KASAN */
58 57
59#define MODULE_ALIGN 1
60
61static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 58static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
62 59
63static inline void kasan_enable_current(void) {} 60static inline void kasan_enable_current(void) {}
@@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
82static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} 79static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
83 80
84static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } 81static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
85static inline void kasan_module_free(void *addr) {} 82static inline void kasan_free_shadow(const struct vm_struct *vm) {}
86 83
87#endif /* CONFIG_KASAN */ 84#endif /* CONFIG_KASAN */
88 85
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 2bbc62aa818a..551f85456c11 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -427,7 +427,7 @@ struct mlx4_wqe_inline_seg {
427 427
428enum mlx4_update_qp_attr { 428enum mlx4_update_qp_attr {
429 MLX4_UPDATE_QP_SMAC = 1 << 0, 429 MLX4_UPDATE_QP_SMAC = 1 << 0,
430 MLX4_UPDATE_QP_VSD = 1 << 2, 430 MLX4_UPDATE_QP_VSD = 1 << 1,
431 MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 431 MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
432}; 432};
433 433
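For reference, the arithmetic behind this one-bit change: MLX4_UPDATE_QP_SUPPORTED_ATTRS is (1 << 2) - 1 = 0x3, so only bits 0 and 1 are accepted. With VSD defined as 1 << 2 (0x4) the flag sat outside that mask; redefining it as 1 << 1 (0x2) places it back under the supported-attributes mask alongside MLX4_UPDATE_QP_SMAC (1 << 0).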
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index f7556261fe3c..4d0cb9bba93e 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod);
84 84
85/* Any cleanup before freeing mod->module_init */ 85/* Any cleanup before freeing mod->module_init */
86void module_arch_freeing_init(struct module *mod); 86void module_arch_freeing_init(struct module *mod);
87
88#ifdef CONFIG_KASAN
89#include <linux/kasan.h>
90#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
91#else
92#define MODULE_ALIGN PAGE_SIZE
93#endif
94
87#endif 95#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5897b4ea5a3f..429d1790a27e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2342,6 +2342,7 @@ struct gro_remcsum {
2342 2342
2343static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) 2343static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2344{ 2344{
2345 grc->offset = 0;
2345 grc->delta = 0; 2346 grc->delta = 0;
2346} 2347}
2347 2348
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 2f77e0c651c8..b01ccf371fdc 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -343,6 +343,7 @@ extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
343extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); 343extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
344extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); 344extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
345extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); 345extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
346extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
346extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); 347extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
347extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); 348extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
348extern void nfs_access_set_mask(struct nfs_access_entry *, u32); 349extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
@@ -355,8 +356,9 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
355extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); 356extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
356extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); 357extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
357extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); 358extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
359extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping);
358extern int nfs_setattr(struct dentry *, struct iattr *); 360extern int nfs_setattr(struct dentry *, struct iattr *);
359extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); 361extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
360extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, 362extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
361 struct nfs4_label *label); 363 struct nfs4_label *label);
362extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); 364extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
@@ -369,6 +371,7 @@ extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ct
369extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); 371extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
370extern u64 nfs_compat_user_ino64(u64 fileid); 372extern u64 nfs_compat_user_ino64(u64 fileid);
371extern void nfs_fattr_init(struct nfs_fattr *fattr); 373extern void nfs_fattr_init(struct nfs_fattr *fattr);
374extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
372extern unsigned long nfs_inc_attr_generation_counter(void); 375extern unsigned long nfs_inc_attr_generation_counter(void);
373 376
374extern struct nfs_fattr *nfs_alloc_fattr(void); 377extern struct nfs_fattr *nfs_alloc_fattr(void);
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 8a860f096c35..611a691145c4 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root,
84static inline void of_platform_depopulate(struct device *parent) { } 84static inline void of_platform_depopulate(struct device *parent) { }
85#endif 85#endif
86 86
87#ifdef CONFIG_OF_DYNAMIC 87#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
88extern void of_platform_register_reconfig_notifier(void); 88extern void of_platform_register_reconfig_notifier(void);
89#else 89#else
90static inline void of_platform_register_reconfig_notifier(void) { } 90static inline void of_platform_register_reconfig_notifier(void) { }
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 58851275fed9..d438eeb08bff 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -54,10 +54,11 @@ struct rhash_head {
54 * @buckets: size * hash buckets 54 * @buckets: size * hash buckets
55 */ 55 */
56struct bucket_table { 56struct bucket_table {
57 size_t size; 57 size_t size;
58 unsigned int locks_mask; 58 unsigned int locks_mask;
59 spinlock_t *locks; 59 spinlock_t *locks;
60 struct rhash_head __rcu *buckets[]; 60
61 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
61}; 62};
62 63
63typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); 64typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
@@ -78,12 +79,6 @@ struct rhashtable;
78 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) 79 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
79 * @hashfn: Function to hash key 80 * @hashfn: Function to hash key
80 * @obj_hashfn: Function to hash object 81 * @obj_hashfn: Function to hash object
81 * @grow_decision: If defined, may return true if table should expand
82 * @shrink_decision: If defined, may return true if table should shrink
83 *
84 * Note: when implementing the grow and shrink decision function, min/max
85 * shift must be enforced, otherwise, resizing watermarks they set may be
86 * useless.
87 */ 82 */
88struct rhashtable_params { 83struct rhashtable_params {
89 size_t nelem_hint; 84 size_t nelem_hint;
@@ -97,10 +92,6 @@ struct rhashtable_params {
97 size_t locks_mul; 92 size_t locks_mul;
98 rht_hashfn_t hashfn; 93 rht_hashfn_t hashfn;
99 rht_obj_hashfn_t obj_hashfn; 94 rht_obj_hashfn_t obj_hashfn;
100 bool (*grow_decision)(const struct rhashtable *ht,
101 size_t new_size);
102 bool (*shrink_decision)(const struct rhashtable *ht,
103 size_t new_size);
104}; 95};
105 96
106/** 97/**
@@ -192,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
192void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); 183void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
193bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); 184bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
194 185
195bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
196bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
197
198int rhashtable_expand(struct rhashtable *ht); 186int rhashtable_expand(struct rhashtable *ht);
199int rhashtable_shrink(struct rhashtable *ht); 187int rhashtable_shrink(struct rhashtable *ht);
200 188
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index baf3e1d08416..d10965f0d8a4 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -143,13 +143,13 @@ struct uart_port {
143 unsigned char iotype; /* io access style */ 143 unsigned char iotype; /* io access style */
144 unsigned char unused1; 144 unsigned char unused1;
145 145
146#define UPIO_PORT (0) /* 8b I/O port access */ 146#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
147#define UPIO_HUB6 (1) /* Hub6 ISA card */ 147#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
148#define UPIO_MEM (2) /* 8b MMIO access */ 148#define UPIO_MEM (SERIAL_IO_MEM) /* 8b MMIO access */
149#define UPIO_MEM32 (3) /* 32b little endian */ 149#define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */
150#define UPIO_MEM32BE (4) /* 32b big endian */ 150#define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */
151#define UPIO_AU (5) /* Au1x00 and RT288x type IO */ 151#define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */
152#define UPIO_TSI (6) /* Tsi108/109 type IO */ 152#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
153 153
154 unsigned int read_status_mask; /* driver specific */ 154 unsigned int read_status_mask; /* driver specific */
155 unsigned int ignore_status_mask; /* driver specific */ 155 unsigned int ignore_status_mask; /* driver specific */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index ed9489d893a4..856d34dde79b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -649,7 +649,7 @@ struct spi_transfer {
649 * sequence completes. On some systems, many such sequences can execute as 649 * sequence completes. On some systems, many such sequences can execute as
650 * as single programmed DMA transfer. On all systems, these messages are 650 * as single programmed DMA transfer. On all systems, these messages are
651 * queued, and might complete after transactions to other devices. Messages 651 * queued, and might complete after transactions to other devices. Messages
652 * sent to a given spi_device are alway executed in FIFO order. 652 * sent to a given spi_device are always executed in FIFO order.
653 * 653 *
654 * The code that submits an spi_message (and its spi_transfers) 654 * The code that submits an spi_message (and its spi_transfers)
655 * to the lower layers is responsible for managing its memory. 655 * to the lower layers is responsible for managing its memory.
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 07a022641996..71880299ed48 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
98 size_t maxsize, size_t *start); 98 size_t maxsize, size_t *start);
99int iov_iter_npages(const struct iov_iter *i, int maxpages); 99int iov_iter_npages(const struct iov_iter *i, int maxpages);
100 100
101const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
102
101static inline size_t iov_iter_count(struct iov_iter *i) 103static inline size_t iov_iter_count(struct iov_iter *i)
102{ 104{
103 return i->count; 105 return i->count;
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 9bb547c7bce7..704a1ab8240c 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -190,8 +190,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
190 * @num_ports: the number of different ports this device will have. 190 * @num_ports: the number of different ports this device will have.
191 * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer 191 * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
192 * (0 = end-point size) 192 * (0 = end-point size)
193 * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer 193 * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
194 * (0 = end-point size)
195 * @calc_num_ports: pointer to a function to determine how many ports this 194 * @calc_num_ports: pointer to a function to determine how many ports this
196 * device has dynamically. It will be called after the probe() 195 * device has dynamically. It will be called after the probe()
197 * callback is called, but before attach() 196 * callback is called, but before attach()
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 7d7acb35603d..0ec598381f97 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
17#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ 17#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
18#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ 18#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
19#define VM_NO_GUARD 0x00000040 /* don't add guard page */ 19#define VM_NO_GUARD 0x00000040 /* don't add guard page */
20#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
20/* bits [20..32] reserved for arch specific ioremap internals */ 21/* bits [20..32] reserved for arch specific ioremap internals */
21 22
22/* 23/*
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 74db135f9957..f597846ff605 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -70,7 +70,8 @@ enum {
70 /* data contains off-queue information when !WORK_STRUCT_PWQ */ 70 /* data contains off-queue information when !WORK_STRUCT_PWQ */
71 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, 71 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
72 72
73 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), 73 __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
74 WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
74 75
75 /* 76 /*
76 * When a work item is off queue, its high bits point to the last 77 * When a work item is off queue, its high bits point to the last
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 1c1ad46250d5..fe328c52c46b 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
171 * @return Checksum of buffer. 171 * @return Checksum of buffer.
172 */ 172 */
173 173
174u16 cfpkt_iterate(struct cfpkt *pkt, 174int cfpkt_iterate(struct cfpkt *pkt,
175 u16 (*iter_func)(u16 chks, void *buf, u16 len), 175 u16 (*iter_func)(u16 chks, void *buf, u16 len),
176 u16 data); 176 u16 data);
177 177
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 9eaaa7884586..decb9a095ae7 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
119 const struct nft_data *data, 119 const struct nft_data *data,
120 enum nft_data_types type); 120 enum nft_data_types type);
121 121
122
123/**
124 * struct nft_userdata - user defined data associated with an object
125 *
126 * @len: length of the data
127 * @data: content
128 *
129 * The presence of user data is indicated in an object specific fashion,
130 * so a length of zero can't occur and the value "len" indicates data
131 * of length len + 1.
132 */
133struct nft_userdata {
134 u8 len;
135 unsigned char data[0];
136};
137
122/** 138/**
123 * struct nft_set_elem - generic representation of set elements 139 * struct nft_set_elem - generic representation of set elements
124 * 140 *
@@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
380 * @handle: rule handle 396 * @handle: rule handle
381 * @genmask: generation mask 397 * @genmask: generation mask
382 * @dlen: length of expression data 398 * @dlen: length of expression data
383 * @ulen: length of user data (used for comments) 399 * @udata: user data is appended to the rule
384 * @data: expression data 400 * @data: expression data
385 */ 401 */
386struct nft_rule { 402struct nft_rule {
@@ -388,7 +404,7 @@ struct nft_rule {
388 u64 handle:42, 404 u64 handle:42,
389 genmask:2, 405 genmask:2,
390 dlen:12, 406 dlen:12,
391 ulen:8; 407 udata:1;
392 unsigned char data[] 408 unsigned char data[]
393 __attribute__((aligned(__alignof__(struct nft_expr)))); 409 __attribute__((aligned(__alignof__(struct nft_expr))));
394}; 410};
@@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
476 return (struct nft_expr *)&rule->data[rule->dlen]; 492 return (struct nft_expr *)&rule->data[rule->dlen];
477} 493}
478 494
479static inline void *nft_userdata(const struct nft_rule *rule) 495static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
480{ 496{
481 return (void *)&rule->data[rule->dlen]; 497 return (void *)&rule->data[rule->dlen];
482} 498}
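A small sketch (not part of the patch) of how the reworked layout is meant to be consumed; the helper names are hypothetical, while struct nft_userdata, the udata bit and nft_userdata() are taken from the hunks above:

	/* User data follows the expressions and exists only if rule->udata is set. */
	static const struct nft_userdata *rule_userdata(const struct nft_rule *rule)
	{
		return rule->udata ? nft_userdata(rule) : NULL;
	}

	/* A stored length of zero cannot occur, so len encodes (actual length - 1). */
	static unsigned int rule_userdata_len(const struct nft_userdata *udata)
	{
		return udata->len + 1;
	}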
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 5e0d0ed61cf3..25331f9faa76 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -65,6 +65,10 @@ struct serial_struct {
65#define SERIAL_IO_PORT 0 65#define SERIAL_IO_PORT 0
66#define SERIAL_IO_HUB6 1 66#define SERIAL_IO_HUB6 1
67#define SERIAL_IO_MEM 2 67#define SERIAL_IO_MEM 2
68#define SERIAL_IO_MEM32 3
69#define SERIAL_IO_AU 4
70#define SERIAL_IO_TSI 5
71#define SERIAL_IO_MEM32BE 6
68 72
69#define UART_CLEAR_FIFO 0x01 73#define UART_CLEAR_FIFO 0x01
70#define UART_USE_FIFO 0x02 74#define UART_USE_FIFO 0x02
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 19d5219b0b99..242cf0c6e33d 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -9,3 +9,4 @@ header-y += tc_pedit.h
9header-y += tc_skbedit.h 9header-y += tc_skbedit.h
10header-y += tc_vlan.h 10header-y += tc_vlan.h
11header-y += tc_bpf.h 11header-y += tc_bpf.h
12header-y += tc_connmark.h
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 60de61fea8e3..c8ed15daad02 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -689,6 +689,7 @@ struct omapdss_dsi_ops {
689}; 689};
690 690
691struct omap_dss_device { 691struct omap_dss_device {
692 struct kobject kobj;
692 struct device *dev; 693 struct device *dev;
693 694
694 struct module *owner; 695 struct module *owner;
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b78f21caf55a..b0f1c9e5d687 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
114 const char *mod_name); 114 const char *mod_name);
115 115
116#define xenbus_register_frontend(drv) \ 116#define xenbus_register_frontend(drv) \
117 __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); 117 __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME)
118#define xenbus_register_backend(drv) \ 118#define xenbus_register_backend(drv) \
119 __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); 119 __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME)
120 120
121void xenbus_unregister_driver(struct xenbus_driver *drv); 121void xenbus_unregister_driver(struct xenbus_driver *drv);
122 122
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1d1fe9361d29..fc7f4748d34a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
548 548
549 rcu_read_lock(); 549 rcu_read_lock();
550 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { 550 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
551 if (cp == root_cs)
552 continue;
553
554 /* skip the whole subtree if @cp doesn't have any CPU */ 551 /* skip the whole subtree if @cp doesn't have any CPU */
555 if (cpumask_empty(cp->cpus_allowed)) { 552 if (cpumask_empty(cp->cpus_allowed)) {
556 pos_css = css_rightmost_descendant(pos_css); 553 pos_css = css_rightmost_descendant(pos_css);
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
873 * If it becomes empty, inherit the effective mask of the 870 * If it becomes empty, inherit the effective mask of the
874 * parent, which is guaranteed to have some CPUs. 871 * parent, which is guaranteed to have some CPUs.
875 */ 872 */
876 if (cpumask_empty(new_cpus)) 873 if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
877 cpumask_copy(new_cpus, parent->effective_cpus); 874 cpumask_copy(new_cpus, parent->effective_cpus);
878 875
879 /* Skip the whole subtree if the cpumask remains the same. */ 876 /* Skip the whole subtree if the cpumask remains the same. */
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1129 * If it becomes empty, inherit the effective mask of the 1126 * If it becomes empty, inherit the effective mask of the
1130 * parent, which is guaranteed to have some MEMs. 1127 * parent, which is guaranteed to have some MEMs.
1131 */ 1128 */
1132 if (nodes_empty(*new_mems)) 1129 if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
1133 *new_mems = parent->effective_mems; 1130 *new_mems = parent->effective_mems;
1134 1131
1135 /* Skip the whole subtree if the nodemask remains the same. */ 1132 /* Skip the whole subtree if the nodemask remains the same. */
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
1979 1976
1980 spin_lock_irq(&callback_lock); 1977 spin_lock_irq(&callback_lock);
1981 cs->mems_allowed = parent->mems_allowed; 1978 cs->mems_allowed = parent->mems_allowed;
1979 cs->effective_mems = parent->mems_allowed;
1982 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 1980 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
1981 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
1983 spin_unlock_irq(&callback_lock); 1982 spin_unlock_irq(&callback_lock);
1984out_unlock: 1983out_unlock:
1985 mutex_unlock(&cpuset_mutex); 1984 mutex_unlock(&cpuset_mutex);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 196a06fbc122..886d09e691d5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1474,8 +1474,13 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1474 * otherwise we'll have trouble later trying to figure out 1474 * otherwise we'll have trouble later trying to figure out
1475 * which interrupt is which (messes up the interrupt freeing 1475 * which interrupt is which (messes up the interrupt freeing
1476 * logic etc). 1476 * logic etc).
1477 *
1478 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1479 * it cannot be set along with IRQF_NO_SUSPEND.
1477 */ 1480 */
1478 if ((irqflags & IRQF_SHARED) && !dev_id) 1481 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1482 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1483 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1479 return -EINVAL; 1484 return -EINVAL;
1480 1485
1481 desc = irq_to_desc(irq); 1486 desc = irq_to_desc(irq);
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 3ca532592704..5204a6d1b985 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -43,9 +43,12 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
43 43
44 if (action->flags & IRQF_NO_SUSPEND) 44 if (action->flags & IRQF_NO_SUSPEND)
45 desc->no_suspend_depth++; 45 desc->no_suspend_depth++;
46 else if (action->flags & IRQF_COND_SUSPEND)
47 desc->cond_suspend_depth++;
46 48
47 WARN_ON_ONCE(desc->no_suspend_depth && 49 WARN_ON_ONCE(desc->no_suspend_depth &&
48 desc->no_suspend_depth != desc->nr_actions); 50 (desc->no_suspend_depth +
51 desc->cond_suspend_depth) != desc->nr_actions);
49} 52}
50 53
51/* 54/*
@@ -61,6 +64,8 @@ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
61 64
62 if (action->flags & IRQF_NO_SUSPEND) 65 if (action->flags & IRQF_NO_SUSPEND)
63 desc->no_suspend_depth--; 66 desc->no_suspend_depth--;
67 else if (action->flags & IRQF_COND_SUSPEND)
68 desc->cond_suspend_depth--;
64} 69}
65 70
66static bool suspend_device_irq(struct irq_desc *desc, int irq) 71static bool suspend_device_irq(struct irq_desc *desc, int irq)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 782172f073c5..01ca08804f51 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -248,11 +248,12 @@ static int klp_find_external_symbol(struct module *pmod, const char *name,
248 /* first, check if it's an exported symbol */ 248 /* first, check if it's an exported symbol */
249 preempt_disable(); 249 preempt_disable();
250 sym = find_symbol(name, NULL, NULL, true, true); 250 sym = find_symbol(name, NULL, NULL, true, true);
251 preempt_enable();
252 if (sym) { 251 if (sym) {
253 *addr = sym->value; 252 *addr = sym->value;
253 preempt_enable();
254 return 0; 254 return 0;
255 } 255 }
256 preempt_enable();
256 257
257 /* otherwise check if it's in another .o within the patch module */ 258 /* otherwise check if it's in another .o within the patch module */
258 return klp_find_object_symbol(pmod->name, name, addr); 259 return klp_find_object_symbol(pmod->name, name, addr);
diff --git a/kernel/module.c b/kernel/module.c
index b34813f725e9..b3d634ed06c9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -56,7 +56,6 @@
56#include <linux/async.h> 56#include <linux/async.h>
57#include <linux/percpu.h> 57#include <linux/percpu.h>
58#include <linux/kmemleak.h> 58#include <linux/kmemleak.h>
59#include <linux/kasan.h>
60#include <linux/jump_label.h> 59#include <linux/jump_label.h>
61#include <linux/pfn.h> 60#include <linux/pfn.h>
62#include <linux/bsearch.h> 61#include <linux/bsearch.h>
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { }
1814void __weak module_memfree(void *module_region) 1813void __weak module_memfree(void *module_region)
1815{ 1814{
1816 vfree(module_region); 1815 vfree(module_region);
1817 kasan_module_free(module_region);
1818} 1816}
1819 1817
1820void __weak module_arch_cleanup(struct module *mod) 1818void __weak module_arch_cleanup(struct module *mod)
@@ -2313,11 +2311,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
2313 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); 2311 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2314 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); 2312 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2315 mod->core_size += strtab_size; 2313 mod->core_size += strtab_size;
2314 mod->core_size = debug_align(mod->core_size);
2316 2315
2317 /* Put string table section at end of init part of module. */ 2316 /* Put string table section at end of init part of module. */
2318 strsect->sh_flags |= SHF_ALLOC; 2317 strsect->sh_flags |= SHF_ALLOC;
2319 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, 2318 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2320 info->index.str) | INIT_OFFSET_MASK; 2319 info->index.str) | INIT_OFFSET_MASK;
2320 mod->init_size = debug_align(mod->init_size);
2321 pr_debug("\t%s\n", info->secstrings + strsect->sh_name); 2321 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2322} 2322}
2323 2323
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
index cbd69d842341..2ca4a8b5fe57 100644
--- a/kernel/printk/console_cmdline.h
+++ b/kernel/printk/console_cmdline.h
@@ -3,7 +3,7 @@
3 3
4struct console_cmdline 4struct console_cmdline
5{ 5{
6 char name[8]; /* Name of the driver */ 6 char name[16]; /* Name of the driver */
7 int index; /* Minor dev. to use */ 7 int index; /* Minor dev. to use */
8 char *options; /* Options for the driver */ 8 char *options; /* Options for the driver */
9#ifdef CONFIG_A11Y_BRAILLE_CONSOLE 9#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 01cfd69c54c6..bb0635bd74f2 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2464,6 +2464,7 @@ void register_console(struct console *newcon)
2464 for (i = 0, c = console_cmdline; 2464 for (i = 0, c = console_cmdline;
2465 i < MAX_CMDLINECONSOLES && c->name[0]; 2465 i < MAX_CMDLINECONSOLES && c->name[0];
2466 i++, c++) { 2466 i++, c++) {
2467 BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
2467 if (strcmp(c->name, newcon->name) != 0) 2468 if (strcmp(c->name, newcon->name) != 0)
2468 continue; 2469 continue;
2469 if (newcon->index >= 0 && 2470 if (newcon->index >= 0 &&
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 94b2d7b88a27..80014a178342 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -82,6 +82,7 @@ static void cpuidle_idle_call(void)
82 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); 82 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
83 int next_state, entered_state; 83 int next_state, entered_state;
84 unsigned int broadcast; 84 unsigned int broadcast;
85 bool reflect;
85 86
86 /* 87 /*
87 * Check if the idle task must be rescheduled. If it is the 88 * Check if the idle task must be rescheduled. If it is the
@@ -105,6 +106,9 @@ static void cpuidle_idle_call(void)
105 */ 106 */
106 rcu_idle_enter(); 107 rcu_idle_enter();
107 108
109 if (cpuidle_not_available(drv, dev))
110 goto use_default;
111
108 /* 112 /*
109 * Suspend-to-idle ("freeze") is a system state in which all user space 113 * Suspend-to-idle ("freeze") is a system state in which all user space
110 * has been frozen, all I/O devices have been suspended and the only 114 * has been frozen, all I/O devices have been suspended and the only
@@ -115,30 +119,24 @@ static void cpuidle_idle_call(void)
115 * until a proper wakeup interrupt happens. 119 * until a proper wakeup interrupt happens.
116 */ 120 */
117 if (idle_should_freeze()) { 121 if (idle_should_freeze()) {
118 cpuidle_enter_freeze(); 122 entered_state = cpuidle_enter_freeze(drv, dev);
119 local_irq_enable(); 123 if (entered_state >= 0) {
120 goto exit_idle; 124 local_irq_enable();
121 } 125 goto exit_idle;
126 }
122 127
123 /* 128 reflect = false;
124 * Ask the cpuidle framework to choose a convenient idle state. 129 next_state = cpuidle_find_deepest_state(drv, dev);
125 * Fall back to the default arch idle method on errors. 130 } else {
126 */ 131 reflect = true;
127 next_state = cpuidle_select(drv, dev);
128 if (next_state < 0) {
129use_default:
130 /* 132 /*
131 * We can't use the cpuidle framework, let's use the default 133 * Ask the cpuidle framework to choose a convenient idle state.
132 * idle routine.
133 */ 134 */
134 if (current_clr_polling_and_test()) 135 next_state = cpuidle_select(drv, dev);
135 local_irq_enable();
136 else
137 arch_cpu_idle();
138
139 goto exit_idle;
140 } 136 }
141 137 /* Fall back to the default arch idle method on errors. */
138 if (next_state < 0)
139 goto use_default;
142 140
143 /* 141 /*
144 * The idle task must be scheduled, it is pointless to 142 * The idle task must be scheduled, it is pointless to
@@ -183,7 +181,8 @@ use_default:
183 /* 181 /*
184 * Give the governor an opportunity to reflect on the outcome 182 * Give the governor an opportunity to reflect on the outcome
185 */ 183 */
186 cpuidle_reflect(dev, entered_state); 184 if (reflect)
185 cpuidle_reflect(dev, entered_state);
187 186
188exit_idle: 187exit_idle:
189 __current_set_polling(); 188 __current_set_polling();
@@ -196,6 +195,19 @@ exit_idle:
196 195
197 rcu_idle_exit(); 196 rcu_idle_exit();
198 start_critical_timings(); 197 start_critical_timings();
198 return;
199
200use_default:
201 /*
202 * We can't use the cpuidle framework, let's use the default
203 * idle routine.
204 */
205 if (current_clr_polling_and_test())
206 local_irq_enable();
207 else
208 arch_cpu_idle();
209
210 goto exit_idle;
199} 211}
200 212
201/* 213/*
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 45e5cb143d17..4f228024055b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1059 1059
1060static struct pid * const ftrace_swapper_pid = &init_struct_pid; 1060static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1061 1061
1062#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1063static int ftrace_graph_active;
1064#else
1065# define ftrace_graph_active 0
1066#endif
1067
1062#ifdef CONFIG_DYNAMIC_FTRACE 1068#ifdef CONFIG_DYNAMIC_FTRACE
1063 1069
1064static struct ftrace_ops *removed_ops; 1070static struct ftrace_ops *removed_ops;
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2041 if (!ftrace_rec_count(rec)) 2047 if (!ftrace_rec_count(rec))
2042 rec->flags = 0; 2048 rec->flags = 0;
2043 else 2049 else
2044 /* Just disable the record (keep REGS state) */ 2050 /*
2045 rec->flags &= ~FTRACE_FL_ENABLED; 2051 * Just disable the record, but keep the ops TRAMP
2052 * and REGS states. The _EN flags must be disabled though.
2053 */
2054 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2055 FTRACE_FL_REGS_EN);
2046 } 2056 }
2047 2057
2048 return FTRACE_UPDATE_MAKE_NOP; 2058 return FTRACE_UPDATE_MAKE_NOP;
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2688 2698
2689static void ftrace_startup_sysctl(void) 2699static void ftrace_startup_sysctl(void)
2690{ 2700{
2701 int command;
2702
2691 if (unlikely(ftrace_disabled)) 2703 if (unlikely(ftrace_disabled))
2692 return; 2704 return;
2693 2705
2694 /* Force update next time */ 2706 /* Force update next time */
2695 saved_ftrace_func = NULL; 2707 saved_ftrace_func = NULL;
2696 /* ftrace_start_up is true if we want ftrace running */ 2708 /* ftrace_start_up is true if we want ftrace running */
2697 if (ftrace_start_up) 2709 if (ftrace_start_up) {
2698 ftrace_run_update_code(FTRACE_UPDATE_CALLS); 2710 command = FTRACE_UPDATE_CALLS;
2711 if (ftrace_graph_active)
2712 command |= FTRACE_START_FUNC_RET;
2713 ftrace_startup_enable(command);
2714 }
2699} 2715}
2700 2716
2701static void ftrace_shutdown_sysctl(void) 2717static void ftrace_shutdown_sysctl(void)
2702{ 2718{
2719 int command;
2720
2703 if (unlikely(ftrace_disabled)) 2721 if (unlikely(ftrace_disabled))
2704 return; 2722 return;
2705 2723
2706 /* ftrace_start_up is true if ftrace is running */ 2724 /* ftrace_start_up is true if ftrace is running */
2707 if (ftrace_start_up) 2725 if (ftrace_start_up) {
2708 ftrace_run_update_code(FTRACE_DISABLE_CALLS); 2726 command = FTRACE_DISABLE_CALLS;
2727 if (ftrace_graph_active)
2728 command |= FTRACE_STOP_FUNC_RET;
2729 ftrace_run_update_code(command);
2730 }
2709} 2731}
2710 2732
2711static cycle_t ftrace_update_time; 2733static cycle_t ftrace_update_time;
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
5558 5580
5559 if (ftrace_enabled) { 5581 if (ftrace_enabled) {
5560 5582
5561 ftrace_startup_sysctl();
5562
5563 /* we are starting ftrace again */ 5583 /* we are starting ftrace again */
5564 if (ftrace_ops_list != &ftrace_list_end) 5584 if (ftrace_ops_list != &ftrace_list_end)
5565 update_ftrace_function(); 5585 update_ftrace_function();
5566 5586
5587 ftrace_startup_sysctl();
5588
5567 } else { 5589 } else {
5568 /* stopping ftrace calls (just send to ftrace_stub) */ 5590 /* stopping ftrace calls (just send to ftrace_stub) */
5569 ftrace_trace_function = ftrace_stub; 5591 ftrace_trace_function = ftrace_stub;
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = {
5590 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) 5612 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5591}; 5613};
5592 5614
5593static int ftrace_graph_active;
5594
5595int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 5615int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5596{ 5616{
5597 return 0; 5617 return 0;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f28849394791..41ff75b478c6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
2728} 2728}
2729EXPORT_SYMBOL_GPL(flush_work); 2729EXPORT_SYMBOL_GPL(flush_work);
2730 2730
2731struct cwt_wait {
2732 wait_queue_t wait;
2733 struct work_struct *work;
2734};
2735
2736static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
2737{
2738 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2739
2740 if (cwait->work != key)
2741 return 0;
2742 return autoremove_wake_function(wait, mode, sync, key);
2743}
2744
2731static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 2745static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2732{ 2746{
2747 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2733 unsigned long flags; 2748 unsigned long flags;
2734 int ret; 2749 int ret;
2735 2750
2736 do { 2751 do {
2737 ret = try_to_grab_pending(work, is_dwork, &flags); 2752 ret = try_to_grab_pending(work, is_dwork, &flags);
2738 /* 2753 /*
2739 * If someone else is canceling, wait for the same event it 2754 * If someone else is already canceling, wait for it to
2740 * would be waiting for before retrying. 2755 * finish. flush_work() doesn't work for PREEMPT_NONE
2756 * because we may get scheduled between @work's completion
2757 * and the other canceling task resuming and clearing
2758 * CANCELING - flush_work() will return false immediately
2759 * as @work is no longer busy, try_to_grab_pending() will
2760 * return -ENOENT as @work is still being canceled and the
2761 * other canceling task won't be able to clear CANCELING as
2762 * we're hogging the CPU.
2763 *
2764 * Let's wait for completion using a waitqueue. As this
2765 * may lead to the thundering herd problem, use a custom
2766 * wake function which matches @work along with exclusive
2767 * wait and wakeup.
2741 */ 2768 */
2742 if (unlikely(ret == -ENOENT)) 2769 if (unlikely(ret == -ENOENT)) {
2743 flush_work(work); 2770 struct cwt_wait cwait;
2771
2772 init_wait(&cwait.wait);
2773 cwait.wait.func = cwt_wakefn;
2774 cwait.work = work;
2775
2776 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2777 TASK_UNINTERRUPTIBLE);
2778 if (work_is_canceling(work))
2779 schedule();
2780 finish_wait(&cancel_waitq, &cwait.wait);
2781 }
2744 } while (unlikely(ret < 0)); 2782 } while (unlikely(ret < 0));
2745 2783
2746 /* tell other tasks trying to grab @work to back off */ 2784 /* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2749 2787
2750 flush_work(work); 2788 flush_work(work);
2751 clear_work_data(work); 2789 clear_work_data(work);
2790
2791 /*
2792 * Paired with prepare_to_wait() above so that either
2793 * waitqueue_active() is visible here or !work_is_canceling() is
2794 * visible there.
2795 */
2796 smp_mb();
2797 if (waitqueue_active(&cancel_waitq))
2798 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2799
2752 return ret; 2800 return ret;
2753} 2801}
2754 2802
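The __cancel_work_timer() hunks above replace the flush_work()-based wait with an exclusive waitqueue whose wake function only fires for the matching work item, so concurrent cancels of unrelated items no longer wake each other, and the final smp_mb() pairs with the barrier implied by prepare_to_wait_exclusive() (per the comment in the hunk) so that either the canceler observes waitqueue_active() or the waiter observes that the work is no longer canceling. A minimal sketch of the same keyed-wait idiom, detached from workqueues; all names (keyed_wait, waitq, object_is_busy, mark_not_busy) are hypothetical:

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(waitq);

	struct keyed_wait {
		wait_queue_t	wait;
		void		*key;		/* object this waiter cares about */
	};

	static int keyed_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
	{
		struct keyed_wait *kw = container_of(wait, struct keyed_wait, wait);

		if (kw->key != key)
			return 0;		/* not our object: leave this waiter asleep */
		return autoremove_wake_function(wait, mode, sync, key);
	}

	static void wait_for_object(void *obj)	/* waiter side */
	{
		struct keyed_wait kw = { .key = obj };

		init_wait(&kw.wait);
		kw.wait.func = keyed_wakefn;
		prepare_to_wait_exclusive(&waitq, &kw.wait, TASK_UNINTERRUPTIBLE);
		if (object_is_busy(obj))	/* hypothetical predicate, re-checked under the wait */
			schedule();
		finish_wait(&waitq, &kw.wait);
	}

	static void release_object(void *obj)	/* waker side */
	{
		mark_not_busy(obj);		/* hypothetical: the state the waiter re-checks */
		smp_mb();			/* pairs with prepare_to_wait_exclusive() above */
		if (waitqueue_active(&waitq))
			__wake_up(&waitq, TASK_NORMAL, 1, obj);
	}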
diff --git a/lib/Makefile b/lib/Makefile
index 87eb3bffc283..58f74d2dd396 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,7 +24,7 @@ obj-y += lockref.o
24 24
25obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 25obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
26 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ 26 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
27 gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ 27 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
28 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ 28 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
29 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o 29 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
30obj-y += string_helpers.o 30obj-y += string_helpers.o
diff --git a/mm/iov_iter.c b/lib/iov_iter.c
index 827732047da1..9d96e283520c 100644
--- a/mm/iov_iter.c
+++ b/lib/iov_iter.c
@@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
751 return npages; 751 return npages;
752} 752}
753EXPORT_SYMBOL(iov_iter_npages); 753EXPORT_SYMBOL(iov_iter_npages);
754
755const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
756{
757 *new = *old;
758 if (new->type & ITER_BVEC)
759 return new->bvec = kmemdup(new->bvec,
760 new->nr_segs * sizeof(struct bio_vec),
761 flags);
762 else
763 /* iovec and kvec have identical layout */
764 return new->iov = kmemdup(new->iov,
765 new->nr_segs * sizeof(struct iovec),
766 flags);
767}
768EXPORT_SYMBOL(dup_iter);
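dup_iter(), added above, copies the iterator descriptor into *new and deep-copies the segment array it points at (the bio_vec array for ITER_BVEC iterators, otherwise the iovec/kvec array, which share a layout), returning the freshly allocated array or NULL if kmemdup() fails. A hedged usage sketch; the surrounding function and names are hypothetical:

	#include <linux/slab.h>
	#include <linux/uio.h>

	/* Snapshot "src" so it can be consumed later, independently of the caller's copy. */
	static int stash_iter(struct iov_iter *snapshot, struct iov_iter *src)
	{
		const void *segs = dup_iter(snapshot, src, GFP_KERNEL);

		if (!segs)
			return -ENOMEM;
		/* hand "snapshot" to asynchronous code; when done, kfree(segs)
		 * (segs aliases snapshot->iov / snapshot->bvec, so either pointer frees it)
		 */
		return 0;
	}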
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9cc4c4a90d00..b5344ef4c684 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/log2.h> 19#include <linux/log2.h>
20#include <linux/sched.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
@@ -217,15 +218,15 @@ static void bucket_table_free(const struct bucket_table *tbl)
217static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, 218static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
218 size_t nbuckets) 219 size_t nbuckets)
219{ 220{
220 struct bucket_table *tbl; 221 struct bucket_table *tbl = NULL;
221 size_t size; 222 size_t size;
222 int i; 223 int i;
223 224
224 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); 225 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
225 tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 226 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
227 tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
226 if (tbl == NULL) 228 if (tbl == NULL)
227 tbl = vzalloc(size); 229 tbl = vzalloc(size);
228
229 if (tbl == NULL) 230 if (tbl == NULL)
230 return NULL; 231 return NULL;
231 232
@@ -247,26 +248,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
247 * @ht: hash table 248 * @ht: hash table
248 * @new_size: new table size 249 * @new_size: new table size
249 */ 250 */
250bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) 251static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
251{ 252{
252 /* Expand table when exceeding 75% load */ 253 /* Expand table when exceeding 75% load */
253 return atomic_read(&ht->nelems) > (new_size / 4 * 3) && 254 return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
254 (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift); 255 (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
255} 256}
256EXPORT_SYMBOL_GPL(rht_grow_above_75);
257 257
258/** 258/**
259 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size 259 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
260 * @ht: hash table 260 * @ht: hash table
261 * @new_size: new table size 261 * @new_size: new table size
262 */ 262 */
263bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) 263static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
264{ 264{
265 /* Shrink table beneath 30% load */ 265 /* Shrink table beneath 30% load */
266 return atomic_read(&ht->nelems) < (new_size * 3 / 10) && 266 return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
267 (atomic_read(&ht->shift) > ht->p.min_shift); 267 (atomic_read(&ht->shift) > ht->p.min_shift);
268} 268}
269EXPORT_SYMBOL_GPL(rht_shrink_below_30);
270 269
271static void lock_buckets(struct bucket_table *new_tbl, 270static void lock_buckets(struct bucket_table *new_tbl,
272 struct bucket_table *old_tbl, unsigned int hash) 271 struct bucket_table *old_tbl, unsigned int hash)
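With rht_grow_above_75() and rht_shrink_below_30() made static and wired in unconditionally, the resize policy is fixed at 75%/30%. Worked example: for a table of new_size = 128 buckets, the grow check fires once nelems exceeds 128 / 4 * 3 = 96, and the shrink check fires once nelems drops below 128 * 3 / 10 = 38 (integer division). The grow condition now also treats max_shift == 0 as "no upper bound" instead of never growing, which is what the (!ht->p.max_shift || ...) rewrite above changes.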
@@ -414,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
414 } 413 }
415 } 414 }
416 unlock_buckets(new_tbl, old_tbl, new_hash); 415 unlock_buckets(new_tbl, old_tbl, new_hash);
416 cond_resched();
417 } 417 }
418 418
419 /* Unzip interleaved hash chains */ 419 /* Unzip interleaved hash chains */
@@ -437,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
437 complete = false; 437 complete = false;
438 438
439 unlock_buckets(new_tbl, old_tbl, old_hash); 439 unlock_buckets(new_tbl, old_tbl, old_hash);
440 cond_resched();
440 } 441 }
441 } 442 }
442 443
@@ -495,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
495 tbl->buckets[new_hash + new_tbl->size]); 496 tbl->buckets[new_hash + new_tbl->size]);
496 497
497 unlock_buckets(new_tbl, tbl, new_hash); 498 unlock_buckets(new_tbl, tbl, new_hash);
499 cond_resched();
498 } 500 }
499 501
500 /* Publish the new, valid hash table */ 502 /* Publish the new, valid hash table */
@@ -528,31 +530,19 @@ static void rht_deferred_worker(struct work_struct *work)
528 list_for_each_entry(walker, &ht->walkers, list) 530 list_for_each_entry(walker, &ht->walkers, list)
529 walker->resize = true; 531 walker->resize = true;
530 532
531 if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) 533 if (rht_grow_above_75(ht, tbl->size))
532 rhashtable_expand(ht); 534 rhashtable_expand(ht);
533 else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) 535 else if (rht_shrink_below_30(ht, tbl->size))
534 rhashtable_shrink(ht); 536 rhashtable_shrink(ht);
535
536unlock: 537unlock:
537 mutex_unlock(&ht->mutex); 538 mutex_unlock(&ht->mutex);
538} 539}
539 540
540static void rhashtable_wakeup_worker(struct rhashtable *ht)
541{
542 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
543 struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
544 size_t size = tbl->size;
545
546 /* Only adjust the table if no resizing is currently in progress. */
547 if (tbl == new_tbl &&
548 ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
549 (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
550 schedule_work(&ht->run_work);
551}
552
553static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, 541static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
554 struct bucket_table *tbl, u32 hash) 542 struct bucket_table *tbl,
543 const struct bucket_table *old_tbl, u32 hash)
555{ 544{
545 bool no_resize_running = tbl == old_tbl;
556 struct rhash_head *head; 546 struct rhash_head *head;
557 547
558 hash = rht_bucket_index(tbl, hash); 548 hash = rht_bucket_index(tbl, hash);
@@ -568,8 +558,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
568 rcu_assign_pointer(tbl->buckets[hash], obj); 558 rcu_assign_pointer(tbl->buckets[hash], obj);
569 559
570 atomic_inc(&ht->nelems); 560 atomic_inc(&ht->nelems);
571 561 if (no_resize_running && rht_grow_above_75(ht, tbl->size))
572 rhashtable_wakeup_worker(ht); 562 schedule_work(&ht->run_work);
573} 563}
574 564
575/** 565/**
@@ -599,7 +589,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
599 hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); 589 hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
600 590
601 lock_buckets(tbl, old_tbl, hash); 591 lock_buckets(tbl, old_tbl, hash);
602 __rhashtable_insert(ht, obj, tbl, hash); 592 __rhashtable_insert(ht, obj, tbl, old_tbl, hash);
603 unlock_buckets(tbl, old_tbl, hash); 593 unlock_buckets(tbl, old_tbl, hash);
604 594
605 rcu_read_unlock(); 595 rcu_read_unlock();
@@ -681,8 +671,11 @@ found:
681 unlock_buckets(new_tbl, old_tbl, new_hash); 671 unlock_buckets(new_tbl, old_tbl, new_hash);
682 672
683 if (ret) { 673 if (ret) {
674 bool no_resize_running = new_tbl == old_tbl;
675
684 atomic_dec(&ht->nelems); 676 atomic_dec(&ht->nelems);
685 rhashtable_wakeup_worker(ht); 677 if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
678 schedule_work(&ht->run_work);
686 } 679 }
687 680
688 rcu_read_unlock(); 681 rcu_read_unlock();
@@ -852,7 +845,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
852 goto exit; 845 goto exit;
853 } 846 }
854 847
855 __rhashtable_insert(ht, obj, new_tbl, new_hash); 848 __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
856 849
857exit: 850exit:
858 unlock_buckets(new_tbl, old_tbl, new_hash); 851 unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -894,6 +887,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
894 if (!iter->walker) 887 if (!iter->walker)
895 return -ENOMEM; 888 return -ENOMEM;
896 889
890 INIT_LIST_HEAD(&iter->walker->list);
891 iter->walker->resize = false;
892
897 mutex_lock(&ht->mutex); 893 mutex_lock(&ht->mutex);
898 list_add(&iter->walker->list, &ht->walkers); 894 list_add(&iter->walker->list, &ht->walkers);
899 mutex_unlock(&ht->mutex); 895 mutex_unlock(&ht->mutex);
@@ -1111,8 +1107,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
1111 if (!ht->p.hash_rnd) 1107 if (!ht->p.hash_rnd)
1112 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); 1108 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
1113 1109
1114 if (ht->p.grow_decision || ht->p.shrink_decision) 1110 INIT_WORK(&ht->run_work, rht_deferred_worker);
1115 INIT_WORK(&ht->run_work, rht_deferred_worker);
1116 1111
1117 return 0; 1112 return 0;
1118} 1113}
@@ -1130,8 +1125,7 @@ void rhashtable_destroy(struct rhashtable *ht)
1130{ 1125{
1131 ht->being_destroyed = true; 1126 ht->being_destroyed = true;
1132 1127
1133 if (ht->p.grow_decision || ht->p.shrink_decision) 1128 cancel_work_sync(&ht->run_work);
1134 cancel_work_sync(&ht->run_work);
1135 1129
1136 mutex_lock(&ht->mutex); 1130 mutex_lock(&ht->mutex);
1137 bucket_table_free(rht_dereference(ht->tbl, ht)); 1131 bucket_table_free(rht_dereference(ht->tbl, ht));
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 88c0854bd752..5c94e1012a91 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
61 61
62 if (s->len < s->size) { 62 if (s->len < s->size) {
63 len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); 63 len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
64 if (seq_buf_can_fit(s, len)) { 64 if (s->len + len < s->size) {
65 s->len += len; 65 s->len += len;
66 return 0; 66 return 0;
67 } 67 }
@@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
118 118
119 if (s->len < s->size) { 119 if (s->len < s->size) {
120 ret = bstr_printf(s->buffer + s->len, len, fmt, binary); 120 ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
121 if (seq_buf_can_fit(s, ret)) { 121 if (s->len + ret < s->size) {
122 s->len += ret; 122 s->len += ret;
123 return 0; 123 return 0;
124 } 124 }
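The seq_buf checks above rely on the return convention of vsnprintf()/bstr_printf(): the return value is the length the fully formatted string would need, not what was actually stored. Accepting the write only when s->len + len < s->size (strictly less, leaving room for the trailing NUL) therefore means nothing was truncated. For example, with s->size = 16 and s->len = 10 there are 6 bytes left; if the format expands to 8 characters, vsnprintf() returns 8, the check 10 + 8 < 16 fails, and the function reports failure instead of pretending the truncated output fit.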
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 1dfeba73fc74..67c7593d1dd6 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -191,18 +191,18 @@ error:
191 return err; 191 return err;
192} 192}
193 193
194static struct rhashtable ht;
195
194static int __init test_rht_init(void) 196static int __init test_rht_init(void)
195{ 197{
196 struct rhashtable ht;
197 struct rhashtable_params params = { 198 struct rhashtable_params params = {
198 .nelem_hint = TEST_HT_SIZE, 199 .nelem_hint = TEST_HT_SIZE,
199 .head_offset = offsetof(struct test_obj, node), 200 .head_offset = offsetof(struct test_obj, node),
200 .key_offset = offsetof(struct test_obj, value), 201 .key_offset = offsetof(struct test_obj, value),
201 .key_len = sizeof(int), 202 .key_len = sizeof(int),
202 .hashfn = jhash, 203 .hashfn = jhash,
204 .max_shift = 1, /* we expand/shrink manually here */
203 .nulls_base = (3U << RHT_BASE_SHIFT), 205 .nulls_base = (3U << RHT_BASE_SHIFT),
204 .grow_decision = rht_grow_above_75,
205 .shrink_decision = rht_shrink_below_30,
206 }; 206 };
207 int err; 207 int err;
208 208
@@ -222,6 +222,11 @@ static int __init test_rht_init(void)
222 return err; 222 return err;
223} 223}
224 224
225static void __exit test_rht_exit(void)
226{
227}
228
225module_init(test_rht_init); 229module_init(test_rht_init);
230module_exit(test_rht_exit);
226 231
227MODULE_LICENSE("GPL v2"); 232MODULE_LICENSE("GPL v2");
diff --git a/mm/Makefile b/mm/Makefile
index 3c1caa2693bd..15dbe9903c27 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \
21 mm_init.o mmu_context.o percpu.o slab_common.o \ 21 mm_init.o mmu_context.o percpu.o slab_common.o \
22 compaction.o vmacache.o \ 22 compaction.o vmacache.o \
23 interval_tree.o list_lru.o workingset.o \ 23 interval_tree.o list_lru.o workingset.o \
24 iov_iter.o debug.o $(mmu-y) 24 debug.o $(mmu-y)
25 25
26obj-y += init-mm.o 26obj-y += init-mm.o
27 27
diff --git a/mm/cma.c b/mm/cma.c
index 75016fd1de90..68ecb7a42983 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
64 return (1UL << (align_order - cma->order_per_bit)) - 1; 64 return (1UL << (align_order - cma->order_per_bit)) - 1;
65} 65}
66 66
67/*
68 * Find a PFN aligned to the specified order and return an offset represented in
69 * order_per_bits.
70 */
67static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) 71static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
68{ 72{
69 unsigned int alignment;
70
71 if (align_order <= cma->order_per_bit) 73 if (align_order <= cma->order_per_bit)
72 return 0; 74 return 0;
73 alignment = 1UL << (align_order - cma->order_per_bit); 75
74 return ALIGN(cma->base_pfn, alignment) - 76 return (ALIGN(cma->base_pfn, (1UL << align_order))
75 (cma->base_pfn >> cma->order_per_bit); 77 - cma->base_pfn) >> cma->order_per_bit;
76} 78}
77 79
78static unsigned long cma_bitmap_maxno(struct cma *cma) 80static unsigned long cma_bitmap_maxno(struct cma *cma)
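A quick check of the corrected offset math with illustrative numbers: take base_pfn = 1028, order_per_bit = 2 (each bitmap bit covers 4 pages) and align_order = 4. The new code computes ALIGN(1028, 1 << 4) = 1040, so the first suitably aligned PFN lies (1040 - 1028) >> 2 = 3 bits into the bitmap. The old code aligned base_pfn only to 1 << (4 - 2) = 4 and then subtracted base_pfn >> order_per_bit, giving 1028 - 257 = 771, a meaningless offset. Aligning the PFN itself and only then converting the distance into order_per_bit units is exactly what the new comment describes.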
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fc00c8cb5a82..626e93db28ba 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1295,8 +1295,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1295 * Avoid grouping on DSO/COW pages in specific and RO pages 1295 * Avoid grouping on DSO/COW pages in specific and RO pages
1296 * in general, RO pages shouldn't hurt as much anyway since 1296 * in general, RO pages shouldn't hurt as much anyway since
1297 * they can be in shared cache state. 1297 * they can be in shared cache state.
1298 *
1299 * FIXME! This checks "pmd_dirty()" as an approximation of
1300 * "is this a read-only page", since checking "pmd_write()"
1301 * is even more broken. We haven't actually turned this into
1302 * a writable page, so pmd_write() will always be false.
1298 */ 1303 */
1299 if (!pmd_write(pmd)) 1304 if (!pmd_dirty(pmd))
1300 flags |= TNF_NO_GROUP; 1305 flags |= TNF_NO_GROUP;
1301 1306
1302 /* 1307 /*
@@ -1482,6 +1487,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1482 1487
1483 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1488 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1484 pmd_t entry; 1489 pmd_t entry;
1490 ret = 1;
1485 1491
1486 /* 1492 /*
1487 * Avoid trapping faults against the zero page. The read-only 1493 * Avoid trapping faults against the zero page. The read-only
@@ -1490,11 +1496,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1490 */ 1496 */
1491 if (prot_numa && is_huge_zero_pmd(*pmd)) { 1497 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1492 spin_unlock(ptl); 1498 spin_unlock(ptl);
1493 return 0; 1499 return ret;
1494 } 1500 }
1495 1501
1496 if (!prot_numa || !pmd_protnone(*pmd)) { 1502 if (!prot_numa || !pmd_protnone(*pmd)) {
1497 ret = 1;
1498 entry = pmdp_get_and_clear_notify(mm, addr, pmd); 1503 entry = pmdp_get_and_clear_notify(mm, addr, pmd);
1499 entry = pmd_modify(entry, newprot); 1504 entry = pmd_modify(entry, newprot);
1500 ret = HPAGE_PMD_NR; 1505 ret = HPAGE_PMD_NR;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a9ac6c26832..c41b2a0ee273 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
917 __SetPageHead(page); 917 __SetPageHead(page);
918 __ClearPageReserved(page); 918 __ClearPageReserved(page);
919 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { 919 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
920 __SetPageTail(p);
921 /* 920 /*
922 * For gigantic hugepages allocated through bootmem at 921 * For gigantic hugepages allocated through bootmem at
923 * boot, it's safer to be consistent with the not-gigantic 922 * boot, it's safer to be consistent with the not-gigantic
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
933 __ClearPageReserved(p); 932 __ClearPageReserved(p);
934 set_page_count(p, 0); 933 set_page_count(p, 0);
935 p->first_page = page; 934 p->first_page = page;
935 /* Make sure p->first_page is always valid for PageTail() */
936 smp_wmb();
937 __SetPageTail(p);
936 } 938 }
937} 939}
938 940
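The smp_wmb() inserted above orders the store of p->first_page before the __SetPageTail(p) store, mirroring the same barrier and comment in prep_compound_page() for regular compound pages: any code that observes PageTail() and then follows first_page (for instance a speculative compound_head() walk) is then guaranteed to see a valid head pointer rather than whatever the field held before initialization.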
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 78fee632a7ee..936d81661c47 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -29,6 +29,7 @@
29#include <linux/stacktrace.h> 29#include <linux/stacktrace.h>
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/vmalloc.h>
32#include <linux/kasan.h> 33#include <linux/kasan.h>
33 34
34#include "kasan.h" 35#include "kasan.h"
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size)
414 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 415 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
415 PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, 416 PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
416 __builtin_return_address(0)); 417 __builtin_return_address(0));
417 return ret ? 0 : -ENOMEM; 418
419 if (ret) {
420 find_vm_area(addr)->flags |= VM_KASAN;
421 return 0;
422 }
423
424 return -ENOMEM;
418} 425}
419 426
420void kasan_module_free(void *addr) 427void kasan_free_shadow(const struct vm_struct *vm)
421{ 428{
422 vfree(kasan_mem_to_shadow(addr)); 429 if (vm->flags & VM_KASAN)
430 vfree(kasan_mem_to_shadow(vm->addr));
423} 431}
424 432
425static void register_global(struct kasan_global *global) 433static void register_global(struct kasan_global *global)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9fe07692eaad..b34ef4a32a3b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5232 * on for the root memcg is enough. 5232 * on for the root memcg is enough.
5233 */ 5233 */
5234 if (cgroup_on_dfl(root_css->cgroup)) 5234 if (cgroup_on_dfl(root_css->cgroup))
5235 mem_cgroup_from_css(root_css)->use_hierarchy = true; 5235 root_mem_cgroup->use_hierarchy = true;
5236 else
5237 root_mem_cgroup->use_hierarchy = false;
5236} 5238}
5237 5239
5238static u64 memory_current_read(struct cgroup_subsys_state *css, 5240static u64 memory_current_read(struct cgroup_subsys_state *css,
diff --git a/mm/memory.c b/mm/memory.c
index 8068893697bb..411144f977b1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3072,8 +3072,13 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3072 * Avoid grouping on DSO/COW pages in specific and RO pages 3072 * Avoid grouping on DSO/COW pages in specific and RO pages
3073 * in general, RO pages shouldn't hurt as much anyway since 3073 * in general, RO pages shouldn't hurt as much anyway since
3074 * they can be in shared cache state. 3074 * they can be in shared cache state.
3075 *
3076 * FIXME! This checks "pmd_dirty()" as an approximation of
3077 * "is this a read-only page", since checking "pmd_write()"
3078 * is even more broken. We haven't actually turned this into
3079 * a writable page, so pmd_write() will always be false.
3075 */ 3080 */
3076 if (!pte_write(pte)) 3081 if (!pte_dirty(pte))
3077 flags |= TNF_NO_GROUP; 3082 flags |= TNF_NO_GROUP;
3078 3083
3079 /* 3084 /*
diff --git a/mm/mlock.c b/mm/mlock.c
index 73cf0987088c..8a54cd214925 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -26,10 +26,10 @@
26 26
27int can_do_mlock(void) 27int can_do_mlock(void)
28{ 28{
29 if (capable(CAP_IPC_LOCK))
30 return 1;
31 if (rlimit(RLIMIT_MEMLOCK) != 0) 29 if (rlimit(RLIMIT_MEMLOCK) != 0)
32 return 1; 30 return 1;
31 if (capable(CAP_IPC_LOCK))
32 return 1;
33 return 0; 33 return 0;
34} 34}
35EXPORT_SYMBOL(can_do_mlock); 35EXPORT_SYMBOL(can_do_mlock);
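Swapping the two tests in can_do_mlock() keeps the common case cheap and side-effect free: rlimit(RLIMIT_MEMLOCK) is a plain read of the task's limits, whereas capable(CAP_IPC_LOCK) goes through the security hooks and flags the task as having used a privilege (PF_SUPERPRIV), so it is now consulted only when the rlimit alone would deny the request. The function's result is unchanged.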
diff --git a/mm/nommu.c b/mm/nommu.c
index 3e67e7538ecf..3fba2dc97c44 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -62,6 +62,7 @@ void *high_memory;
62EXPORT_SYMBOL(high_memory); 62EXPORT_SYMBOL(high_memory);
63struct page *mem_map; 63struct page *mem_map;
64unsigned long max_mapnr; 64unsigned long max_mapnr;
65EXPORT_SYMBOL(max_mapnr);
65unsigned long highest_memmap_pfn; 66unsigned long highest_memmap_pfn;
66struct percpu_counter vm_committed_as; 67struct percpu_counter vm_committed_as;
67int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ 68int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7abfa70cdc1a..40e29429e7b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2373 goto out; 2373 goto out;
2374 } 2374 }
2375 /* Exhausted what can be done so it's blamo time */ 2375 /* Exhausted what can be done so it's blamo time */
2376 if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)) 2376 if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
2377 || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
2377 *did_some_progress = 1; 2378 *did_some_progress = 1;
2378out: 2379out:
2379 oom_zonelist_unlock(ac->zonelist, gfp_mask); 2380 oom_zonelist_unlock(ac->zonelist, gfp_mask);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35b25e1340ca..49abccf29a29 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr)
1418 spin_unlock(&vmap_area_lock); 1418 spin_unlock(&vmap_area_lock);
1419 1419
1420 vmap_debug_free_range(va->va_start, va->va_end); 1420 vmap_debug_free_range(va->va_start, va->va_end);
1421 kasan_free_shadow(vm);
1421 free_unmap_vmap_area(va); 1422 free_unmap_vmap_area(va);
1422 vm->size -= PAGE_SIZE; 1423 vm->size -= PAGE_SIZE;
1423 1424
diff --git a/net/bridge/br.c b/net/bridge/br.c
index fb57ab6b24f9..02c24cf63c34 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -190,6 +190,8 @@ static int __init br_init(void)
190{ 190{
191 int err; 191 int err;
192 192
193 BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
194
193 err = stp_proto_register(&br_stp_proto); 195 err = stp_proto_register(&br_stp_proto);
194 if (err < 0) { 196 if (err < 0) {
195 pr_err("bridge: can't register sap for STP\n"); 197 pr_err("bridge: can't register sap for STP\n");
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 8bc7caa28e64..434ba8557826 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -84,7 +84,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
84 u16 tmp; 84 u16 tmp;
85 u16 len; 85 u16 len;
86 u16 hdrchks; 86 u16 hdrchks;
87 u16 pktchks; 87 int pktchks;
88 struct cffrml *this; 88 struct cffrml *this;
89 this = container_obj(layr); 89 this = container_obj(layr);
90 90
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 1be0b521ac49..f6c3b2137eea 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -255,9 +255,9 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt)
255 return skb->len; 255 return skb->len;
256} 256}
257 257
258inline u16 cfpkt_iterate(struct cfpkt *pkt, 258int cfpkt_iterate(struct cfpkt *pkt,
259 u16 (*iter_func)(u16, void *, u16), 259 u16 (*iter_func)(u16, void *, u16),
260 u16 data) 260 u16 data)
261{ 261{
262 /* 262 /*
263 * Don't care about the performance hit of linearizing, 263 * Don't care about the performance hit of linearizing,
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 66e08040ced7..32d710eaf1fc 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop)
259 goto inval_skb; 259 goto inval_skb;
260 } 260 }
261 261
262 skb->ip_summed = CHECKSUM_UNNECESSARY;
263
264 skb_reset_mac_header(skb);
262 skb_reset_network_header(skb); 265 skb_reset_network_header(skb);
263 skb_reset_transport_header(skb); 266 skb_reset_transport_header(skb);
264 267
diff --git a/net/compat.c b/net/compat.c
index 3236b4167a32..94d3d5e97883 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -711,24 +711,18 @@ static unsigned char nas[21] = {
711 711
712COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) 712COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
713{ 713{
714 if (flags & MSG_CMSG_COMPAT)
715 return -EINVAL;
716 return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 714 return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
717} 715}
718 716
719COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, 717COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
720 unsigned int, vlen, unsigned int, flags) 718 unsigned int, vlen, unsigned int, flags)
721{ 719{
722 if (flags & MSG_CMSG_COMPAT)
723 return -EINVAL;
724 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 720 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
725 flags | MSG_CMSG_COMPAT); 721 flags | MSG_CMSG_COMPAT);
726} 722}
727 723
728COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) 724COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
729{ 725{
730 if (flags & MSG_CMSG_COMPAT)
731 return -EINVAL;
732 return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 726 return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
733} 727}
734 728
@@ -751,9 +745,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
751 int datagrams; 745 int datagrams;
752 struct timespec ktspec; 746 struct timespec ktspec;
753 747
754 if (flags & MSG_CMSG_COMPAT)
755 return -EINVAL;
756
757 if (timeout == NULL) 748 if (timeout == NULL)
758 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 749 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
759 flags | MSG_CMSG_COMPAT, NULL); 750 flags | MSG_CMSG_COMPAT, NULL);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8f9710c62e20..962ee9d71964 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -946,7 +946,7 @@ bool dev_valid_name(const char *name)
946 return false; 946 return false;
947 947
948 while (*name) { 948 while (*name) {
949 if (*name == '/' || isspace(*name)) 949 if (*name == '/' || *name == ':' || isspace(*name))
950 return false; 950 return false;
951 name++; 951 name++;
952 } 952 }
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 91f74f3eb204..aa378ecef186 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -98,6 +98,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
98 [NETIF_F_RXALL_BIT] = "rx-all", 98 [NETIF_F_RXALL_BIT] = "rx-all",
99 [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", 99 [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
100 [NETIF_F_BUSY_POLL_BIT] = "busy-poll", 100 [NETIF_F_BUSY_POLL_BIT] = "busy-poll",
101 [NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
101}; 102};
102 103
103static const char 104static const char
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 0c08062d1796..1e2f46a69d50 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
32 return 0; 32 return 0;
33 33
34nla_put_failure: 34nla_put_failure:
35 kfree(d->xstats);
36 d->xstats = NULL;
37 d->xstats_len = 0;
35 spin_unlock_bh(d->lock); 38 spin_unlock_bh(d->lock);
36 return -1; 39 return -1;
37} 40}
@@ -305,7 +308,9 @@ int
305gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) 308gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
306{ 309{
307 if (d->compat_xstats) { 310 if (d->compat_xstats) {
308 d->xstats = st; 311 d->xstats = kmemdup(st, len, GFP_ATOMIC);
312 if (!d->xstats)
313 goto err_out;
309 d->xstats_len = len; 314 d->xstats_len = len;
310 } 315 }
311 316
@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
313 return gnet_stats_copy(d, TCA_STATS_APP, st, len); 318 return gnet_stats_copy(d, TCA_STATS_APP, st, len);
314 319
315 return 0; 320 return 0;
321
322err_out:
323 d->xstats_len = 0;
324 spin_unlock_bh(d->lock);
325 return -1;
316} 326}
317EXPORT_SYMBOL(gnet_stats_copy_app); 327EXPORT_SYMBOL(gnet_stats_copy_app);
318 328
@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
345 return -1; 355 return -1;
346 } 356 }
347 357
358 kfree(d->xstats);
359 d->xstats = NULL;
360 d->xstats_len = 0;
348 spin_unlock_bh(d->lock); 361 spin_unlock_bh(d->lock);
349 return 0; 362 return 0;
350} 363}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b4899f5b7388..508155b283dd 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file,
1134 return len; 1134 return len;
1135 1135
1136 i += len; 1136 i += len;
1137 if ((value > 1) &&
1138 (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
1139 return -ENOTSUPP;
1137 pkt_dev->burst = value < 1 ? 1 : value; 1140 pkt_dev->burst = value < 1 ? 1 : value;
1138 sprintf(pg_result, "OK: burst=%d", pkt_dev->burst); 1141 sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
1139 return count; 1142 return count;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ab293a3066b3..25b4b5d23485 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1300,7 +1300,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1300 s_h = cb->args[0]; 1300 s_h = cb->args[0];
1301 s_idx = cb->args[1]; 1301 s_idx = cb->args[1];
1302 1302
1303 rcu_read_lock();
1304 cb->seq = net->dev_base_seq; 1303 cb->seq = net->dev_base_seq;
1305 1304
1306 /* A hack to preserve kernel<->userspace interface. 1305 /* A hack to preserve kernel<->userspace interface.
@@ -1322,7 +1321,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1322 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1321 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1323 idx = 0; 1322 idx = 0;
1324 head = &net->dev_index_head[h]; 1323 head = &net->dev_index_head[h];
1325 hlist_for_each_entry_rcu(dev, head, index_hlist) { 1324 hlist_for_each_entry(dev, head, index_hlist) {
1326 if (idx < s_idx) 1325 if (idx < s_idx)
1327 goto cont; 1326 goto cont;
1328 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1327 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1344,7 +1343,6 @@ cont:
1344 } 1343 }
1345 } 1344 }
1346out: 1345out:
1347 rcu_read_unlock();
1348 cb->args[1] = idx; 1346 cb->args[1] = idx;
1349 cb->args[0] = h; 1347 cb->args[0] = h;
1350 1348
@@ -2012,8 +2010,8 @@ replay:
2012 } 2010 }
2013 2011
2014 if (1) { 2012 if (1) {
2015 struct nlattr *attr[ops ? ops->maxtype + 1 : 0]; 2013 struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
2016 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0]; 2014 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
2017 struct nlattr **data = NULL; 2015 struct nlattr **data = NULL;
2018 struct nlattr **slave_data = NULL; 2016 struct nlattr **slave_data = NULL;
2019 struct net *dest_net, *link_net = NULL; 2017 struct net *dest_net, *link_net = NULL;
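The attr and slave_attr arrays above are variable-length arrays; when ops (or m_ops) is NULL, the old expressions sized them as zero-element VLAs, which C does not permit (a VLA's size must evaluate to greater than zero), so the fallback size is bumped to 1 to keep the declarations valid even when no attributes will be parsed.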
@@ -2122,6 +2120,10 @@ replay:
2122 if (IS_ERR(dest_net)) 2120 if (IS_ERR(dest_net))
2123 return PTR_ERR(dest_net); 2121 return PTR_ERR(dest_net);
2124 2122
2123 err = -EPERM;
2124 if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
2125 goto out;
2126
2125 if (tb[IFLA_LINK_NETNSID]) { 2127 if (tb[IFLA_LINK_NETNSID]) {
2126 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 2128 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
2127 2129
@@ -2130,6 +2132,9 @@ replay:
2130 err = -EINVAL; 2132 err = -EINVAL;
2131 goto out; 2133 goto out;
2132 } 2134 }
2135 err = -EPERM;
2136 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
2137 goto out;
2133 } 2138 }
2134 2139
2135 dev = rtnl_create_link(link_net ? : dest_net, ifname, 2140 dev = rtnl_create_link(link_net ? : dest_net, ifname,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 88c613eab142..f80507823531 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3621,13 +3621,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3621{ 3621{
3622 struct sk_buff_head *q = &sk->sk_error_queue; 3622 struct sk_buff_head *q = &sk->sk_error_queue;
3623 struct sk_buff *skb, *skb_next; 3623 struct sk_buff *skb, *skb_next;
3624 unsigned long flags;
3624 int err = 0; 3625 int err = 0;
3625 3626
3626 spin_lock_bh(&q->lock); 3627 spin_lock_irqsave(&q->lock, flags);
3627 skb = __skb_dequeue(q); 3628 skb = __skb_dequeue(q);
3628 if (skb && (skb_next = skb_peek(q))) 3629 if (skb && (skb_next = skb_peek(q)))
3629 err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 3630 err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
3630 spin_unlock_bh(&q->lock); 3631 spin_unlock_irqrestore(&q->lock, flags);
3631 3632
3632 sk->sk_err = err; 3633 sk->sk_err = err;
3633 if (err) 3634 if (err)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1d7c1256e845..3b81092771f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1062,7 +1062,7 @@ source_ok:
1062 if (decnet_debug_level & 16) 1062 if (decnet_debug_level & 16)
1063 printk(KERN_DEBUG 1063 printk(KERN_DEBUG
1064 "dn_route_output_slow: initial checks complete." 1064 "dn_route_output_slow: initial checks complete."
1065 " dst=%o4x src=%04x oif=%d try_hard=%d\n", 1065 " dst=%04x src=%04x oif=%d try_hard=%d\n",
1066 le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr), 1066 le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
1067 fld.flowidn_oif, try_hard); 1067 fld.flowidn_oif, try_hard);
1068 1068
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index a138d75751df..44d27469ae55 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -359,8 +359,11 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
359 struct hsr_port *port; 359 struct hsr_port *port;
360 360
361 hsr = netdev_priv(hsr_dev); 361 hsr = netdev_priv(hsr_dev);
362
363 rtnl_lock();
362 hsr_for_each_port(hsr, port) 364 hsr_for_each_port(hsr, port)
363 hsr_del_port(port); 365 hsr_del_port(port);
366 rtnl_unlock();
364 367
365 del_timer_sync(&hsr->prune_timer); 368 del_timer_sync(&hsr->prune_timer);
366 del_timer_sync(&hsr->announce_timer); 369 del_timer_sync(&hsr->announce_timer);
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 779d28b65417..cd37d0011b42 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -36,6 +36,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
36 return NOTIFY_DONE; /* Not an HSR device */ 36 return NOTIFY_DONE; /* Not an HSR device */
37 hsr = netdev_priv(dev); 37 hsr = netdev_priv(dev);
38 port = hsr_port_get_hsr(hsr, HSR_PT_MASTER); 38 port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
39 if (port == NULL) {
40 /* Resend of notification concerning removed device? */
41 return NOTIFY_DONE;
42 }
39 } else { 43 } else {
40 hsr = port->hsr; 44 hsr = port->hsr;
41 } 45 }
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index a348dcbcd683..7d37366cc695 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -181,8 +181,10 @@ void hsr_del_port(struct hsr_port *port)
181 list_del_rcu(&port->port_list); 181 list_del_rcu(&port->port_list);
182 182
183 if (port != master) { 183 if (port != master) {
184 netdev_update_features(master->dev); 184 if (master != NULL) {
185 dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); 185 netdev_update_features(master->dev);
186 dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
187 }
186 netdev_rx_handler_unregister(port->dev); 188 netdev_rx_handler_unregister(port->dev);
187 dev_set_promiscuity(port->dev, -1); 189 dev_set_promiscuity(port->dev, -1);
188 } 190 }
@@ -192,5 +194,7 @@ void hsr_del_port(struct hsr_port *port)
192 */ 194 */
193 195
194 synchronize_rcu(); 196 synchronize_rcu();
195 dev_put(port->dev); 197
198 if (port != master)
199 dev_put(port->dev);
196} 200}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e5b6d0ddcb58..145a50c4d566 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag);
659struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) 659struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
660{ 660{
661 struct iphdr iph; 661 struct iphdr iph;
662 int netoff;
662 u32 len; 663 u32 len;
663 664
664 if (skb->protocol != htons(ETH_P_IP)) 665 if (skb->protocol != htons(ETH_P_IP))
665 return skb; 666 return skb;
666 667
667 if (!skb_copy_bits(skb, 0, &iph, sizeof(iph))) 668 netoff = skb_network_offset(skb);
669
670 if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
668 return skb; 671 return skb;
669 672
670 if (iph.ihl < 5 || iph.version != 4) 673 if (iph.ihl < 5 || iph.version != 4)
671 return skb; 674 return skb;
672 675
673 len = ntohs(iph.tot_len); 676 len = ntohs(iph.tot_len);
674 if (skb->len < len || len < (iph.ihl * 4)) 677 if (skb->len < netoff + len || len < (iph.ihl * 4))
675 return skb; 678 return skb;
676 679
677 if (ip_is_fragment(&iph)) { 680 if (ip_is_fragment(&iph)) {
678 skb = skb_share_check(skb, GFP_ATOMIC); 681 skb = skb_share_check(skb, GFP_ATOMIC);
679 if (skb) { 682 if (skb) {
680 if (!pskb_may_pull(skb, iph.ihl*4)) 683 if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
681 return skb; 684 return skb;
682 if (pskb_trim_rcsum(skb, len)) 685 if (pskb_trim_rcsum(skb, netoff + len))
683 return skb; 686 return skb;
684 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 687 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
685 if (ip_defrag(skb, user)) 688 if (ip_defrag(skb, user))
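The ip_check_defrag() changes above stop assuming that skb->data already points at the IP header: callers such as the packet-socket fanout defrag path can hand over skbs where the link-layer header is still present, so the header copy, the total-length validation and the pskb_may_pull()/pskb_trim_rcsum() bounds are all shifted by skb_network_offset() instead of starting at offset zero.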
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d68199d9b2b0..a7aea2048a0d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -888,7 +888,8 @@ static int __ip_append_data(struct sock *sk,
888 cork->length += length; 888 cork->length += length;
889 if (((length > mtu) || (skb && skb_is_gso(skb))) && 889 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
890 (sk->sk_protocol == IPPROTO_UDP) && 890 (sk->sk_protocol == IPPROTO_UDP) &&
891 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) { 891 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
892 (sk->sk_type == SOCK_DGRAM)) {
892 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 893 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
893 hh_len, fragheaderlen, transhdrlen, 894 hh_len, fragheaderlen, transhdrlen,
894 maxfraglen, flags); 895 maxfraglen, flags);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 31d8c71986b4..5cd99271d3a6 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
432 kfree_skb(skb); 432 kfree_skb(skb);
433} 433}
434 434
435static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk, 435/* IPv4 supports cmsg on all imcp errors and some timestamps
436 const struct sk_buff *skb, 436 *
437 int ee_origin) 437 * Timestamp code paths do not initialize the fields expected by cmsg:
438 * the PKTINFO fields in skb->cb[]. Fill those in here.
439 */
440static bool ipv4_datagram_support_cmsg(const struct sock *sk,
441 struct sk_buff *skb,
442 int ee_origin)
438{ 443{
439 struct in_pktinfo *info = PKTINFO_SKB_CB(skb); 444 struct in_pktinfo *info;
445
446 if (ee_origin == SO_EE_ORIGIN_ICMP)
447 return true;
440 448
441 if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) || 449 if (ee_origin == SO_EE_ORIGIN_LOCAL)
442 (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || 450 return false;
451
452 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
453 * timestamp with egress dev. Not possible for packets without dev
454 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
455 */
456 if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
443 (!skb->dev)) 457 (!skb->dev))
444 return false; 458 return false;
445 459
460 info = PKTINFO_SKB_CB(skb);
446 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; 461 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
447 info->ipi_ifindex = skb->dev->ifindex; 462 info->ipi_ifindex = skb->dev->ifindex;
448 return true; 463 return true;
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
483 498
484 serr = SKB_EXT_ERR(skb); 499 serr = SKB_EXT_ERR(skb);
485 500
486 if (sin && skb->len) { 501 if (sin && serr->port) {
487 sin->sin_family = AF_INET; 502 sin->sin_family = AF_INET;
488 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + 503 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
489 serr->addr_offset); 504 serr->addr_offset);
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
496 sin = &errhdr.offender; 511 sin = &errhdr.offender;
497 memset(sin, 0, sizeof(*sin)); 512 memset(sin, 0, sizeof(*sin));
498 513
499 if (skb->len && 514 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
500 (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
501 ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
502 sin->sin_family = AF_INET; 515 sin->sin_family = AF_INET;
503 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 516 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
504 if (inet_sk(sk)->cmsg_flags) 517 if (inet_sk(sk)->cmsg_flags)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index e9f66e1cda50..208d5439e59b 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
259 kgid_t low, high; 259 kgid_t low, high;
260 int ret = 0; 260 int ret = 0;
261 261
262 if (sk->sk_family == AF_INET6)
263 sk->sk_ipv6only = 1;
264
262 inet_get_ping_group_range_net(net, &low, &high); 265 inet_get_ping_group_range_net(net, &low, &high);
263 if (gid_lte(low, group) && gid_lte(group, high)) 266 if (gid_lte(low, group) && gid_lte(group, high))
264 return 0; 267 return 0;
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
305 if (addr_len < sizeof(*addr)) 308 if (addr_len < sizeof(*addr))
306 return -EINVAL; 309 return -EINVAL;
307 310
311 if (addr->sin_family != AF_INET &&
312 !(addr->sin_family == AF_UNSPEC &&
313 addr->sin_addr.s_addr == htonl(INADDR_ANY)))
314 return -EAFNOSUPPORT;
315
308 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", 316 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
309 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); 317 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
310 318
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
330 return -EINVAL; 338 return -EINVAL;
331 339
332 if (addr->sin6_family != AF_INET6) 340 if (addr->sin6_family != AF_INET6)
333 return -EINVAL; 341 return -EAFNOSUPPORT;
334 342
335 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", 343 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
336 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); 344 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
716 if (msg->msg_namelen < sizeof(*usin)) 724 if (msg->msg_namelen < sizeof(*usin))
717 return -EINVAL; 725 return -EINVAL;
718 if (usin->sin_family != AF_INET) 726 if (usin->sin_family != AF_INET)
719 return -EINVAL; 727 return -EAFNOSUPPORT;
720 daddr = usin->sin_addr.s_addr; 728 daddr = usin->sin_addr.s_addr;
721 /* no remote port */ 729 /* no remote port */
722 } else { 730 } else {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9d72a0fcd928..995a2259bcfc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
835 int large_allowed) 835 int large_allowed)
836{ 836{
837 struct tcp_sock *tp = tcp_sk(sk); 837 struct tcp_sock *tp = tcp_sk(sk);
838 u32 new_size_goal, size_goal, hlen; 838 u32 new_size_goal, size_goal;
839 839
840 if (!large_allowed || !sk_can_gso(sk)) 840 if (!large_allowed || !sk_can_gso(sk))
841 return mss_now; 841 return mss_now;
842 842
843 /* Maybe we should/could use sk->sk_prot->max_header here ? */ 843 /* Note : tcp_tso_autosize() will eventually split this later */
844 hlen = inet_csk(sk)->icsk_af_ops->net_header_len + 844 new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
845 inet_csk(sk)->icsk_ext_hdr_len +
846 tp->tcp_header_len;
847
848 new_size_goal = sk->sk_gso_max_size - 1 - hlen;
849 new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); 845 new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
850 846
851 /* We try hard to avoid divides here */ 847 /* We try hard to avoid divides here */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8fdd27b17306..fb4cf8b8e121 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4770,7 +4770,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
4770 return false; 4770 return false;
4771 4771
4772 /* If we filled the congestion window, do not expand. */ 4772 /* If we filled the congestion window, do not expand. */
4773 if (tp->packets_out >= tp->snd_cwnd) 4773 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
4774 return false; 4774 return false;
4775 4775
4776 return true; 4776 return true;
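For reference, tcp_packets_in_flight(tp) is packets_out - (sacked_out + lost_out) + retrans_out, so the sndbuf-expansion test above now looks at segments actually believed to be in the network: with packets_out large but many of them already SACKed or marked lost, the congestion window is not really filled and the send buffer may still be allowed to grow.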
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 98e4a63d72bb..b6030025f411 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4903,6 +4903,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
4903 return ret; 4903 return ret;
4904} 4904}
4905 4905
4906static
4907int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
4908 void __user *buffer, size_t *lenp, loff_t *ppos)
4909{
4910 struct inet6_dev *idev = ctl->extra1;
4911 int min_mtu = IPV6_MIN_MTU;
4912 struct ctl_table lctl;
4913
4914 lctl = *ctl;
4915 lctl.extra1 = &min_mtu;
4916 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
4917
4918 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
4919}
4920
4906static void dev_disable_change(struct inet6_dev *idev) 4921static void dev_disable_change(struct inet6_dev *idev)
4907{ 4922{
4908 struct netdev_notifier_info info; 4923 struct netdev_notifier_info info;
@@ -5054,7 +5069,7 @@ static struct addrconf_sysctl_table
5054 .data = &ipv6_devconf.mtu6, 5069 .data = &ipv6_devconf.mtu6,
5055 .maxlen = sizeof(int), 5070 .maxlen = sizeof(int),
5056 .mode = 0644, 5071 .mode = 0644,
5057 .proc_handler = proc_dointvec, 5072 .proc_handler = addrconf_sysctl_mtu,
5058 }, 5073 },
5059 { 5074 {
5060 .procname = "accept_ra", 5075 .procname = "accept_ra",
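With addrconf_sysctl_mtu() above, writes to the per-device IPv6 mtu sysctl are validated by proc_dointvec_minmax(): the value must be at least IPV6_MIN_MTU (1280) and, when the device is known, at most the link MTU, otherwise the write is rejected with -EINVAL instead of being accepted by the plain proc_dointvec handler.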
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index c215be70cac0..ace8daca5c83 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
325 kfree_skb(skb); 325 kfree_skb(skb);
326} 326}
327 327
328static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb) 328/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
329 *
330 * At one point, excluding local errors was a quick test to identify icmp/icmp6
331 * errors. This is no longer true, but the test remained, so the v6 stack,
332 * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
333 *
334 * Timestamp code paths do not initialize the fields expected by cmsg:
335 * the PKTINFO fields in skb->cb[]. Fill those in here.
336 */
337static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
338 struct sock_exterr_skb *serr)
329{ 339{
330 int ifindex = skb->dev ? skb->dev->ifindex : -1; 340 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
341 serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6)
342 return true;
343
344 if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
345 return false;
346
347 if (!skb->dev)
348 return false;
331 349
332 if (skb->protocol == htons(ETH_P_IPV6)) 350 if (skb->protocol == htons(ETH_P_IPV6))
333 IP6CB(skb)->iif = ifindex; 351 IP6CB(skb)->iif = skb->dev->ifindex;
334 else 352 else
335 PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex; 353 PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
354
355 return true;
336} 356}
337 357
338/* 358/*
@@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
369 389
370 serr = SKB_EXT_ERR(skb); 390 serr = SKB_EXT_ERR(skb);
371 391
372 if (sin && skb->len) { 392 if (sin && serr->port) {
373 const unsigned char *nh = skb_network_header(skb); 393 const unsigned char *nh = skb_network_header(skb);
374 sin->sin6_family = AF_INET6; 394 sin->sin6_family = AF_INET6;
375 sin->sin6_flowinfo = 0; 395 sin->sin6_flowinfo = 0;
@@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
394 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); 414 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
395 sin = &errhdr.offender; 415 sin = &errhdr.offender;
396 memset(sin, 0, sizeof(*sin)); 416 memset(sin, 0, sizeof(*sin));
397 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) { 417
418 if (ip6_datagram_support_cmsg(skb, serr)) {
398 sin->sin6_family = AF_INET6; 419 sin->sin6_family = AF_INET6;
399 if (np->rxopt.all) { 420 if (np->rxopt.all)
400 if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
401 serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
402 ip6_datagram_prepare_pktinfo_errqueue(skb);
403 ip6_datagram_recv_common_ctl(sk, msg, skb); 421 ip6_datagram_recv_common_ctl(sk, msg, skb);
404 }
405 if (skb->protocol == htons(ETH_P_IPV6)) { 422 if (skb->protocol == htons(ETH_P_IPV6)) {
406 sin->sin6_addr = ipv6_hdr(skb)->saddr; 423 sin->sin6_addr = ipv6_hdr(skb)->saddr;
407 if (np->rxopt.all) 424 if (np->rxopt.all)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7deebf102cba..0a04a37305d5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1298,7 +1298,8 @@ emsgsize:
1298 if (((length > mtu) || 1298 if (((length > mtu) ||
1299 (skb && skb_is_gso(skb))) && 1299 (skb && skb_is_gso(skb))) &&
1300 (sk->sk_protocol == IPPROTO_UDP) && 1300 (sk->sk_protocol == IPPROTO_UDP) &&
1301 (rt->dst.dev->features & NETIF_F_UFO)) { 1301 (rt->dst.dev->features & NETIF_F_UFO) &&
1302 (sk->sk_type == SOCK_DGRAM)) {
1302 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1303 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1303 hh_len, fragheaderlen, 1304 hh_len, fragheaderlen,
1304 transhdrlen, mtu, flags, rt); 1305 transhdrlen, mtu, flags, rt);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index bd46f736f61d..a2dfff6ff227 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102 102
103 if (msg->msg_name) { 103 if (msg->msg_name) {
104 DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); 104 DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
105 if (msg->msg_namelen < sizeof(struct sockaddr_in6) || 105 if (msg->msg_namelen < sizeof(*u))
106 u->sin6_family != AF_INET6) {
107 return -EINVAL; 106 return -EINVAL;
107 if (u->sin6_family != AF_INET6) {
108 return -EAFNOSUPPORT;
108 } 109 }
109 if (sk->sk_bound_dev_if && 110 if (sk->sk_bound_dev_if &&
110 sk->sk_bound_dev_if != u->sin6_scope_id) { 111 sk->sk_bound_dev_if != u->sin6_scope_id) {
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 40695b9751c1..683346d2d633 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -798,7 +798,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
798 orig_jiffies = jiffies; 798 orig_jiffies = jiffies;
799 799
800 /* Set poll time to 200 ms */ 800 /* Set poll time to 200 ms */
801 poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200)); 801 poll_time = msecs_to_jiffies(200);
802 if (timeout)
803 poll_time = min_t(unsigned long, timeout, poll_time);
802 804
803 spin_lock_irqsave(&self->spinlock, flags); 805 spin_lock_irqsave(&self->spinlock, flags);
804 while (self->tx_skb && self->tx_skb->len) { 806 while (self->tx_skb && self->tx_skb->len) {
@@ -811,7 +813,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
811 break; 813 break;
812 } 814 }
813 spin_unlock_irqrestore(&self->spinlock, flags); 815 spin_unlock_irqrestore(&self->spinlock, flags);
814 current->state = TASK_RUNNING; 816 __set_current_state(TASK_RUNNING);
815} 817}
816 818
817/* 819/*
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 3c83a1e5ab03..1215693fdd22 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -305,7 +305,7 @@ irnet_ctrl_read(irnet_socket * ap,
305 305
306 /* Put ourselves on the wait queue to be woken up */ 306 /* Put ourselves on the wait queue to be woken up */
307 add_wait_queue(&irnet_events.rwait, &wait); 307 add_wait_queue(&irnet_events.rwait, &wait);
308 current->state = TASK_INTERRUPTIBLE; 308 set_current_state(TASK_INTERRUPTIBLE);
309 for(;;) 309 for(;;)
310 { 310 {
311 /* If there is unread events */ 311 /* If there is unread events */
@@ -321,7 +321,7 @@ irnet_ctrl_read(irnet_socket * ap,
321 /* Yield and wait to be woken up */ 321 /* Yield and wait to be woken up */
322 schedule(); 322 schedule();
323 } 323 }
324 current->state = TASK_RUNNING; 324 __set_current_state(TASK_RUNNING);
325 remove_wait_queue(&irnet_events.rwait, &wait); 325 remove_wait_queue(&irnet_events.rwait, &wait);
326 326
327 /* Did we got it ? */ 327 /* Did we got it ? */
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index ff0d2db09df9..5bcd4e5589d3 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1508,6 +1508,8 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
1508 if (ieee80211_chanctx_refcount(local, ctx) == 0) 1508 if (ieee80211_chanctx_refcount(local, ctx) == 0)
1509 ieee80211_free_chanctx(local, ctx); 1509 ieee80211_free_chanctx(local, ctx);
1510 1510
1511 sdata->radar_required = false;
1512
1511 /* Unreserving may ready an in-place reservation. */ 1513 /* Unreserving may ready an in-place reservation. */
1512 if (use_reserved_switch) 1514 if (use_reserved_switch)
1513 ieee80211_vif_use_reserved_switch(local); 1515 ieee80211_vif_use_reserved_switch(local);
@@ -1566,6 +1568,9 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
1566 ieee80211_recalc_smps_chanctx(local, ctx); 1568 ieee80211_recalc_smps_chanctx(local, ctx);
1567 ieee80211_recalc_radar_chanctx(local, ctx); 1569 ieee80211_recalc_radar_chanctx(local, ctx);
1568 out: 1570 out:
1571 if (ret)
1572 sdata->radar_required = false;
1573
1569 mutex_unlock(&local->chanctx_mtx); 1574 mutex_unlock(&local->chanctx_mtx);
1570 return ret; 1575 return ret;
1571} 1576}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 7c86a002df95..ef6e8a6c4253 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -373,7 +373,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
373 rate++; 373 rate++;
374 mi->sample_deferred++; 374 mi->sample_deferred++;
375 } else { 375 } else {
376 if (!msr->sample_limit != 0) 376 if (!msr->sample_limit)
377 return; 377 return;
378 378
379 mi->sample_packets++; 379 mi->sample_packets++;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 88a18ffe2975..07bd8db00af8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
566 if (tx->sdata->control_port_no_encrypt) 566 if (tx->sdata->control_port_no_encrypt)
567 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 567 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
568 info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; 568 info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
569 info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
569 } 570 }
570 571
571 return TX_CONTINUE; 572 return TX_CONTINUE;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e55759056361..ed99448671c3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3402,7 +3402,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3402 if (udest.af == 0) 3402 if (udest.af == 0)
3403 udest.af = svc->af; 3403 udest.af = svc->af;
3404 3404
3405 if (udest.af != svc->af) { 3405 if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
3406 /* The synchronization protocol is incompatible 3406 /* The synchronization protocol is incompatible
3407 * with mixed family services 3407 * with mixed family services
3408 */ 3408 */
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index c47ffd7a0a70..d93ceeb3ef04 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
896 IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); 896 IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
897 return; 897 return;
898 } 898 }
899 if (!(flags & IP_VS_CONN_F_TEMPLATE))
900 kfree(param->pe_data);
899 } 901 }
900 902
901 if (opt) 903 if (opt)
@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
1169 (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) 1171 (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
1170 ); 1172 );
1171#endif 1173#endif
1174 ip_vs_pe_put(param.pe);
1172 return 0; 1175 return 0;
1173 /* Error exit */ 1176 /* Error exit */
1174out: 1177out:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 199fd0f27b0e..6ab777912237 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
227 227
228static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) 228static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
229{ 229{
230 rule->genmask = 0; 230 rule->genmask &= ~(1 << gencursor_next(net));
231} 231}
232 232
233static int 233static int
@@ -1711,9 +1711,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
1711 } 1711 }
1712 nla_nest_end(skb, list); 1712 nla_nest_end(skb, list);
1713 1713
1714 if (rule->ulen && 1714 if (rule->udata) {
1715 nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule))) 1715 struct nft_userdata *udata = nft_userdata(rule);
1716 goto nla_put_failure; 1716 if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1,
1717 udata->data) < 0)
1718 goto nla_put_failure;
1719 }
1717 1720
1718 nlmsg_end(skb, nlh); 1721 nlmsg_end(skb, nlh);
1719 return 0; 1722 return 0;
@@ -1896,11 +1899,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1896 struct nft_table *table; 1899 struct nft_table *table;
1897 struct nft_chain *chain; 1900 struct nft_chain *chain;
1898 struct nft_rule *rule, *old_rule = NULL; 1901 struct nft_rule *rule, *old_rule = NULL;
1902 struct nft_userdata *udata;
1899 struct nft_trans *trans = NULL; 1903 struct nft_trans *trans = NULL;
1900 struct nft_expr *expr; 1904 struct nft_expr *expr;
1901 struct nft_ctx ctx; 1905 struct nft_ctx ctx;
1902 struct nlattr *tmp; 1906 struct nlattr *tmp;
1903 unsigned int size, i, n, ulen = 0; 1907 unsigned int size, i, n, ulen = 0, usize = 0;
1904 int err, rem; 1908 int err, rem;
1905 bool create; 1909 bool create;
1906 u64 handle, pos_handle; 1910 u64 handle, pos_handle;
@@ -1968,12 +1972,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1968 n++; 1972 n++;
1969 } 1973 }
1970 } 1974 }
1975 /* Check for overflow of dlen field */
1976 err = -EFBIG;
1977 if (size >= 1 << 12)
1978 goto err1;
1971 1979
1972 if (nla[NFTA_RULE_USERDATA]) 1980 if (nla[NFTA_RULE_USERDATA]) {
1973 ulen = nla_len(nla[NFTA_RULE_USERDATA]); 1981 ulen = nla_len(nla[NFTA_RULE_USERDATA]);
1982 if (ulen > 0)
1983 usize = sizeof(struct nft_userdata) + ulen;
1984 }
1974 1985
1975 err = -ENOMEM; 1986 err = -ENOMEM;
1976 rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL); 1987 rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
1977 if (rule == NULL) 1988 if (rule == NULL)
1978 goto err1; 1989 goto err1;
1979 1990
@@ -1981,10 +1992,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1981 1992
1982 rule->handle = handle; 1993 rule->handle = handle;
1983 rule->dlen = size; 1994 rule->dlen = size;
1984 rule->ulen = ulen; 1995 rule->udata = ulen ? 1 : 0;
1985 1996
1986 if (ulen) 1997 if (ulen) {
1987 nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen); 1998 udata = nft_userdata(rule);
1999 udata->len = ulen - 1;
2000 nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen);
2001 }
1988 2002
1989 expr = nft_expr_first(rule); 2003 expr = nft_expr_first(rule);
1990 for (i = 0; i < n; i++) { 2004 for (i = 0; i < n; i++) {
@@ -2031,12 +2045,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
2031 2045
2032err3: 2046err3:
2033 list_del_rcu(&rule->list); 2047 list_del_rcu(&rule->list);
2034 if (trans) {
2035 list_del_rcu(&nft_trans_rule(trans)->list);
2036 nft_rule_clear(net, nft_trans_rule(trans));
2037 nft_trans_destroy(trans);
2038 chain->use++;
2039 }
2040err2: 2048err2:
2041 nf_tables_rule_destroy(&ctx, rule); 2049 nf_tables_rule_destroy(&ctx, rule);
2042err1: 2050err1:
@@ -3612,12 +3620,11 @@ static int nf_tables_commit(struct sk_buff *skb)
3612 &te->elem, 3620 &te->elem,
3613 NFT_MSG_DELSETELEM, 0); 3621 NFT_MSG_DELSETELEM, 0);
3614 te->set->ops->get(te->set, &te->elem); 3622 te->set->ops->get(te->set, &te->elem);
3615 te->set->ops->remove(te->set, &te->elem);
3616 nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); 3623 nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
3617 if (te->elem.flags & NFT_SET_MAP) { 3624 if (te->set->flags & NFT_SET_MAP &&
3618 nft_data_uninit(&te->elem.data, 3625 !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
3619 te->set->dtype); 3626 nft_data_uninit(&te->elem.data, te->set->dtype);
3620 } 3627 te->set->ops->remove(te->set, &te->elem);
3621 nft_trans_destroy(trans); 3628 nft_trans_destroy(trans);
3622 break; 3629 break;
3623 } 3630 }
@@ -3658,7 +3665,7 @@ static int nf_tables_abort(struct sk_buff *skb)
3658{ 3665{
3659 struct net *net = sock_net(skb->sk); 3666 struct net *net = sock_net(skb->sk);
3660 struct nft_trans *trans, *next; 3667 struct nft_trans *trans, *next;
3661 struct nft_set *set; 3668 struct nft_trans_elem *te;
3662 3669
3663 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { 3670 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
3664 switch (trans->msg_type) { 3671 switch (trans->msg_type) {
@@ -3719,9 +3726,13 @@ static int nf_tables_abort(struct sk_buff *skb)
3719 break; 3726 break;
3720 case NFT_MSG_NEWSETELEM: 3727 case NFT_MSG_NEWSETELEM:
3721 nft_trans_elem_set(trans)->nelems--; 3728 nft_trans_elem_set(trans)->nelems--;
3722 set = nft_trans_elem_set(trans); 3729 te = (struct nft_trans_elem *)trans->data;
3723 set->ops->get(set, &nft_trans_elem(trans)); 3730 te->set->ops->get(te->set, &te->elem);
3724 set->ops->remove(set, &nft_trans_elem(trans)); 3731 nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
3732 if (te->set->flags & NFT_SET_MAP &&
3733 !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
3734 nft_data_uninit(&te->elem.data, te->set->dtype);
3735 te->set->ops->remove(te->set, &te->elem);
3725 nft_trans_destroy(trans); 3736 nft_trans_destroy(trans);
3726 break; 3737 break;
3727 case NFT_MSG_DELSETELEM: 3738 case NFT_MSG_DELSETELEM:
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index c598f74063a1..213584cf04b3 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -123,7 +123,7 @@ static void
123nft_target_set_tgchk_param(struct xt_tgchk_param *par, 123nft_target_set_tgchk_param(struct xt_tgchk_param *par,
124 const struct nft_ctx *ctx, 124 const struct nft_ctx *ctx,
125 struct xt_target *target, void *info, 125 struct xt_target *target, void *info,
126 union nft_entry *entry, u8 proto, bool inv) 126 union nft_entry *entry, u16 proto, bool inv)
127{ 127{
128 par->net = ctx->net; 128 par->net = ctx->net;
129 par->table = ctx->table->name; 129 par->table = ctx->table->name;
@@ -137,7 +137,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
137 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; 137 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
138 break; 138 break;
139 case NFPROTO_BRIDGE: 139 case NFPROTO_BRIDGE:
140 entry->ebt.ethproto = proto; 140 entry->ebt.ethproto = (__force __be16)proto;
141 entry->ebt.invflags = inv ? EBT_IPROTO : 0; 141 entry->ebt.invflags = inv ? EBT_IPROTO : 0;
142 break; 142 break;
143 } 143 }
@@ -171,7 +171,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
171 [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, 171 [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 },
172}; 172};
173 173
174static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv) 174static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
175{ 175{
176 struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; 176 struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
177 u32 flags; 177 u32 flags;
@@ -203,7 +203,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
203 struct xt_target *target = expr->ops->data; 203 struct xt_target *target = expr->ops->data;
204 struct xt_tgchk_param par; 204 struct xt_tgchk_param par;
205 size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); 205 size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
206 u8 proto = 0; 206 u16 proto = 0;
207 bool inv = false; 207 bool inv = false;
208 union nft_entry e = {}; 208 union nft_entry e = {};
209 int ret; 209 int ret;
@@ -334,7 +334,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
334static void 334static void
335nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, 335nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
336 struct xt_match *match, void *info, 336 struct xt_match *match, void *info,
337 union nft_entry *entry, u8 proto, bool inv) 337 union nft_entry *entry, u16 proto, bool inv)
338{ 338{
339 par->net = ctx->net; 339 par->net = ctx->net;
340 par->table = ctx->table->name; 340 par->table = ctx->table->name;
@@ -348,7 +348,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
348 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; 348 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
349 break; 349 break;
350 case NFPROTO_BRIDGE: 350 case NFPROTO_BRIDGE:
351 entry->ebt.ethproto = proto; 351 entry->ebt.ethproto = (__force __be16)proto;
352 entry->ebt.invflags = inv ? EBT_IPROTO : 0; 352 entry->ebt.invflags = inv ? EBT_IPROTO : 0;
353 break; 353 break;
354 } 354 }
@@ -385,7 +385,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
385 struct xt_match *match = expr->ops->data; 385 struct xt_match *match = expr->ops->data;
386 struct xt_mtchk_param par; 386 struct xt_mtchk_param par;
387 size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); 387 size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
388 u8 proto = 0; 388 u16 proto = 0;
389 bool inv = false; 389 bool inv = false;
390 union nft_entry e = {}; 390 union nft_entry e = {};
391 int ret; 391 int ret;
@@ -625,8 +625,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
625 struct xt_match *match = nft_match->ops.data; 625 struct xt_match *match = nft_match->ops.data;
626 626
627 if (strcmp(match->name, mt_name) == 0 && 627 if (strcmp(match->name, mt_name) == 0 &&
628 match->revision == rev && match->family == family) 628 match->revision == rev && match->family == family) {
629 if (!try_module_get(match->me))
630 return ERR_PTR(-ENOENT);
631
629 return &nft_match->ops; 632 return &nft_match->ops;
633 }
630 } 634 }
631 635
632 match = xt_request_find_match(family, mt_name, rev); 636 match = xt_request_find_match(family, mt_name, rev);
@@ -695,8 +699,12 @@ nft_target_select_ops(const struct nft_ctx *ctx,
695 struct xt_target *target = nft_target->ops.data; 699 struct xt_target *target = nft_target->ops.data;
696 700
697 if (strcmp(target->name, tg_name) == 0 && 701 if (strcmp(target->name, tg_name) == 0 &&
698 target->revision == rev && target->family == family) 702 target->revision == rev && target->family == family) {
703 if (!try_module_get(target->me))
704 return ERR_PTR(-ENOENT);
705
699 return &nft_target->ops; 706 return &nft_target->ops;
707 }
700 } 708 }
701 709
702 target = xt_request_find_target(family, tg_name, rev); 710 target = xt_request_find_target(family, tg_name, rev);
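
The nft_match_select_ops()/nft_target_select_ops() hunks above take a module reference before handing back a cached ops pointer, so the owning module cannot disappear while the pointer is in use. A hedged userspace model of the same "take the reference or fail the lookup" rule; the types and names below are stand-ins, not kernel API:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for an ops structure owned by a loadable module. */
struct cached_ops {
	int refcnt;
	bool owner_unloading;
};

/* Mirrors the role of try_module_get(): succeed only while the owner lives. */
static bool ops_tryget(struct cached_ops *ops)
{
	if (ops->owner_unloading)
		return false;
	ops->refcnt++;
	return true;
}

/* A cache hit must still take a reference before the pointer is handed out,
 * which is exactly what the hunks above add; otherwise report -ENOENT and
 * let the caller fall back to loading the extension afresh. */
static struct cached_ops *select_ops(struct cached_ops *cached, int *err)
{
	if (cached && ops_tryget(cached))
		return cached;
	*err = -ENOENT;
	return NULL;
}
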
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 61e6c407476a..c82df0a48fcd 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -192,8 +192,6 @@ static int nft_hash_init(const struct nft_set *set,
192 .key_offset = offsetof(struct nft_hash_elem, key), 192 .key_offset = offsetof(struct nft_hash_elem, key),
193 .key_len = set->klen, 193 .key_len = set->klen,
194 .hashfn = jhash, 194 .hashfn = jhash,
195 .grow_decision = rht_grow_above_75,
196 .shrink_decision = rht_shrink_below_30,
197 }; 195 };
198 196
199 return rhashtable_init(priv, &params); 197 return rhashtable_init(priv, &params);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 30dbe34915ae..45e1b30e4fb2 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -378,12 +378,11 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
378 mutex_lock(&recent_mutex); 378 mutex_lock(&recent_mutex);
379 t = recent_table_lookup(recent_net, info->name); 379 t = recent_table_lookup(recent_net, info->name);
380 if (t != NULL) { 380 if (t != NULL) {
381 if (info->hit_count > t->nstamps_max_mask) { 381 if (nstamp_mask > t->nstamps_max_mask) {
382 pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n", 382 spin_lock_bh(&recent_lock);
383 info->hit_count, t->nstamps_max_mask + 1, 383 recent_table_flush(t);
384 info->name); 384 t->nstamps_max_mask = nstamp_mask;
385 ret = -EINVAL; 385 spin_unlock_bh(&recent_lock);
386 goto out;
387 } 386 }
388 387
389 t->refcnt++; 388 t->refcnt++;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 1ba67931eb1b..13332dbf291d 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -243,12 +243,13 @@ static int
243extract_icmp6_fields(const struct sk_buff *skb, 243extract_icmp6_fields(const struct sk_buff *skb,
244 unsigned int outside_hdrlen, 244 unsigned int outside_hdrlen,
245 int *protocol, 245 int *protocol,
246 struct in6_addr **raddr, 246 const struct in6_addr **raddr,
247 struct in6_addr **laddr, 247 const struct in6_addr **laddr,
248 __be16 *rport, 248 __be16 *rport,
249 __be16 *lport) 249 __be16 *lport,
250 struct ipv6hdr *ipv6_var)
250{ 251{
251 struct ipv6hdr *inside_iph, _inside_iph; 252 const struct ipv6hdr *inside_iph;
252 struct icmp6hdr *icmph, _icmph; 253 struct icmp6hdr *icmph, _icmph;
253 __be16 *ports, _ports[2]; 254 __be16 *ports, _ports[2];
254 u8 inside_nexthdr; 255 u8 inside_nexthdr;
@@ -263,12 +264,14 @@ extract_icmp6_fields(const struct sk_buff *skb,
263 if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK) 264 if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK)
264 return 1; 265 return 1;
265 266
266 inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph); 267 inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
268 sizeof(*ipv6_var), ipv6_var);
267 if (inside_iph == NULL) 269 if (inside_iph == NULL)
268 return 1; 270 return 1;
269 inside_nexthdr = inside_iph->nexthdr; 271 inside_nexthdr = inside_iph->nexthdr;
270 272
271 inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), 273 inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
274 sizeof(*ipv6_var),
272 &inside_nexthdr, &inside_fragoff); 275 &inside_nexthdr, &inside_fragoff);
273 if (inside_hdrlen < 0) 276 if (inside_hdrlen < 0)
274 return 1; /* hjm: Packet has no/incomplete transport layer headers. */ 277 return 1; /* hjm: Packet has no/incomplete transport layer headers. */
@@ -315,10 +318,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
315static bool 318static bool
316socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) 319socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
317{ 320{
318 struct ipv6hdr *iph = ipv6_hdr(skb); 321 struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
319 struct udphdr _hdr, *hp = NULL; 322 struct udphdr _hdr, *hp = NULL;
320 struct sock *sk = skb->sk; 323 struct sock *sk = skb->sk;
321 struct in6_addr *daddr = NULL, *saddr = NULL; 324 const struct in6_addr *daddr = NULL, *saddr = NULL;
322 __be16 uninitialized_var(dport), uninitialized_var(sport); 325 __be16 uninitialized_var(dport), uninitialized_var(sport);
323 int thoff = 0, uninitialized_var(tproto); 326 int thoff = 0, uninitialized_var(tproto);
324 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; 327 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
@@ -342,7 +345,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
342 345
343 } else if (tproto == IPPROTO_ICMPV6) { 346 } else if (tproto == IPPROTO_ICMPV6) {
344 if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, 347 if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
345 &sport, &dport)) 348 &sport, &dport, &ipv6_var))
346 return false; 349 return false;
347 } else { 350 } else {
348 return false; 351 return false;
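
The extract_icmp6_fields() change above stops returning pointers into a header that was copied into the callee's own stack frame (_inside_iph); the caller now supplies the scratch buffer (ipv6_var), so the returned saddr/daddr pointers remain valid after the helper returns. A small userspace illustration of the fixed shape, with made-up types:

#include <stdio.h>
#include <string.h>

struct hdr { int src, dst; };

/* Fixed shape: copy into caller-owned storage so the out-pointer stays
 * valid after this function returns (the pre-patch code pointed into a
 * local that died on return). */
static void parse_fixed(const char *pkt, struct hdr *scratch, const int **src)
{
	memcpy(scratch, pkt, sizeof(*scratch));
	*src = &scratch->src;
}

int main(void)
{
	struct hdr raw = { 1, 2 }, scratch;
	const int *src;

	parse_fixed((const char *)&raw, &scratch, &src);
	printf("src=%d\n", *src);	/* safe: scratch outlives the call */
	return 0;
}
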
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2702673f0f23..05919bf3f670 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -3126,8 +3126,6 @@ static int __init netlink_proto_init(void)
3126 .key_len = sizeof(u32), /* portid */ 3126 .key_len = sizeof(u32), /* portid */
3127 .hashfn = jhash, 3127 .hashfn = jhash,
3128 .max_shift = 16, /* 64K */ 3128 .max_shift = 16, /* 64K */
3129 .grow_decision = rht_grow_above_75,
3130 .shrink_decision = rht_shrink_below_30,
3131 }; 3129 };
3132 3130
3133 if (err != 0) 3131 if (err != 0)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ae5e77cdc0ca..5bae7243c577 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2194,14 +2194,55 @@ static int __net_init ovs_init_net(struct net *net)
2194 return 0; 2194 return 0;
2195} 2195}
2196 2196
2197static void __net_exit ovs_exit_net(struct net *net) 2197static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2198 struct list_head *head)
2198{ 2199{
2199 struct datapath *dp, *dp_next;
2200 struct ovs_net *ovs_net = net_generic(net, ovs_net_id); 2200 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2201 struct datapath *dp;
2202
2203 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2204 int i;
2205
2206 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2207 struct vport *vport;
2208
2209 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2210 struct netdev_vport *netdev_vport;
2211
2212 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2213 continue;
2214
2215 netdev_vport = netdev_vport_priv(vport);
2216 if (dev_net(netdev_vport->dev) == dnet)
2217 list_add(&vport->detach_list, head);
2218 }
2219 }
2220 }
2221}
2222
2223static void __net_exit ovs_exit_net(struct net *dnet)
2224{
2225 struct datapath *dp, *dp_next;
2226 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2227 struct vport *vport, *vport_next;
2228 struct net *net;
2229 LIST_HEAD(head);
2201 2230
2202 ovs_lock(); 2231 ovs_lock();
2203 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) 2232 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2204 __dp_destroy(dp); 2233 __dp_destroy(dp);
2234
2235 rtnl_lock();
2236 for_each_net(net)
2237 list_vports_from_net(net, dnet, &head);
2238 rtnl_unlock();
2239
2240 /* Detach all vports from given namespace. */
2241 list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2242 list_del(&vport->detach_list);
2243 ovs_dp_detach_port(vport);
2244 }
2245
2205 ovs_unlock(); 2246 ovs_unlock();
2206 2247
2207 cancel_work_sync(&ovs_net->dp_notify_work); 2248 cancel_work_sync(&ovs_net->dp_notify_work);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 216f20b90aa5..22b18c145c92 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2253,14 +2253,20 @@ static int masked_set_action_to_set_action_attr(const struct nlattr *a,
2253 struct sk_buff *skb) 2253 struct sk_buff *skb)
2254{ 2254{
2255 const struct nlattr *ovs_key = nla_data(a); 2255 const struct nlattr *ovs_key = nla_data(a);
2256 struct nlattr *nla;
2256 size_t key_len = nla_len(ovs_key) / 2; 2257 size_t key_len = nla_len(ovs_key) / 2;
2257 2258
2258 /* Revert the conversion we did from a non-masked set action to 2259 /* Revert the conversion we did from a non-masked set action to
2259 * masked set action. 2260 * masked set action.
2260 */ 2261 */
2261 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key)) 2262 nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
2263 if (!nla)
2262 return -EMSGSIZE; 2264 return -EMSGSIZE;
2263 2265
2266 if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
2267 return -EMSGSIZE;
2268
2269 nla_nest_end(skb, nla);
2264 return 0; 2270 return 0;
2265} 2271}
2266 2272
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index f8ae295fb001..bc85331a6c60 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -103,6 +103,7 @@ struct vport_portids {
103 * @ops: Class structure. 103 * @ops: Class structure.
104 * @percpu_stats: Points to per-CPU statistics used and maintained by vport 104 * @percpu_stats: Points to per-CPU statistics used and maintained by vport
105 * @err_stats: Points to error statistics used and maintained by vport 105 * @err_stats: Points to error statistics used and maintained by vport
106 * @detach_list: list used for detaching vport in net-exit call.
106 */ 107 */
107struct vport { 108struct vport {
108 struct rcu_head rcu; 109 struct rcu_head rcu;
@@ -117,6 +118,7 @@ struct vport {
117 struct pcpu_sw_netstats __percpu *percpu_stats; 118 struct pcpu_sw_netstats __percpu *percpu_stats;
118 119
119 struct vport_err_stats err_stats; 120 struct vport_err_stats err_stats;
121 struct list_head detach_list;
120}; 122};
121 123
122/** 124/**
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9c28cec1a083..f8db7064d81c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -698,6 +698,10 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
698 698
699 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { 699 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
700 if (!frozen) { 700 if (!frozen) {
701 if (!BLOCK_NUM_PKTS(pbd)) {
702 /* An empty block. Just refresh the timer. */
703 goto refresh_timer;
704 }
701 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); 705 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
702 if (!prb_dispatch_next_block(pkc, po)) 706 if (!prb_dispatch_next_block(pkc, po))
703 goto refresh_timer; 707 goto refresh_timer;
@@ -798,7 +802,11 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
798 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; 802 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
799 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; 803 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
800 } else { 804 } else {
801 /* Ok, we tmo'd - so get the current time */ 805 /* Ok, we tmo'd - so get the current time.
806 *
807 * It shouldn't really happen as we don't close empty
808 * blocks. See prb_retire_rx_blk_timer_expired().
809 */
802 struct timespec ts; 810 struct timespec ts;
803 getnstimeofday(&ts); 811 getnstimeofday(&ts);
804 h1->ts_last_pkt.ts_sec = ts.tv_sec; 812 h1->ts_last_pkt.ts_sec = ts.tv_sec;
@@ -1349,14 +1357,14 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1349 return 0; 1357 return 0;
1350 } 1358 }
1351 1359
1360 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1361 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1362 if (!skb)
1363 return 0;
1364 }
1352 switch (f->type) { 1365 switch (f->type) {
1353 case PACKET_FANOUT_HASH: 1366 case PACKET_FANOUT_HASH:
1354 default: 1367 default:
1355 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1356 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1357 if (!skb)
1358 return 0;
1359 }
1360 idx = fanout_demux_hash(f, skb, num); 1368 idx = fanout_demux_hash(f, skb, num);
1361 break; 1369 break;
1362 case PACKET_FANOUT_LB: 1370 case PACKET_FANOUT_LB:
@@ -3115,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3115 return 0; 3123 return 0;
3116} 3124}
3117 3125
3118static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) 3126static void packet_dev_mclist_delete(struct net_device *dev,
3127 struct packet_mclist **mlp)
3119{ 3128{
3120 for ( ; i; i = i->next) { 3129 struct packet_mclist *ml;
3121 if (i->ifindex == dev->ifindex) 3130
3122 packet_dev_mc(dev, i, what); 3131 while ((ml = *mlp) != NULL) {
3132 if (ml->ifindex == dev->ifindex) {
3133 packet_dev_mc(dev, ml, -1);
3134 *mlp = ml->next;
3135 kfree(ml);
3136 } else
3137 mlp = &ml->next;
3123 } 3138 }
3124} 3139}
3125 3140
@@ -3196,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3196 packet_dev_mc(dev, ml, -1); 3211 packet_dev_mc(dev, ml, -1);
3197 kfree(ml); 3212 kfree(ml);
3198 } 3213 }
3199 rtnl_unlock(); 3214 break;
3200 return 0;
3201 } 3215 }
3202 } 3216 }
3203 rtnl_unlock(); 3217 rtnl_unlock();
3204 return -EADDRNOTAVAIL; 3218 return 0;
3205} 3219}
3206 3220
3207static void packet_flush_mclist(struct sock *sk) 3221static void packet_flush_mclist(struct sock *sk)
@@ -3551,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this,
3551 switch (msg) { 3565 switch (msg) {
3552 case NETDEV_UNREGISTER: 3566 case NETDEV_UNREGISTER:
3553 if (po->mclist) 3567 if (po->mclist)
3554 packet_dev_mclist(dev, po->mclist, -1); 3568 packet_dev_mclist_delete(dev, &po->mclist);
3555 /* fallthrough */ 3569 /* fallthrough */
3556 3570
3557 case NETDEV_DOWN: 3571 case NETDEV_DOWN:
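
packet_dev_mclist_delete() above walks a pointer to the previous node's next field instead of the node pointers themselves, which lets matching entries be unlinked and freed in the same pass without losing the list head. The same idiom in plain userspace C, as a sketch:

#include <stdlib.h>

struct mclist {
	int ifindex;
	struct mclist *next;
};

/* Same unlink idiom as packet_dev_mclist_delete(): advance through the
 * "next" slots so matching nodes can be spliced out and freed in place. */
static void mclist_delete(struct mclist **mlp, int ifindex)
{
	struct mclist *ml;

	while ((ml = *mlp) != NULL) {
		if (ml->ifindex == ifindex) {
			*mlp = ml->next;	/* unlink */
			free(ml);
		} else {
			mlp = &ml->next;	/* keep walking */
		}
	}
}
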
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index c6be17a959a6..e0547f521f20 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -218,7 +218,8 @@ static void rxrpc_resend(struct rxrpc_call *call)
218 struct rxrpc_header *hdr; 218 struct rxrpc_header *hdr;
219 struct sk_buff *txb; 219 struct sk_buff *txb;
220 unsigned long *p_txb, resend_at; 220 unsigned long *p_txb, resend_at;
221 int loop, stop; 221 bool stop;
222 int loop;
222 u8 resend; 223 u8 resend;
223 224
224 _enter("{%d,%d,%d,%d},", 225 _enter("{%d,%d,%d,%d},",
@@ -226,7 +227,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
226 atomic_read(&call->sequence), 227 atomic_read(&call->sequence),
227 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); 228 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
228 229
229 stop = 0; 230 stop = false;
230 resend = 0; 231 resend = 0;
231 resend_at = 0; 232 resend_at = 0;
232 233
@@ -255,11 +256,11 @@ static void rxrpc_resend(struct rxrpc_call *call)
255 _proto("Tx DATA %%%u { #%d }", 256 _proto("Tx DATA %%%u { #%d }",
256 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); 257 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
257 if (rxrpc_send_packet(call->conn->trans, txb) < 0) { 258 if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
258 stop = 0; 259 stop = true;
259 sp->resend_at = jiffies + 3; 260 sp->resend_at = jiffies + 3;
260 } else { 261 } else {
261 sp->resend_at = 262 sp->resend_at =
262 jiffies + rxrpc_resend_timeout * HZ; 263 jiffies + rxrpc_resend_timeout;
263 } 264 }
264 } 265 }
265 266
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 5394b6be46ec..0610efa83d72 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk)
42 _leave("UDP socket errqueue empty"); 42 _leave("UDP socket errqueue empty");
43 return; 43 return;
44 } 44 }
45 if (!skb->len) { 45 serr = SKB_EXT_ERR(skb);
46 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
46 _leave("UDP empty message"); 47 _leave("UDP empty message");
47 kfree_skb(skb); 48 kfree_skb(skb);
48 return; 49 return;
@@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
50 51
51 rxrpc_new_skb(skb); 52 rxrpc_new_skb(skb);
52 53
53 serr = SKB_EXT_ERR(skb);
54 addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); 54 addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
55 port = serr->port; 55 port = serr->port;
56 56
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 6742200b1307..fbb7ebfc58c6 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
228 * to replay the request. 228 * to replay the request.
229 */ 229 */
230 module_put(em->ops->owner); 230 module_put(em->ops->owner);
231 em->ops = NULL;
231 err = -EAGAIN; 232 err = -EAGAIN;
232 } 233 }
233#endif 234#endif
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index abbb7dcd1689..59eeed43eda2 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -217,6 +217,8 @@ static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
217 217
218 for (i = 0; i < arg->npages && arg->pages[i]; i++) 218 for (i = 0; i < arg->npages && arg->pages[i]; i++)
219 __free_page(arg->pages[i]); 219 __free_page(arg->pages[i]);
220
221 kfree(arg->pages);
220} 222}
221 223
222static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg) 224static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 224a82f24d3c..1095be9c80ab 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -463,6 +463,8 @@ static int rsc_parse(struct cache_detail *cd,
463 /* number of additional gid's */ 463 /* number of additional gid's */
464 if (get_int(&mesg, &N)) 464 if (get_int(&mesg, &N))
465 goto out; 465 goto out;
466 if (N < 0 || N > NGROUPS_MAX)
467 goto out;
466 status = -ENOMEM; 468 status = -ENOMEM;
467 rsci.cred.cr_group_info = groups_alloc(N); 469 rsci.cred.cr_group_info = groups_alloc(N);
468 if (rsci.cred.cr_group_info == NULL) 470 if (rsci.cred.cr_group_info == NULL)
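
The rsc_parse() hunk above bounds-checks the externally supplied count of supplementary gids before it reaches groups_alloc(). A hedged userspace sketch of the same rule, validating an untrusted element count before it sizes an allocation; MAX_GROUPS below is an assumed stand-in, not the kernel's NGROUPS_MAX:

#include <errno.h>
#include <stdlib.h>

#define MAX_GROUPS 65536	/* assumption: stands in for NGROUPS_MAX */

/* Reject an untrusted count before it drives an allocation, as the
 * rsc_parse() hunk above now does for the supplementary gid list. */
static int *alloc_groups(long n, int *err)
{
	int *g;

	if (n < 0 || n > MAX_GROUPS) {
		*err = -EINVAL;
		return NULL;
	}
	g = calloc((size_t)n, sizeof(*g));
	if (!g)
		*err = -ENOMEM;
	return g;
}
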
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 33fb105d4352..5199bb1a017e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -921,7 +921,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait,
921 poll_wait(filp, &queue_wait, wait); 921 poll_wait(filp, &queue_wait, wait);
922 922
923 /* alway allow write */ 923 /* alway allow write */
924 mask = POLL_OUT | POLLWRNORM; 924 mask = POLLOUT | POLLWRNORM;
925 925
926 if (!rp) 926 if (!rp)
927 return mask; 927 return mask;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 7e9acd9361c5..91ffde82fa0c 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -738,8 +738,9 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
738 struct rpc_xprt *xprt = rep->rr_xprt; 738 struct rpc_xprt *xprt = rep->rr_xprt;
739 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 739 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
740 __be32 *iptr; 740 __be32 *iptr;
741 int credits, rdmalen, status; 741 int rdmalen, status;
742 unsigned long cwnd; 742 unsigned long cwnd;
743 u32 credits;
743 744
744 /* Check status. If bad, signal disconnect and return rep to pool */ 745 /* Check status. If bad, signal disconnect and return rep to pool */
 745 if (rep->rr_len == ~0U) { 746 if (rep->rr_len == ~0U) {
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index d1b70397c60f..0a16fb6f0885 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -285,7 +285,7 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
285 */ 285 */
286struct rpcrdma_buffer { 286struct rpcrdma_buffer {
287 spinlock_t rb_lock; /* protects indexes */ 287 spinlock_t rb_lock; /* protects indexes */
288 int rb_max_requests;/* client max requests */ 288 u32 rb_max_requests;/* client max requests */
289 struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */ 289 struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */
290 struct list_head rb_all; 290 struct list_head rb_all;
291 int rb_send_index; 291 int rb_send_index;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a4cf364316de..14f09b3cb87c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr)
464 /* Clean up all queues, except inputq: */ 464 /* Clean up all queues, except inputq: */
465 __skb_queue_purge(&l_ptr->outqueue); 465 __skb_queue_purge(&l_ptr->outqueue);
466 __skb_queue_purge(&l_ptr->deferred_queue); 466 __skb_queue_purge(&l_ptr->deferred_queue);
467 skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq); 467 if (!owner->inputq)
468 if (!skb_queue_empty(&l_ptr->inputq)) 468 owner->inputq = &l_ptr->inputq;
469 skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
470 if (!skb_queue_empty(owner->inputq))
469 owner->action_flags |= TIPC_MSG_EVT; 471 owner->action_flags |= TIPC_MSG_EVT;
470 owner->inputq = &l_ptr->inputq;
471 l_ptr->next_out = NULL; 472 l_ptr->next_out = NULL;
472 l_ptr->unacked_window = 0; 473 l_ptr->unacked_window = 0;
473 l_ptr->checkpoint = 1; 474 l_ptr->checkpoint = 1;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f73e975af80b..b4d4467d0bb0 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2364,8 +2364,6 @@ int tipc_sk_rht_init(struct net *net)
2364 .hashfn = jhash, 2364 .hashfn = jhash,
2365 .max_shift = 20, /* 1M */ 2365 .max_shift = 20, /* 1M */
2366 .min_shift = 8, /* 256 */ 2366 .min_shift = 8, /* 256 */
2367 .grow_decision = rht_grow_above_75,
2368 .shrink_decision = rht_shrink_below_30,
2369 }; 2367 };
2370 2368
2371 return rhashtable_init(&tn->sk_rht, &rht_params); 2369 return rhashtable_init(&tn->sk_rht, &rht_params);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3af0ecf1cc16..2a0bbd22854b 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1199,6 +1199,7 @@ out_fail_wq:
1199 regulatory_exit(); 1199 regulatory_exit();
1200out_fail_reg: 1200out_fail_reg:
1201 debugfs_remove(ieee80211_debugfs_dir); 1201 debugfs_remove(ieee80211_debugfs_dir);
1202 nl80211_exit();
1202out_fail_nl80211: 1203out_fail_nl80211:
1203 unregister_netdevice_notifier(&cfg80211_netdev_notifier); 1204 unregister_netdevice_notifier(&cfg80211_netdev_notifier);
1204out_fail_notifier: 1205out_fail_notifier:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d78fd8b54515..be2501538011 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2654,10 +2654,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2654 return err; 2654 return err;
2655 } 2655 }
2656 2656
2657 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2658 if (!msg)
2659 return -ENOMEM;
2660
2661 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? 2657 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
2662 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, 2658 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
2663 &flags); 2659 &flags);
@@ -2666,6 +2662,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2666 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) 2662 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
2667 return -EOPNOTSUPP; 2663 return -EOPNOTSUPP;
2668 2664
2665 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2666 if (!msg)
2667 return -ENOMEM;
2668
2669 wdev = rdev_add_virtual_intf(rdev, 2669 wdev = rdev_add_virtual_intf(rdev,
2670 nla_data(info->attrs[NL80211_ATTR_IFNAME]), 2670 nla_data(info->attrs[NL80211_ATTR_IFNAME]),
2671 type, err ? NULL : &flags, &params); 2671 type, err ? NULL : &flags, &params);
@@ -12528,9 +12528,7 @@ static int cfg80211_net_detect_results(struct sk_buff *msg,
12528 } 12528 }
12529 12529
12530 for (j = 0; j < match->n_channels; j++) { 12530 for (j = 0; j < match->n_channels; j++) {
12531 if (nla_put_u32(msg, 12531 if (nla_put_u32(msg, j, match->channels[j])) {
12532 NL80211_ATTR_WIPHY_FREQ,
12533 match->channels[j])) {
12534 nla_nest_cancel(msg, nl_freqs); 12532 nla_nest_cancel(msg, nl_freqs);
12535 nla_nest_cancel(msg, nl_match); 12533 nla_nest_cancel(msg, nl_match);
12536 goto out; 12534 goto out;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index b586d0dcb09e..48dfc7b4e981 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -228,7 +228,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
228 228
229/* We keep a static world regulatory domain in case of the absence of CRDA */ 229/* We keep a static world regulatory domain in case of the absence of CRDA */
230static const struct ieee80211_regdomain world_regdom = { 230static const struct ieee80211_regdomain world_regdom = {
231 .n_reg_rules = 6, 231 .n_reg_rules = 8,
232 .alpha2 = "00", 232 .alpha2 = "00",
233 .reg_rules = { 233 .reg_rules = {
234 /* IEEE 802.11b/g, channels 1..11 */ 234 /* IEEE 802.11b/g, channels 1..11 */
diff --git a/sound/core/control.c b/sound/core/control.c
index 35324a8e83c8..eeb691d1911f 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1170,6 +1170,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1170 1170
1171 if (info->count < 1) 1171 if (info->count < 1)
1172 return -EINVAL; 1172 return -EINVAL;
1173 if (!*info->id.name)
1174 return -EINVAL;
1175 if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
1176 return -EINVAL;
1173 access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : 1177 access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
1174 (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| 1178 (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
1175 SNDRV_CTL_ELEM_ACCESS_INACTIVE| 1179 SNDRV_CTL_ELEM_ACCESS_INACTIVE|
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index f62780ed64ad..7821b07415a7 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -105,6 +105,8 @@ static void snd_opl3_calc_pitch(unsigned char *fnum, unsigned char *blocknum,
105 int pitchbend = chan->midi_pitchbend; 105 int pitchbend = chan->midi_pitchbend;
106 int segment; 106 int segment;
107 107
108 if (pitchbend < -0x2000)
109 pitchbend = -0x2000;
108 if (pitchbend > 0x1FFF) 110 if (pitchbend > 0x1FFF)
109 pitchbend = 0x1FFF; 111 pitchbend = 0x1FFF;
110 112
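
The opl3 hunk above adds the missing lower bound, so the signed 14-bit MIDI pitch-bend offset is now clamped on both ends of the [-0x2000, 0x1FFF] range rather than only at the top. A minimal standalone sketch of the complete clamp:

#include <stdio.h>

/* Clamp a MIDI pitch-bend offset to the signed 14-bit range. */
static int clamp_pitchbend(int pitchbend)
{
	if (pitchbend < -0x2000)
		pitchbend = -0x2000;
	if (pitchbend > 0x1FFF)
		pitchbend = 0x1FFF;
	return pitchbend;
}

int main(void)
{
	printf("%d %d %d\n",
	       clamp_pitchbend(-0x3000),	/* -8192 */
	       clamp_pitchbend(0),		/* 0 */
	       clamp_pitchbend(0x2400));	/* 8191 */
	return 0;
}
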
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
index 5f17b77ee152..f0e4d502d604 100644
--- a/sound/firewire/iso-resources.c
+++ b/sound/firewire/iso-resources.c
@@ -26,7 +26,7 @@
26int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) 26int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
27{ 27{
28 r->channels_mask = ~0uLL; 28 r->channels_mask = ~0uLL;
29 r->unit = fw_unit_get(unit); 29 r->unit = unit;
30 mutex_init(&r->mutex); 30 mutex_init(&r->mutex);
31 r->allocated = false; 31 r->allocated = false;
32 32
@@ -42,7 +42,6 @@ void fw_iso_resources_destroy(struct fw_iso_resources *r)
42{ 42{
43 WARN_ON(r->allocated); 43 WARN_ON(r->allocated);
44 mutex_destroy(&r->mutex); 44 mutex_destroy(&r->mutex);
45 fw_unit_put(r->unit);
46} 45}
47EXPORT_SYMBOL(fw_iso_resources_destroy); 46EXPORT_SYMBOL(fw_iso_resources_destroy);
48 47
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index 29ccb3637164..e6757cd85724 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -171,9 +171,10 @@ static int start_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream,
171 } 171 }
172 172
173 /* Wait first packet */ 173 /* Wait first packet */
174 err = amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT); 174 if (!amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT)) {
175 if (err < 0)
176 stop_stream(oxfw, stream); 175 stop_stream(oxfw, stream);
176 err = -ETIMEDOUT;
177 }
177end: 178end:
178 return err; 179 return err;
179} 180}
diff --git a/sound/isa/msnd/msnd_pinnacle_mixer.c b/sound/isa/msnd/msnd_pinnacle_mixer.c
index 17e49a071af4..b408540798c1 100644
--- a/sound/isa/msnd/msnd_pinnacle_mixer.c
+++ b/sound/isa/msnd/msnd_pinnacle_mixer.c
@@ -306,11 +306,12 @@ int snd_msndmix_new(struct snd_card *card)
306 spin_lock_init(&chip->mixer_lock); 306 spin_lock_init(&chip->mixer_lock);
307 strcpy(card->mixername, "MSND Pinnacle Mixer"); 307 strcpy(card->mixername, "MSND Pinnacle Mixer");
308 308
309 for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) 309 for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) {
310 err = snd_ctl_add(card, 310 err = snd_ctl_add(card,
311 snd_ctl_new1(snd_msnd_controls + idx, chip)); 311 snd_ctl_new1(snd_msnd_controls + idx, chip));
312 if (err < 0) 312 if (err < 0)
313 return err; 313 return err;
314 }
314 315
315 return 0; 316 return 0;
316} 317}
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index a2ce773bdc62..17c2637d842c 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1164,7 +1164,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1164 } 1164 }
1165 } 1165 }
1166 1166
1167 if (!bus->no_response_fallback) 1167 if (bus->no_response_fallback)
1168 return -1; 1168 return -1;
1169 1169
1170 if (!chip->polling_mode && chip->poll_count < 2) { 1170 if (!chip->polling_mode && chip->poll_count < 2) {
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b680b4ec6331..fe18071bf93a 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -692,7 +692,23 @@ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
692{ 692{
693 unsigned int caps = query_amp_caps(codec, nid, dir); 693 unsigned int caps = query_amp_caps(codec, nid, dir);
694 int val = get_amp_val_to_activate(codec, nid, dir, caps, false); 694 int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
695 snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); 695
696 if (get_wcaps(codec, nid) & AC_WCAP_STEREO)
697 snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
698 else
699 snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
700}
701
702/* update the amp, doing in stereo or mono depending on NID */
703static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
704 unsigned int mask, unsigned int val)
705{
706 if (get_wcaps(codec, nid) & AC_WCAP_STEREO)
707 return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
708 mask, val);
709 else
710 return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
711 mask, val);
696} 712}
697 713
698/* calculate amp value mask we can modify; 714/* calculate amp value mask we can modify;
@@ -732,7 +748,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
732 return; 748 return;
733 749
734 val &= mask; 750 val &= mask;
735 snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val); 751 update_amp(codec, nid, dir, idx, mask, val);
736} 752}
737 753
738static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, 754static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
@@ -4424,13 +4440,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
4424 has_amp = nid_has_mute(codec, mix, HDA_INPUT); 4440 has_amp = nid_has_mute(codec, mix, HDA_INPUT);
4425 for (i = 0; i < nums; i++) { 4441 for (i = 0; i < nums; i++) {
4426 if (has_amp) 4442 if (has_amp)
4427 snd_hda_codec_amp_stereo(codec, mix, 4443 update_amp(codec, mix, HDA_INPUT, i,
4428 HDA_INPUT, i, 4444 0xff, HDA_AMP_MUTE);
4429 0xff, HDA_AMP_MUTE);
4430 else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) 4445 else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
4431 snd_hda_codec_amp_stereo(codec, conn[i], 4446 update_amp(codec, conn[i], HDA_OUTPUT, 0,
4432 HDA_OUTPUT, 0, 4447 0xff, HDA_AMP_MUTE);
4433 0xff, HDA_AMP_MUTE);
4434 } 4448 }
4435} 4449}
4436 4450
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 1589c9bcce3e..dd2b3d92071f 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
393 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), 393 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
394 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), 394 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
395 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), 395 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
396 SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
396 SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), 397 SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
397 SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), 398 SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
398 {} /* terminator */ 399 {} /* terminator */
@@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec)
584 return -ENOMEM; 585 return -ENOMEM;
585 586
586 spec->gen.automute_hook = cs_automute; 587 spec->gen.automute_hook = cs_automute;
588 codec->single_adc_amp = 1;
587 589
588 snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, 590 snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
589 cs420x_fixups); 591 cs420x_fixups);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index fd3ed18670e9..da67ea8645a6 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -223,6 +223,7 @@ enum {
223 CXT_PINCFG_LENOVO_TP410, 223 CXT_PINCFG_LENOVO_TP410,
224 CXT_PINCFG_LEMOTE_A1004, 224 CXT_PINCFG_LEMOTE_A1004,
225 CXT_PINCFG_LEMOTE_A1205, 225 CXT_PINCFG_LEMOTE_A1205,
226 CXT_PINCFG_COMPAQ_CQ60,
226 CXT_FIXUP_STEREO_DMIC, 227 CXT_FIXUP_STEREO_DMIC,
227 CXT_FIXUP_INC_MIC_BOOST, 228 CXT_FIXUP_INC_MIC_BOOST,
228 CXT_FIXUP_HEADPHONE_MIC_PIN, 229 CXT_FIXUP_HEADPHONE_MIC_PIN,
@@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = {
660 .type = HDA_FIXUP_PINS, 661 .type = HDA_FIXUP_PINS,
661 .v.pins = cxt_pincfg_lemote, 662 .v.pins = cxt_pincfg_lemote,
662 }, 663 },
664 [CXT_PINCFG_COMPAQ_CQ60] = {
665 .type = HDA_FIXUP_PINS,
666 .v.pins = (const struct hda_pintbl[]) {
667 /* 0x17 was falsely set up as a mic, it should 0x1d */
668 { 0x17, 0x400001f0 },
669 { 0x1d, 0x97a70120 },
670 { }
671 }
672 },
663 [CXT_FIXUP_STEREO_DMIC] = { 673 [CXT_FIXUP_STEREO_DMIC] = {
664 .type = HDA_FIXUP_FUNC, 674 .type = HDA_FIXUP_FUNC,
665 .v.func = cxt_fixup_stereo_dmic, 675 .v.func = cxt_fixup_stereo_dmic,
@@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = {
769}; 779};
770 780
771static const struct snd_pci_quirk cxt5051_fixups[] = { 781static const struct snd_pci_quirk cxt5051_fixups[] = {
782 SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
772 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), 783 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
773 {} 784 {}
774}; 785};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b2b24a8b3dac..526398a4a442 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5209,6 +5209,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5209 {0x17, 0x40000000}, 5209 {0x17, 0x40000000},
5210 {0x1d, 0x40700001}, 5210 {0x1d, 0x40700001},
5211 {0x21, 0x02211040}), 5211 {0x21, 0x02211040}),
5212 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5213 ALC255_STANDARD_PINS,
5214 {0x12, 0x90a60170},
5215 {0x14, 0x90170140},
5216 {0x17, 0x40000000},
5217 {0x1d, 0x40700001},
5218 {0x21, 0x02211050}),
5212 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 5219 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5213 {0x12, 0x90a60130}, 5220 {0x12, 0x90a60130},
5214 {0x13, 0x40000000}, 5221 {0x13, 0x40000000},
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index f5ad214663f9..8de836165cf2 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -46,8 +46,6 @@
46#include <sound/pcm_params.h> 46#include <sound/pcm_params.h>
47#include <sound/soc.h> 47#include <sound/soc.h>
48 48
49#include <asm/mach-types.h>
50
51#include "../codecs/wm8731.h" 49#include "../codecs/wm8731.h"
52#include "atmel-pcm.h" 50#include "atmel-pcm.h"
53#include "atmel_ssc_dai.h" 51#include "atmel_ssc_dai.h"
@@ -171,9 +169,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
171 int ret; 169 int ret;
172 170
173 if (!np) { 171 if (!np) {
174 if (!(machine_is_at91sam9g20ek() || 172 return -ENODEV;
175 machine_is_at91sam9g20ek_2mmc()))
176 return -ENODEV;
177 } 173 }
178 174
179 ret = atmel_ssc_set_audio(0); 175 ret = atmel_ssc_set_audio(0);
@@ -210,39 +206,37 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
210 card->dev = &pdev->dev; 206 card->dev = &pdev->dev;
211 207
212 /* Parse device node info */ 208 /* Parse device node info */
213 if (np) { 209 ret = snd_soc_of_parse_card_name(card, "atmel,model");
214 ret = snd_soc_of_parse_card_name(card, "atmel,model"); 210 if (ret)
215 if (ret) 211 goto err;
216 goto err; 212
217 213 ret = snd_soc_of_parse_audio_routing(card,
218 ret = snd_soc_of_parse_audio_routing(card, 214 "atmel,audio-routing");
219 "atmel,audio-routing"); 215 if (ret)
220 if (ret) 216 goto err;
221 goto err; 217
222 218 /* Parse codec info */
223 /* Parse codec info */ 219 at91sam9g20ek_dai.codec_name = NULL;
224 at91sam9g20ek_dai.codec_name = NULL; 220 codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
225 codec_np = of_parse_phandle(np, "atmel,audio-codec", 0); 221 if (!codec_np) {
226 if (!codec_np) { 222 dev_err(&pdev->dev, "codec info missing\n");
227 dev_err(&pdev->dev, "codec info missing\n"); 223 return -EINVAL;
228 return -EINVAL; 224 }
229 } 225 at91sam9g20ek_dai.codec_of_node = codec_np;
230 at91sam9g20ek_dai.codec_of_node = codec_np; 226
231 227 /* Parse dai and platform info */
232 /* Parse dai and platform info */ 228 at91sam9g20ek_dai.cpu_dai_name = NULL;
233 at91sam9g20ek_dai.cpu_dai_name = NULL; 229 at91sam9g20ek_dai.platform_name = NULL;
234 at91sam9g20ek_dai.platform_name = NULL; 230 cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
235 cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0); 231 if (!cpu_np) {
236 if (!cpu_np) { 232 dev_err(&pdev->dev, "dai and pcm info missing\n");
237 dev_err(&pdev->dev, "dai and pcm info missing\n"); 233 return -EINVAL;
238 return -EINVAL;
239 }
240 at91sam9g20ek_dai.cpu_of_node = cpu_np;
241 at91sam9g20ek_dai.platform_of_node = cpu_np;
242
243 of_node_put(codec_np);
244 of_node_put(cpu_np);
245 } 234 }
235 at91sam9g20ek_dai.cpu_of_node = cpu_np;
236 at91sam9g20ek_dai.platform_of_node = cpu_np;
237
238 of_node_put(codec_np);
239 of_node_put(cpu_np);
246 240
247 ret = snd_soc_register_card(card); 241 ret = snd_soc_register_card(card);
248 if (ret) { 242 if (ret) {
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 7b7fbcd49e5e..c7cd60f009e9 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -16,7 +16,7 @@ config SND_EP93XX_SOC_AC97
16 16
17config SND_EP93XX_SOC_SNAPPERCL15 17config SND_EP93XX_SOC_SNAPPERCL15
18 tristate "SoC Audio support for Bluewater Systems Snapper CL15 module" 18 tristate "SoC Audio support for Bluewater Systems Snapper CL15 module"
19 depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 19 depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C
20 select SND_EP93XX_SOC_I2S 20 select SND_EP93XX_SOC_I2S
21 select SND_SOC_TLV320AIC23_I2C 21 select SND_SOC_TLV320AIC23_I2C
22 help 22 help
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 064e6c18e109..ea9f0e31f9d4 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -69,7 +69,7 @@ config SND_SOC_ALL_CODECS
69 select SND_SOC_MAX98088 if I2C 69 select SND_SOC_MAX98088 if I2C
70 select SND_SOC_MAX98090 if I2C 70 select SND_SOC_MAX98090 if I2C
71 select SND_SOC_MAX98095 if I2C 71 select SND_SOC_MAX98095 if I2C
72 select SND_SOC_MAX98357A 72 select SND_SOC_MAX98357A if GPIOLIB
73 select SND_SOC_MAX9850 if I2C 73 select SND_SOC_MAX9850 if I2C
74 select SND_SOC_MAX9768 if I2C 74 select SND_SOC_MAX9768 if I2C
75 select SND_SOC_MAX9877 if I2C 75 select SND_SOC_MAX9877 if I2C
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 1806333ea29e..e9e6efbc21dd 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -12,9 +12,19 @@
12 * max98357a.c -- MAX98357A ALSA SoC Codec driver 12 * max98357a.c -- MAX98357A ALSA SoC Codec driver
13 */ 13 */
14 14
15#include <linux/module.h> 15#include <linux/device.h>
16#include <linux/err.h>
16#include <linux/gpio.h> 17#include <linux/gpio.h>
18#include <linux/gpio/consumer.h>
19#include <linux/kernel.h>
20#include <linux/mod_devicetable.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/platform_device.h>
24#include <sound/pcm.h>
17#include <sound/soc.h> 25#include <sound/soc.h>
26#include <sound/soc-dai.h>
27#include <sound/soc-dapm.h>
18 28
19#define DRV_NAME "max98357a" 29#define DRV_NAME "max98357a"
20 30
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index e1a4a45c57e2..fd102613d20d 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -225,7 +225,6 @@ static bool rt5670_volatile_register(struct device *dev, unsigned int reg)
225 case RT5670_ADC_EQ_CTRL1: 225 case RT5670_ADC_EQ_CTRL1:
226 case RT5670_EQ_CTRL1: 226 case RT5670_EQ_CTRL1:
227 case RT5670_ALC_CTRL_1: 227 case RT5670_ALC_CTRL_1:
228 case RT5670_IRQ_CTRL1:
229 case RT5670_IRQ_CTRL2: 228 case RT5670_IRQ_CTRL2:
230 case RT5670_INT_IRQ_ST: 229 case RT5670_INT_IRQ_ST:
231 case RT5670_IL_CMD: 230 case RT5670_IL_CMD:
@@ -2703,6 +2702,12 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
2703 2702
2704 regmap_write(rt5670->regmap, RT5670_RESET, 0); 2703 regmap_write(rt5670->regmap, RT5670_RESET, 0);
2705 2704
2705 regmap_read(rt5670->regmap, RT5670_VENDOR_ID, &val);
2706 if (val >= 4)
2707 regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0980);
2708 else
2709 regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0d00);
2710
2706 ret = regmap_register_patch(rt5670->regmap, init_list, 2711 ret = regmap_register_patch(rt5670->regmap, init_list,
2707 ARRAY_SIZE(init_list)); 2712 ARRAY_SIZE(init_list));
2708 if (ret != 0) 2713 if (ret != 0)
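The rt5670 probe now keys its GPIO pad setup off the silicon revision read back from the vendor-ID register. A hedged sketch of that read-then-branch idiom, with an error check on regmap_read() added for illustration:

#include <linux/regmap.h>

/* Sketch: register names follow the hunk above; 0x0980 and 0x0d00 are
 * the values written there for newer (ID >= 4) and older parts. */
static int rt5670_apply_gpio_setup(struct regmap *map)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, RT5670_VENDOR_ID, &val);
	if (ret)
		return ret;

	return regmap_write(map, RT5670_GPIO_CTRL3,
			    val >= 4 ? 0x0980 : 0x0d00);
}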
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 5d0bb8748dd1..fb9c20eace3f 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -3284,8 +3284,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
3284 { "IB45 Bypass Mux", "Bypass", "IB45 Mux" }, 3284 { "IB45 Bypass Mux", "Bypass", "IB45 Mux" },
3285 { "IB45 Bypass Mux", "Pass SRC", "IB45 Mux" }, 3285 { "IB45 Bypass Mux", "Pass SRC", "IB45 Mux" },
3286 3286
3287 { "IB6 Mux", "IF1 DAC 6", "IF1 DAC6" }, 3287 { "IB6 Mux", "IF1 DAC 6", "IF1 DAC6 Mux" },
3288 { "IB6 Mux", "IF2 DAC 6", "IF2 DAC6" }, 3288 { "IB6 Mux", "IF2 DAC 6", "IF2 DAC6 Mux" },
3289 { "IB6 Mux", "SLB DAC 6", "SLB DAC6" }, 3289 { "IB6 Mux", "SLB DAC 6", "SLB DAC6" },
3290 { "IB6 Mux", "STO4 ADC MIX L", "Stereo4 ADC MIXL" }, 3290 { "IB6 Mux", "STO4 ADC MIX L", "Stereo4 ADC MIXL" },
3291 { "IB6 Mux", "IF4 DAC L", "IF4 DAC L" }, 3291 { "IB6 Mux", "IF4 DAC L", "IF4 DAC L" },
@@ -3293,8 +3293,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
3293 { "IB6 Mux", "STO2 ADC MIX L", "Stereo2 ADC MIXL" }, 3293 { "IB6 Mux", "STO2 ADC MIX L", "Stereo2 ADC MIXL" },
3294 { "IB6 Mux", "STO3 ADC MIX L", "Stereo3 ADC MIXL" }, 3294 { "IB6 Mux", "STO3 ADC MIX L", "Stereo3 ADC MIXL" },
3295 3295
3296 { "IB7 Mux", "IF1 DAC 7", "IF1 DAC7" }, 3296 { "IB7 Mux", "IF1 DAC 7", "IF1 DAC7 Mux" },
3297 { "IB7 Mux", "IF2 DAC 7", "IF2 DAC7" }, 3297 { "IB7 Mux", "IF2 DAC 7", "IF2 DAC7 Mux" },
3298 { "IB7 Mux", "SLB DAC 7", "SLB DAC7" }, 3298 { "IB7 Mux", "SLB DAC 7", "SLB DAC7" },
3299 { "IB7 Mux", "STO4 ADC MIX R", "Stereo4 ADC MIXR" }, 3299 { "IB7 Mux", "STO4 ADC MIX R", "Stereo4 ADC MIXR" },
3300 { "IB7 Mux", "IF4 DAC R", "IF4 DAC R" }, 3300 { "IB7 Mux", "IF4 DAC R", "IF4 DAC R" },
@@ -3635,15 +3635,15 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
3635 { "DAC1 FS", NULL, "DAC1 MIXL" }, 3635 { "DAC1 FS", NULL, "DAC1 MIXL" },
3636 { "DAC1 FS", NULL, "DAC1 MIXR" }, 3636 { "DAC1 FS", NULL, "DAC1 MIXR" },
3637 3637
3638 { "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2" }, 3638 { "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2 Mux" },
3639 { "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2" }, 3639 { "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2 Mux" },
3640 { "DAC2 L Mux", "IF3 DAC L", "IF3 DAC L" }, 3640 { "DAC2 L Mux", "IF3 DAC L", "IF3 DAC L" },
3641 { "DAC2 L Mux", "IF4 DAC L", "IF4 DAC L" }, 3641 { "DAC2 L Mux", "IF4 DAC L", "IF4 DAC L" },
3642 { "DAC2 L Mux", "SLB DAC 2", "SLB DAC2" }, 3642 { "DAC2 L Mux", "SLB DAC 2", "SLB DAC2" },
3643 { "DAC2 L Mux", "OB 2", "OutBound2" }, 3643 { "DAC2 L Mux", "OB 2", "OutBound2" },
3644 3644
3645 { "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3" }, 3645 { "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3 Mux" },
3646 { "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3" }, 3646 { "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3 Mux" },
3647 { "DAC2 R Mux", "IF3 DAC R", "IF3 DAC R" }, 3647 { "DAC2 R Mux", "IF3 DAC R", "IF3 DAC R" },
3648 { "DAC2 R Mux", "IF4 DAC R", "IF4 DAC R" }, 3648 { "DAC2 R Mux", "IF4 DAC R", "IF4 DAC R" },
3649 { "DAC2 R Mux", "SLB DAC 3", "SLB DAC3" }, 3649 { "DAC2 R Mux", "SLB DAC 3", "SLB DAC3" },
@@ -3651,29 +3651,29 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
3651 { "DAC2 R Mux", "Haptic Generator", "Haptic Generator" }, 3651 { "DAC2 R Mux", "Haptic Generator", "Haptic Generator" },
3652 { "DAC2 R Mux", "VAD ADC", "VAD ADC Mux" }, 3652 { "DAC2 R Mux", "VAD ADC", "VAD ADC Mux" },
3653 3653
3654 { "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4" }, 3654 { "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4 Mux" },
3655 { "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4" }, 3655 { "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4 Mux" },
3656 { "DAC3 L Mux", "IF3 DAC L", "IF3 DAC L" }, 3656 { "DAC3 L Mux", "IF3 DAC L", "IF3 DAC L" },
3657 { "DAC3 L Mux", "IF4 DAC L", "IF4 DAC L" }, 3657 { "DAC3 L Mux", "IF4 DAC L", "IF4 DAC L" },
3658 { "DAC3 L Mux", "SLB DAC 4", "SLB DAC4" }, 3658 { "DAC3 L Mux", "SLB DAC 4", "SLB DAC4" },
3659 { "DAC3 L Mux", "OB 4", "OutBound4" }, 3659 { "DAC3 L Mux", "OB 4", "OutBound4" },
3660 3660
3661 { "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC4" }, 3661 { "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC5 Mux" },
3662 { "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC4" }, 3662 { "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC5 Mux" },
3663 { "DAC3 R Mux", "IF3 DAC R", "IF3 DAC R" }, 3663 { "DAC3 R Mux", "IF3 DAC R", "IF3 DAC R" },
3664 { "DAC3 R Mux", "IF4 DAC R", "IF4 DAC R" }, 3664 { "DAC3 R Mux", "IF4 DAC R", "IF4 DAC R" },
3665 { "DAC3 R Mux", "SLB DAC 5", "SLB DAC5" }, 3665 { "DAC3 R Mux", "SLB DAC 5", "SLB DAC5" },
3666 { "DAC3 R Mux", "OB 5", "OutBound5" }, 3666 { "DAC3 R Mux", "OB 5", "OutBound5" },
3667 3667
3668 { "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6" }, 3668 { "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6 Mux" },
3669 { "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6" }, 3669 { "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6 Mux" },
3670 { "DAC4 L Mux", "IF3 DAC L", "IF3 DAC L" }, 3670 { "DAC4 L Mux", "IF3 DAC L", "IF3 DAC L" },
3671 { "DAC4 L Mux", "IF4 DAC L", "IF4 DAC L" }, 3671 { "DAC4 L Mux", "IF4 DAC L", "IF4 DAC L" },
3672 { "DAC4 L Mux", "SLB DAC 6", "SLB DAC6" }, 3672 { "DAC4 L Mux", "SLB DAC 6", "SLB DAC6" },
3673 { "DAC4 L Mux", "OB 6", "OutBound6" }, 3673 { "DAC4 L Mux", "OB 6", "OutBound6" },
3674 3674
3675 { "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7" }, 3675 { "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7 Mux" },
3676 { "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7" }, 3676 { "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7 Mux" },
3677 { "DAC4 R Mux", "IF3 DAC R", "IF3 DAC R" }, 3677 { "DAC4 R Mux", "IF3 DAC R", "IF3 DAC R" },
3678 { "DAC4 R Mux", "IF4 DAC R", "IF4 DAC R" }, 3678 { "DAC4 R Mux", "IF4 DAC R", "IF4 DAC R" },
3679 { "DAC4 R Mux", "SLB DAC 7", "SLB DAC7" }, 3679 { "DAC4 R Mux", "SLB DAC 7", "SLB DAC7" },
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 3a1343fa109b..007a0e3bc273 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -106,13 +106,11 @@ static const struct reg_default sta32x_regs[] = {
106}; 106};
107 107
108static const struct regmap_range sta32x_write_regs_range[] = { 108static const struct regmap_range sta32x_write_regs_range[] = {
109 regmap_reg_range(STA32X_CONFA, STA32X_AUTO2), 109 regmap_reg_range(STA32X_CONFA, STA32X_FDRC2),
110 regmap_reg_range(STA32X_C1CFG, STA32X_FDRC2),
111}; 110};
112 111
113static const struct regmap_range sta32x_read_regs_range[] = { 112static const struct regmap_range sta32x_read_regs_range[] = {
114 regmap_reg_range(STA32X_CONFA, STA32X_AUTO2), 113 regmap_reg_range(STA32X_CONFA, STA32X_FDRC2),
115 regmap_reg_range(STA32X_C1CFG, STA32X_FDRC2),
116}; 114};
117 115
118static const struct regmap_range sta32x_volatile_regs_range[] = { 116static const struct regmap_range sta32x_volatile_regs_range[] = {
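The sta32x change collapses each pair of regmap_reg_range() spans into a single contiguous CONFA..FDRC2 span. These range arrays are consumed through regmap access tables; a hedged sketch of that wiring, with the access-table name assumed from the array name rather than quoted from the driver:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_range sta32x_write_regs_range[] = {
	regmap_reg_range(STA32X_CONFA, STA32X_FDRC2),
};

/* Assumed consumer: the ranges are typically referenced from a
 * regmap_access_table that the driver's regmap_config points at. */
static const struct regmap_access_table sta32x_write_regs = {
	.yes_ranges	= sta32x_write_regs_range,
	.n_yes_ranges	= ARRAY_SIZE(sta32x_write_regs_range),
};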
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 75870c0ea2c9..91eb3aef7f02 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1049,7 +1049,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
1049 enum spdif_txrate index, bool round) 1049 enum spdif_txrate index, bool round)
1050{ 1050{
1051 const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; 1051 const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
1052 bool is_sysclk = clk == spdif_priv->sysclk; 1052 bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk);
1053 u64 rate_ideal, rate_actual, sub; 1053 u64 rate_ideal, rate_actual, sub;
1054 u32 sysclk_dfmin, sysclk_dfmax; 1054 u32 sysclk_dfmin, sysclk_dfmax;
1055 u32 txclk_df, sysclk_df, arate; 1055 u32 txclk_df, sysclk_df, arate;
@@ -1143,7 +1143,7 @@ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv,
1143 spdif_priv->txclk_src[index], rate[index]); 1143 spdif_priv->txclk_src[index], rate[index]);
1144 dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", 1144 dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n",
1145 spdif_priv->txclk_df[index], rate[index]); 1145 spdif_priv->txclk_df[index], rate[index]);
1146 if (spdif_priv->txclk[index] == spdif_priv->sysclk) 1146 if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk))
1147 dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", 1147 dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n",
1148 spdif_priv->sysclk_df[index], rate[index]); 1148 spdif_priv->sysclk_df[index], rate[index]);
1149 dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", 1149 dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n",
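Both fsl_spdif comparisons switch from pointer equality to clk_is_match(). With the common clock framework, two clk_get() calls can return distinct struct clk handles for the same underlying hardware clock, so pointer comparison can report a false mismatch; clk_is_match() compares the clocks themselves. A minimal sketch:

#include <linux/clk.h>

/* Sketch: true when the TX clock and the system clock are the same
 * hardware clock, even if the handles differ. */
static bool spdif_uses_sysclk(struct clk *txclk, struct clk *sysclk)
{
	return clk_is_match(txclk, sysclk);
}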
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 2595611e8a6d..b9fabbf69db6 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -603,10 +603,6 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
603 factor = (div2 + 1) * (7 * psr + 1) * 2; 603 factor = (div2 + 1) * (7 * psr + 1) * 2;
604 604
605 for (i = 0; i < 255; i++) { 605 for (i = 0; i < 255; i++) {
606 /* The bclk rate must be smaller than 1/5 sysclk rate */
607 if (factor * (i + 1) < 5)
608 continue;
609
610 tmprate = freq * factor * (i + 2); 606 tmprate = freq * factor * (i + 2);
611 607
612 if (baudclk_is_used) 608 if (baudclk_is_used)
@@ -614,6 +610,13 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
614 else 610 else
615 clkrate = clk_round_rate(ssi_private->baudclk, tmprate); 611 clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
616 612
613 /*
 614 * Hardware limitation: The bclk rate must never
 615 * be greater than 1/5 of the IPG clock rate
616 */
617 if (clkrate * 5 > clk_get_rate(ssi_private->clk))
618 continue;
619
617 clkrate /= factor; 620 clkrate /= factor;
618 afreq = clkrate / (i + 1); 621 afreq = clkrate / (i + 1);
619 622
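The fsl_ssi change re-expresses the bit-clock limit: instead of a check on the divider loop index, the candidate rate is first rounded by the clock framework and then rejected if it would exceed one fifth of the IPG clock. A hedged sketch of that constraint as a helper:

#include <linux/clk.h>

/* Sketch: a candidate bclk rate is acceptable only if, after rounding,
 * it stays at or below 1/5 of the IPG clock rate ("ipg_clk" stands in
 * for ssi_private->clk from the hunk). */
static bool bclk_rate_ok(struct clk *baudclk, struct clk *ipg_clk,
			 unsigned long tmprate)
{
	long clkrate = clk_round_rate(baudclk, tmprate);

	return clkrate >= 0 && clkrate * 5 <= clk_get_rate(ipg_clk);
}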
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index f7c6734bd5da..fb550b5869d2 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -372,6 +372,11 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
372 strlen(dai_link->cpu_dai_name) + 372 strlen(dai_link->cpu_dai_name) +
373 strlen(dai_link->codec_dai_name) + 2, 373 strlen(dai_link->codec_dai_name) + 2,
374 GFP_KERNEL); 374 GFP_KERNEL);
375 if (!name) {
376 ret = -ENOMEM;
377 goto dai_link_of_err;
378 }
379
375 sprintf(name, "%s-%s", dai_link->cpu_dai_name, 380 sprintf(name, "%s-%s", dai_link->cpu_dai_name,
376 dai_link->codec_dai_name); 381 dai_link->codec_dai_name);
377 dai_link->name = dai_link->stream_name = name; 382 dai_link->name = dai_link->stream_name = name;
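simple-card gains the missing out-of-memory check: the DAI-link name buffer comes from devm_kzalloc(), and without the check the following sprintf() would write through a NULL pointer. An alternative sketch, not what the patch does, is to let devm_kasprintf() fold the length computation, allocation and formatting into one call; the caller then checks a single NULL result:

#include <linux/device.h>
#include <linux/gfp.h>

/* Hypothetical helper for illustration only. */
static char *build_link_name(struct device *dev,
			     const char *cpu_dai, const char *codec_dai)
{
	return devm_kasprintf(dev, GFP_KERNEL, "%s-%s", cpu_dai, codec_dai);
}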
diff --git a/sound/soc/intel/sst-atom-controls.h b/sound/soc/intel/sst-atom-controls.h
index dfebfdd5eb2a..daecc58f28af 100644
--- a/sound/soc/intel/sst-atom-controls.h
+++ b/sound/soc/intel/sst-atom-controls.h
@@ -150,7 +150,7 @@ enum sst_cmd_type {
150 150
151enum sst_task { 151enum sst_task {
152 SST_TASK_SBA = 1, 152 SST_TASK_SBA = 1,
153 SST_TASK_MMX, 153 SST_TASK_MMX = 3,
154}; 154};
155 155
156enum sst_type { 156enum sst_type {
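The sst-atom-controls change pins SST_TASK_MMX to 3. With implicit enum numbering it would have followed SST_TASK_SBA as 2; spelling the value out keeps the enumerator aligned with the externally defined task id (presumably the firmware's numbering). The pitfall in miniature:

/* With no explicit value, the second enumerator silently becomes 2. */
enum sst_task {
	SST_TASK_SBA = 1,
	SST_TASK_MMX = 3,	/* must match the external task id */
};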
diff --git a/sound/soc/intel/sst/sst.c b/sound/soc/intel/sst/sst.c
index 8a8d56a146e7..11c578651c1c 100644
--- a/sound/soc/intel/sst/sst.c
+++ b/sound/soc/intel/sst/sst.c
@@ -350,7 +350,9 @@ static inline void sst_save_shim64(struct intel_sst_drv *ctx,
350 350
351 spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); 351 spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
352 352
353 shim_regs->imrx = sst_shim_read64(shim, SST_IMRX), 353 shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
354 shim_regs->csr = sst_shim_read64(shim, SST_CSR);
355
354 356
355 spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); 357 spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
356} 358}
@@ -367,6 +369,7 @@ static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
367 */ 369 */
368 spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); 370 spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
369 sst_shim_write64(shim, SST_IMRX, shim_regs->imrx), 371 sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
372 sst_shim_write64(shim, SST_CSR, shim_regs->csr),
370 spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); 373 spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
371} 374}
372 375
@@ -379,6 +382,10 @@ void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
379 * initially active. So change the state to active before 382 * initially active. So change the state to active before
380 * enabling the pm 383 * enabling the pm
381 */ 384 */
385
386 if (!acpi_disabled)
387 pm_runtime_set_active(ctx->dev);
388
382 pm_runtime_enable(ctx->dev); 389 pm_runtime_enable(ctx->dev);
383 390
384 if (acpi_disabled) 391 if (acpi_disabled)
@@ -409,6 +416,7 @@ static int intel_sst_runtime_suspend(struct device *dev)
409 synchronize_irq(ctx->irq_num); 416 synchronize_irq(ctx->irq_num);
410 flush_workqueue(ctx->post_msg_wq); 417 flush_workqueue(ctx->post_msg_wq);
411 418
419 ctx->ops->reset(ctx);
412 /* save the shim registers because PMC doesn't save state */ 420 /* save the shim registers because PMC doesn't save state */
413 sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64); 421 sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
414 422
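The sst.c hunks do three things: the shim save path now ends its statements with semicolons (the old comma operator compiled but read oddly) and also saves CSR, the suspend path resets the DSP before saving the shim registers, and runtime PM is marked active before it is enabled. The last point is the instructive one: the device is already powered at probe time, so its runtime-PM state has to be set to active first or the core would treat it as suspended. A minimal sketch of that ordering:

#include <linux/pm_runtime.h>

/* Sketch: publish the real power state before enabling runtime PM,
 * mirroring the !acpi_disabled branch added above. */
static void sketch_configure_runtime_pm(struct device *dev, bool powered)
{
	if (powered)
		pm_runtime_set_active(dev);

	pm_runtime_enable(dev);
}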
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index def7d8260c4e..d19483081f9b 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -579,7 +579,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
579 if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) 579 if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
580 return -EPROBE_DEFER; 580 return -EPROBE_DEFER;
581 } else { 581 } else {
582 if (priv->extclk == priv->clk) { 582 if (clk_is_match(priv->extclk, priv->clk)) {
583 devm_clk_put(&pdev->dev, priv->extclk); 583 devm_clk_put(&pdev->dev, priv->extclk);
584 priv->extclk = ERR_PTR(-EINVAL); 584 priv->extclk = ERR_PTR(-EINVAL);
585 } else { 585 } else {
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index ccfb41c22e53..f7eb42aa3f38 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -352,6 +352,9 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
352 return ret; 352 return ret;
353 353
354 card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); 354 card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
355 if (!card)
356 return -ENOMEM;
357
355 card->name = devm_kasprintf(dev, GFP_KERNEL, 358 card->name = devm_kasprintf(dev, GFP_KERNEL,
356 "HDMI %s", dev_name(ad->dssdev)); 359 "HDMI %s", dev_name(ad->dssdev));
357 card->owner = THIS_MODULE; 360 card->owner = THIS_MODULE;
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index c7eb9dd67f60..fd99d89de6a8 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -530,8 +530,19 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
530 530
531 case OMAP_MCBSP_SYSCLK_CLKX_EXT: 531 case OMAP_MCBSP_SYSCLK_CLKX_EXT:
532 regs->srgr2 |= CLKSM; 532 regs->srgr2 |= CLKSM;
533 regs->pcr0 |= SCLKME;
534 /*
535 * If McBSP is master but yet the CLKX/CLKR pin drives the SRG,
536 * disable output on those pins. This enables to inject the
537 * reference clock through CLKX/CLKR. For this to work
538 * set_dai_sysclk() _needs_ to be called after set_dai_fmt().
539 */
540 regs->pcr0 &= ~CLKXM;
541 break;
533 case OMAP_MCBSP_SYSCLK_CLKR_EXT: 542 case OMAP_MCBSP_SYSCLK_CLKR_EXT:
534 regs->pcr0 |= SCLKME; 543 regs->pcr0 |= SCLKME;
 544 /* Disable output on CLKR pin in master mode */
545 regs->pcr0 &= ~CLKRM;
535 break; 546 break;
536 default: 547 default:
537 err = -ENODEV; 548 err = -ENODEV;
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index f4b05bc23e4b..1343ecbf0bd5 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -201,7 +201,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
201 struct snd_pcm *pcm = rtd->pcm; 201 struct snd_pcm *pcm = rtd->pcm;
202 int ret; 202 int ret;
203 203
204 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64)); 204 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
205 if (ret) 205 if (ret)
206 return ret; 206 return ret;
207 207
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 3cebf6ca03df..0632a36852c8 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -174,7 +174,7 @@ config SND_SOC_SMDK_WM8994_PCM
174 174
175config SND_SOC_SPEYSIDE 175config SND_SOC_SPEYSIDE
176 tristate "Audio support for Wolfson Speyside" 176 tristate "Audio support for Wolfson Speyside"
177 depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 177 depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C && SPI_MASTER
178 select SND_SAMSUNG_I2S 178 select SND_SAMSUNG_I2S
179 select SND_SOC_WM8996 179 select SND_SOC_WM8996
180 select SND_SOC_WM9081 180 select SND_SOC_WM9081
@@ -189,7 +189,7 @@ config SND_SOC_TOBERMORY
189 189
190config SND_SOC_BELLS 190config SND_SOC_BELLS
191 tristate "Audio support for Wolfson Bells" 191 tristate "Audio support for Wolfson Bells"
192 depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA 192 depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA && I2C && SPI_MASTER
193 select SND_SAMSUNG_I2S 193 select SND_SAMSUNG_I2S
194 select SND_SOC_WM5102 194 select SND_SOC_WM5102
195 select SND_SOC_WM5110 195 select SND_SOC_WM5110
@@ -206,7 +206,7 @@ config SND_SOC_LOWLAND
206 206
207config SND_SOC_LITTLEMILL 207config SND_SOC_LITTLEMILL
208 tristate "Audio support for Wolfson Littlemill" 208 tristate "Audio support for Wolfson Littlemill"
209 depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 209 depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C
210 select SND_SAMSUNG_I2S 210 select SND_SAMSUNG_I2S
211 select MFD_WM8994 211 select MFD_WM8994
212 select SND_SOC_WM8994 212 select SND_SOC_WM8994
@@ -223,7 +223,7 @@ config SND_SOC_SNOW
223 223
224config SND_SOC_ODROIDX2 224config SND_SOC_ODROIDX2
225 tristate "Audio support for Odroid-X2 and Odroid-U3" 225 tristate "Audio support for Odroid-X2 and Odroid-U3"
226 depends on SND_SOC_SAMSUNG 226 depends on SND_SOC_SAMSUNG && I2C
227 select SND_SOC_MAX98090 227 select SND_SOC_MAX98090
228 select SND_SAMSUNG_I2S 228 select SND_SAMSUNG_I2S
229 help 229 help
@@ -231,6 +231,6 @@ config SND_SOC_ODROIDX2
231 231
232config SND_SOC_ARNDALE_RT5631_ALC5631 232config SND_SOC_ARNDALE_RT5631_ALC5631
233 tristate "Audio support for RT5631(ALC5631) on Arndale Board" 233 tristate "Audio support for RT5631(ALC5631) on Arndale Board"
234 depends on SND_SOC_SAMSUNG 234 depends on SND_SOC_SAMSUNG && I2C
235 select SND_SAMSUNG_I2S 235 select SND_SAMSUNG_I2S
236 select SND_SOC_RT5631 236 select SND_SOC_RT5631
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1b53605f7154..110577c52317 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1252,6 +1252,8 @@ static int rsnd_probe(struct platform_device *pdev)
1252 goto exit_snd_probe; 1252 goto exit_snd_probe;
1253 } 1253 }
1254 1254
1255 dev_set_drvdata(dev, priv);
1256
1255 /* 1257 /*
1256 * asoc register 1258 * asoc register
1257 */ 1259 */
@@ -1268,8 +1270,6 @@ static int rsnd_probe(struct platform_device *pdev)
1268 goto exit_snd_soc; 1270 goto exit_snd_soc;
1269 } 1271 }
1270 1272
1271 dev_set_drvdata(dev, priv);
1272
1273 pm_runtime_enable(dev); 1273 pm_runtime_enable(dev);
1274 1274
1275 dev_info(dev, "probed\n"); 1275 dev_info(dev, "probed\n");
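rsnd_probe() now publishes its private data with dev_set_drvdata() before the ASoC registration calls rather than after, so anything that runs during registration can already look the data up again. A small sketch of the consumer side; the callback itself is hypothetical, dev_get_drvdata() is the point:

#include <linux/device.h>

/* Hypothetical callback that might fire while the card is being
 * registered; it only works if dev_set_drvdata() ran first. */
static void hypothetical_registration_callback(struct device *dev)
{
	void *priv = dev_get_drvdata(dev);

	dev_dbg(dev, "driver data already available: %p\n", priv);
}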
diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c
index 05dee690f487..97ed593f6010 100644
--- a/sound/usb/line6/playback.c
+++ b/sound/usb/line6/playback.c
@@ -39,7 +39,7 @@ static void change_volume(struct urb *urb_out, int volume[],
39 for (; p < buf_end; ++p) { 39 for (; p < buf_end; ++p) {
40 short pv = le16_to_cpu(*p); 40 short pv = le16_to_cpu(*p);
41 int val = (pv * volume[chn & 1]) >> 8; 41 int val = (pv * volume[chn & 1]) >> 8;
42 pv = clamp(val, 0x7fff, -0x8000); 42 pv = clamp(val, -0x8000, 0x7fff);
43 *p = cpu_to_le16(pv); 43 *p = cpu_to_le16(pv);
44 ++chn; 44 ++chn;
45 } 45 }
@@ -54,7 +54,7 @@ static void change_volume(struct urb *urb_out, int volume[],
54 54
55 val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16); 55 val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16);
56 val = (val * volume[chn & 1]) >> 8; 56 val = (val * volume[chn & 1]) >> 8;
57 val = clamp(val, 0x7fffff, -0x800000); 57 val = clamp(val, -0x800000, 0x7fffff);
58 p[0] = val; 58 p[0] = val;
59 p[1] = val >> 8; 59 p[1] = val >> 8;
60 p[2] = val >> 16; 60 p[2] = val >> 16;
@@ -126,7 +126,7 @@ static void add_monitor_signal(struct urb *urb_out, unsigned char *signal,
126 short pov = le16_to_cpu(*po); 126 short pov = le16_to_cpu(*po);
127 short piv = le16_to_cpu(*pi); 127 short piv = le16_to_cpu(*pi);
128 int val = pov + ((piv * volume) >> 8); 128 int val = pov + ((piv * volume) >> 8);
129 pov = clamp(val, 0x7fff, -0x8000); 129 pov = clamp(val, -0x8000, 0x7fff);
130 *po = cpu_to_le16(pov); 130 *po = cpu_to_le16(pov);
131 } 131 }
132 } 132 }
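The line6 fixes all swap the clamp() bounds into the correct order. clamp(val, lo, hi) expands to min(max(val, lo), hi), so with the arguments reversed the result is always the second bound and every sample collapses to full-scale negative. A sketch of the corrected 16-bit saturation:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: scale a sample by volume/256 and saturate to the s16 range;
 * the bounds must be passed as (low, high). */
static s16 scale_and_saturate_s16(s16 sample, int volume)
{
	int val = (sample * volume) >> 8;

	return clamp(val, -0x8000, 0x7fff);
}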
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 67d476548dcf..07f984d5f516 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1773 } 1773 }
1774 } 1774 }
1775}, 1775},
1776{
1777 USB_DEVICE(0x0582, 0x0159),
1778 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
1779 /* .vendor_name = "Roland", */
1780 /* .product_name = "UA-22", */
1781 .ifnum = QUIRK_ANY_INTERFACE,
1782 .type = QUIRK_COMPOSITE,
1783 .data = (const struct snd_usb_audio_quirk[]) {
1784 {
1785 .ifnum = 0,
1786 .type = QUIRK_AUDIO_STANDARD_INTERFACE
1787 },
1788 {
1789 .ifnum = 1,
1790 .type = QUIRK_AUDIO_STANDARD_INTERFACE
1791 },
1792 {
1793 .ifnum = 2,
1794 .type = QUIRK_MIDI_FIXED_ENDPOINT,
1795 .data = & (const struct snd_usb_midi_endpoint_info) {
1796 .out_cables = 0x0001,
1797 .in_cables = 0x0001
1798 }
1799 },
1800 {
1801 .ifnum = -1
1802 }
1803 }
1804 }
1805},
1776/* this catches most recent vendor-specific Roland devices */ 1806/* this catches most recent vendor-specific Roland devices */
1777{ 1807{
1778 .match_flags = USB_DEVICE_ID_MATCH_VENDOR | 1808 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 3ed7c0476d48..2e2ba2efa0d9 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c
209 209
210$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) 210$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
211 $(ECHO) " CC " $@ 211 $(ECHO) " CC " $@
212 $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@ 212 $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
213 $(QUIET) $(STRIPCMD) $@ 213 $(QUIET) $(STRIPCMD) $@
214 214
215$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) 215$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index e238c9559caf..8d5d1d2ee7c1 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -30,7 +30,7 @@ static int execveat_(int fd, const char *path, char **argv, char **envp,
30#ifdef __NR_execveat 30#ifdef __NR_execveat
31 return syscall(__NR_execveat, fd, path, argv, envp, flags); 31 return syscall(__NR_execveat, fd, path, argv, envp, flags);
32#else 32#else
33 errno = -ENOSYS; 33 errno = ENOSYS;
34 return -1; 34 return -1;
35#endif 35#endif
36} 36}
@@ -234,6 +234,14 @@ static int run_tests(void)
234 int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); 234 int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC);
235 int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); 235 int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC);
236 236
237 /* Check if we have execveat at all, and bail early if not */
238 errno = 0;
239 execveat_(-1, NULL, NULL, NULL, 0);
240 if (errno == ENOSYS) {
241 printf("[FAIL] ENOSYS calling execveat - no kernel support?\n");
242 return 1;
243 }
244
237 /* Change file position to confirm it doesn't affect anything */ 245 /* Change file position to confirm it doesn't affect anything */
238 lseek(fd, 10, SEEK_SET); 246 lseek(fd, 10, SEEK_SET);
239 247
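The execveat selftest fixes illustrate the errno sign convention (errno carries positive error codes, so the fallback stub assigns ENOSYS, not -ENOSYS) and add an early probe so the suite bails out cleanly on kernels without the syscall. A user-space sketch of that probe:

#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: returns nonzero when execveat() appears to exist. A bogus
 * call only sets errno to ENOSYS when the syscall itself is missing. */
static int have_execveat(void)
{
	errno = 0;
#ifdef __NR_execveat
	syscall(__NR_execveat, -1, NULL, NULL, NULL, 0);
#else
	errno = ENOSYS;
#endif
	return errno != ENOSYS;
}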