aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2016-11-15 10:54:36 -0500
committerDavid S. Miller <davem@davemloft.net>2016-11-15 10:54:36 -0500
commitbb598c1b8c9bf56981927dcb8c0dc34b8ff95342 (patch)
tree69fe6d3bcdbf0acb76e42b144d8af5a0234ccdcb
parenteb2ca35f1814dad3ca547261eedfbbd0d65a0efc (diff)
parente76d21c40bd6c67fd4e2c1540d77e113df962b4d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several cases of bug fixes in 'net' overlapping other changes in 'net-next-. Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-ibm-rtl4
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt5
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie.txt11
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt10
-rw-r--r--Documentation/filesystems/Locking1
-rw-r--r--Documentation/filesystems/vfs.txt1
-rw-r--r--Documentation/networking/dsa/dsa.txt3
-rw-r--r--Documentation/virtual/kvm/locking.txt12
-rw-r--r--MAINTAINERS20
-rw-r--r--Makefile12
-rw-r--r--arch/arc/Makefile7
-rw-r--r--arch/arc/boot/dts/axc001.dtsi2
-rw-r--r--arch/arc/boot/dts/nsim_700.dts2
-rw-r--r--arch/arc/boot/dts/nsimosci.dts4
-rw-r--r--arch/arc/configs/nsim_700_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig3
-rw-r--r--arch/arc/include/asm/arcregs.h2
-rw-r--r--arch/arc/include/asm/smp.h4
-rw-r--r--arch/arc/kernel/devtree.c2
-rw-r--r--arch/arc/kernel/mcip.c32
-rw-r--r--arch/arc/kernel/process.c20
-rw-r--r--arch/arc/kernel/smp.c23
-rw-r--r--arch/arc/kernel/time.c19
-rw-r--r--arch/arc/mm/dma.c26
-rw-r--r--arch/arc/plat-eznps/smp.c6
-rw-r--r--arch/arm/include/asm/kvm_asm.h1
-rw-r--r--arch/arm/include/asm/kvm_host.h3
-rw-r--r--arch/arm/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h3
-rw-r--r--arch/arm/kernel/calls.S3
-rw-r--r--arch/arm/kvm/arm.c27
-rw-r--r--arch/arm/kvm/hyp/tlb.c15
-rw-r--r--arch/arm/mm/abort-lv4t.S34
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi7
-rw-r--r--arch/arm64/include/asm/alternative.h2
-rw-r--r--arch/arm64/include/asm/cpucaps.h40
-rw-r--r--arch/arm64/include/asm/cpufeature.h20
-rw-r--r--arch/arm64/include/asm/kvm_asm.h1
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h2
-rw-r--r--arch/arm64/include/asm/lse.h1
-rw-r--r--arch/arm64/kvm/hyp/tlb.c15
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/boot/dts/mti/malta.dts3
-rw-r--r--arch/mips/generic/init.c16
-rw-r--r--arch/mips/include/asm/fpu_emulator.h13
-rw-r--r--arch/mips/include/asm/kvm_host.h7
-rw-r--r--arch/mips/include/asm/switch_to.h18
-rw-r--r--arch/mips/kernel/mips-cpc.c11
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c10
-rw-r--r--arch/mips/kernel/ptrace.c8
-rw-r--r--arch/mips/kernel/r2300_fpu.S138
-rw-r--r--arch/mips/kernel/r6000_fpu.S89
-rw-r--r--arch/mips/kernel/relocate.c2
-rw-r--r--arch/mips/kernel/setup.c13
-rw-r--r--arch/mips/kernel/traps.c137
-rw-r--r--arch/mips/kvm/emulate.c32
-rw-r--r--arch/mips/kvm/mips.c5
-rw-r--r--arch/mips/kvm/mmu.c4
-rw-r--r--arch/mips/lib/dump_tlb.c44
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c18
-rw-r--r--arch/nios2/kernel/time.c1
-rw-r--r--arch/openrisc/include/asm/cache.h2
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h4
-rw-r--r--arch/parisc/kernel/drivers.c6
-rw-r--r--arch/parisc/kernel/syscall.S66
-rw-r--r--arch/s390/hypfs/hypfs_diag.c6
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kvm/sthyi.c4
-rw-r--r--arch/s390/pci/pci_dma.c2
-rw-r--r--arch/sparc/include/asm/cpudata_64.h5
-rw-r--r--arch/sparc/include/asm/spinlock_32.h2
-rw-r--r--arch/sparc/include/asm/spinlock_64.h12
-rw-r--r--arch/sparc/include/asm/topology_64.h8
-rw-r--r--arch/sparc/include/asm/uaccess_64.h28
-rw-r--r--arch/sparc/kernel/head_64.S37
-rw-r--r--arch/sparc/kernel/jump_label.c23
-rw-r--r--arch/sparc/kernel/mdesc.c46
-rw-r--r--arch/sparc/kernel/smp_64.c8
-rw-r--r--arch/sparc/lib/GENcopy_from_user.S4
-rw-r--r--arch/sparc/lib/GENcopy_to_user.S4
-rw-r--r--arch/sparc/lib/GENmemcpy.S48
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/lib/NG2copy_from_user.S8
-rw-r--r--arch/sparc/lib/NG2copy_to_user.S8
-rw-r--r--arch/sparc/lib/NG2memcpy.S228
-rw-r--r--arch/sparc/lib/NG4copy_from_user.S8
-rw-r--r--arch/sparc/lib/NG4copy_to_user.S8
-rw-r--r--arch/sparc/lib/NG4memcpy.S294
-rw-r--r--arch/sparc/lib/NGcopy_from_user.S4
-rw-r--r--arch/sparc/lib/NGcopy_to_user.S4
-rw-r--r--arch/sparc/lib/NGmemcpy.S233
-rw-r--r--arch/sparc/lib/U1copy_from_user.S8
-rw-r--r--arch/sparc/lib/U1copy_to_user.S8
-rw-r--r--arch/sparc/lib/U1memcpy.S345
-rw-r--r--arch/sparc/lib/U3copy_from_user.S8
-rw-r--r--arch/sparc/lib/U3copy_to_user.S8
-rw-r--r--arch/sparc/lib/U3memcpy.S227
-rw-r--r--arch/sparc/lib/copy_in_user.S35
-rw-r--r--arch/sparc/lib/user_fixup.c71
-rw-r--r--arch/sparc/mm/tsb.c17
-rw-r--r--arch/sparc/mm/ultra.S374
-rw-r--r--arch/tile/include/asm/cache.h3
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c4
-rw-r--r--arch/x86/events/intel/uncore_snb.c32
-rw-r--r--arch/x86/include/asm/intel-mid.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/kernel/apm_32.c5
-rw-r--r--arch/x86/kernel/cpu/amd.c6
-rw-r--r--arch/x86/kernel/cpu/common.c32
-rw-r--r--arch/x86/kvm/emulate.c2
-rw-r--r--arch/x86/kvm/svm.c23
-rw-r--r--arch/x86/kvm/vmx.c65
-rw-r--r--arch/x86/kvm/x86.c16
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c80
-rw-r--r--arch/x86/platform/intel-mid/pwr.c19
-rw-r--r--drivers/acpi/acpi_apd.c10
-rw-r--r--drivers/acpi/acpi_lpss.c10
-rw-r--r--drivers/acpi/acpi_platform.c5
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c4
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/base/dd.c5
-rw-r--r--drivers/base/power/main.c8
-rw-r--r--drivers/block/aoe/aoecmd.c41
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/virtio_blk.c10
-rw-r--r--drivers/char/ppdev.c3
-rw-r--r--drivers/char/tpm/tpm-interface.c3
-rw-r--r--drivers/char/virtio_console.c22
-rw-r--r--drivers/clk/clk-qoriq.c13
-rw-r--r--drivers/clk/clk-xgene.c10
-rw-r--r--drivers/clk/imx/clk-pllv3.c8
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c2
-rw-r--r--drivers/clk/mmp/clk-of-pxa168.c2
-rw-r--r--drivers/clk/mmp/clk-of-pxa910.c4
-rw-r--r--drivers/clk/rockchip/clk-ddr.c5
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c22
-rw-r--r--drivers/firewire/net.c59
-rw-r--r--drivers/gpio/gpio-mvebu.c92
-rw-r--r--drivers/gpio/gpiolib-of.c14
-rw-r--r--drivers/gpio/gpiolib.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c87
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c6
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c12
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h6
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c23
-rw-r--r--drivers/gpu/drm/drm_atomic.c9
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c4
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c6
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c68
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c30
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence.c21
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c7
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c122
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c34
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c75
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c84
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c24
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c9
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c28
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c14
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c1
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c46
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c9
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c3
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c53
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c12
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c22
-rw-r--r--drivers/gpu/drm/udl/udl_main.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c2
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-sensor-custom.c6
-rw-r--r--drivers/hid/hid-sensor-hub.c15
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c102
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/hwmon.c6
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c12
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c56
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c8
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c1
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c2
-rw-r--r--drivers/infiniband/core/cma.c54
-rw-r--r--drivers/input/mouse/focaltech.c6
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/iommu/arm-smmu-v3.c25
-rw-r--r--drivers/iommu/arm-smmu.c16
-rw-r--r--drivers/iommu/intel-iommu.c14
-rw-r--r--drivers/md/md.c10
-rw-r--r--drivers/md/raid1.c26
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5-cache.c6
-rw-r--r--drivers/media/dvb-frontends/Kconfig5
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.c (renamed from drivers/media/usb/dvb-usb/gp8psk-fe.c)156
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.h82
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c105
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.h4
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c34
-rw-r--r--drivers/media/usb/dvb-usb/Makefile2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c304
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c77
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c100
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c27
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c36
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c25
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c113
-rw-r--r--drivers/media/usb/dvb-usb/dibusb.h3
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c26
-rw-r--r--drivers/media/usb/dvb-usb/digitv.h5
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c128
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c104
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c10
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h9
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c2
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c132
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.h63
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c25
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c136
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c16
-rw-r--r--drivers/media/usb/s2255/s2255drv.c15
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c16
-rw-r--r--drivers/misc/mei/bus-fixup.c2
-rw-r--r--drivers/mmc/card/mmc_test.c8
-rw-r--r--drivers/mmc/core/mmc.c3
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c5
-rw-r--r--drivers/mmc/host/dw_mmc.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c4
-rw-r--r--drivers/mmc/host/sdhci-msm.c1
-rw-r--r--drivers/mmc/host/sdhci.c36
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/mtk_ecc.c19
-rw-r--r--drivers/mtd/nand/nand_base.c60
-rw-r--r--drivers/net/can/sja1000/plx_pci.c18
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c12
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c48
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c8
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c10
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c17
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c25
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c15
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c3
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c10
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/macvlan.c31
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c17
-rw-r--r--drivers/net/usb/r8152.c21
-rw-r--r--drivers/net/virtio_net.c30
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--drivers/nfc/mei_phy.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c15
-rw-r--r--drivers/ntb/ntb_transport.c2
-rw-r--r--drivers/ntb/test/ntb_perf.c8
-rw-r--r--drivers/ntb/test/ntb_pingpong.c2
-rw-r--r--drivers/nvme/host/lightnvm.c2
-rw-r--r--drivers/of/base.c2
-rw-r--r--drivers/pci/host/pcie-designware.c7
-rw-r--r--drivers/pci/host/pcie-qcom.c2
-rw-r--r--drivers/pci/host/pcie-rockchip.c62
-rw-r--r--drivers/pci/pci-mid.c6
-rw-r--r--drivers/pci/setup-res.c8
-rw-r--r--drivers/pcmcia/soc_common.c2
-rw-r--r--drivers/phy/phy-da8xx-usb.c5
-rw-r--r--drivers/phy/phy-rockchip-pcie.c13
-rw-r--r--drivers/phy/phy-sun4i-usb.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c17
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c8
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel-vbtn.c2
-rw-r--r--drivers/platform/x86/toshiba-wmi.c26
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/rtc/rtc-asm9260.c1
-rw-r--r--drivers/rtc/rtc-cmos.c15
-rw-r--r--drivers/rtc/rtc-omap.c38
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c9
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c13
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c16
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/vmw_pvscsi.c5
-rw-r--r--drivers/scsi/vmw_pvscsi.h2
-rw-r--r--drivers/spi/spi-fsl-dspi.c7
-rw-r--r--drivers/spi/spi-fsl-espi.c2
-rw-r--r--drivers/spi/spi.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c3
-rw-r--r--drivers/staging/greybus/arche-platform.c1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c17
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/nvec/nvec_ps2.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h8
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/dwc3/core.c5
-rw-r--r--drivers/usb/dwc3/dwc3-st.c1
-rw-r--r--drivers/usb/gadget/function/u_ether.c8
-rw-r--r--drivers/usb/host/pci-quirks.c8
-rw-r--r--drivers/usb/musb/da8xx.c3
-rw-r--r--drivers/usb/musb/musb_core.c5
-rw-r--r--drivers/uwb/lc-rc.c16
-rw-r--r--drivers/uwb/pal.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c33
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/virtio/config.c12
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_legacy.c16
-rw-r--r--drivers/virtio/virtio_ring.c16
-rw-r--r--fs/aio.c207
-rw-r--r--fs/btrfs/extent-tree.c3
-rw-r--r--fs/btrfs/extent_io.c8
-rw-r--r--fs/btrfs/inode.c13
-rw-r--r--fs/btrfs/ioctl.c5
-rw-r--r--fs/btrfs/relocation.c9
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/coredump.c3
-rw-r--r--fs/nfs/client.c3
-rw-r--r--fs/nfs/namespace.c2
-rw-r--r--fs/nfs/nfs4session.c12
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfsd/netns.h5
-rw-r--r--fs/nfsd/nfs4state.c38
-rw-r--r--fs/ntfs/dir.c2
-rw-r--r--fs/ocfs2/dir.c2
-rw-r--r--fs/orangefs/orangefs-debugfs.c147
-rw-r--r--fs/orangefs/orangefs-mod.c6
-rw-r--r--fs/overlayfs/copy_up.c2
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/super.c15
-rw-r--r--fs/splice.c5
-rw-r--r--fs/xfs/libxfs/xfs_defer.c17
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/asm-generic/sections.h3
-rw-r--r--include/asm-generic/vmlinux.lds.h5
-rw-r--r--include/drm/drm_plane.h8
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/console.h6
-rw-r--r--include/linux/frontswap.h5
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/mtd/nand.h2
-rw-r--r--include/linux/netdevice.h15
-rw-r--r--include/linux/phy/phy.h7
-rw-r--r--include/linux/regmap.h11
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_tunnel.h1
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h3
-rw-r--r--include/net/netfilter/nf_tables.h8
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/uapi/linux/atm_zatm.h1
-rw-r--r--include/uapi/linux/bpqether.h2
-rw-r--r--include/uapi/sound/asoc.h6
-rw-r--r--kernel/bpf/hashtab.c3
-rw-r--r--kernel/bpf/syscall.c4
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/irq/manage.c4
-rw-r--r--kernel/power/suspend_test.c4
-rw-r--r--kernel/printk/printk.c24
-rw-r--r--kernel/sched/core.c12
-rw-r--r--kernel/taskstats.c6
-rw-r--r--lib/stackdepot.c2
-rw-r--r--mm/cma.c3
-rw-r--r--mm/filemap.c3
-rw-r--r--mm/hugetlb.c66
-rw-r--r--mm/kmemleak.c1
-rw-r--r--mm/memory-failure.c12
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/slab_common.c4
-rw-r--r--mm/swapfile.c2
-rw-r--r--net/can/bcm.c32
-rw-r--r--net/ceph/ceph_fs.c3
-rw-r--r--net/ceph/osd_client.c1
-rw-r--r--net/core/dev.c19
-rw-r--r--net/core/filter.c68
-rw-r--r--net/core/flow_dissector.c11
-rw-r--r--net/core/rtnetlink.c1
-rw-r--r--net/core/sock.c6
-rw-r--r--net/dccp/ipv4.c16
-rw-r--r--net/dccp/ipv6.c19
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/ipv4/af_inet.c9
-rw-r--r--net/ipv4/fib_trie.c21
-rw-r--r--net/ipv4/icmp.c4
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_output.c25
-rw-r--r--net/ipv4/ip_tunnel_core.c11
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/nft_dup_ipv4.c6
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_dctcp.c13
-rw-r--r--net/ipv4/tcp_ipv4.c19
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ip6_udp_tunnel.c3
-rw-r--r--net/ipv6/netfilter/nft_dup_ipv6.c6
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/tcp_ipv6.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c7
-rw-r--r--net/netfilter/nf_conntrack_core.c49
-rw-r--r--net/netfilter/nf_conntrack_helper.c11
-rw-r--r--net/netfilter/nf_conntrack_sip.c5
-rw-r--r--net/netfilter/nf_tables_api.c18
-rw-r--r--net/netfilter/nft_dynset.c19
-rw-r--r--net/netfilter/nft_set_hash.c19
-rw-r--r--net/netfilter/nft_set_rbtree.c2
-rw-r--r--net/netfilter/xt_connmark.c4
-rw-r--r--net/netlink/diag.c5
-rw-r--r--net/netlink/genetlink.c1
-rw-r--r--net/sctp/input.c35
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/socket.c27
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c13
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c82
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c21
-rw-r--r--net/sunrpc/clnt.c7
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c37
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c12
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h3
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--net/unix/af_unix.c3
-rw-r--r--samples/bpf/Makefile4
-rwxr-xr-xsamples/bpf/tc_l2_redirect.sh173
-rw-r--r--samples/bpf/tc_l2_redirect_kern.c236
-rw-r--r--samples/bpf/tc_l2_redirect_user.c73
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/Makefile.ubsan4
-rwxr-xr-xscripts/bloat-o-meter3
-rw-r--r--scripts/gcc-plugins/cyc_complexity_plugin.c4
-rw-r--r--scripts/gcc-plugins/gcc-common.h1
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c25
-rw-r--r--scripts/gcc-plugins/sancov_plugin.c4
-rw-r--r--sound/core/info.c9
-rw-r--r--sound/soc/codecs/cs4270.c8
-rw-r--r--sound/soc/codecs/da7219.c3
-rw-r--r--sound/soc/codecs/hdmi-codec.c7
-rw-r--r--sound/soc/codecs/rt298.c5
-rw-r--r--sound/soc/codecs/rt5663.c4
-rw-r--r--sound/soc/codecs/sti-sas.c2
-rw-r--r--sound/soc/codecs/tas571x.c37
-rw-r--r--sound/soc/intel/Kconfig3
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c1
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c4
-rw-r--r--sound/soc/intel/skylake/skl.c8
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/qcom/lpass-cpu.c3
-rw-r--r--sound/soc/qcom/lpass-platform.c166
-rw-r--r--sound/soc/qcom/lpass.h1
-rw-r--r--sound/soc/samsung/ac97.c10
-rw-r--r--sound/soc/samsung/i2s.c19
-rw-r--r--sound/soc/samsung/pcm.c19
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c16
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c14
-rw-r--r--sound/soc/samsung/spdif.c19
-rw-r--r--sound/soc/sti/uniperif_player.c6
-rw-r--r--sound/soc/sunxi/sun4i-codec.c19
-rw-r--r--tools/perf/ui/browsers/hists.c48
-rw-r--r--tools/perf/util/hist.c12
-rw-r--r--tools/power/cpupower/utils/cpufreq-set.c7
-rw-r--r--tools/virtio/ringtest/Makefile4
-rw-r--r--tools/virtio/ringtest/main.c20
-rw-r--r--tools/virtio/ringtest/main.h4
-rw-r--r--tools/virtio/ringtest/noring.c6
-rw-r--r--tools/virtio/ringtest/ptr_ring.c22
-rw-r--r--tools/virtio/ringtest/ring.c18
-rw-r--r--tools/virtio/ringtest/virtio_ring_0_9.c64
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c41
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h14
-rw-r--r--virt/kvm/arm/vgic/vgic.c12
-rw-r--r--virt/kvm/eventfd.c22
-rw-r--r--virt/kvm/kvm_main.c6
580 files changed, 7286 insertions, 4003 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
index b82deeaec314..470def06ab0a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
+++ b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
@@ -1,4 +1,4 @@
1What: state 1What: /sys/devices/system/ibm_rtl/state
2Date: Sep 2010 2Date: Sep 2010
3KernelVersion: 2.6.37 3KernelVersion: 2.6.37
4Contact: Vernon Mauery <vernux@us.ibm.com> 4Contact: Vernon Mauery <vernux@us.ibm.com>
@@ -10,7 +10,7 @@ Description: The state file allows a means by which to change in and
10Users: The ibm-prtm userspace daemon uses this interface. 10Users: The ibm-prtm userspace daemon uses this interface.
11 11
12 12
13What: version 13What: /sys/devices/system/ibm_rtl/version
14Date: Sep 2010 14Date: Sep 2010
15KernelVersion: 2.6.37 15KernelVersion: 2.6.37
16Contact: Vernon Mauery <vernux@us.ibm.com> 16Contact: Vernon Mauery <vernux@us.ibm.com>
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 4e00e859e885..bfa461aaac99 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -43,6 +43,9 @@ Optional properties:
43 reset signal present internally in some host controller IC designs. 43 reset signal present internally in some host controller IC designs.
44 See Documentation/devicetree/bindings/reset/reset.txt for details. 44 See Documentation/devicetree/bindings/reset/reset.txt for details.
45 45
46* reset-names: request name for using "resets" property. Must be "reset".
47 (It will be used together with "resets" property.)
48
46* clocks: from common clock binding: handle to biu and ciu clocks for the 49* clocks: from common clock binding: handle to biu and ciu clocks for the
47 bus interface unit clock and the card interface unit clock. 50 bus interface unit clock and the card interface unit clock.
48 51
@@ -103,6 +106,8 @@ board specific portions as listed below.
103 interrupts = <0 75 0>; 106 interrupts = <0 75 0>;
104 #address-cells = <1>; 107 #address-cells = <1>;
105 #size-cells = <0>; 108 #size-cells = <0>;
109 resets = <&rst 20>;
110 reset-names = "reset";
106 }; 111 };
107 112
108[board specific internal DMA resources] 113[board specific internal DMA resources]
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
index ba67b39939c1..71aeda1ca055 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
@@ -26,13 +26,16 @@ Required properties:
26 - "sys" 26 - "sys"
27 - "legacy" 27 - "legacy"
28 - "client" 28 - "client"
29- resets: Must contain five entries for each entry in reset-names. 29- resets: Must contain seven entries for each entry in reset-names.
30 See ../reset/reset.txt for details. 30 See ../reset/reset.txt for details.
31- reset-names: Must include the following names 31- reset-names: Must include the following names
32 - "core" 32 - "core"
33 - "mgmt" 33 - "mgmt"
34 - "mgmt-sticky" 34 - "mgmt-sticky"
35 - "pipe" 35 - "pipe"
36 - "pm"
37 - "aclk"
38 - "pclk"
36- pinctrl-names : The pin control state names 39- pinctrl-names : The pin control state names
37- pinctrl-0: The "default" pinctrl state 40- pinctrl-0: The "default" pinctrl state
38- #interrupt-cells: specifies the number of cells needed to encode an 41- #interrupt-cells: specifies the number of cells needed to encode an
@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
86 reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>; 89 reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
87 reg-names = "axi-base", "apb-base"; 90 reg-names = "axi-base", "apb-base";
88 resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, 91 resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
89 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>; 92 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
90 reset-names = "core", "mgmt", "mgmt-sticky", "pipe"; 93 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
94 reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
95 "pm", "pclk", "aclk";
91 phys = <&pcie_phy>; 96 phys = <&pcie_phy>;
92 phy-names = "pcie-phy"; 97 phy-names = "pcie-phy";
93 pinctrl-names = "default"; 98 pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index f9753c416974..b24583aa34c3 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -14,11 +14,6 @@ Required properies:
14 - #size-cells : The value of this property must be 1 14 - #size-cells : The value of this property must be 1
15 - ranges : defines mapping between pin controller node (parent) to 15 - ranges : defines mapping between pin controller node (parent) to
16 gpio-bank node (children). 16 gpio-bank node (children).
17 - interrupt-parent: phandle of the interrupt parent to which the external
18 GPIO interrupts are forwarded to.
19 - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
20 which includes IRQ mux selection register, and the offset of the IRQ mux
21 selection register.
22 - pins-are-numbered: Specify the subnodes are using numbered pinmux to 17 - pins-are-numbered: Specify the subnodes are using numbered pinmux to
23 specify pins. 18 specify pins.
24 19
@@ -37,6 +32,11 @@ Required properties:
37 32
38Optional properties: 33Optional properties:
39 - reset: : Reference to the reset controller 34 - reset: : Reference to the reset controller
35 - interrupt-parent: phandle of the interrupt parent to which the external
36 GPIO interrupts are forwarded to.
37 - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
38 which includes IRQ mux selection register, and the offset of the IRQ mux
39 selection register.
40 40
41Example: 41Example:
42#include <dt-bindings/pinctrl/stm32f429-pinfunc.h> 42#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 14cdc101d165..1b5f15653b1b 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -447,7 +447,6 @@ prototypes:
447 int (*flush) (struct file *); 447 int (*flush) (struct file *);
448 int (*release) (struct inode *, struct file *); 448 int (*release) (struct inode *, struct file *);
449 int (*fsync) (struct file *, loff_t start, loff_t end, int datasync); 449 int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
450 int (*aio_fsync) (struct kiocb *, int datasync);
451 int (*fasync) (int, struct file *, int); 450 int (*fasync) (int, struct file *, int);
452 int (*lock) (struct file *, int, struct file_lock *); 451 int (*lock) (struct file *, int, struct file_lock *);
453 ssize_t (*readv) (struct file *, const struct iovec *, unsigned long, 452 ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index d619c8d71966..b5039a00caaf 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -828,7 +828,6 @@ struct file_operations {
828 int (*flush) (struct file *, fl_owner_t id); 828 int (*flush) (struct file *, fl_owner_t id);
829 int (*release) (struct inode *, struct file *); 829 int (*release) (struct inode *, struct file *);
830 int (*fsync) (struct file *, loff_t, loff_t, int datasync); 830 int (*fsync) (struct file *, loff_t, loff_t, int datasync);
831 int (*aio_fsync) (struct kiocb *, int datasync);
832 int (*fasync) (int, struct file *, int); 831 int (*fasync) (int, struct file *, int);
833 int (*lock) (struct file *, int, struct file_lock *); 832 int (*lock) (struct file *, int, struct file_lock *);
834 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); 833 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 6d6c07cf1a9a..63912ef34606 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
67Switch tagging protocols 67Switch tagging protocols
68------------------------ 68------------------------
69 69
70DSA currently supports 4 different tagging protocols, and a tag-less mode as 70DSA currently supports 5 different tagging protocols, and a tag-less mode as
71well. The different protocols are implemented in: 71well. The different protocols are implemented in:
72 72
73net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy) 73net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
74net/dsa/tag_dsa.c: Marvell's original DSA tag 74net/dsa/tag_dsa.c: Marvell's original DSA tag
75net/dsa/tag_edsa.c: Marvell's enhanced DSA tag 75net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
76net/dsa/tag_brcm.c: Broadcom's 4 bytes tag 76net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
77net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
77 78
78The exact format of the tag protocol is vendor specific, but in general, they 79The exact format of the tag protocol is vendor specific, but in general, they
79all contain something which: 80all contain something which:
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index f2491a8c68b4..e5dd9f4d6100 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -4,7 +4,17 @@ KVM Lock Overview
41. Acquisition Orders 41. Acquisition Orders
5--------------------- 5---------------------
6 6
7(to be written) 7The acquisition orders for mutexes are as follows:
8
9- kvm->lock is taken outside vcpu->mutex
10
11- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
12
13- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
14 them together is quite rare.
15
16For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
17else is a leaf: no other lock is taken inside the critical sections.
8 18
92: Exception 192: Exception
10------------ 20------------
diff --git a/MAINTAINERS b/MAINTAINERS
index e5c17a951b7d..987081272e25 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7927,6 +7927,10 @@ F: mm/
7927MEMORY TECHNOLOGY DEVICES (MTD) 7927MEMORY TECHNOLOGY DEVICES (MTD)
7928M: David Woodhouse <dwmw2@infradead.org> 7928M: David Woodhouse <dwmw2@infradead.org>
7929M: Brian Norris <computersforpeace@gmail.com> 7929M: Brian Norris <computersforpeace@gmail.com>
7930M: Boris Brezillon <boris.brezillon@free-electrons.com>
7931M: Marek Vasut <marek.vasut@gmail.com>
7932M: Richard Weinberger <richard@nod.at>
7933M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
7930L: linux-mtd@lists.infradead.org 7934L: linux-mtd@lists.infradead.org
7931W: http://www.linux-mtd.infradead.org/ 7935W: http://www.linux-mtd.infradead.org/
7932Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ 7936Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -8055,6 +8059,7 @@ F: drivers/infiniband/hw/mlx4/
8055F: include/linux/mlx4/ 8059F: include/linux/mlx4/
8056 8060
8057MELLANOX MLX5 core VPI driver 8061MELLANOX MLX5 core VPI driver
8062M: Saeed Mahameed <saeedm@mellanox.com>
8058M: Matan Barak <matanb@mellanox.com> 8063M: Matan Barak <matanb@mellanox.com>
8059M: Leon Romanovsky <leonro@mellanox.com> 8064M: Leon Romanovsky <leonro@mellanox.com>
8060L: netdev@vger.kernel.org 8065L: netdev@vger.kernel.org
@@ -9332,7 +9337,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
9332M: Keith Busch <keith.busch@intel.com> 9337M: Keith Busch <keith.busch@intel.com>
9333L: linux-pci@vger.kernel.org 9338L: linux-pci@vger.kernel.org
9334S: Supported 9339S: Supported
9335F: arch/x86/pci/vmd.c 9340F: drivers/pci/host/vmd.c
9336 9341
9337PCIE DRIVER FOR ST SPEAR13XX 9342PCIE DRIVER FOR ST SPEAR13XX
9338M: Pratyush Anand <pratyush.anand@gmail.com> 9343M: Pratyush Anand <pratyush.anand@gmail.com>
@@ -11405,6 +11410,17 @@ W: http://www.st.com/spear
11405S: Maintained 11410S: Maintained
11406F: drivers/clk/spear/ 11411F: drivers/clk/spear/
11407 11412
11413SPI NOR SUBSYSTEM
11414M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
11415M: Marek Vasut <marek.vasut@gmail.com>
11416L: linux-mtd@lists.infradead.org
11417W: http://www.linux-mtd.infradead.org/
11418Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
11419T: git git://github.com/spi-nor/linux.git
11420S: Maintained
11421F: drivers/mtd/spi-nor/
11422F: include/linux/mtd/spi-nor.h
11423
11408SPI SUBSYSTEM 11424SPI SUBSYSTEM
11409M: Mark Brown <broonie@kernel.org> 11425M: Mark Brown <broonie@kernel.org>
11410L: linux-spi@vger.kernel.org 11426L: linux-spi@vger.kernel.org
@@ -12784,6 +12800,7 @@ F: include/uapi/linux/virtio_console.h
12784 12800
12785VIRTIO CORE, NET AND BLOCK DRIVERS 12801VIRTIO CORE, NET AND BLOCK DRIVERS
12786M: "Michael S. Tsirkin" <mst@redhat.com> 12802M: "Michael S. Tsirkin" <mst@redhat.com>
12803M: Jason Wang <jasowang@redhat.com>
12787L: virtualization@lists.linux-foundation.org 12804L: virtualization@lists.linux-foundation.org
12788S: Maintained 12805S: Maintained
12789F: Documentation/devicetree/bindings/virtio/ 12806F: Documentation/devicetree/bindings/virtio/
@@ -12814,6 +12831,7 @@ F: include/uapi/linux/virtio_gpu.h
12814 12831
12815VIRTIO HOST (VHOST) 12832VIRTIO HOST (VHOST)
12816M: "Michael S. Tsirkin" <mst@redhat.com> 12833M: "Michael S. Tsirkin" <mst@redhat.com>
12834M: Jason Wang <jasowang@redhat.com>
12817L: kvm@vger.kernel.org 12835L: kvm@vger.kernel.org
12818L: virtualization@lists.linux-foundation.org 12836L: virtualization@lists.linux-foundation.org
12819L: netdev@vger.kernel.org 12837L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index a2650f9c6a25..247430abfc73 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 9 2PATCHLEVEL = 9
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc5
5NAME = Psychotic Stoned Sheep 5NAME = Psychotic Stoned Sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -370,7 +370,7 @@ LDFLAGS_MODULE =
370CFLAGS_KERNEL = 370CFLAGS_KERNEL =
371AFLAGS_KERNEL = 371AFLAGS_KERNEL =
372LDFLAGS_vmlinux = 372LDFLAGS_vmlinux =
373CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im 373CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
374CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) 374CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
375 375
376 376
@@ -620,7 +620,6 @@ ARCH_CFLAGS :=
620include arch/$(SRCARCH)/Makefile 620include arch/$(SRCARCH)/Makefile
621 621
622KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) 622KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
623KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
624KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) 623KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
625 624
626ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION 625ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -629,15 +628,18 @@ KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
629endif 628endif
630 629
631ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 630ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
632KBUILD_CFLAGS += -Os 631KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
633else 632else
634ifdef CONFIG_PROFILE_ALL_BRANCHES 633ifdef CONFIG_PROFILE_ALL_BRANCHES
635KBUILD_CFLAGS += -O2 634KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
636else 635else
637KBUILD_CFLAGS += -O2 636KBUILD_CFLAGS += -O2
638endif 637endif
639endif 638endif
640 639
640KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
641 $(call cc-disable-warning,maybe-uninitialized,))
642
641# Tell gcc to never replace conditional load with a non-conditional one 643# Tell gcc to never replace conditional load with a non-conditional one
642KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) 644KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
643 645
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 864adad52280..19cce226d1a8 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,6 +50,9 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
50 50
51cflags-$(atleast_gcc44) += -fsection-anchors 51cflags-$(atleast_gcc44) += -fsection-anchors
52 52
53cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
54cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
55
53ifdef CONFIG_ISA_ARCV2 56ifdef CONFIG_ISA_ARCV2
54 57
55ifndef CONFIG_ARC_HAS_LL64 58ifndef CONFIG_ARC_HAS_LL64
@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
68ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE 71ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
69# Generic build system uses -O2, we want -O3 72# Generic build system uses -O2, we want -O3
70# Note: No need to add to cflags-y as that happens anyways 73# Note: No need to add to cflags-y as that happens anyways
71ARCH_CFLAGS += -O3 74#
75# Disable the false maybe-uninitialized warings gcc spits out at -O3
76ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
72endif 77endif
73 78
74# small data is default for elf32 tool-chain. If not usable, disable it 79# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 6ae2c476ad82..53ce226f77a5 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -71,7 +71,7 @@
71 reg-io-width = <4>; 71 reg-io-width = <4>;
72 }; 72 };
73 73
74 arcpmu0: pmu { 74 arcpct0: pct {
75 compatible = "snps,arc700-pct"; 75 compatible = "snps,arc700-pct";
76 }; 76 };
77 }; 77 };
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index ce0ccd20b5bf..5ee96b067c08 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -69,7 +69,7 @@
69 }; 69 };
70 }; 70 };
71 71
72 arcpmu0: pmu { 72 arcpct0: pct {
73 compatible = "snps,arc700-pct"; 73 compatible = "snps,arc700-pct";
74 }; 74 };
75 }; 75 };
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index bcf603142a33..3c391ba565ed 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -83,5 +83,9 @@
83 reg = <0xf0003000 0x44>; 83 reg = <0xf0003000 0x44>;
84 interrupts = <7>; 84 interrupts = <7>;
85 }; 85 };
86
87 arcpct0: pct {
88 compatible = "snps,arc700-pct";
89 };
86 }; 90 };
87}; 91};
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 7314f538847b..b0066a749d4c 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
14CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" 14CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
15CONFIG_KALLSYMS_ALL=y 15CONFIG_KALLSYMS_ALL=y
16CONFIG_EMBEDDED=y 16CONFIG_EMBEDDED=y
17CONFIG_PERF_EVENTS=y
17# CONFIG_SLUB_DEBUG is not set 18# CONFIG_SLUB_DEBUG is not set
18# CONFIG_COMPAT_BRK is not set 19# CONFIG_COMPAT_BRK is not set
19CONFIG_KPROBES=y 20CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 65ab9fbf83f2..ebe9ebb92933 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
14CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" 14CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
15CONFIG_KALLSYMS_ALL=y 15CONFIG_KALLSYMS_ALL=y
16CONFIG_EMBEDDED=y 16CONFIG_EMBEDDED=y
17CONFIG_PERF_EVENTS=y
17# CONFIG_SLUB_DEBUG is not set 18# CONFIG_SLUB_DEBUG is not set
18# CONFIG_COMPAT_BRK is not set 19# CONFIG_COMPAT_BRK is not set
19CONFIG_KPROBES=y 20CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 3b3990cddbe1..4bde43278be6 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
12CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" 12CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
13CONFIG_KALLSYMS_ALL=y 13CONFIG_KALLSYMS_ALL=y
14CONFIG_EMBEDDED=y 14CONFIG_EMBEDDED=y
15CONFIG_PERF_EVENTS=y
15# CONFIG_SLUB_DEBUG is not set 16# CONFIG_SLUB_DEBUG is not set
16# CONFIG_COMPAT_BRK is not set 17# CONFIG_COMPAT_BRK is not set
17CONFIG_KPROBES=y 18CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 98cf20933bbb..f6fb3d26557e 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
14CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" 14CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
15CONFIG_KALLSYMS_ALL=y 15CONFIG_KALLSYMS_ALL=y
16CONFIG_EMBEDDED=y 16CONFIG_EMBEDDED=y
17CONFIG_PERF_EVENTS=y
17# CONFIG_SLUB_DEBUG is not set 18# CONFIG_SLUB_DEBUG is not set
18# CONFIG_COMPAT_BRK is not set 19# CONFIG_COMPAT_BRK is not set
19CONFIG_KPROBES=y 20CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index ddf8b96d494e..b9f0fe00044b 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
14CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" 14CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
15CONFIG_KALLSYMS_ALL=y 15CONFIG_KALLSYMS_ALL=y
16CONFIG_EMBEDDED=y 16CONFIG_EMBEDDED=y
17CONFIG_PERF_EVENTS=y
17# CONFIG_SLUB_DEBUG is not set 18# CONFIG_SLUB_DEBUG is not set
18# CONFIG_COMPAT_BRK is not set 19# CONFIG_COMPAT_BRK is not set
19CONFIG_KPROBES=y 20CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index ceb90745326e..6da71ba253a9 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
10# CONFIG_PID_NS is not set 10# CONFIG_PID_NS is not set
11CONFIG_BLK_DEV_INITRD=y 11CONFIG_BLK_DEV_INITRD=y
12CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" 12CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
13CONFIG_PERF_EVENTS=y
13# CONFIG_COMPAT_BRK is not set 14# CONFIG_COMPAT_BRK is not set
14CONFIG_KPROBES=y 15CONFIG_KPROBES=y
15CONFIG_MODULES=y 16CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
34# CONFIG_INET_XFRM_MODE_TRANSPORT is not set 35# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
35# CONFIG_INET_XFRM_MODE_TUNNEL is not set 36# CONFIG_INET_XFRM_MODE_TUNNEL is not set
36# CONFIG_INET_XFRM_MODE_BEET is not set 37# CONFIG_INET_XFRM_MODE_BEET is not set
37# CONFIG_INET_LRO is not set
38# CONFIG_IPV6 is not set 38# CONFIG_IPV6 is not set
39# CONFIG_WIRELESS is not set 39# CONFIG_WIRELESS is not set
40CONFIG_DEVTMPFS=y 40CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
72# CONFIG_HWMON is not set 72# CONFIG_HWMON is not set
73CONFIG_DRM=y 73CONFIG_DRM=y
74CONFIG_DRM_ARCPGU=y 74CONFIG_DRM_ARCPGU=y
75CONFIG_FRAMEBUFFER_CONSOLE=y
76CONFIG_LOGO=y 75CONFIG_LOGO=y
77# CONFIG_HID is not set 76# CONFIG_HID is not set
78# CONFIG_USB_SUPPORT is not set 77# CONFIG_USB_SUPPORT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7f3f9f63708c..1bd24ec3e350 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -43,12 +43,14 @@
43#define STATUS_AE_BIT 5 /* Exception active */ 43#define STATUS_AE_BIT 5 /* Exception active */
44#define STATUS_DE_BIT 6 /* PC is in delay slot */ 44#define STATUS_DE_BIT 6 /* PC is in delay slot */
45#define STATUS_U_BIT 7 /* User/Kernel mode */ 45#define STATUS_U_BIT 7 /* User/Kernel mode */
46#define STATUS_Z_BIT 11
46#define STATUS_L_BIT 12 /* Loop inhibit */ 47#define STATUS_L_BIT 12 /* Loop inhibit */
47 48
48/* These masks correspond to the status word(STATUS_32) bits */ 49/* These masks correspond to the status word(STATUS_32) bits */
49#define STATUS_AE_MASK (1<<STATUS_AE_BIT) 50#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
50#define STATUS_DE_MASK (1<<STATUS_DE_BIT) 51#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
51#define STATUS_U_MASK (1<<STATUS_U_BIT) 52#define STATUS_U_MASK (1<<STATUS_U_BIT)
53#define STATUS_Z_MASK (1<<STATUS_Z_BIT)
52#define STATUS_L_MASK (1<<STATUS_L_BIT) 54#define STATUS_L_MASK (1<<STATUS_L_BIT)
53 55
54/* 56/*
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 89fdd1b0a76e..0861007d9ef3 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
37 * API expected BY platform smp code (FROM arch smp code) 37 * API expected BY platform smp code (FROM arch smp code)
38 * 38 *
39 * smp_ipi_irq_setup: 39 * smp_ipi_irq_setup:
40 * Takes @cpu and @irq to which the arch-common ISR is hooked up 40 * Takes @cpu and @hwirq to which the arch-common ISR is hooked up
41 */ 41 */
42extern int smp_ipi_irq_setup(int cpu, int irq); 42extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
43 43
44/* 44/*
45 * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP 45 * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index f1e07c2344f8..3b67f538f142 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
31 arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */ 31 arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */
32 else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp")) 32 else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
33 arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */ 33 arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */
34 else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
35 arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */
34 else 36 else
35 arc_base_baud = 50000000; /* Fixed default 50MHz */ 37 arc_base_baud = 50000000; /* Fixed default 50MHz */
36} 38}
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index c424d5abc318..f39142acc89e 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
181{ 181{
182 unsigned long flags; 182 unsigned long flags;
183 cpumask_t online; 183 cpumask_t online;
184 unsigned int destination_bits;
185 unsigned int distribution_mode;
184 186
185 /* errout if no online cpu per @cpumask */ 187 /* errout if no online cpu per @cpumask */
186 if (!cpumask_and(&online, cpumask, cpu_online_mask)) 188 if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
188 190
189 raw_spin_lock_irqsave(&mcip_lock, flags); 191 raw_spin_lock_irqsave(&mcip_lock, flags);
190 192
191 idu_set_dest(data->hwirq, cpumask_bits(&online)[0]); 193 destination_bits = cpumask_bits(&online)[0];
192 idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR); 194 idu_set_dest(data->hwirq, destination_bits);
195
196 if (ffs(destination_bits) == fls(destination_bits))
197 distribution_mode = IDU_M_DISTRI_DEST;
198 else
199 distribution_mode = IDU_M_DISTRI_RR;
200
201 idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
193 202
194 raw_spin_unlock_irqrestore(&mcip_lock, flags); 203 raw_spin_unlock_irqrestore(&mcip_lock, flags);
195 204
@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
207 216
208}; 217};
209 218
210static int idu_first_irq; 219static irq_hw_number_t idu_first_hwirq;
211 220
212static void idu_cascade_isr(struct irq_desc *desc) 221static void idu_cascade_isr(struct irq_desc *desc)
213{ 222{
214 struct irq_domain *domain = irq_desc_get_handler_data(desc); 223 struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
215 unsigned int core_irq = irq_desc_get_irq(desc); 224 irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
216 unsigned int idu_irq; 225 irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
217 226
218 idu_irq = core_irq - idu_first_irq; 227 generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
219 generic_handle_irq(irq_find_mapping(domain, idu_irq));
220} 228}
221 229
222static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) 230static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
282 struct irq_domain *domain; 290 struct irq_domain *domain;
283 /* Read IDU BCR to confirm nr_irqs */ 291 /* Read IDU BCR to confirm nr_irqs */
284 int nr_irqs = of_irq_count(intc); 292 int nr_irqs = of_irq_count(intc);
285 int i, irq; 293 int i, virq;
286 struct mcip_bcr mp; 294 struct mcip_bcr mp;
287 295
288 READ_BCR(ARC_REG_MCIP_BCR, mp); 296 READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
303 * however we need it to get the parent virq and set IDU handler 311 * however we need it to get the parent virq and set IDU handler
304 * as first level isr 312 * as first level isr
305 */ 313 */
306 irq = irq_of_parse_and_map(intc, i); 314 virq = irq_of_parse_and_map(intc, i);
307 if (!i) 315 if (!i)
308 idu_first_irq = irq; 316 idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
309 317
310 irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain); 318 irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
311 } 319 }
312 320
313 __mcip_cmd(CMD_IDU_ENABLE, 0); 321 __mcip_cmd(CMD_IDU_ENABLE, 0);
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 59aa43cb146e..a41a79a4f4fe 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)
43 43
44SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) 44SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
45{ 45{
46 int uval; 46 struct pt_regs *regs = current_pt_regs();
47 int ret; 47 int uval = -EFAULT;
48 48
49 /* 49 /*
50 * This is only for old cores lacking LLOCK/SCOND, which by defintion 50 * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
54 */ 54 */
55 WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP)); 55 WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
56 56
57 /* Z indicates to userspace if operation succeded */
58 regs->status32 &= ~STATUS_Z_MASK;
59
57 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 60 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
58 return -EFAULT; 61 return -EFAULT;
59 62
60 preempt_disable(); 63 preempt_disable();
61 64
62 ret = __get_user(uval, uaddr); 65 if (__get_user(uval, uaddr))
63 if (ret)
64 goto done; 66 goto done;
65 67
66 if (uval != expected) 68 if (uval == expected) {
67 ret = -EAGAIN; 69 if (!__put_user(new, uaddr))
68 else 70 regs->status32 |= STATUS_Z_MASK;
69 ret = __put_user(new, uaddr); 71 }
70 72
71done: 73done:
72 preempt_enable(); 74 preempt_enable();
73 75
74 return ret; 76 return uval;
75} 77}
76 78
77void arch_cpu_idle(void) 79void arch_cpu_idle(void)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f183cc648851..88674d972c9d 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -22,6 +22,7 @@
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23#include <linux/cpumask.h> 23#include <linux/cpumask.h>
24#include <linux/reboot.h> 24#include <linux/reboot.h>
25#include <linux/irqdomain.h>
25#include <asm/processor.h> 26#include <asm/processor.h>
26#include <asm/setup.h> 27#include <asm/setup.h>
27#include <asm/mach_desc.h> 28#include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
67 int i; 68 int i;
68 69
69 /* 70 /*
70 * Initialise the present map, which describes the set of CPUs 71 * if platform didn't set the present map already, do it now
71 * actually populated at the present time. 72 * boot cpu is set to present already by init/main.c
72 */ 73 */
73 for (i = 0; i < max_cpus; i++) 74 if (num_present_cpus() <= 1) {
74 set_cpu_present(i, true); 75 for (i = 0; i < max_cpus; i++)
76 set_cpu_present(i, true);
77 }
75} 78}
76 79
77void __init smp_cpus_done(unsigned int max_cpus) 80void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
351 */ 354 */
352static DEFINE_PER_CPU(int, ipi_dev); 355static DEFINE_PER_CPU(int, ipi_dev);
353 356
354int smp_ipi_irq_setup(int cpu, int irq) 357int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
355{ 358{
356 int *dev = per_cpu_ptr(&ipi_dev, cpu); 359 int *dev = per_cpu_ptr(&ipi_dev, cpu);
360 unsigned int virq = irq_find_mapping(NULL, hwirq);
361
362 if (!virq)
363 panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
357 364
358 /* Boot cpu calls request, all call enable */ 365 /* Boot cpu calls request, all call enable */
359 if (!cpu) { 366 if (!cpu) {
360 int rc; 367 int rc;
361 368
362 rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev); 369 rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
363 if (rc) 370 if (rc)
364 panic("Percpu IRQ request failed for %d\n", irq); 371 panic("Percpu IRQ request failed for %u\n", virq);
365 } 372 }
366 373
367 enable_percpu_irq(irq, 0); 374 enable_percpu_irq(virq, 0);
368 375
369 return 0; 376 return 0;
370} 377}
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index f927b8dc6edd..c10390d1ddb6 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
152 cycle_t full; 152 cycle_t full;
153 } stamp; 153 } stamp;
154 154
155 155 /*
156 __asm__ __volatile( 156 * hardware has an internal state machine which tracks readout of
157 "1: \n" 157 * low/high and updates the CTRL.status if
158 " lr %0, [AUX_RTC_LOW] \n" 158 * - interrupt/exception taken between the two reads
159 " lr %1, [AUX_RTC_HIGH] \n" 159 * - high increments after low has been read
160 " lr %2, [AUX_RTC_CTRL] \n" 160 */
161 " bbit0.nt %2, 31, 1b \n" 161 do {
162 : "=r" (stamp.low), "=r" (stamp.high), "=r" (status)); 162 stamp.low = read_aux_reg(AUX_RTC_LOW);
163 stamp.high = read_aux_reg(AUX_RTC_HIGH);
164 status = read_aux_reg(AUX_RTC_CTRL);
165 } while (!(status & _BITUL(31)));
163 166
164 return stamp.full; 167 return stamp.full;
165} 168}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 60aab5a7522b..cd8aad8226dd 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
105 __free_pages(page, get_order(size)); 105 __free_pages(page, get_order(size));
106} 106}
107 107
108static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
109 void *cpu_addr, dma_addr_t dma_addr, size_t size,
110 unsigned long attrs)
111{
112 unsigned long user_count = vma_pages(vma);
113 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
114 unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
115 unsigned long off = vma->vm_pgoff;
116 int ret = -ENXIO;
117
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
119
120 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
121 return ret;
122
123 if (off < count && user_count <= (count - off)) {
124 ret = remap_pfn_range(vma, vma->vm_start,
125 pfn + off,
126 user_count << PAGE_SHIFT,
127 vma->vm_page_prot);
128 }
129
130 return ret;
131}
132
108/* 133/*
109 * streaming DMA Mapping API... 134 * streaming DMA Mapping API...
110 * CPU accesses page via normal paddr, thus needs to explicitly made 135 * CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
193struct dma_map_ops arc_dma_ops = { 218struct dma_map_ops arc_dma_ops = {
194 .alloc = arc_dma_alloc, 219 .alloc = arc_dma_alloc,
195 .free = arc_dma_free, 220 .free = arc_dma_free,
221 .mmap = arc_dma_mmap,
196 .map_page = arc_dma_map_page, 222 .map_page = arc_dma_map_page,
197 .map_sg = arc_dma_map_sg, 223 .map_sg = arc_dma_map_sg,
198 .sync_single_for_device = arc_dma_sync_single_for_device, 224 .sync_single_for_device = arc_dma_sync_single_for_device,
diff --git a/arch/arc/plat-eznps/smp.c b/arch/arc/plat-eznps/smp.c
index 5e901f86e4bd..56a4c8522f11 100644
--- a/arch/arc/plat-eznps/smp.c
+++ b/arch/arc/plat-eznps/smp.c
@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
140 mtm_enable_core(cpu); 140 mtm_enable_core(cpu);
141} 141}
142 142
143static void eznps_ipi_clear(int irq)
144{
145 write_aux_reg(CTOP_AUX_IACK, 1 << irq);
146}
147
148struct plat_smp_ops plat_smp_ops = { 143struct plat_smp_ops plat_smp_ops = {
149 .info = smp_cpuinfo_buf, 144 .info = smp_cpuinfo_buf,
150 .init_early_smp = eznps_init_cpumasks, 145 .init_early_smp = eznps_init_cpumasks,
151 .cpu_kick = eznps_smp_wakeup_cpu, 146 .cpu_kick = eznps_smp_wakeup_cpu,
152 .ipi_send = eznps_ipi_send, 147 .ipi_send = eznps_ipi_send,
153 .init_per_cpu = eznps_init_per_cpu, 148 .init_per_cpu = eznps_init_per_cpu,
154 .ipi_clear = eznps_ipi_clear,
155}; 149};
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index d7ea6bcb29bf..8ef05381984b 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
66extern void __kvm_flush_vm_context(void); 66extern void __kvm_flush_vm_context(void);
67extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 67extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
68extern void __kvm_tlb_flush_vmid(struct kvm *kvm); 68extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
69extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
69 70
70extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 71extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
71 72
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d19e02d03fd..d5423ab15ed5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -57,6 +57,9 @@ struct kvm_arch {
57 /* VTTBR value associated with below pgd and vmid */ 57 /* VTTBR value associated with below pgd and vmid */
58 u64 vttbr; 58 u64 vttbr;
59 59
60 /* The last vcpu id that ran on each physical CPU */
61 int __percpu *last_vcpu_ran;
62
60 /* Timer */ 63 /* Timer */
61 struct arch_timer_kvm timer; 64 struct arch_timer_kvm timer;
62 65
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 343135ede5fa..58508900c4bb 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -71,6 +71,7 @@
71#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) 71#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
72#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) 72#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
73#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) 73#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
74#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
74#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) 75#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
75#define PRRR __ACCESS_CP15(c10, 0, c2, 0) 76#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
76#define NMRR __ACCESS_CP15(c10, 0, c2, 1) 77#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 194b69923389..ada0d29a660f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
19 * This may need to be greater than __NR_last_syscall+1 in order to 19 * This may need to be greater than __NR_last_syscall+1 in order to
20 * account for the padding in the syscall table 20 * account for the padding in the syscall table
21 */ 21 */
22#define __NR_syscalls (396) 22#define __NR_syscalls (400)
23 23
24#define __ARCH_WANT_STAT64 24#define __ARCH_WANT_STAT64
25#define __ARCH_WANT_SYS_GETHOSTNAME 25#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 2cb9dc770e1d..314100a06ccb 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -420,6 +420,9 @@
420#define __NR_copy_file_range (__NR_SYSCALL_BASE+391) 420#define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
421#define __NR_preadv2 (__NR_SYSCALL_BASE+392) 421#define __NR_preadv2 (__NR_SYSCALL_BASE+392)
422#define __NR_pwritev2 (__NR_SYSCALL_BASE+393) 422#define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
423#define __NR_pkey_mprotect (__NR_SYSCALL_BASE+394)
424#define __NR_pkey_alloc (__NR_SYSCALL_BASE+395)
425#define __NR_pkey_free (__NR_SYSCALL_BASE+396)
423 426
424/* 427/*
425 * The following SWIs are ARM private. 428 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 703fa0f3cd8f..08030b18f10a 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -403,6 +403,9 @@
403 CALL(sys_copy_file_range) 403 CALL(sys_copy_file_range)
404 CALL(sys_preadv2) 404 CALL(sys_preadv2)
405 CALL(sys_pwritev2) 405 CALL(sys_pwritev2)
406 CALL(sys_pkey_mprotect)
407/* 395 */ CALL(sys_pkey_alloc)
408 CALL(sys_pkey_free)
406#ifndef syscalls_counted 409#ifndef syscalls_counted
407.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 410.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
408#define syscalls_counted 411#define syscalls_counted
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08bb84f2ad58..19b5f5c1c0ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
114 */ 114 */
115int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 115int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
116{ 116{
117 int ret = 0; 117 int ret, cpu;
118 118
119 if (type) 119 if (type)
120 return -EINVAL; 120 return -EINVAL;
121 121
122 kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
123 if (!kvm->arch.last_vcpu_ran)
124 return -ENOMEM;
125
126 for_each_possible_cpu(cpu)
127 *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
128
122 ret = kvm_alloc_stage2_pgd(kvm); 129 ret = kvm_alloc_stage2_pgd(kvm);
123 if (ret) 130 if (ret)
124 goto out_fail_alloc; 131 goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
141out_free_stage2_pgd: 148out_free_stage2_pgd:
142 kvm_free_stage2_pgd(kvm); 149 kvm_free_stage2_pgd(kvm);
143out_fail_alloc: 150out_fail_alloc:
151 free_percpu(kvm->arch.last_vcpu_ran);
152 kvm->arch.last_vcpu_ran = NULL;
144 return ret; 153 return ret;
145} 154}
146 155
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
168{ 177{
169 int i; 178 int i;
170 179
180 free_percpu(kvm->arch.last_vcpu_ran);
181 kvm->arch.last_vcpu_ran = NULL;
182
171 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 183 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
172 if (kvm->vcpus[i]) { 184 if (kvm->vcpus[i]) {
173 kvm_arch_vcpu_free(kvm->vcpus[i]); 185 kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
312 324
313void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 325void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
314{ 326{
327 int *last_ran;
328
329 last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
330
331 /*
332 * We might get preempted before the vCPU actually runs, but
333 * over-invalidation doesn't affect correctness.
334 */
335 if (*last_ran != vcpu->vcpu_id) {
336 kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
337 *last_ran = vcpu->vcpu_id;
338 }
339
315 vcpu->cpu = cpu; 340 vcpu->cpu = cpu;
316 vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); 341 vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
317 342
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index 729652854f90..6d810af2d9fd 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
55 __kvm_tlb_flush_vmid(kvm); 55 __kvm_tlb_flush_vmid(kvm);
56} 56}
57 57
58void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
59{
60 struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
61
62 /* Switch to requested VMID */
63 write_sysreg(kvm->arch.vttbr, VTTBR);
64 isb();
65
66 write_sysreg(0, TLBIALL);
67 dsb(nsh);
68 isb();
69
70 write_sysreg(0, VTTBR);
71}
72
58void __hyp_text __kvm_flush_vm_context(void) 73void __hyp_text __kvm_flush_vm_context(void)
59{ 74{
60 write_sysreg(0, TLBIALLNSNHIS); 75 write_sysreg(0, TLBIALLNSNHIS);
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 6d8e8e3365d1..4cdfab31a0b6 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -7,7 +7,7 @@
7 * : r4 = aborted context pc 7 * : r4 = aborted context pc
8 * : r5 = aborted context psr 8 * : r5 = aborted context psr
9 * 9 *
10 * Returns : r4-r5, r10-r11, r13 preserved 10 * Returns : r4-r5, r9-r11, r13 preserved
11 * 11 *
12 * Purpose : obtain information about current aborted instruction. 12 * Purpose : obtain information about current aborted instruction.
13 * Note: we read user space. This means we might cause a data 13 * Note: we read user space. This means we might cause a data
@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
48/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m 48/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
49/* d */ b do_DataAbort @ ldc rd, [rn, #m] 49/* d */ b do_DataAbort @ ldc rd, [rn, #m]
50/* e */ b .data_unknown 50/* e */ b .data_unknown
51/* f */ 51/* f */ b .data_unknown
52
53.data_unknown_r9:
54 ldr r9, [sp], #4
52.data_unknown: @ Part of jumptable 55.data_unknown: @ Part of jumptable
53 mov r0, r4 56 mov r0, r4
54 mov r1, r8 57 mov r1, r8
@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
57.data_arm_ldmstm: 60.data_arm_ldmstm:
58 tst r8, #1 << 21 @ check writeback bit 61 tst r8, #1 << 21 @ check writeback bit
59 beq do_DataAbort @ no writeback -> no fixup 62 beq do_DataAbort @ no writeback -> no fixup
63 str r9, [sp, #-4]!
60 mov r7, #0x11 64 mov r7, #0x11
61 orr r7, r7, #0x1100 65 orr r7, r7, #0x1100
62 and r6, r8, r7 66 and r6, r8, r7
@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
75 subne r7, r7, r6, lsl #2 @ Undo increment 79 subne r7, r7, r6, lsl #2 @ Undo increment
76 addeq r7, r7, r6, lsl #2 @ Undo decrement 80 addeq r7, r7, r6, lsl #2 @ Undo decrement
77 str r7, [r2, r9, lsr #14] @ Put register 'Rn' 81 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
82 ldr r9, [sp], #4
78 b do_DataAbort 83 b do_DataAbort
79 84
80.data_arm_lateldrhpre: 85.data_arm_lateldrhpre:
81 tst r8, #1 << 21 @ Check writeback bit 86 tst r8, #1 << 21 @ Check writeback bit
82 beq do_DataAbort @ No writeback -> no fixup 87 beq do_DataAbort @ No writeback -> no fixup
83.data_arm_lateldrhpost: 88.data_arm_lateldrhpost:
89 str r9, [sp, #-4]!
84 and r9, r8, #0x00f @ get Rm / low nibble of immediate value 90 and r9, r8, #0x00f @ get Rm / low nibble of immediate value
85 tst r8, #1 << 22 @ if (immediate offset) 91 tst r8, #1 << 22 @ if (immediate offset)
86 andne r6, r8, #0xf00 @ { immediate high nibble 92 andne r6, r8, #0xf00 @ { immediate high nibble
@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
93 subne r7, r7, r6 @ Undo incrmenet 99 subne r7, r7, r6 @ Undo incrmenet
94 addeq r7, r7, r6 @ Undo decrement 100 addeq r7, r7, r6 @ Undo decrement
95 str r7, [r2, r9, lsr #14] @ Put register 'Rn' 101 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
102 ldr r9, [sp], #4
96 b do_DataAbort 103 b do_DataAbort
97 104
98.data_arm_lateldrpreconst: 105.data_arm_lateldrpreconst:
@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
101.data_arm_lateldrpostconst: 108.data_arm_lateldrpostconst:
102 movs r6, r8, lsl #20 @ Get offset 109 movs r6, r8, lsl #20 @ Get offset
103 beq do_DataAbort @ zero -> no fixup 110 beq do_DataAbort @ zero -> no fixup
111 str r9, [sp, #-4]!
104 and r9, r8, #15 << 16 @ Extract 'n' from instruction 112 and r9, r8, #15 << 16 @ Extract 'n' from instruction
105 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' 113 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
106 tst r8, #1 << 23 @ Check U bit 114 tst r8, #1 << 23 @ Check U bit
107 subne r7, r7, r6, lsr #20 @ Undo increment 115 subne r7, r7, r6, lsr #20 @ Undo increment
108 addeq r7, r7, r6, lsr #20 @ Undo decrement 116 addeq r7, r7, r6, lsr #20 @ Undo decrement
109 str r7, [r2, r9, lsr #14] @ Put register 'Rn' 117 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
118 ldr r9, [sp], #4
110 b do_DataAbort 119 b do_DataAbort
111 120
112.data_arm_lateldrprereg: 121.data_arm_lateldrprereg:
@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
115.data_arm_lateldrpostreg: 124.data_arm_lateldrpostreg:
116 and r7, r8, #15 @ Extract 'm' from instruction 125 and r7, r8, #15 @ Extract 'm' from instruction
117 ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' 126 ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
127 str r9, [sp, #-4]!
118 mov r9, r8, lsr #7 @ get shift count 128 mov r9, r8, lsr #7 @ get shift count
119 ands r9, r9, #31 129 ands r9, r9, #31
120 and r7, r8, #0x70 @ get shift type 130 and r7, r8, #0x70 @ get shift type
@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
126 b .data_arm_apply_r6_and_rn 136 b .data_arm_apply_r6_and_rn
127 b .data_arm_apply_r6_and_rn @ 1: LSL #0 137 b .data_arm_apply_r6_and_rn @ 1: LSL #0
128 nop 138 nop
129 b .data_unknown @ 2: MUL? 139 b .data_unknown_r9 @ 2: MUL?
130 nop 140 nop
131 b .data_unknown @ 3: MUL? 141 b .data_unknown_r9 @ 3: MUL?
132 nop 142 nop
133 mov r6, r6, lsr r9 @ 4: LSR #!0 143 mov r6, r6, lsr r9 @ 4: LSR #!0
134 b .data_arm_apply_r6_and_rn 144 b .data_arm_apply_r6_and_rn
135 mov r6, r6, lsr #32 @ 5: LSR #32 145 mov r6, r6, lsr #32 @ 5: LSR #32
136 b .data_arm_apply_r6_and_rn 146 b .data_arm_apply_r6_and_rn
137 b .data_unknown @ 6: MUL? 147 b .data_unknown_r9 @ 6: MUL?
138 nop 148 nop
139 b .data_unknown @ 7: MUL? 149 b .data_unknown_r9 @ 7: MUL?
140 nop 150 nop
141 mov r6, r6, asr r9 @ 8: ASR #!0 151 mov r6, r6, asr r9 @ 8: ASR #!0
142 b .data_arm_apply_r6_and_rn 152 b .data_arm_apply_r6_and_rn
143 mov r6, r6, asr #32 @ 9: ASR #32 153 mov r6, r6, asr #32 @ 9: ASR #32
144 b .data_arm_apply_r6_and_rn 154 b .data_arm_apply_r6_and_rn
145 b .data_unknown @ A: MUL? 155 b .data_unknown_r9 @ A: MUL?
146 nop 156 nop
147 b .data_unknown @ B: MUL? 157 b .data_unknown_r9 @ B: MUL?
148 nop 158 nop
149 mov r6, r6, ror r9 @ C: ROR #!0 159 mov r6, r6, ror r9 @ C: ROR #!0
150 b .data_arm_apply_r6_and_rn 160 b .data_arm_apply_r6_and_rn
151 mov r6, r6, rrx @ D: RRX 161 mov r6, r6, rrx @ D: RRX
152 b .data_arm_apply_r6_and_rn 162 b .data_arm_apply_r6_and_rn
153 b .data_unknown @ E: MUL? 163 b .data_unknown_r9 @ E: MUL?
154 nop 164 nop
155 b .data_unknown @ F: MUL? 165 b .data_unknown_r9 @ F: MUL?
156 166
157.data_thumb_abort: 167.data_thumb_abort:
158 ldrh r8, [r4] @ read instruction 168 ldrh r8, [r4] @ read instruction
@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
190.data_thumb_pushpop: 200.data_thumb_pushpop:
191 tst r8, #1 << 10 201 tst r8, #1 << 10
192 beq .data_unknown 202 beq .data_unknown
203 str r9, [sp, #-4]!
193 and r6, r8, #0x55 @ hweight8(r8) + R bit 204 and r6, r8, #0x55 @ hweight8(r8) + R bit
194 and r9, r8, #0xaa 205 and r9, r8, #0xaa
195 add r6, r6, r9, lsr #1 206 add r6, r6, r9, lsr #1
@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
204 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH 215 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
205 subne r7, r7, r6, lsl #2 @ decrement SP if POP 216 subne r7, r7, r6, lsl #2 @ decrement SP if POP
206 str r7, [r2, #13 << 2] 217 str r7, [r2, #13 << 2]
218 ldr r9, [sp], #4
207 b do_DataAbort 219 b do_DataAbort
208 220
209.data_thumb_ldmstm: 221.data_thumb_ldmstm:
222 str r9, [sp, #-4]!
210 and r6, r8, #0x55 @ hweight8(r8) 223 and r6, r8, #0x55 @ hweight8(r8)
211 and r9, r8, #0xaa 224 and r9, r8, #0xaa
212 add r6, r6, r9, lsr #1 225 add r6, r6, r9, lsr #1
@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
219 and r6, r6, #15 @ number of regs to transfer 232 and r6, r6, #15 @ number of regs to transfer
220 sub r7, r7, r6, lsl #2 @ always decrement 233 sub r7, r7, r6, lsl #2 @ always decrement
221 str r7, [r2, r9, lsr #6] 234 str r7, [r2, r9, lsr #6]
235 ldr r9, [sp], #4
222 b do_DataAbort 236 b do_DataAbort
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index b65c193dc64e..7afbfb0f96a3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -300,8 +300,11 @@
300 ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000 300 ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
301 0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>; 301 0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
302 resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, 302 resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
303 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>; 303 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
304 reset-names = "core", "mgmt", "mgmt-sticky", "pipe"; 304 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
305 <&cru SRST_A_PCIE>;
306 reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
307 "pm", "pclk", "aclk";
305 status = "disabled"; 308 status = "disabled";
306 309
307 pcie0_intc: interrupt-controller { 310 pcie0_intc: interrupt-controller {
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 39feb85a6931..6e1cb8c5af4d 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_ALTERNATIVE_H 1#ifndef __ASM_ALTERNATIVE_H
2#define __ASM_ALTERNATIVE_H 2#define __ASM_ALTERNATIVE_H
3 3
4#include <asm/cpufeature.h> 4#include <asm/cpucaps.h>
5#include <asm/insn.h> 5#include <asm/insn.h>
6 6
7#ifndef __ASSEMBLY__ 7#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
new file mode 100644
index 000000000000..87b446535185
--- /dev/null
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -0,0 +1,40 @@
1/*
2 * arch/arm64/include/asm/cpucaps.h
3 *
4 * Copyright (C) 2016 ARM Ltd.
5 *
6 * This program is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_CPUCAPS_H
19#define __ASM_CPUCAPS_H
20
21#define ARM64_WORKAROUND_CLEAN_CACHE 0
22#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
23#define ARM64_WORKAROUND_845719 2
24#define ARM64_HAS_SYSREG_GIC_CPUIF 3
25#define ARM64_HAS_PAN 4
26#define ARM64_HAS_LSE_ATOMICS 5
27#define ARM64_WORKAROUND_CAVIUM_23154 6
28#define ARM64_WORKAROUND_834220 7
29#define ARM64_HAS_NO_HW_PREFETCH 8
30#define ARM64_HAS_UAO 9
31#define ARM64_ALT_PAN_NOT_UAO 10
32#define ARM64_HAS_VIRT_HOST_EXTN 11
33#define ARM64_WORKAROUND_CAVIUM_27456 12
34#define ARM64_HAS_32BIT_EL0 13
35#define ARM64_HYP_OFFSET_LOW 14
36#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
37
38#define ARM64_NCAPS 16
39
40#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a27c3245ba21..0bc0b1de90c4 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/jump_label.h> 12#include <linux/jump_label.h>
13 13
14#include <asm/cpucaps.h>
14#include <asm/hwcap.h> 15#include <asm/hwcap.h>
15#include <asm/sysreg.h> 16#include <asm/sysreg.h>
16 17
@@ -24,25 +25,6 @@
24#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap)) 25#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
25#define cpu_feature(x) ilog2(HWCAP_ ## x) 26#define cpu_feature(x) ilog2(HWCAP_ ## x)
26 27
27#define ARM64_WORKAROUND_CLEAN_CACHE 0
28#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
29#define ARM64_WORKAROUND_845719 2
30#define ARM64_HAS_SYSREG_GIC_CPUIF 3
31#define ARM64_HAS_PAN 4
32#define ARM64_HAS_LSE_ATOMICS 5
33#define ARM64_WORKAROUND_CAVIUM_23154 6
34#define ARM64_WORKAROUND_834220 7
35#define ARM64_HAS_NO_HW_PREFETCH 8
36#define ARM64_HAS_UAO 9
37#define ARM64_ALT_PAN_NOT_UAO 10
38#define ARM64_HAS_VIRT_HOST_EXTN 11
39#define ARM64_WORKAROUND_CAVIUM_27456 12
40#define ARM64_HAS_32BIT_EL0 13
41#define ARM64_HYP_OFFSET_LOW 14
42#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
43
44#define ARM64_NCAPS 16
45
46#ifndef __ASSEMBLY__ 28#ifndef __ASSEMBLY__
47 29
48#include <linux/kernel.h> 30#include <linux/kernel.h>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 18f746551bf6..ec3553eb9349 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
54extern void __kvm_flush_vm_context(void); 54extern void __kvm_flush_vm_context(void);
55extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 55extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
56extern void __kvm_tlb_flush_vmid(struct kvm *kvm); 56extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
57extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
57 58
58extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 59extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
59 60
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bd94e6766759..e5050388e062 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -62,6 +62,9 @@ struct kvm_arch {
62 /* VTTBR value associated with above pgd and vmid */ 62 /* VTTBR value associated with above pgd and vmid */
63 u64 vttbr; 63 u64 vttbr;
64 64
65 /* The last vcpu id that ran on each physical CPU */
66 int __percpu *last_vcpu_ran;
67
65 /* The maximum number of vCPUs depends on the used GIC model */ 68 /* The maximum number of vCPUs depends on the used GIC model */
66 int max_vcpus; 69 int max_vcpus;
67 70
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a79b969c26fc..6f72fe8b0e3e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
128 return v; 128 return v;
129} 129}
130 130
131#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) 131#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
132 132
133/* 133/*
134 * We currently only support a 40bit IPA. 134 * We currently only support a 40bit IPA.
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..fc756e22c84c 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -5,7 +5,6 @@
5 5
6#include <linux/stringify.h> 6#include <linux/stringify.h>
7#include <asm/alternative.h> 7#include <asm/alternative.h>
8#include <asm/cpufeature.h>
9 8
10#ifdef __ASSEMBLER__ 9#ifdef __ASSEMBLER__
11 10
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 9cc0ea784ae6..88e2f2b938f0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
64 write_sysreg(0, vttbr_el2); 64 write_sysreg(0, vttbr_el2);
65} 65}
66 66
67void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
68{
69 struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
70
71 /* Switch to requested VMID */
72 write_sysreg(kvm->arch.vttbr, vttbr_el2);
73 isb();
74
75 asm volatile("tlbi vmalle1" : : );
76 dsb(nsh);
77 isb();
78
79 write_sysreg(0, vttbr_el2);
80}
81
67void __hyp_text __kvm_flush_vm_context(void) 82void __hyp_text __kvm_flush_vm_context(void)
68{ 83{
69 dsb(ishst); 84 dsb(ishst);
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index fbf40d3c8123..1a6bac7b076f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
263 263
264bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ 264bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
265 VMLINUX_ENTRY_ADDRESS=$(entry-y) \ 265 VMLINUX_ENTRY_ADDRESS=$(entry-y) \
266 PLATFORM=$(platform-y) 266 PLATFORM="$(platform-y)"
267ifdef CONFIG_32BIT 267ifdef CONFIG_32BIT
268bootvars-y += ADDR_BITS=32 268bootvars-y += ADDR_BITS=32
269endif 269endif
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index f604a272d91d..ffe3a1508e72 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -84,12 +84,13 @@
84 fpga_regs: system-controller@1f000000 { 84 fpga_regs: system-controller@1f000000 {
85 compatible = "mti,malta-fpga", "syscon", "simple-mfd"; 85 compatible = "mti,malta-fpga", "syscon", "simple-mfd";
86 reg = <0x1f000000 0x1000>; 86 reg = <0x1f000000 0x1000>;
87 native-endian;
87 88
88 reboot { 89 reboot {
89 compatible = "syscon-reboot"; 90 compatible = "syscon-reboot";
90 regmap = <&fpga_regs>; 91 regmap = <&fpga_regs>;
91 offset = <0x500>; 92 offset = <0x500>;
92 mask = <0x4d>; 93 mask = <0x42>;
93 }; 94 };
94 }; 95 };
95 96
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 0ea73e845440..d493ccbf274a 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -30,9 +30,19 @@ static __initdata const void *mach_match_data;
30 30
31void __init prom_init(void) 31void __init prom_init(void)
32{ 32{
33 plat_get_fdt();
34 BUG_ON(!fdt);
35}
36
37void __init *plat_get_fdt(void)
38{
33 const struct mips_machine *check_mach; 39 const struct mips_machine *check_mach;
34 const struct of_device_id *match; 40 const struct of_device_id *match;
35 41
42 if (fdt)
43 /* Already set up */
44 return (void *)fdt;
45
36 if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) { 46 if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
37 /* 47 /*
38 * We booted using the UHI boot protocol, so we have been 48 * We booted using the UHI boot protocol, so we have been
@@ -75,12 +85,6 @@ void __init prom_init(void)
75 /* Retrieve the machine's FDT */ 85 /* Retrieve the machine's FDT */
76 fdt = mach->fdt; 86 fdt = mach->fdt;
77 } 87 }
78
79 BUG_ON(!fdt);
80}
81
82void __init *plat_get_fdt(void)
83{
84 return (void *)fdt; 88 return (void *)fdt;
85} 89}
86 90
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 355dc25172e7..c05369e0b8d6 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -63,6 +63,8 @@ do { \
63extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, 63extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
64 struct mips_fpu_struct *ctx, int has_fpu, 64 struct mips_fpu_struct *ctx, int has_fpu,
65 void *__user *fault_addr); 65 void *__user *fault_addr);
66void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
67 struct task_struct *tsk);
66int process_fpemu_return(int sig, void __user *fault_addr, 68int process_fpemu_return(int sig, void __user *fault_addr,
67 unsigned long fcr31); 69 unsigned long fcr31);
68int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, 70int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
81 set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN); 83 set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
82} 84}
83 85
86/*
87 * Mask the FCSR Cause bits according to the Enable bits, observing
88 * that Unimplemented is always enabled.
89 */
90static inline unsigned long mask_fcr31_x(unsigned long fcr31)
91{
92 return fcr31 & (FPU_CSR_UNI_X |
93 ((fcr31 & FPU_CSR_ALL_E) <<
94 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
95}
96
84#endif /* _ASM_FPU_EMULATOR_H */ 97#endif /* _ASM_FPU_EMULATOR_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 07f58cfc1ab9..bebec370324f 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
293 /* Host KSEG0 address of the EI/DI offset */ 293 /* Host KSEG0 address of the EI/DI offset */
294 void *kseg0_commpage; 294 void *kseg0_commpage;
295 295
296 u32 io_gpr; /* GPR used as IO source/target */ 296 /* Resume PC after MMIO completion */
297 unsigned long io_pc;
298 /* GPR used as IO source/target */
299 u32 io_gpr;
297 300
298 struct hrtimer comparecount_timer; 301 struct hrtimer comparecount_timer;
299 /* Count timer control KVM register */ 302 /* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
315 /* Bitmask of pending exceptions to be cleared */ 318 /* Bitmask of pending exceptions to be cleared */
316 unsigned long pending_exceptions_clr; 319 unsigned long pending_exceptions_clr;
317 320
318 u32 pending_load_cause;
319
320 /* Save/Restore the entryhi register when are are preempted/scheduled back in */ 321 /* Save/Restore the entryhi register when are are preempted/scheduled back in */
321 unsigned long preempt_entryhi; 322 unsigned long preempt_entryhi;
322 323
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90d..c0ae27971e31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
76} while (0) 76} while (0)
77 77
78/* 78/*
79 * Check FCSR for any unmasked exceptions pending set with `ptrace',
80 * clear them and send a signal.
81 */
82#define __sanitize_fcr31(next) \
83do { \
84 unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
85 void __user *pc; \
86 \
87 if (unlikely(fcr31)) { \
88 pc = (void __user *)task_pt_regs(next)->cp0_epc; \
89 next->thread.fpu.fcr31 &= ~fcr31; \
90 force_fcr31_sig(fcr31, pc, next); \
91 } \
92} while (0)
93
94/*
79 * For newly created kernel threads switch_to() will return to 95 * For newly created kernel threads switch_to() will return to
80 * ret_from_kernel_thread, newly created user threads to ret_from_fork. 96 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
81 * That is, everything following resume() will be skipped for new threads. 97 * That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
85do { \ 101do { \
86 __mips_mt_fpaff_switch_to(prev); \ 102 __mips_mt_fpaff_switch_to(prev); \
87 lose_fpu_inatomic(1, prev); \ 103 lose_fpu_inatomic(1, prev); \
104 if (tsk_used_math(next)) \
105 __sanitize_fcr31(next); \
88 if (cpu_has_dsp) { \ 106 if (cpu_has_dsp) { \
89 __save_dsp(prev); \ 107 __save_dsp(prev); \
90 __restore_dsp(next); \ 108 __restore_dsp(next); \
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 2a45867d3b4f..a4964c334cab 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
21 21
22static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); 22static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
23 23
24phys_addr_t __weak mips_cpc_default_phys_base(void)
25{
26 return 0;
27}
28
24/** 29/**
25 * mips_cpc_phys_base - retrieve the physical base address of the CPC 30 * mips_cpc_phys_base - retrieve the physical base address of the CPC
26 * 31 *
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
43 if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK) 48 if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
44 return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK; 49 return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
45 50
46 /* Otherwise, give it the default address & enable it */ 51 /* Otherwise, use the default address */
47 cpc_base = mips_cpc_default_phys_base(); 52 cpc_base = mips_cpc_default_phys_base();
53 if (!cpc_base)
54 return cpc_base;
55
56 /* Enable the CPC, mapped at the default address */
48 write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK); 57 write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
49 return cpc_base; 58 return cpc_base;
50} 59}
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 22dedd62818a..bd09853aecdf 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
899 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction 899 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
900 * @regs: Process register set 900 * @regs: Process register set
901 * @inst: Instruction to decode and emulate 901 * @inst: Instruction to decode and emulate
902 * @fcr31: Floating Point Control and Status Register returned 902 * @fcr31: Floating Point Control and Status Register Cause bits returned
903 */ 903 */
904int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) 904int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
905{ 905{
@@ -1172,13 +1172,13 @@ fpu_emul:
1172 1172
1173 err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0, 1173 err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1174 &fault_addr); 1174 &fault_addr);
1175 *fcr31 = current->thread.fpu.fcr31;
1176 1175
1177 /* 1176 /*
1178 * We can't allow the emulated instruction to leave any of 1177 * We can't allow the emulated instruction to leave any
1179 * the cause bits set in $fcr31. 1178 * enabled Cause bits set in $fcr31.
1180 */ 1179 */
1181 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 1180 *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
1181 current->thread.fpu.fcr31 &= ~res;
1182 1182
1183 /* 1183 /*
1184 * this is a tricky issue - lose_fpu() uses LL/SC atomics 1184 * this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6103b24d1bfc..a92994d60e91 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
79} 79}
80 80
81/* 81/*
82 * Poke at FCSR according to its mask. Don't set the cause bits as 82 * Poke at FCSR according to its mask. Set the Cause bits even
83 * this is currently not handled correctly in FP context restoration 83 * if a corresponding Enable bit is set. This will be noticed at
84 * and will cause an oops if a corresponding enable bit is set. 84 * the time the thread is switched to and SIGFPE thrown accordingly.
85 */ 85 */
86static void ptrace_setfcr31(struct task_struct *child, u32 value) 86static void ptrace_setfcr31(struct task_struct *child, u32 value)
87{ 87{
88 u32 fcr31; 88 u32 fcr31;
89 u32 mask; 89 u32 mask;
90 90
91 value &= ~FPU_CSR_ALL_X;
92 fcr31 = child->thread.fpu.fcr31; 91 fcr31 = child->thread.fpu.fcr31;
93 mask = boot_cpu_data.fpu_msk31; 92 mask = boot_cpu_data.fpu_msk31;
94 child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); 93 child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
817 break; 816 break;
818#endif 817#endif
819 case FPC_CSR: 818 case FPC_CSR:
819 init_fp_ctx(child);
820 ptrace_setfcr31(child, data); 820 ptrace_setfcr31(child, data);
821 break; 821 break;
822 case DSP_BASE ... DSP_BASE + 5: { 822 case DSP_BASE ... DSP_BASE + 5: {
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index b4ac6374a38f..918f2f6d3861 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -21,106 +21,84 @@
21#define EX(a,b) \ 21#define EX(a,b) \
229: a,##b; \ 229: a,##b; \
23 .section __ex_table,"a"; \ 23 .section __ex_table,"a"; \
24 PTR 9b,fault; \
25 .previous
26
27#define EX2(a,b) \
289: a,##b; \
29 .section __ex_table,"a"; \
24 PTR 9b,bad_stack; \ 30 PTR 9b,bad_stack; \
31 PTR 9b+4,bad_stack; \
25 .previous 32 .previous
26 33
27 .set noreorder 34 .set noreorder
28 .set mips1 35 .set mips1
29 /* Save floating point context */ 36
37/**
38 * _save_fp_context() - save FP context from the FPU
39 * @a0 - pointer to fpregs field of sigcontext
40 * @a1 - pointer to fpc_csr field of sigcontext
41 *
42 * Save FP context, including the 32 FP data registers and the FP
43 * control & status register, from the FPU to signal context.
44 */
30LEAF(_save_fp_context) 45LEAF(_save_fp_context)
31 .set push 46 .set push
32 SET_HARDFLOAT 47 SET_HARDFLOAT
33 li v0, 0 # assume success 48 li v0, 0 # assume success
34 cfc1 t1,fcr31 49 cfc1 t1, fcr31
35 EX(swc1 $f0,(SC_FPREGS+0)(a0)) 50 EX2(s.d $f0, 0(a0))
36 EX(swc1 $f1,(SC_FPREGS+8)(a0)) 51 EX2(s.d $f2, 16(a0))
37 EX(swc1 $f2,(SC_FPREGS+16)(a0)) 52 EX2(s.d $f4, 32(a0))
38 EX(swc1 $f3,(SC_FPREGS+24)(a0)) 53 EX2(s.d $f6, 48(a0))
39 EX(swc1 $f4,(SC_FPREGS+32)(a0)) 54 EX2(s.d $f8, 64(a0))
40 EX(swc1 $f5,(SC_FPREGS+40)(a0)) 55 EX2(s.d $f10, 80(a0))
41 EX(swc1 $f6,(SC_FPREGS+48)(a0)) 56 EX2(s.d $f12, 96(a0))
42 EX(swc1 $f7,(SC_FPREGS+56)(a0)) 57 EX2(s.d $f14, 112(a0))
43 EX(swc1 $f8,(SC_FPREGS+64)(a0)) 58 EX2(s.d $f16, 128(a0))
44 EX(swc1 $f9,(SC_FPREGS+72)(a0)) 59 EX2(s.d $f18, 144(a0))
45 EX(swc1 $f10,(SC_FPREGS+80)(a0)) 60 EX2(s.d $f20, 160(a0))
46 EX(swc1 $f11,(SC_FPREGS+88)(a0)) 61 EX2(s.d $f22, 176(a0))
47 EX(swc1 $f12,(SC_FPREGS+96)(a0)) 62 EX2(s.d $f24, 192(a0))
48 EX(swc1 $f13,(SC_FPREGS+104)(a0)) 63 EX2(s.d $f26, 208(a0))
49 EX(swc1 $f14,(SC_FPREGS+112)(a0)) 64 EX2(s.d $f28, 224(a0))
50 EX(swc1 $f15,(SC_FPREGS+120)(a0)) 65 EX2(s.d $f30, 240(a0))
51 EX(swc1 $f16,(SC_FPREGS+128)(a0))
52 EX(swc1 $f17,(SC_FPREGS+136)(a0))
53 EX(swc1 $f18,(SC_FPREGS+144)(a0))
54 EX(swc1 $f19,(SC_FPREGS+152)(a0))
55 EX(swc1 $f20,(SC_FPREGS+160)(a0))
56 EX(swc1 $f21,(SC_FPREGS+168)(a0))
57 EX(swc1 $f22,(SC_FPREGS+176)(a0))
58 EX(swc1 $f23,(SC_FPREGS+184)(a0))
59 EX(swc1 $f24,(SC_FPREGS+192)(a0))
60 EX(swc1 $f25,(SC_FPREGS+200)(a0))
61 EX(swc1 $f26,(SC_FPREGS+208)(a0))
62 EX(swc1 $f27,(SC_FPREGS+216)(a0))
63 EX(swc1 $f28,(SC_FPREGS+224)(a0))
64 EX(swc1 $f29,(SC_FPREGS+232)(a0))
65 EX(swc1 $f30,(SC_FPREGS+240)(a0))
66 EX(swc1 $f31,(SC_FPREGS+248)(a0))
67 EX(sw t1,(SC_FPC_CSR)(a0))
68 cfc1 t0,$0 # implementation/version
69 jr ra 66 jr ra
67 EX(sw t1, (a1))
70 .set pop 68 .set pop
71 .set nomacro
72 EX(sw t0,(SC_FPC_EIR)(a0))
73 .set macro
74 END(_save_fp_context) 69 END(_save_fp_context)
75 70
76/* 71/**
77 * Restore FPU state: 72 * _restore_fp_context() - restore FP context to the FPU
78 * - fp gp registers 73 * @a0 - pointer to fpregs field of sigcontext
79 * - cp1 status/control register 74 * @a1 - pointer to fpc_csr field of sigcontext
80 * 75 *
81 * We base the decision which registers to restore from the signal stack 76 * Restore FP context, including the 32 FP data registers and the FP
82 * frame on the current content of c0_status, not on the content of the 77 * control & status register, from signal context to the FPU.
83 * stack frame which might have been changed by the user.
84 */ 78 */
85LEAF(_restore_fp_context) 79LEAF(_restore_fp_context)
86 .set push 80 .set push
87 SET_HARDFLOAT 81 SET_HARDFLOAT
88 li v0, 0 # assume success 82 li v0, 0 # assume success
89 EX(lw t0,(SC_FPC_CSR)(a0)) 83 EX(lw t0, (a1))
90 EX(lwc1 $f0,(SC_FPREGS+0)(a0)) 84 EX2(l.d $f0, 0(a0))
91 EX(lwc1 $f1,(SC_FPREGS+8)(a0)) 85 EX2(l.d $f2, 16(a0))
92 EX(lwc1 $f2,(SC_FPREGS+16)(a0)) 86 EX2(l.d $f4, 32(a0))
93 EX(lwc1 $f3,(SC_FPREGS+24)(a0)) 87 EX2(l.d $f6, 48(a0))
94 EX(lwc1 $f4,(SC_FPREGS+32)(a0)) 88 EX2(l.d $f8, 64(a0))
95 EX(lwc1 $f5,(SC_FPREGS+40)(a0)) 89 EX2(l.d $f10, 80(a0))
96 EX(lwc1 $f6,(SC_FPREGS+48)(a0)) 90 EX2(l.d $f12, 96(a0))
97 EX(lwc1 $f7,(SC_FPREGS+56)(a0)) 91 EX2(l.d $f14, 112(a0))
98 EX(lwc1 $f8,(SC_FPREGS+64)(a0)) 92 EX2(l.d $f16, 128(a0))
99 EX(lwc1 $f9,(SC_FPREGS+72)(a0)) 93 EX2(l.d $f18, 144(a0))
100 EX(lwc1 $f10,(SC_FPREGS+80)(a0)) 94 EX2(l.d $f20, 160(a0))
101 EX(lwc1 $f11,(SC_FPREGS+88)(a0)) 95 EX2(l.d $f22, 176(a0))
102 EX(lwc1 $f12,(SC_FPREGS+96)(a0)) 96 EX2(l.d $f24, 192(a0))
103 EX(lwc1 $f13,(SC_FPREGS+104)(a0)) 97 EX2(l.d $f26, 208(a0))
104 EX(lwc1 $f14,(SC_FPREGS+112)(a0)) 98 EX2(l.d $f28, 224(a0))
105 EX(lwc1 $f15,(SC_FPREGS+120)(a0)) 99 EX2(l.d $f30, 240(a0))
106 EX(lwc1 $f16,(SC_FPREGS+128)(a0))
107 EX(lwc1 $f17,(SC_FPREGS+136)(a0))
108 EX(lwc1 $f18,(SC_FPREGS+144)(a0))
109 EX(lwc1 $f19,(SC_FPREGS+152)(a0))
110 EX(lwc1 $f20,(SC_FPREGS+160)(a0))
111 EX(lwc1 $f21,(SC_FPREGS+168)(a0))
112 EX(lwc1 $f22,(SC_FPREGS+176)(a0))
113 EX(lwc1 $f23,(SC_FPREGS+184)(a0))
114 EX(lwc1 $f24,(SC_FPREGS+192)(a0))
115 EX(lwc1 $f25,(SC_FPREGS+200)(a0))
116 EX(lwc1 $f26,(SC_FPREGS+208)(a0))
117 EX(lwc1 $f27,(SC_FPREGS+216)(a0))
118 EX(lwc1 $f28,(SC_FPREGS+224)(a0))
119 EX(lwc1 $f29,(SC_FPREGS+232)(a0))
120 EX(lwc1 $f30,(SC_FPREGS+240)(a0))
121 EX(lwc1 $f31,(SC_FPREGS+248)(a0))
122 jr ra 100 jr ra
123 ctc1 t0,fcr31 101 ctc1 t0, fcr31
124 .set pop 102 .set pop
125 END(_restore_fp_context) 103 END(_restore_fp_context)
126 .set reorder 104 .set reorder
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 47077380c15c..9cc7bfab3419 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -21,7 +21,14 @@
21 .set push 21 .set push
22 SET_HARDFLOAT 22 SET_HARDFLOAT
23 23
24 /* Save floating point context */ 24/**
25 * _save_fp_context() - save FP context from the FPU
26 * @a0 - pointer to fpregs field of sigcontext
27 * @a1 - pointer to fpc_csr field of sigcontext
28 *
29 * Save FP context, including the 32 FP data registers and the FP
30 * control & status register, from the FPU to signal context.
31 */
25 LEAF(_save_fp_context) 32 LEAF(_save_fp_context)
26 mfc0 t0,CP0_STATUS 33 mfc0 t0,CP0_STATUS
27 sll t0,t0,2 34 sll t0,t0,2
@@ -30,59 +37,59 @@
30 37
31 cfc1 t1,fcr31 38 cfc1 t1,fcr31
32 /* Store the 16 double precision registers */ 39 /* Store the 16 double precision registers */
33 sdc1 $f0,(SC_FPREGS+0)(a0) 40 sdc1 $f0,0(a0)
34 sdc1 $f2,(SC_FPREGS+16)(a0) 41 sdc1 $f2,16(a0)
35 sdc1 $f4,(SC_FPREGS+32)(a0) 42 sdc1 $f4,32(a0)
36 sdc1 $f6,(SC_FPREGS+48)(a0) 43 sdc1 $f6,48(a0)
37 sdc1 $f8,(SC_FPREGS+64)(a0) 44 sdc1 $f8,64(a0)
38 sdc1 $f10,(SC_FPREGS+80)(a0) 45 sdc1 $f10,80(a0)
39 sdc1 $f12,(SC_FPREGS+96)(a0) 46 sdc1 $f12,96(a0)
40 sdc1 $f14,(SC_FPREGS+112)(a0) 47 sdc1 $f14,112(a0)
41 sdc1 $f16,(SC_FPREGS+128)(a0) 48 sdc1 $f16,128(a0)
42 sdc1 $f18,(SC_FPREGS+144)(a0) 49 sdc1 $f18,144(a0)
43 sdc1 $f20,(SC_FPREGS+160)(a0) 50 sdc1 $f20,160(a0)
44 sdc1 $f22,(SC_FPREGS+176)(a0) 51 sdc1 $f22,176(a0)
45 sdc1 $f24,(SC_FPREGS+192)(a0) 52 sdc1 $f24,192(a0)
46 sdc1 $f26,(SC_FPREGS+208)(a0) 53 sdc1 $f26,208(a0)
47 sdc1 $f28,(SC_FPREGS+224)(a0) 54 sdc1 $f28,224(a0)
48 sdc1 $f30,(SC_FPREGS+240)(a0) 55 sdc1 $f30,240(a0)
49 jr ra 56 jr ra
50 sw t0,SC_FPC_CSR(a0) 57 sw t0,(a1)
511: jr ra 581: jr ra
52 nop 59 nop
53 END(_save_fp_context) 60 END(_save_fp_context)
54 61
55/* Restore FPU state: 62/**
56 * - fp gp registers 63 * _restore_fp_context() - restore FP context to the FPU
57 * - cp1 status/control register 64 * @a0 - pointer to fpregs field of sigcontext
65 * @a1 - pointer to fpc_csr field of sigcontext
58 * 66 *
59 * We base the decision which registers to restore from the signal stack 67 * Restore FP context, including the 32 FP data registers and the FP
60 * frame on the current content of c0_status, not on the content of the 68 * control & status register, from signal context to the FPU.
61 * stack frame which might have been changed by the user.
62 */ 69 */
63 LEAF(_restore_fp_context) 70 LEAF(_restore_fp_context)
64 mfc0 t0,CP0_STATUS 71 mfc0 t0,CP0_STATUS
65 sll t0,t0,2 72 sll t0,t0,2
66 73
67 bgez t0,1f 74 bgez t0,1f
68 lw t0,SC_FPC_CSR(a0) 75 lw t0,(a1)
69 /* Restore the 16 double precision registers */ 76 /* Restore the 16 double precision registers */
70 ldc1 $f0,(SC_FPREGS+0)(a0) 77 ldc1 $f0,0(a0)
71 ldc1 $f2,(SC_FPREGS+16)(a0) 78 ldc1 $f2,16(a0)
72 ldc1 $f4,(SC_FPREGS+32)(a0) 79 ldc1 $f4,32(a0)
73 ldc1 $f6,(SC_FPREGS+48)(a0) 80 ldc1 $f6,48(a0)
74 ldc1 $f8,(SC_FPREGS+64)(a0) 81 ldc1 $f8,64(a0)
75 ldc1 $f10,(SC_FPREGS+80)(a0) 82 ldc1 $f10,80(a0)
76 ldc1 $f12,(SC_FPREGS+96)(a0) 83 ldc1 $f12,96(a0)
77 ldc1 $f14,(SC_FPREGS+112)(a0) 84 ldc1 $f14,112(a0)
78 ldc1 $f16,(SC_FPREGS+128)(a0) 85 ldc1 $f16,128(a0)
79 ldc1 $f18,(SC_FPREGS+144)(a0) 86 ldc1 $f18,144(a0)
80 ldc1 $f20,(SC_FPREGS+160)(a0) 87 ldc1 $f20,160(a0)
81 ldc1 $f22,(SC_FPREGS+176)(a0) 88 ldc1 $f22,176(a0)
82 ldc1 $f24,(SC_FPREGS+192)(a0) 89 ldc1 $f24,192(a0)
83 ldc1 $f26,(SC_FPREGS+208)(a0) 90 ldc1 $f26,208(a0)
84 ldc1 $f28,(SC_FPREGS+224)(a0) 91 ldc1 $f28,224(a0)
85 ldc1 $f30,(SC_FPREGS+240)(a0) 92 ldc1 $f30,240(a0)
86 jr ra 93 jr ra
87 ctc1 t0,fcr31 94 ctc1 t0,fcr31
881: jr ra 951: jr ra
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0891..1958910b75c0 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
200 200
201#if defined(CONFIG_USE_OF) 201#if defined(CONFIG_USE_OF)
202 /* Get any additional entropy passed in device tree */ 202 /* Get any additional entropy passed in device tree */
203 { 203 if (initial_boot_params) {
204 int node, len; 204 int node, len;
205 u64 *prop; 205 u64 *prop;
206 206
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 0d57909d9026..f66e5ce505b2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
368 end = PFN_DOWN(boot_mem_map.map[i].addr 368 end = PFN_DOWN(boot_mem_map.map[i].addr
369 + boot_mem_map.map[i].size); 369 + boot_mem_map.map[i].size);
370 370
371#ifndef CONFIG_HIGHMEM
372 /*
373 * Skip highmem here so we get an accurate max_low_pfn if low
374 * memory stops short of high memory.
375 * If the region overlaps HIGHMEM_START, end is clipped so
376 * max_pfn excludes the highmem portion.
377 */
378 if (start >= PFN_DOWN(HIGHMEM_START))
379 continue;
380 if (end > PFN_DOWN(HIGHMEM_START))
381 end = PFN_DOWN(HIGHMEM_START);
382#endif
383
371 if (end > max_low_pfn) 384 if (end > max_low_pfn)
372 max_low_pfn = end; 385 max_low_pfn = end;
373 if (start < min_low_pfn) 386 if (start < min_low_pfn)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 1f5fdee1dfc3..3905003dfe2b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
156 print_ip_sym(pc); 156 print_ip_sym(pc);
157 pc = unwind_stack(task, &sp, pc, &ra); 157 pc = unwind_stack(task, &sp, pc, &ra);
158 } while (pc); 158 } while (pc);
159 printk("\n"); 159 pr_cont("\n");
160} 160}
161 161
162/* 162/*
@@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
174 printk("Stack :"); 174 printk("Stack :");
175 i = 0; 175 i = 0;
176 while ((unsigned long) sp & (PAGE_SIZE - 1)) { 176 while ((unsigned long) sp & (PAGE_SIZE - 1)) {
177 if (i && ((i % (64 / field)) == 0)) 177 if (i && ((i % (64 / field)) == 0)) {
178 printk("\n "); 178 pr_cont("\n");
179 printk(" ");
180 }
179 if (i > 39) { 181 if (i > 39) {
180 printk(" ..."); 182 pr_cont(" ...");
181 break; 183 break;
182 } 184 }
183 185
184 if (__get_user(stackdata, sp++)) { 186 if (__get_user(stackdata, sp++)) {
185 printk(" (Bad stack address)"); 187 pr_cont(" (Bad stack address)");
186 break; 188 break;
187 } 189 }
188 190
189 printk(" %0*lx", field, stackdata); 191 pr_cont(" %0*lx", field, stackdata);
190 i++; 192 i++;
191 } 193 }
192 printk("\n"); 194 pr_cont("\n");
193 show_backtrace(task, regs); 195 show_backtrace(task, regs);
194} 196}
195 197
@@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
229 long i; 231 long i;
230 unsigned short __user *pc16 = NULL; 232 unsigned short __user *pc16 = NULL;
231 233
232 printk("\nCode:"); 234 printk("Code:");
233 235
234 if ((unsigned long)pc & 1) 236 if ((unsigned long)pc & 1)
235 pc16 = (unsigned short __user *)((unsigned long)pc & ~1); 237 pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
236 for(i = -3 ; i < 6 ; i++) { 238 for(i = -3 ; i < 6 ; i++) {
237 unsigned int insn; 239 unsigned int insn;
238 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { 240 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
239 printk(" (Bad address in epc)\n"); 241 pr_cont(" (Bad address in epc)\n");
240 break; 242 break;
241 } 243 }
242 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); 244 pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
243 } 245 }
246 pr_cont("\n");
244} 247}
245 248
246static void __show_regs(const struct pt_regs *regs) 249static void __show_regs(const struct pt_regs *regs)
@@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
259 if ((i % 4) == 0) 262 if ((i % 4) == 0)
260 printk("$%2d :", i); 263 printk("$%2d :", i);
261 if (i == 0) 264 if (i == 0)
262 printk(" %0*lx", field, 0UL); 265 pr_cont(" %0*lx", field, 0UL);
263 else if (i == 26 || i == 27) 266 else if (i == 26 || i == 27)
264 printk(" %*s", field, ""); 267 pr_cont(" %*s", field, "");
265 else 268 else
266 printk(" %0*lx", field, regs->regs[i]); 269 pr_cont(" %0*lx", field, regs->regs[i]);
267 270
268 i++; 271 i++;
269 if ((i % 4) == 0) 272 if ((i % 4) == 0)
270 printk("\n"); 273 pr_cont("\n");
271 } 274 }
272 275
273#ifdef CONFIG_CPU_HAS_SMARTMIPS 276#ifdef CONFIG_CPU_HAS_SMARTMIPS
@@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
288 291
289 if (cpu_has_3kex) { 292 if (cpu_has_3kex) {
290 if (regs->cp0_status & ST0_KUO) 293 if (regs->cp0_status & ST0_KUO)
291 printk("KUo "); 294 pr_cont("KUo ");
292 if (regs->cp0_status & ST0_IEO) 295 if (regs->cp0_status & ST0_IEO)
293 printk("IEo "); 296 pr_cont("IEo ");
294 if (regs->cp0_status & ST0_KUP) 297 if (regs->cp0_status & ST0_KUP)
295 printk("KUp "); 298 pr_cont("KUp ");
296 if (regs->cp0_status & ST0_IEP) 299 if (regs->cp0_status & ST0_IEP)
297 printk("IEp "); 300 pr_cont("IEp ");
298 if (regs->cp0_status & ST0_KUC) 301 if (regs->cp0_status & ST0_KUC)
299 printk("KUc "); 302 pr_cont("KUc ");
300 if (regs->cp0_status & ST0_IEC) 303 if (regs->cp0_status & ST0_IEC)
301 printk("IEc "); 304 pr_cont("IEc ");
302 } else if (cpu_has_4kex) { 305 } else if (cpu_has_4kex) {
303 if (regs->cp0_status & ST0_KX) 306 if (regs->cp0_status & ST0_KX)
304 printk("KX "); 307 pr_cont("KX ");
305 if (regs->cp0_status & ST0_SX) 308 if (regs->cp0_status & ST0_SX)
306 printk("SX "); 309 pr_cont("SX ");
307 if (regs->cp0_status & ST0_UX) 310 if (regs->cp0_status & ST0_UX)
308 printk("UX "); 311 pr_cont("UX ");
309 switch (regs->cp0_status & ST0_KSU) { 312 switch (regs->cp0_status & ST0_KSU) {
310 case KSU_USER: 313 case KSU_USER:
311 printk("USER "); 314 pr_cont("USER ");
312 break; 315 break;
313 case KSU_SUPERVISOR: 316 case KSU_SUPERVISOR:
314 printk("SUPERVISOR "); 317 pr_cont("SUPERVISOR ");
315 break; 318 break;
316 case KSU_KERNEL: 319 case KSU_KERNEL:
317 printk("KERNEL "); 320 pr_cont("KERNEL ");
318 break; 321 break;
319 default: 322 default:
320 printk("BAD_MODE "); 323 pr_cont("BAD_MODE ");
321 break; 324 break;
322 } 325 }
323 if (regs->cp0_status & ST0_ERL) 326 if (regs->cp0_status & ST0_ERL)
324 printk("ERL "); 327 pr_cont("ERL ");
325 if (regs->cp0_status & ST0_EXL) 328 if (regs->cp0_status & ST0_EXL)
326 printk("EXL "); 329 pr_cont("EXL ");
327 if (regs->cp0_status & ST0_IE) 330 if (regs->cp0_status & ST0_IE)
328 printk("IE "); 331 pr_cont("IE ");
329 } 332 }
330 printk("\n"); 333 pr_cont("\n");
331 334
332 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; 335 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
333 printk("Cause : %08x (ExcCode %02x)\n", cause, exccode); 336 printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
705 exception_exit(prev_state); 708 exception_exit(prev_state);
706} 709}
707 710
711/*
712 * Send SIGFPE according to FCSR Cause bits, which must have already
713 * been masked against Enable bits. This is impotant as Inexact can
714 * happen together with Overflow or Underflow, and `ptrace' can set
715 * any bits.
716 */
717void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
718 struct task_struct *tsk)
719{
720 struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
721
722 if (fcr31 & FPU_CSR_INV_X)
723 si.si_code = FPE_FLTINV;
724 else if (fcr31 & FPU_CSR_DIV_X)
725 si.si_code = FPE_FLTDIV;
726 else if (fcr31 & FPU_CSR_OVF_X)
727 si.si_code = FPE_FLTOVF;
728 else if (fcr31 & FPU_CSR_UDF_X)
729 si.si_code = FPE_FLTUND;
730 else if (fcr31 & FPU_CSR_INE_X)
731 si.si_code = FPE_FLTRES;
732 else
733 si.si_code = __SI_FAULT;
734 force_sig_info(SIGFPE, &si, tsk);
735}
736
708int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) 737int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
709{ 738{
710 struct siginfo si = { 0 }; 739 struct siginfo si = { 0 };
@@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
715 return 0; 744 return 0;
716 745
717 case SIGFPE: 746 case SIGFPE:
718 si.si_addr = fault_addr; 747 force_fcr31_sig(fcr31, fault_addr, current);
719 si.si_signo = sig;
720 /*
721 * Inexact can happen together with Overflow or Underflow.
722 * Respect the mask to deliver the correct exception.
723 */
724 fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
725 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
726 if (fcr31 & FPU_CSR_INV_X)
727 si.si_code = FPE_FLTINV;
728 else if (fcr31 & FPU_CSR_DIV_X)
729 si.si_code = FPE_FLTDIV;
730 else if (fcr31 & FPU_CSR_OVF_X)
731 si.si_code = FPE_FLTOVF;
732 else if (fcr31 & FPU_CSR_UDF_X)
733 si.si_code = FPE_FLTUND;
734 else if (fcr31 & FPU_CSR_INE_X)
735 si.si_code = FPE_FLTRES;
736 else
737 si.si_code = __SI_FAULT;
738 force_sig_info(sig, &si, current);
739 return 1; 748 return 1;
740 749
741 case SIGBUS: 750 case SIGBUS:
@@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
799 /* Run the emulator */ 808 /* Run the emulator */
800 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, 809 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
801 &fault_addr); 810 &fault_addr);
802 fcr31 = current->thread.fpu.fcr31;
803 811
804 /* 812 /*
805 * We can't allow the emulated instruction to leave any of 813 * We can't allow the emulated instruction to leave any
806 * the cause bits set in $fcr31. 814 * enabled Cause bits set in $fcr31.
807 */ 815 */
808 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 816 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
817 current->thread.fpu.fcr31 &= ~fcr31;
809 818
810 /* Restore the hardware register state */ 819 /* Restore the hardware register state */
811 own_fpu(1); 820 own_fpu(1);
@@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
831 goto out; 840 goto out;
832 841
833 /* Clear FCSR.Cause before enabling interrupts */ 842 /* Clear FCSR.Cause before enabling interrupts */
834 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X); 843 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
835 local_irq_enable(); 844 local_irq_enable();
836 845
837 die_if_kernel("FP exception in kernel code", regs); 846 die_if_kernel("FP exception in kernel code", regs);
@@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
853 /* Run the emulator */ 862 /* Run the emulator */
854 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, 863 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
855 &fault_addr); 864 &fault_addr);
856 fcr31 = current->thread.fpu.fcr31;
857 865
858 /* 866 /*
859 * We can't allow the emulated instruction to leave any of 867 * We can't allow the emulated instruction to leave any
860 * the cause bits set in $fcr31. 868 * enabled Cause bits set in $fcr31.
861 */ 869 */
862 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 870 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
871 current->thread.fpu.fcr31 &= ~fcr31;
863 872
864 /* Restore the hardware register state */ 873 /* Restore the hardware register state */
865 own_fpu(1); /* Using the FPU again. */ 874 own_fpu(1); /* Using the FPU again. */
@@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
1424 1433
1425 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0, 1434 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1426 &fault_addr); 1435 &fault_addr);
1427 fcr31 = current->thread.fpu.fcr31;
1428 1436
1429 /* 1437 /*
1430 * We can't allow the emulated instruction to leave 1438 * We can't allow the emulated instruction to leave
1431 * any of the cause bits set in $fcr31. 1439 * any enabled Cause bits set in $fcr31.
1432 */ 1440 */
1433 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 1441 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1442 current->thread.fpu.fcr31 &= ~fcr31;
1434 1443
1435 /* Send a signal if required. */ 1444 /* Send a signal if required. */
1436 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err) 1445 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 8770f32c9e0b..aa0937423e28 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
790 struct mips_coproc *cop0 = vcpu->arch.cop0; 790 struct mips_coproc *cop0 = vcpu->arch.cop0;
791 enum emulation_result er = EMULATE_DONE; 791 enum emulation_result er = EMULATE_DONE;
792 792
793 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { 793 if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
794 kvm_clear_c0_guest_status(cop0, ST0_ERL);
795 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
796 } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
794 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, 797 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
795 kvm_read_c0_guest_epc(cop0)); 798 kvm_read_c0_guest_epc(cop0));
796 kvm_clear_c0_guest_status(cop0, ST0_EXL); 799 kvm_clear_c0_guest_status(cop0, ST0_EXL);
797 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); 800 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
798 801
799 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
800 kvm_clear_c0_guest_status(cop0, ST0_ERL);
801 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
802 } else { 802 } else {
803 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", 803 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
804 vcpu->arch.pc); 804 vcpu->arch.pc);
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1528 struct kvm_vcpu *vcpu) 1528 struct kvm_vcpu *vcpu)
1529{ 1529{
1530 enum emulation_result er = EMULATE_DO_MMIO; 1530 enum emulation_result er = EMULATE_DO_MMIO;
1531 unsigned long curr_pc;
1531 u32 op, rt; 1532 u32 op, rt;
1532 u32 bytes; 1533 u32 bytes;
1533 1534
1534 rt = inst.i_format.rt; 1535 rt = inst.i_format.rt;
1535 op = inst.i_format.opcode; 1536 op = inst.i_format.opcode;
1536 1537
1537 vcpu->arch.pending_load_cause = cause; 1538 /*
1539 * Find the resume PC now while we have safe and easy access to the
1540 * prior branch instruction, and save it for
1541 * kvm_mips_complete_mmio_load() to restore later.
1542 */
1543 curr_pc = vcpu->arch.pc;
1544 er = update_pc(vcpu, cause);
1545 if (er == EMULATE_FAIL)
1546 return er;
1547 vcpu->arch.io_pc = vcpu->arch.pc;
1548 vcpu->arch.pc = curr_pc;
1549
1538 vcpu->arch.io_gpr = rt; 1550 vcpu->arch.io_gpr = rt;
1539 1551
1540 switch (op) { 1552 switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2494 goto done; 2506 goto done;
2495 } 2507 }
2496 2508
2497 er = update_pc(vcpu, vcpu->arch.pending_load_cause); 2509 /* Restore saved resume PC */
2498 if (er == EMULATE_FAIL) 2510 vcpu->arch.pc = vcpu->arch.io_pc;
2499 return er;
2500 2511
2501 switch (run->mmio.len) { 2512 switch (run->mmio.len) {
2502 case 4: 2513 case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2518 break; 2529 break;
2519 } 2530 }
2520 2531
2521 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2522 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2523 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2524 vcpu->mmio_needed);
2525
2526done: 2532done:
2527 return er; 2533 return er;
2528} 2534}
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 622037d851a3..06a60b19acfb 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
426static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) 426static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
427{ 427{
428 struct mips_coproc *cop0 = vcpu->arch.cop0; 428 struct mips_coproc *cop0 = vcpu->arch.cop0;
429 int cpu = smp_processor_id(); 429 int i, cpu = smp_processor_id();
430 unsigned int gasid; 430 unsigned int gasid;
431 431
432 /* 432 /*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
442 vcpu); 442 vcpu);
443 vcpu->arch.guest_user_asid[cpu] = 443 vcpu->arch.guest_user_asid[cpu] =
444 vcpu->arch.guest_user_mm.context.asid[cpu]; 444 vcpu->arch.guest_user_mm.context.asid[cpu];
445 for_each_possible_cpu(i)
446 if (i != cpu)
447 vcpu->arch.guest_user_asid[cpu] = 0;
445 vcpu->arch.last_user_gasid = gasid; 448 vcpu->arch.last_user_gasid = gasid;
446 } 449 }
447 } 450 }
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 03883ba806e2..3b677c851be0 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
260 260
261 if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) & 261 if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
262 asid_version_mask(cpu)) { 262 asid_version_mask(cpu)) {
263 u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
264 KVM_ENTRYHI_ASID;
265
266 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); 263 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
267 vcpu->arch.guest_user_asid[cpu] = 264 vcpu->arch.guest_user_asid[cpu] =
268 vcpu->arch.guest_user_mm.context.asid[cpu]; 265 vcpu->arch.guest_user_mm.context.asid[cpu];
269 vcpu->arch.last_user_gasid = gasid;
270 newasid++; 266 newasid++;
271 267
272 kvm_debug("[%d]: cpu_context: %#lx\n", cpu, 268 kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 0f80b936e75e..6eb50a7137db 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
135 c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; 135 c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
136 c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; 136 c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
137 137
138 printk("va=%0*lx asid=%0*lx", 138 pr_cont("va=%0*lx asid=%0*lx",
139 vwidth, (entryhi & ~0x1fffUL), 139 vwidth, (entryhi & ~0x1fffUL),
140 asidwidth, entryhi & asidmask); 140 asidwidth, entryhi & asidmask);
141 if (cpu_has_guestid) 141 if (cpu_has_guestid)
142 printk(" gid=%02lx", 142 pr_cont(" gid=%02lx",
143 (guestctl1 & MIPS_GCTL1_RID) 143 (guestctl1 & MIPS_GCTL1_RID)
144 >> MIPS_GCTL1_RID_SHIFT); 144 >> MIPS_GCTL1_RID_SHIFT);
145 /* RI/XI are in awkward places, so mask them off separately */ 145 /* RI/XI are in awkward places, so mask them off separately */
146 pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); 146 pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
147 if (xpa) 147 if (xpa)
148 pa |= (unsigned long long)readx_c0_entrylo0() << 30; 148 pa |= (unsigned long long)readx_c0_entrylo0() << 30;
149 pa = (pa << 6) & PAGE_MASK; 149 pa = (pa << 6) & PAGE_MASK;
150 printk("\n\t["); 150 pr_cont("\n\t[");
151 if (cpu_has_rixi) 151 if (cpu_has_rixi)
152 printk("ri=%d xi=%d ", 152 pr_cont("ri=%d xi=%d ",
153 (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0, 153 (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
154 (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0); 154 (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
155 printk("pa=%0*llx c=%d d=%d v=%d g=%d] [", 155 pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
156 pwidth, pa, c0, 156 pwidth, pa, c0,
157 (entrylo0 & ENTRYLO_D) ? 1 : 0, 157 (entrylo0 & ENTRYLO_D) ? 1 : 0,
158 (entrylo0 & ENTRYLO_V) ? 1 : 0, 158 (entrylo0 & ENTRYLO_V) ? 1 : 0,
159 (entrylo0 & ENTRYLO_G) ? 1 : 0); 159 (entrylo0 & ENTRYLO_G) ? 1 : 0);
160 /* RI/XI are in awkward places, so mask them off separately */ 160 /* RI/XI are in awkward places, so mask them off separately */
161 pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); 161 pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
162 if (xpa) 162 if (xpa)
163 pa |= (unsigned long long)readx_c0_entrylo1() << 30; 163 pa |= (unsigned long long)readx_c0_entrylo1() << 30;
164 pa = (pa << 6) & PAGE_MASK; 164 pa = (pa << 6) & PAGE_MASK;
165 if (cpu_has_rixi) 165 if (cpu_has_rixi)
166 printk("ri=%d xi=%d ", 166 pr_cont("ri=%d xi=%d ",
167 (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0, 167 (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
168 (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0); 168 (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
169 printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n", 169 pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
170 pwidth, pa, c1, 170 pwidth, pa, c1,
171 (entrylo1 & ENTRYLO_D) ? 1 : 0, 171 (entrylo1 & ENTRYLO_D) ? 1 : 0,
172 (entrylo1 & ENTRYLO_V) ? 1 : 0, 172 (entrylo1 & ENTRYLO_V) ? 1 : 0,
173 (entrylo1 & ENTRYLO_G) ? 1 : 0); 173 (entrylo1 & ENTRYLO_G) ? 1 : 0);
174 } 174 }
175 printk("\n"); 175 printk("\n");
176 176
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 744f4a7bc49d..85b4086e553e 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
53 */ 53 */
54 printk("Index: %2d ", i); 54 printk("Index: %2d ", i);
55 55
56 printk("va=%08lx asid=%08lx" 56 pr_cont("va=%08lx asid=%08lx"
57 " [pa=%06lx n=%d d=%d v=%d g=%d]", 57 " [pa=%06lx n=%d d=%d v=%d g=%d]",
58 entryhi & PAGE_MASK, 58 entryhi & PAGE_MASK,
59 entryhi & asid_mask, 59 entryhi & asid_mask,
60 entrylo0 & PAGE_MASK, 60 entrylo0 & PAGE_MASK,
61 (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0, 61 (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
62 (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0, 62 (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
63 (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0, 63 (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
64 (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0); 64 (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
65 } 65 }
66 } 66 }
67 printk("\n"); 67 printk("\n");
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index d9563ddb337e..746bf5caaffc 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
324 ret = nios2_clocksource_init(timer); 324 ret = nios2_clocksource_init(timer);
325 break; 325 break;
326 default: 326 default:
327 ret = 0;
327 break; 328 break;
328 } 329 }
329 330
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
index 4ce7a01a252d..5f55da9cbfd5 100644
--- a/arch/openrisc/include/asm/cache.h
+++ b/arch/openrisc/include/asm/cache.h
@@ -23,6 +23,8 @@
23 * they shouldn't be hard-coded! 23 * they shouldn't be hard-coded!
24 */ 24 */
25 25
26#define __ro_after_init __read_mostly
27
26#define L1_CACHE_BYTES 16 28#define L1_CACHE_BYTES 16
27#define L1_CACHE_SHIFT 4 29#define L1_CACHE_SHIFT 4
28 30
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index a9b9407f38f7..6b0741e7a7ed 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -368,7 +368,9 @@
368 368
369#define __IGNORE_select /* newselect */ 369#define __IGNORE_select /* newselect */
370#define __IGNORE_fadvise64 /* fadvise64_64 */ 370#define __IGNORE_fadvise64 /* fadvise64_64 */
371 371#define __IGNORE_pkey_mprotect
372#define __IGNORE_pkey_alloc
373#define __IGNORE_pkey_free
372 374
373#define LINUX_GATEWAY_ADDR 0x100 375#define LINUX_GATEWAY_ADDR 0x100
374 376
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index f8150669b8c6..700e2d2da096 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
873 873
874 if (dev->num_addrs) { 874 if (dev->num_addrs) {
875 int k; 875 int k;
876 printk(", additional addresses: "); 876 pr_cont(", additional addresses: ");
877 for (k = 0; k < dev->num_addrs; k++) 877 for (k = 0; k < dev->num_addrs; k++)
878 printk("0x%lx ", dev->addr[k]); 878 pr_cont("0x%lx ", dev->addr[k]);
879 } 879 }
880 printk("\n"); 880 pr_cont("\n");
881} 881}
882 882
883/** 883/**
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d03422e5f188..23de307c3052 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -100,14 +100,12 @@ set_thread_pointer:
100 .endr 100 .endr
101 101
102/* This address must remain fixed at 0x100 for glibc's syscalls to work */ 102/* This address must remain fixed at 0x100 for glibc's syscalls to work */
103 .align 256 103 .align LINUX_GATEWAY_ADDR
104linux_gateway_entry: 104linux_gateway_entry:
105 gate .+8, %r0 /* become privileged */ 105 gate .+8, %r0 /* become privileged */
106 mtsp %r0,%sr4 /* get kernel space into sr4 */ 106 mtsp %r0,%sr4 /* get kernel space into sr4 */
107 mtsp %r0,%sr5 /* get kernel space into sr5 */ 107 mtsp %r0,%sr5 /* get kernel space into sr5 */
108 mtsp %r0,%sr6 /* get kernel space into sr6 */ 108 mtsp %r0,%sr6 /* get kernel space into sr6 */
109 mfsp %sr7,%r1 /* save user sr7 */
110 mtsp %r1,%sr3 /* and store it in sr3 */
111 109
112#ifdef CONFIG_64BIT 110#ifdef CONFIG_64BIT
113 /* for now we can *always* set the W bit on entry to the syscall 111 /* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
133 depdi 0, 31, 32, %r21 131 depdi 0, 31, 32, %r21
1341: 1321:
135#endif 133#endif
134
135 /* We use a rsm/ssm pair to prevent sr3 from being clobbered
136 * by external interrupts.
137 */
138 mfsp %sr7,%r1 /* save user sr7 */
139 rsm PSW_SM_I, %r0 /* disable interrupts */
140 mtsp %r1,%sr3 /* and store it in sr3 */
141
136 mfctl %cr30,%r1 142 mfctl %cr30,%r1
137 xor %r1,%r30,%r30 /* ye olde xor trick */ 143 xor %r1,%r30,%r30 /* ye olde xor trick */
138 xor %r1,%r30,%r1 144 xor %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
147 */ 153 */
148 154
149 mtsp %r0,%sr7 /* get kernel space into sr7 */ 155 mtsp %r0,%sr7 /* get kernel space into sr7 */
156 ssm PSW_SM_I, %r0 /* enable interrupts */
150 STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ 157 STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
151 mfctl %cr30,%r1 /* get task ptr in %r1 */ 158 mfctl %cr30,%r1 /* get task ptr in %r1 */
152 LDREG TI_TASK(%r1),%r1 159 LDREG TI_TASK(%r1),%r1
@@ -474,11 +481,6 @@ lws_start:
474 comiclr,>> __NR_lws_entries, %r20, %r0 481 comiclr,>> __NR_lws_entries, %r20, %r0
475 b,n lws_exit_nosys 482 b,n lws_exit_nosys
476 483
477 /* WARNING: Trashing sr2 and sr3 */
478 mfsp %sr7,%r1 /* get userspace into sr3 */
479 mtsp %r1,%sr3
480 mtsp %r0,%sr2 /* get kernel space into sr2 */
481
482 /* Load table start */ 484 /* Load table start */
483 ldil L%lws_table, %r1 485 ldil L%lws_table, %r1
484 ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */ 486 ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
@@ -627,9 +629,9 @@ cas_action:
627 stw %r1, 4(%sr2,%r20) 629 stw %r1, 4(%sr2,%r20)
628#endif 630#endif
629 /* The load and store could fail */ 631 /* The load and store could fail */
6301: ldw,ma 0(%sr3,%r26), %r28 6321: ldw,ma 0(%r26), %r28
631 sub,<> %r28, %r25, %r0 633 sub,<> %r28, %r25, %r0
6322: stw,ma %r24, 0(%sr3,%r26) 6342: stw,ma %r24, 0(%r26)
633 /* Free lock */ 635 /* Free lock */
634 stw,ma %r20, 0(%sr2,%r20) 636 stw,ma %r20, 0(%sr2,%r20)
635#if ENABLE_LWS_DEBUG 637#if ENABLE_LWS_DEBUG
@@ -706,9 +708,9 @@ lws_compare_and_swap_2:
706 nop 708 nop
707 709
708 /* 8bit load */ 710 /* 8bit load */
7094: ldb 0(%sr3,%r25), %r25 7114: ldb 0(%r25), %r25
710 b cas2_lock_start 712 b cas2_lock_start
7115: ldb 0(%sr3,%r24), %r24 7135: ldb 0(%r24), %r24
712 nop 714 nop
713 nop 715 nop
714 nop 716 nop
@@ -716,9 +718,9 @@ lws_compare_and_swap_2:
716 nop 718 nop
717 719
718 /* 16bit load */ 720 /* 16bit load */
7196: ldh 0(%sr3,%r25), %r25 7216: ldh 0(%r25), %r25
720 b cas2_lock_start 722 b cas2_lock_start
7217: ldh 0(%sr3,%r24), %r24 7237: ldh 0(%r24), %r24
722 nop 724 nop
723 nop 725 nop
724 nop 726 nop
@@ -726,9 +728,9 @@ lws_compare_and_swap_2:
726 nop 728 nop
727 729
728 /* 32bit load */ 730 /* 32bit load */
7298: ldw 0(%sr3,%r25), %r25 7318: ldw 0(%r25), %r25
730 b cas2_lock_start 732 b cas2_lock_start
7319: ldw 0(%sr3,%r24), %r24 7339: ldw 0(%r24), %r24
732 nop 734 nop
733 nop 735 nop
734 nop 736 nop
@@ -737,14 +739,14 @@ lws_compare_and_swap_2:
737 739
738 /* 64bit load */ 740 /* 64bit load */
739#ifdef CONFIG_64BIT 741#ifdef CONFIG_64BIT
74010: ldd 0(%sr3,%r25), %r25 74210: ldd 0(%r25), %r25
74111: ldd 0(%sr3,%r24), %r24 74311: ldd 0(%r24), %r24
742#else 744#else
743 /* Load new value into r22/r23 - high/low */ 745 /* Load new value into r22/r23 - high/low */
74410: ldw 0(%sr3,%r25), %r22 74610: ldw 0(%r25), %r22
74511: ldw 4(%sr3,%r25), %r23 74711: ldw 4(%r25), %r23
746 /* Load new value into fr4 for atomic store later */ 748 /* Load new value into fr4 for atomic store later */
74712: flddx 0(%sr3,%r24), %fr4 74912: flddx 0(%r24), %fr4
748#endif 750#endif
749 751
750cas2_lock_start: 752cas2_lock_start:
@@ -794,30 +796,30 @@ cas2_action:
794 ldo 1(%r0),%r28 796 ldo 1(%r0),%r28
795 797
796 /* 8bit CAS */ 798 /* 8bit CAS */
79713: ldb,ma 0(%sr3,%r26), %r29 79913: ldb,ma 0(%r26), %r29
798 sub,= %r29, %r25, %r0 800 sub,= %r29, %r25, %r0
799 b,n cas2_end 801 b,n cas2_end
80014: stb,ma %r24, 0(%sr3,%r26) 80214: stb,ma %r24, 0(%r26)
801 b cas2_end 803 b cas2_end
802 copy %r0, %r28 804 copy %r0, %r28
803 nop 805 nop
804 nop 806 nop
805 807
806 /* 16bit CAS */ 808 /* 16bit CAS */
80715: ldh,ma 0(%sr3,%r26), %r29 80915: ldh,ma 0(%r26), %r29
808 sub,= %r29, %r25, %r0 810 sub,= %r29, %r25, %r0
809 b,n cas2_end 811 b,n cas2_end
81016: sth,ma %r24, 0(%sr3,%r26) 81216: sth,ma %r24, 0(%r26)
811 b cas2_end 813 b cas2_end
812 copy %r0, %r28 814 copy %r0, %r28
813 nop 815 nop
814 nop 816 nop
815 817
816 /* 32bit CAS */ 818 /* 32bit CAS */
81717: ldw,ma 0(%sr3,%r26), %r29 81917: ldw,ma 0(%r26), %r29
818 sub,= %r29, %r25, %r0 820 sub,= %r29, %r25, %r0
819 b,n cas2_end 821 b,n cas2_end
82018: stw,ma %r24, 0(%sr3,%r26) 82218: stw,ma %r24, 0(%r26)
821 b cas2_end 823 b cas2_end
822 copy %r0, %r28 824 copy %r0, %r28
823 nop 825 nop
@@ -825,22 +827,22 @@ cas2_action:
825 827
826 /* 64bit CAS */ 828 /* 64bit CAS */
827#ifdef CONFIG_64BIT 829#ifdef CONFIG_64BIT
82819: ldd,ma 0(%sr3,%r26), %r29 83019: ldd,ma 0(%r26), %r29
829 sub,*= %r29, %r25, %r0 831 sub,*= %r29, %r25, %r0
830 b,n cas2_end 832 b,n cas2_end
83120: std,ma %r24, 0(%sr3,%r26) 83320: std,ma %r24, 0(%r26)
832 copy %r0, %r28 834 copy %r0, %r28
833#else 835#else
834 /* Compare first word */ 836 /* Compare first word */
83519: ldw,ma 0(%sr3,%r26), %r29 83719: ldw,ma 0(%r26), %r29
836 sub,= %r29, %r22, %r0 838 sub,= %r29, %r22, %r0
837 b,n cas2_end 839 b,n cas2_end
838 /* Compare second word */ 840 /* Compare second word */
83920: ldw,ma 4(%sr3,%r26), %r29 84120: ldw,ma 4(%r26), %r29
840 sub,= %r29, %r23, %r0 842 sub,= %r29, %r23, %r0
841 b,n cas2_end 843 b,n cas2_end
842 /* Perform the store */ 844 /* Perform the store */
84321: fstdx %fr4, 0(%sr3,%r26) 84521: fstdx %fr4, 0(%r26)
844 copy %r0, %r28 846 copy %r0, %r28
845#endif 847#endif
846 848
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 28f03ca60100..794bebb43d23 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -363,11 +363,11 @@ out:
363static int diag224_get_name_table(void) 363static int diag224_get_name_table(void)
364{ 364{
365 /* memory must be below 2GB */ 365 /* memory must be below 2GB */
366 diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA); 366 diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
367 if (!diag224_cpu_names) 367 if (!diag224_cpu_names)
368 return -ENOMEM; 368 return -ENOMEM;
369 if (diag224(diag224_cpu_names)) { 369 if (diag224(diag224_cpu_names)) {
370 kfree(diag224_cpu_names); 370 free_page((unsigned long) diag224_cpu_names);
371 return -EOPNOTSUPP; 371 return -EOPNOTSUPP;
372 } 372 }
373 EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); 373 EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
376 376
377static void diag224_delete_name_table(void) 377static void diag224_delete_name_table(void)
378{ 378{
379 kfree(diag224_cpu_names); 379 free_page((unsigned long) diag224_cpu_names);
380} 380}
381 381
382static int diag224_idx2name(int index, char *name) 382static int diag224_idx2name(int index, char *name)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 000e6e91f6a0..3667d20e997f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -62,9 +62,11 @@ SECTIONS
62 62
63 . = ALIGN(PAGE_SIZE); 63 . = ALIGN(PAGE_SIZE);
64 __start_ro_after_init = .; 64 __start_ro_after_init = .;
65 __start_data_ro_after_init = .;
65 .data..ro_after_init : { 66 .data..ro_after_init : {
66 *(.data..ro_after_init) 67 *(.data..ro_after_init)
67 } 68 }
69 __end_data_ro_after_init = .;
68 EXCEPTION_TABLE(16) 70 EXCEPTION_TABLE(16)
69 . = ALIGN(PAGE_SIZE); 71 . = ALIGN(PAGE_SIZE);
70 __end_ro_after_init = .; 72 __end_ro_after_init = .;
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index bd98b7d25200..05c98bb853cf 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
315 if (r < 0) 315 if (r < 0)
316 goto out; 316 goto out;
317 317
318 diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA); 318 diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
319 if (!diag224_buf || diag224(diag224_buf)) 319 if (!diag224_buf || diag224(diag224_buf))
320 goto out; 320 goto out;
321 321
@@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
378 sctns->par.infpval1 |= PAR_WGHT_VLD; 378 sctns->par.infpval1 |= PAR_WGHT_VLD;
379 379
380out: 380out:
381 kfree(diag224_buf); 381 free_page((unsigned long)diag224_buf);
382 vfree(diag204_buf); 382 vfree(diag204_buf);
383} 383}
384 384
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7350c8bc13a2..6b2f72f523b9 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -423,7 +423,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
423 dma_addr_t dma_addr_base, dma_addr; 423 dma_addr_t dma_addr_base, dma_addr;
424 int flags = ZPCI_PTE_VALID; 424 int flags = ZPCI_PTE_VALID;
425 struct scatterlist *s; 425 struct scatterlist *s;
426 unsigned long pa; 426 unsigned long pa = 0;
427 int ret; 427 int ret;
428 428
429 size = PAGE_ALIGN(size); 429 size = PAGE_ALIGN(size);
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a6cfdabb6054..5b0ed48e5b0c 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -24,9 +24,10 @@ typedef struct {
24 unsigned int icache_line_size; 24 unsigned int icache_line_size;
25 unsigned int ecache_size; 25 unsigned int ecache_size;
26 unsigned int ecache_line_size; 26 unsigned int ecache_line_size;
27 unsigned short sock_id; 27 unsigned short sock_id; /* physical package */
28 unsigned short core_id; 28 unsigned short core_id;
29 int proc_id; 29 unsigned short max_cache_id; /* groupings of highest shared cache */
30 unsigned short proc_id; /* strand (aka HW thread) id */
30} cpuinfo_sparc; 31} cpuinfo_sparc;
31 32
32DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); 33DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index d9c5876c6121..8011e79f59c9 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -134,7 +134,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
134 *(volatile __u32 *)&lp->lock = ~0U; 134 *(volatile __u32 *)&lp->lock = ~0U;
135} 135}
136 136
137static void inline arch_write_unlock(arch_rwlock_t *lock) 137static inline void arch_write_unlock(arch_rwlock_t *lock)
138{ 138{
139 __asm__ __volatile__( 139 __asm__ __volatile__(
140" st %%g0, [%0]" 140" st %%g0, [%0]"
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 87990b7c6b0d..07c9f2e9bf57 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -96,7 +96,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
96 96
97/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ 97/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
98 98
99static void inline arch_read_lock(arch_rwlock_t *lock) 99static inline void arch_read_lock(arch_rwlock_t *lock)
100{ 100{
101 unsigned long tmp1, tmp2; 101 unsigned long tmp1, tmp2;
102 102
@@ -119,7 +119,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
119 : "memory"); 119 : "memory");
120} 120}
121 121
122static int inline arch_read_trylock(arch_rwlock_t *lock) 122static inline int arch_read_trylock(arch_rwlock_t *lock)
123{ 123{
124 int tmp1, tmp2; 124 int tmp1, tmp2;
125 125
@@ -140,7 +140,7 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
140 return tmp1; 140 return tmp1;
141} 141}
142 142
143static void inline arch_read_unlock(arch_rwlock_t *lock) 143static inline void arch_read_unlock(arch_rwlock_t *lock)
144{ 144{
145 unsigned long tmp1, tmp2; 145 unsigned long tmp1, tmp2;
146 146
@@ -156,7 +156,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
156 : "memory"); 156 : "memory");
157} 157}
158 158
159static void inline arch_write_lock(arch_rwlock_t *lock) 159static inline void arch_write_lock(arch_rwlock_t *lock)
160{ 160{
161 unsigned long mask, tmp1, tmp2; 161 unsigned long mask, tmp1, tmp2;
162 162
@@ -181,7 +181,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
181 : "memory"); 181 : "memory");
182} 182}
183 183
184static void inline arch_write_unlock(arch_rwlock_t *lock) 184static inline void arch_write_unlock(arch_rwlock_t *lock)
185{ 185{
186 __asm__ __volatile__( 186 __asm__ __volatile__(
187" stw %%g0, [%0]" 187" stw %%g0, [%0]"
@@ -190,7 +190,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
190 : "memory"); 190 : "memory");
191} 191}
192 192
193static int inline arch_write_trylock(arch_rwlock_t *lock) 193static inline int arch_write_trylock(arch_rwlock_t *lock)
194{ 194{
195 unsigned long mask, tmp1, tmp2, result; 195 unsigned long mask, tmp1, tmp2, result;
196 196
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index bec481aaca16..7b4898a36eee 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -44,14 +44,20 @@ int __node_distance(int, int);
44#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) 44#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
45#define topology_core_id(cpu) (cpu_data(cpu).core_id) 45#define topology_core_id(cpu) (cpu_data(cpu).core_id)
46#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) 46#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
47#define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu])
47#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 48#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
48#endif /* CONFIG_SMP */ 49#endif /* CONFIG_SMP */
49 50
50extern cpumask_t cpu_core_map[NR_CPUS]; 51extern cpumask_t cpu_core_map[NR_CPUS];
51extern cpumask_t cpu_core_sib_map[NR_CPUS]; 52extern cpumask_t cpu_core_sib_map[NR_CPUS];
53extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
54
55/**
56 * Return cores that shares the last level cache.
57 */
52static inline const struct cpumask *cpu_coregroup_mask(int cpu) 58static inline const struct cpumask *cpu_coregroup_mask(int cpu)
53{ 59{
54 return &cpu_core_map[cpu]; 60 return &cpu_core_sib_cache_map[cpu];
55} 61}
56 62
57#endif /* _ASM_SPARC64_TOPOLOGY_H */ 63#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index b68acc563235..5373136c412b 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -82,7 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
82 return 1; 82 return 1;
83} 83}
84 84
85void __ret_efault(void);
86void __retl_efault(void); 85void __retl_efault(void);
87 86
88/* Uh, these should become the main single-value transfer routines.. 87/* Uh, these should become the main single-value transfer routines..
@@ -189,55 +188,34 @@ int __get_user_bad(void);
189unsigned long __must_check ___copy_from_user(void *to, 188unsigned long __must_check ___copy_from_user(void *to,
190 const void __user *from, 189 const void __user *from,
191 unsigned long size); 190 unsigned long size);
192unsigned long copy_from_user_fixup(void *to, const void __user *from,
193 unsigned long size);
194static inline unsigned long __must_check 191static inline unsigned long __must_check
195copy_from_user(void *to, const void __user *from, unsigned long size) 192copy_from_user(void *to, const void __user *from, unsigned long size)
196{ 193{
197 unsigned long ret;
198
199 check_object_size(to, size, false); 194 check_object_size(to, size, false);
200 195
201 ret = ___copy_from_user(to, from, size); 196 return ___copy_from_user(to, from, size);
202 if (unlikely(ret))
203 ret = copy_from_user_fixup(to, from, size);
204
205 return ret;
206} 197}
207#define __copy_from_user copy_from_user 198#define __copy_from_user copy_from_user
208 199
209unsigned long __must_check ___copy_to_user(void __user *to, 200unsigned long __must_check ___copy_to_user(void __user *to,
210 const void *from, 201 const void *from,
211 unsigned long size); 202 unsigned long size);
212unsigned long copy_to_user_fixup(void __user *to, const void *from,
213 unsigned long size);
214static inline unsigned long __must_check 203static inline unsigned long __must_check
215copy_to_user(void __user *to, const void *from, unsigned long size) 204copy_to_user(void __user *to, const void *from, unsigned long size)
216{ 205{
217 unsigned long ret;
218
219 check_object_size(from, size, true); 206 check_object_size(from, size, true);
220 207
221 ret = ___copy_to_user(to, from, size); 208 return ___copy_to_user(to, from, size);
222 if (unlikely(ret))
223 ret = copy_to_user_fixup(to, from, size);
224 return ret;
225} 209}
226#define __copy_to_user copy_to_user 210#define __copy_to_user copy_to_user
227 211
228unsigned long __must_check ___copy_in_user(void __user *to, 212unsigned long __must_check ___copy_in_user(void __user *to,
229 const void __user *from, 213 const void __user *from,
230 unsigned long size); 214 unsigned long size);
231unsigned long copy_in_user_fixup(void __user *to, void __user *from,
232 unsigned long size);
233static inline unsigned long __must_check 215static inline unsigned long __must_check
234copy_in_user(void __user *to, void __user *from, unsigned long size) 216copy_in_user(void __user *to, void __user *from, unsigned long size)
235{ 217{
236 unsigned long ret = ___copy_in_user(to, from, size); 218 return ___copy_in_user(to, from, size);
237
238 if (unlikely(ret))
239 ret = copy_in_user_fixup(to, from, size);
240 return ret;
241} 219}
242#define __copy_in_user copy_in_user 220#define __copy_in_user copy_in_user
243 221
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index beba6c11554c..6aa3da152c20 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -926,48 +926,11 @@ tlb_type: .word 0 /* Must NOT end up in BSS */
926EXPORT_SYMBOL(tlb_type) 926EXPORT_SYMBOL(tlb_type)
927 .section ".fixup",#alloc,#execinstr 927 .section ".fixup",#alloc,#execinstr
928 928
929 .globl __ret_efault, __retl_efault, __ret_one, __retl_one
930ENTRY(__ret_efault)
931 ret
932 restore %g0, -EFAULT, %o0
933ENDPROC(__ret_efault)
934EXPORT_SYMBOL(__ret_efault)
935
936ENTRY(__retl_efault) 929ENTRY(__retl_efault)
937 retl 930 retl
938 mov -EFAULT, %o0 931 mov -EFAULT, %o0
939ENDPROC(__retl_efault) 932ENDPROC(__retl_efault)
940 933
941ENTRY(__retl_one)
942 retl
943 mov 1, %o0
944ENDPROC(__retl_one)
945
946ENTRY(__retl_one_fp)
947 VISExitHalf
948 retl
949 mov 1, %o0
950ENDPROC(__retl_one_fp)
951
952ENTRY(__ret_one_asi)
953 wr %g0, ASI_AIUS, %asi
954 ret
955 restore %g0, 1, %o0
956ENDPROC(__ret_one_asi)
957
958ENTRY(__retl_one_asi)
959 wr %g0, ASI_AIUS, %asi
960 retl
961 mov 1, %o0
962ENDPROC(__retl_one_asi)
963
964ENTRY(__retl_one_asi_fp)
965 wr %g0, ASI_AIUS, %asi
966 VISExitHalf
967 retl
968 mov 1, %o0
969ENDPROC(__retl_one_asi_fp)
970
971ENTRY(__retl_o1) 934ENTRY(__retl_o1)
972 retl 935 retl
973 mov %o1, %o0 936 mov %o1, %o0
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 59bbeff55024..07933b9e9ce0 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -13,19 +13,30 @@
13void arch_jump_label_transform(struct jump_entry *entry, 13void arch_jump_label_transform(struct jump_entry *entry,
14 enum jump_label_type type) 14 enum jump_label_type type)
15{ 15{
16 u32 val;
17 u32 *insn = (u32 *) (unsigned long) entry->code; 16 u32 *insn = (u32 *) (unsigned long) entry->code;
17 u32 val;
18 18
19 if (type == JUMP_LABEL_JMP) { 19 if (type == JUMP_LABEL_JMP) {
20 s32 off = (s32)entry->target - (s32)entry->code; 20 s32 off = (s32)entry->target - (s32)entry->code;
21 bool use_v9_branch = false;
22
23 BUG_ON(off & 3);
21 24
22#ifdef CONFIG_SPARC64 25#ifdef CONFIG_SPARC64
23 /* ba,pt %xcc, . + (off << 2) */ 26 if (off <= 0xfffff && off >= -0x100000)
24 val = 0x10680000 | ((u32) off >> 2); 27 use_v9_branch = true;
25#else
26 /* ba . + (off << 2) */
27 val = 0x10800000 | ((u32) off >> 2);
28#endif 28#endif
29 if (use_v9_branch) {
30 /* WDISP19 - target is . + immed << 2 */
31 /* ba,pt %xcc, . + off */
32 val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
33 } else {
34 /* WDISP22 - target is . + immed << 2 */
35 BUG_ON(off > 0x7fffff);
36 BUG_ON(off < -0x800000);
37 /* ba . + off */
38 val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
39 }
29 } else { 40 } else {
30 val = 0x01000000; 41 val = 0x01000000;
31 } 42 }
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 11228861d9b4..8a6982dfd733 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -645,13 +645,20 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node,
645 cpu_data(*id).core_id = core_id; 645 cpu_data(*id).core_id = core_id;
646} 646}
647 647
648static void __mark_sock_id(struct mdesc_handle *hp, u64 node, 648static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
649 int sock_id) 649 int max_cache_id)
650{ 650{
651 const u64 *id = mdesc_get_property(hp, node, "id", NULL); 651 const u64 *id = mdesc_get_property(hp, node, "id", NULL);
652 652
653 if (*id < num_possible_cpus()) 653 if (*id < num_possible_cpus()) {
654 cpu_data(*id).sock_id = sock_id; 654 cpu_data(*id).max_cache_id = max_cache_id;
655
656 /**
657 * On systems without explicit socket descriptions socket
658 * is max_cache_id
659 */
660 cpu_data(*id).sock_id = max_cache_id;
661 }
655} 662}
656 663
657static void mark_core_ids(struct mdesc_handle *hp, u64 mp, 664static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
@@ -660,10 +667,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
660 find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); 667 find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
661} 668}
662 669
663static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, 670static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
664 int sock_id) 671 int max_cache_id)
665{ 672{
666 find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); 673 find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
674 max_cache_id, 10);
667} 675}
668 676
669static void set_core_ids(struct mdesc_handle *hp) 677static void set_core_ids(struct mdesc_handle *hp)
@@ -694,14 +702,15 @@ static void set_core_ids(struct mdesc_handle *hp)
694 } 702 }
695} 703}
696 704
697static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) 705static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
698{ 706{
699 u64 mp; 707 u64 mp;
700 int idx = 1; 708 int idx = 1;
701 int fnd = 0; 709 int fnd = 0;
702 710
703 /* Identify unique sockets by looking for cpus backpointed to by 711 /**
704 * shared level n caches. 712 * Identify unique highest level of shared cache by looking for cpus
713 * backpointed to by shared level N caches.
705 */ 714 */
706 mdesc_for_each_node_by_name(hp, mp, "cache") { 715 mdesc_for_each_node_by_name(hp, mp, "cache") {
707 const u64 *cur_lvl; 716 const u64 *cur_lvl;
@@ -709,8 +718,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
709 cur_lvl = mdesc_get_property(hp, mp, "level", NULL); 718 cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
710 if (*cur_lvl != level) 719 if (*cur_lvl != level)
711 continue; 720 continue;
712 721 mark_max_cache_ids(hp, mp, idx);
713 mark_sock_ids(hp, mp, idx);
714 idx++; 722 idx++;
715 fnd = 1; 723 fnd = 1;
716 } 724 }
@@ -745,15 +753,17 @@ static void set_sock_ids(struct mdesc_handle *hp)
745{ 753{
746 u64 mp; 754 u64 mp;
747 755
748 /* If machine description exposes sockets data use it. 756 /**
749 * Otherwise fallback to use shared L3 or L2 caches. 757 * Find the highest level of shared cache which pre-T7 is also
758 * the socket.
750 */ 759 */
760 if (!set_max_cache_ids_by_cache(hp, 3))
761 set_max_cache_ids_by_cache(hp, 2);
762
763 /* If machine description exposes sockets data use it.*/
751 mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); 764 mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
752 if (mp != MDESC_NODE_NULL) 765 if (mp != MDESC_NODE_NULL)
753 return set_sock_ids_by_socket(hp, mp); 766 set_sock_ids_by_socket(hp, mp);
754
755 if (!set_sock_ids_by_cache(hp, 3))
756 set_sock_ids_by_cache(hp, 2);
757} 767}
758 768
759static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) 769static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d3035ba6cd31..8182f7caf5b1 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -63,9 +63,13 @@ cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
63cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { 63cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
64 [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 64 [0 ... NR_CPUS-1] = CPU_MASK_NONE };
65 65
66cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
67 [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
68
66EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 69EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
67EXPORT_SYMBOL(cpu_core_map); 70EXPORT_SYMBOL(cpu_core_map);
68EXPORT_SYMBOL(cpu_core_sib_map); 71EXPORT_SYMBOL(cpu_core_sib_map);
72EXPORT_SYMBOL(cpu_core_sib_cache_map);
69 73
70static cpumask_t smp_commenced_mask; 74static cpumask_t smp_commenced_mask;
71 75
@@ -1265,6 +1269,10 @@ void smp_fill_in_sib_core_maps(void)
1265 unsigned int j; 1269 unsigned int j;
1266 1270
1267 for_each_present_cpu(j) { 1271 for_each_present_cpu(j) {
1272 if (cpu_data(i).max_cache_id ==
1273 cpu_data(j).max_cache_id)
1274 cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
1275
1268 if (cpu_data(i).sock_id == cpu_data(j).sock_id) 1276 if (cpu_data(i).sock_id == cpu_data(j).sock_id)
1269 cpumask_set_cpu(j, &cpu_core_sib_map[i]); 1277 cpumask_set_cpu(j, &cpu_core_sib_map[i]);
1270 } 1278 }
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index b7d0bd6b1406..69a439fa2fc1 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -3,11 +3,11 @@
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define EX_LD(x) \ 6#define EX_LD(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one; \ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index 780550e1afc7..9947427ce354 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -3,11 +3,11 @@
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define EX_ST(x) \ 6#define EX_ST(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one; \ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S
index 89358ee94851..059ea24ad73d 100644
--- a/arch/sparc/lib/GENmemcpy.S
+++ b/arch/sparc/lib/GENmemcpy.S
@@ -4,21 +4,18 @@
4 */ 4 */
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7#include <linux/linkage.h>
7#define GLOBAL_SPARE %g7 8#define GLOBAL_SPARE %g7
8#else 9#else
9#define GLOBAL_SPARE %g5 10#define GLOBAL_SPARE %g5
10#endif 11#endif
11 12
12#ifndef EX_LD 13#ifndef EX_LD
13#define EX_LD(x) x 14#define EX_LD(x,y) x
14#endif 15#endif
15 16
16#ifndef EX_ST 17#ifndef EX_ST
17#define EX_ST(x) x 18#define EX_ST(x,y) x
18#endif
19
20#ifndef EX_RETVAL
21#define EX_RETVAL(x) x
22#endif 19#endif
23 20
24#ifndef LOAD 21#ifndef LOAD
@@ -45,6 +42,29 @@
45 .register %g3,#scratch 42 .register %g3,#scratch
46 43
47 .text 44 .text
45
46#ifndef EX_RETVAL
47#define EX_RETVAL(x) x
48ENTRY(GEN_retl_o4_1)
49 add %o4, %o2, %o4
50 retl
51 add %o4, 1, %o0
52ENDPROC(GEN_retl_o4_1)
53ENTRY(GEN_retl_g1_8)
54 add %g1, %o2, %g1
55 retl
56 add %g1, 8, %o0
57ENDPROC(GEN_retl_g1_8)
58ENTRY(GEN_retl_o2_4)
59 retl
60 add %o2, 4, %o0
61ENDPROC(GEN_retl_o2_4)
62ENTRY(GEN_retl_o2_1)
63 retl
64 add %o2, 1, %o0
65ENDPROC(GEN_retl_o2_1)
66#endif
67
48 .align 64 68 .align 64
49 69
50 .globl FUNC_NAME 70 .globl FUNC_NAME
@@ -73,8 +93,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
73 sub %g0, %o4, %o4 93 sub %g0, %o4, %o4
74 sub %o2, %o4, %o2 94 sub %o2, %o4, %o2
751: subcc %o4, 1, %o4 951: subcc %o4, 1, %o4
76 EX_LD(LOAD(ldub, %o1, %g1)) 96 EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1)
77 EX_ST(STORE(stb, %g1, %o0)) 97 EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1)
78 add %o1, 1, %o1 98 add %o1, 1, %o1
79 bne,pt %XCC, 1b 99 bne,pt %XCC, 1b
80 add %o0, 1, %o0 100 add %o0, 1, %o0
@@ -82,8 +102,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
82 andn %o2, 0x7, %g1 102 andn %o2, 0x7, %g1
83 sub %o2, %g1, %o2 103 sub %o2, %g1, %o2
841: subcc %g1, 0x8, %g1 1041: subcc %g1, 0x8, %g1
85 EX_LD(LOAD(ldx, %o1, %g2)) 105 EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8)
86 EX_ST(STORE(stx, %g2, %o0)) 106 EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8)
87 add %o1, 0x8, %o1 107 add %o1, 0x8, %o1
88 bne,pt %XCC, 1b 108 bne,pt %XCC, 1b
89 add %o0, 0x8, %o0 109 add %o0, 0x8, %o0
@@ -100,8 +120,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
100 120
1011: 1211:
102 subcc %o2, 4, %o2 122 subcc %o2, 4, %o2
103 EX_LD(LOAD(lduw, %o1, %g1)) 123 EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4)
104 EX_ST(STORE(stw, %g1, %o1 + %o3)) 124 EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4)
105 bgu,pt %XCC, 1b 125 bgu,pt %XCC, 1b
106 add %o1, 4, %o1 126 add %o1, 4, %o1
107 127
@@ -111,8 +131,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
111 .align 32 131 .align 32
11290: 13290:
113 subcc %o2, 1, %o2 133 subcc %o2, 1, %o2
114 EX_LD(LOAD(ldub, %o1, %g1)) 134 EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1)
115 EX_ST(STORE(stb, %g1, %o1 + %o3)) 135 EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1)
116 bgu,pt %XCC, 90b 136 bgu,pt %XCC, 90b
117 add %o1, 1, %o1 137 add %o1, 1, %o1
118 retl 138 retl
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 885f00e81d1a..69912d2f8b54 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -38,7 +38,7 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o
38lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o 38lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
39lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o 39lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
40 40
41lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o 41lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o
42lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o 42lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
43 43
44obj-$(CONFIG_SPARC64) += iomap.o 44obj-$(CONFIG_SPARC64) += iomap.o
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index d5242b8c4f94..b79a6998d87c 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -3,19 +3,19 @@
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define EX_LD(x) \ 6#define EX_LD(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one_asi;\ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_LD_FP(x) \ 14#define EX_LD_FP(x,y) \
1598: x; \ 1598: x; \
16 .section __ex_table,"a";\ 16 .section __ex_table,"a";\
17 .align 4; \ 17 .align 4; \
18 .word 98b, __retl_one_asi_fp;\ 18 .word 98b, y##_fp; \
19 .text; \ 19 .text; \
20 .align 4; 20 .align 4;
21 21
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 4e962d993b10..dcec55f254ab 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -3,19 +3,19 @@
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define EX_ST(x) \ 6#define EX_ST(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one_asi;\ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_ST_FP(x) \ 14#define EX_ST_FP(x,y) \
1598: x; \ 1598: x; \
16 .section __ex_table,"a";\ 16 .section __ex_table,"a";\
17 .align 4; \ 17 .align 4; \
18 .word 98b, __retl_one_asi_fp;\ 18 .word 98b, y##_fp; \
19 .text; \ 19 .text; \
20 .align 4; 20 .align 4;
21 21
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index d5f585df2f3f..c629dbd121b6 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7#include <linux/linkage.h>
7#include <asm/visasm.h> 8#include <asm/visasm.h>
8#include <asm/asi.h> 9#include <asm/asi.h>
9#define GLOBAL_SPARE %g7 10#define GLOBAL_SPARE %g7
@@ -32,21 +33,17 @@
32#endif 33#endif
33 34
34#ifndef EX_LD 35#ifndef EX_LD
35#define EX_LD(x) x 36#define EX_LD(x,y) x
36#endif 37#endif
37#ifndef EX_LD_FP 38#ifndef EX_LD_FP
38#define EX_LD_FP(x) x 39#define EX_LD_FP(x,y) x
39#endif 40#endif
40 41
41#ifndef EX_ST 42#ifndef EX_ST
42#define EX_ST(x) x 43#define EX_ST(x,y) x
43#endif 44#endif
44#ifndef EX_ST_FP 45#ifndef EX_ST_FP
45#define EX_ST_FP(x) x 46#define EX_ST_FP(x,y) x
46#endif
47
48#ifndef EX_RETVAL
49#define EX_RETVAL(x) x
50#endif 47#endif
51 48
52#ifndef LOAD 49#ifndef LOAD
@@ -140,45 +137,110 @@
140 fsrc2 %x6, %f12; \ 137 fsrc2 %x6, %f12; \
141 fsrc2 %x7, %f14; 138 fsrc2 %x7, %f14;
142#define FREG_LOAD_1(base, x0) \ 139#define FREG_LOAD_1(base, x0) \
143 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)) 140 EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1)
144#define FREG_LOAD_2(base, x0, x1) \ 141#define FREG_LOAD_2(base, x0, x1) \
145 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 142 EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
146 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); 143 EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1);
147#define FREG_LOAD_3(base, x0, x1, x2) \ 144#define FREG_LOAD_3(base, x0, x1, x2) \
148 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 145 EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
149 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 146 EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
150 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); 147 EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1);
151#define FREG_LOAD_4(base, x0, x1, x2, x3) \ 148#define FREG_LOAD_4(base, x0, x1, x2, x3) \
152 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 149 EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
153 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 150 EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
154 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ 151 EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
155 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); 152 EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1);
156#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ 153#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
157 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 154 EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
158 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 155 EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
159 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ 156 EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
160 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ 157 EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
161 EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); 158 EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1);
162#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ 159#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
163 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 160 EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
164 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 161 EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
165 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ 162 EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
166 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ 163 EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
167 EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ 164 EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
168 EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); 165 EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1);
169#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ 166#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
170 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 167 EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
171 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 168 EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
172 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ 169 EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
173 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ 170 EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
174 EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ 171 EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
175 EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \ 172 EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \
176 EX_LD_FP(LOAD(ldd, base + 0x30, %x6)); 173 EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1);
177 174
178 .register %g2,#scratch 175 .register %g2,#scratch
179 .register %g3,#scratch 176 .register %g3,#scratch
180 177
181 .text 178 .text
179#ifndef EX_RETVAL
180#define EX_RETVAL(x) x
181__restore_fp:
182 VISExitHalf
183__restore_asi:
184 retl
185 wr %g0, ASI_AIUS, %asi
186ENTRY(NG2_retl_o2)
187 ba,pt %xcc, __restore_asi
188 mov %o2, %o0
189ENDPROC(NG2_retl_o2)
190ENTRY(NG2_retl_o2_plus_1)
191 ba,pt %xcc, __restore_asi
192 add %o2, 1, %o0
193ENDPROC(NG2_retl_o2_plus_1)
194ENTRY(NG2_retl_o2_plus_4)
195 ba,pt %xcc, __restore_asi
196 add %o2, 4, %o0
197ENDPROC(NG2_retl_o2_plus_4)
198ENTRY(NG2_retl_o2_plus_8)
199 ba,pt %xcc, __restore_asi
200 add %o2, 8, %o0
201ENDPROC(NG2_retl_o2_plus_8)
202ENTRY(NG2_retl_o2_plus_o4_plus_1)
203 add %o4, 1, %o4
204 ba,pt %xcc, __restore_asi
205 add %o2, %o4, %o0
206ENDPROC(NG2_retl_o2_plus_o4_plus_1)
207ENTRY(NG2_retl_o2_plus_o4_plus_8)
208 add %o4, 8, %o4
209 ba,pt %xcc, __restore_asi
210 add %o2, %o4, %o0
211ENDPROC(NG2_retl_o2_plus_o4_plus_8)
212ENTRY(NG2_retl_o2_plus_o4_plus_16)
213 add %o4, 16, %o4
214 ba,pt %xcc, __restore_asi
215 add %o2, %o4, %o0
216ENDPROC(NG2_retl_o2_plus_o4_plus_16)
217ENTRY(NG2_retl_o2_plus_g1_fp)
218 ba,pt %xcc, __restore_fp
219 add %o2, %g1, %o0
220ENDPROC(NG2_retl_o2_plus_g1_fp)
221ENTRY(NG2_retl_o2_plus_g1_plus_64_fp)
222 add %g1, 64, %g1
223 ba,pt %xcc, __restore_fp
224 add %o2, %g1, %o0
225ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp)
226ENTRY(NG2_retl_o2_plus_g1_plus_1)
227 add %g1, 1, %g1
228 ba,pt %xcc, __restore_asi
229 add %o2, %g1, %o0
230ENDPROC(NG2_retl_o2_plus_g1_plus_1)
231ENTRY(NG2_retl_o2_and_7_plus_o4)
232 and %o2, 7, %o2
233 ba,pt %xcc, __restore_asi
234 add %o2, %o4, %o0
235ENDPROC(NG2_retl_o2_and_7_plus_o4)
236ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8)
237 and %o2, 7, %o2
238 add %o4, 8, %o4
239 ba,pt %xcc, __restore_asi
240 add %o2, %o4, %o0
241ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8)
242#endif
243
182 .align 64 244 .align 64
183 245
184 .globl FUNC_NAME 246 .globl FUNC_NAME
@@ -230,8 +292,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
230 sub %g0, %o4, %o4 ! bytes to align dst 292 sub %g0, %o4, %o4 ! bytes to align dst
231 sub %o2, %o4, %o2 293 sub %o2, %o4, %o2
2321: subcc %o4, 1, %o4 2941: subcc %o4, 1, %o4
233 EX_LD(LOAD(ldub, %o1, %g1)) 295 EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1)
234 EX_ST(STORE(stb, %g1, %o0)) 296 EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1)
235 add %o1, 1, %o1 297 add %o1, 1, %o1
236 bne,pt %XCC, 1b 298 bne,pt %XCC, 1b
237 add %o0, 1, %o0 299 add %o0, 1, %o0
@@ -281,11 +343,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
281 nop 343 nop
282 /* fall through for 0 < low bits < 8 */ 344 /* fall through for 0 < low bits < 8 */
283110: sub %o4, 64, %g2 345110: sub %o4, 64, %g2
284 EX_LD_FP(LOAD_BLK(%g2, %f0)) 346 EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1)
2851: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 3471: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
286 EX_LD_FP(LOAD_BLK(%o4, %f16)) 348 EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
287 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) 349 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
288 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 350 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
289 FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) 351 FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
290 subcc %g1, 64, %g1 352 subcc %g1, 64, %g1
291 add %o4, 64, %o4 353 add %o4, 64, %o4
@@ -296,10 +358,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
296 358
297120: sub %o4, 56, %g2 359120: sub %o4, 56, %g2
298 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) 360 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
2991: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 3611: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
300 EX_LD_FP(LOAD_BLK(%o4, %f16)) 362 EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
301 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) 363 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
302 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 364 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
303 FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) 365 FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
304 subcc %g1, 64, %g1 366 subcc %g1, 64, %g1
305 add %o4, 64, %o4 367 add %o4, 64, %o4
@@ -310,10 +372,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
310 372
311130: sub %o4, 48, %g2 373130: sub %o4, 48, %g2
312 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) 374 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
3131: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 3751: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
314 EX_LD_FP(LOAD_BLK(%o4, %f16)) 376 EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
315 FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) 377 FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
316 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 378 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
317 FREG_MOVE_6(f20, f22, f24, f26, f28, f30) 379 FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
318 subcc %g1, 64, %g1 380 subcc %g1, 64, %g1
319 add %o4, 64, %o4 381 add %o4, 64, %o4
@@ -324,10 +386,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
324 386
325140: sub %o4, 40, %g2 387140: sub %o4, 40, %g2
326 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) 388 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
3271: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 3891: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
328 EX_LD_FP(LOAD_BLK(%o4, %f16)) 390 EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
329 FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) 391 FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
330 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 392 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
331 FREG_MOVE_5(f22, f24, f26, f28, f30) 393 FREG_MOVE_5(f22, f24, f26, f28, f30)
332 subcc %g1, 64, %g1 394 subcc %g1, 64, %g1
333 add %o4, 64, %o4 395 add %o4, 64, %o4
@@ -338,10 +400,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
338 400
339150: sub %o4, 32, %g2 401150: sub %o4, 32, %g2
340 FREG_LOAD_4(%g2, f0, f2, f4, f6) 402 FREG_LOAD_4(%g2, f0, f2, f4, f6)
3411: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 4031: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
342 EX_LD_FP(LOAD_BLK(%o4, %f16)) 404 EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
343 FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) 405 FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
344 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 406 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
345 FREG_MOVE_4(f24, f26, f28, f30) 407 FREG_MOVE_4(f24, f26, f28, f30)
346 subcc %g1, 64, %g1 408 subcc %g1, 64, %g1
347 add %o4, 64, %o4 409 add %o4, 64, %o4
@@ -352,10 +414,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
352 414
353160: sub %o4, 24, %g2 415160: sub %o4, 24, %g2
354 FREG_LOAD_3(%g2, f0, f2, f4) 416 FREG_LOAD_3(%g2, f0, f2, f4)
3551: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 4171: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
356 EX_LD_FP(LOAD_BLK(%o4, %f16)) 418 EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
357 FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) 419 FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
358 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 420 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
359 FREG_MOVE_3(f26, f28, f30) 421 FREG_MOVE_3(f26, f28, f30)
360 subcc %g1, 64, %g1 422 subcc %g1, 64, %g1
361 add %o4, 64, %o4 423 add %o4, 64, %o4
@@ -366,10 +428,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
366 428
367170: sub %o4, 16, %g2 429170: sub %o4, 16, %g2
368 FREG_LOAD_2(%g2, f0, f2) 430 FREG_LOAD_2(%g2, f0, f2)
3691: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 4311: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
370 EX_LD_FP(LOAD_BLK(%o4, %f16)) 432 EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
371 FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) 433 FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
372 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 434 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
373 FREG_MOVE_2(f28, f30) 435 FREG_MOVE_2(f28, f30)
374 subcc %g1, 64, %g1 436 subcc %g1, 64, %g1
375 add %o4, 64, %o4 437 add %o4, 64, %o4
@@ -380,10 +442,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
380 442
381180: sub %o4, 8, %g2 443180: sub %o4, 8, %g2
382 FREG_LOAD_1(%g2, f0) 444 FREG_LOAD_1(%g2, f0)
3831: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 4451: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
384 EX_LD_FP(LOAD_BLK(%o4, %f16)) 446 EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
385 FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) 447 FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
386 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 448 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
387 FREG_MOVE_1(f30) 449 FREG_MOVE_1(f30)
388 subcc %g1, 64, %g1 450 subcc %g1, 64, %g1
389 add %o4, 64, %o4 451 add %o4, 64, %o4
@@ -393,10 +455,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
393 nop 455 nop
394 456
395190: 457190:
3961: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 4581: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
397 subcc %g1, 64, %g1 459 subcc %g1, 64, %g1
398 EX_LD_FP(LOAD_BLK(%o4, %f0)) 460 EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64)
399 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 461 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64)
400 add %o4, 64, %o4 462 add %o4, 64, %o4
401 bne,pt %xcc, 1b 463 bne,pt %xcc, 1b
402 LOAD(prefetch, %o4 + 64, #one_read) 464 LOAD(prefetch, %o4 + 64, #one_read)
@@ -423,28 +485,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
423 andn %o2, 0xf, %o4 485 andn %o2, 0xf, %o4
424 and %o2, 0xf, %o2 486 and %o2, 0xf, %o2
4251: subcc %o4, 0x10, %o4 4871: subcc %o4, 0x10, %o4
426 EX_LD(LOAD(ldx, %o1, %o5)) 488 EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16)
427 add %o1, 0x08, %o1 489 add %o1, 0x08, %o1
428 EX_LD(LOAD(ldx, %o1, %g1)) 490 EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16)
429 sub %o1, 0x08, %o1 491 sub %o1, 0x08, %o1
430 EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE)) 492 EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16)
431 add %o1, 0x8, %o1 493 add %o1, 0x8, %o1
432 EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE)) 494 EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8)
433 bgu,pt %XCC, 1b 495 bgu,pt %XCC, 1b
434 add %o1, 0x8, %o1 496 add %o1, 0x8, %o1
43573: andcc %o2, 0x8, %g0 49773: andcc %o2, 0x8, %g0
436 be,pt %XCC, 1f 498 be,pt %XCC, 1f
437 nop 499 nop
438 sub %o2, 0x8, %o2 500 sub %o2, 0x8, %o2
439 EX_LD(LOAD(ldx, %o1, %o5)) 501 EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8)
440 EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE)) 502 EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8)
441 add %o1, 0x8, %o1 503 add %o1, 0x8, %o1
4421: andcc %o2, 0x4, %g0 5041: andcc %o2, 0x4, %g0
443 be,pt %XCC, 1f 505 be,pt %XCC, 1f
444 nop 506 nop
445 sub %o2, 0x4, %o2 507 sub %o2, 0x4, %o2
446 EX_LD(LOAD(lduw, %o1, %o5)) 508 EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4)
447 EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE)) 509 EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
448 add %o1, 0x4, %o1 510 add %o1, 0x4, %o1
4491: cmp %o2, 0 5111: cmp %o2, 0
450 be,pt %XCC, 85f 512 be,pt %XCC, 85f
@@ -460,8 +522,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
460 sub %o2, %g1, %o2 522 sub %o2, %g1, %o2
461 523
4621: subcc %g1, 1, %g1 5241: subcc %g1, 1, %g1
463 EX_LD(LOAD(ldub, %o1, %o5)) 525 EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1)
464 EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE)) 526 EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1)
465 bgu,pt %icc, 1b 527 bgu,pt %icc, 1b
466 add %o1, 1, %o1 528 add %o1, 1, %o1
467 529
@@ -477,16 +539,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
477 539
4788: mov 64, GLOBAL_SPARE 5408: mov 64, GLOBAL_SPARE
479 andn %o1, 0x7, %o1 541 andn %o1, 0x7, %o1
480 EX_LD(LOAD(ldx, %o1, %g2)) 542 EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2)
481 sub GLOBAL_SPARE, %g1, GLOBAL_SPARE 543 sub GLOBAL_SPARE, %g1, GLOBAL_SPARE
482 andn %o2, 0x7, %o4 544 andn %o2, 0x7, %o4
483 sllx %g2, %g1, %g2 545 sllx %g2, %g1, %g2
4841: add %o1, 0x8, %o1 5461: add %o1, 0x8, %o1
485 EX_LD(LOAD(ldx, %o1, %g3)) 547 EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4)
486 subcc %o4, 0x8, %o4 548 subcc %o4, 0x8, %o4
487 srlx %g3, GLOBAL_SPARE, %o5 549 srlx %g3, GLOBAL_SPARE, %o5
488 or %o5, %g2, %o5 550 or %o5, %g2, %o5
489 EX_ST(STORE(stx, %o5, %o0)) 551 EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8)
490 add %o0, 0x8, %o0 552 add %o0, 0x8, %o0
491 bgu,pt %icc, 1b 553 bgu,pt %icc, 1b
492 sllx %g3, %g1, %g2 554 sllx %g3, %g1, %g2
@@ -506,8 +568,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
506 568
5071: 5691:
508 subcc %o2, 4, %o2 570 subcc %o2, 4, %o2
509 EX_LD(LOAD(lduw, %o1, %g1)) 571 EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4)
510 EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE)) 572 EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
511 bgu,pt %XCC, 1b 573 bgu,pt %XCC, 1b
512 add %o1, 4, %o1 574 add %o1, 4, %o1
513 575
@@ -517,8 +579,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
517 .align 32 579 .align 32
51890: 58090:
519 subcc %o2, 1, %o2 581 subcc %o2, 1, %o2
520 EX_LD(LOAD(ldub, %o1, %g1)) 582 EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1)
521 EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE)) 583 EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1)
522 bgu,pt %XCC, 90b 584 bgu,pt %XCC, 90b
523 add %o1, 1, %o1 585 add %o1, 1, %o1
524 retl 586 retl
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
index 2e8ee7ad07a9..16a286c1a528 100644
--- a/arch/sparc/lib/NG4copy_from_user.S
+++ b/arch/sparc/lib/NG4copy_from_user.S
@@ -3,19 +3,19 @@
3 * Copyright (C) 2012 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2012 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define EX_LD(x) \ 6#define EX_LD(x, y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one_asi;\ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_LD_FP(x) \ 14#define EX_LD_FP(x,y) \
1598: x; \ 1598: x; \
16 .section __ex_table,"a";\ 16 .section __ex_table,"a";\
17 .align 4; \ 17 .align 4; \
18 .word 98b, __retl_one_asi_fp;\ 18 .word 98b, y##_fp; \
19 .text; \ 19 .text; \
20 .align 4; 20 .align 4;
21 21
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
index be0bf4590df8..6b0276ffc858 100644
--- a/arch/sparc/lib/NG4copy_to_user.S
+++ b/arch/sparc/lib/NG4copy_to_user.S
@@ -3,19 +3,19 @@
3 * Copyright (C) 2012 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2012 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define EX_ST(x) \ 6#define EX_ST(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one_asi;\ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_ST_FP(x) \ 14#define EX_ST_FP(x,y) \
1598: x; \ 1598: x; \
16 .section __ex_table,"a";\ 16 .section __ex_table,"a";\
17 .align 4; \ 17 .align 4; \
18 .word 98b, __retl_one_asi_fp;\ 18 .word 98b, y##_fp; \
19 .text; \ 19 .text; \
20 .align 4; 20 .align 4;
21 21
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 8e13ee1f4454..75bb93b1437f 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7#include <linux/linkage.h>
7#include <asm/visasm.h> 8#include <asm/visasm.h>
8#include <asm/asi.h> 9#include <asm/asi.h>
9#define GLOBAL_SPARE %g7 10#define GLOBAL_SPARE %g7
@@ -46,22 +47,19 @@
46#endif 47#endif
47 48
48#ifndef EX_LD 49#ifndef EX_LD
49#define EX_LD(x) x 50#define EX_LD(x,y) x
50#endif 51#endif
51#ifndef EX_LD_FP 52#ifndef EX_LD_FP
52#define EX_LD_FP(x) x 53#define EX_LD_FP(x,y) x
53#endif 54#endif
54 55
55#ifndef EX_ST 56#ifndef EX_ST
56#define EX_ST(x) x 57#define EX_ST(x,y) x
57#endif 58#endif
58#ifndef EX_ST_FP 59#ifndef EX_ST_FP
59#define EX_ST_FP(x) x 60#define EX_ST_FP(x,y) x
60#endif 61#endif
61 62
62#ifndef EX_RETVAL
63#define EX_RETVAL(x) x
64#endif
65 63
66#ifndef LOAD 64#ifndef LOAD
67#define LOAD(type,addr,dest) type [addr], dest 65#define LOAD(type,addr,dest) type [addr], dest
@@ -94,6 +92,158 @@
94 .register %g3,#scratch 92 .register %g3,#scratch
95 93
96 .text 94 .text
95#ifndef EX_RETVAL
96#define EX_RETVAL(x) x
97__restore_asi_fp:
98 VISExitHalf
99__restore_asi:
100 retl
101 wr %g0, ASI_AIUS, %asi
102
103ENTRY(NG4_retl_o2)
104 ba,pt %xcc, __restore_asi
105 mov %o2, %o0
106ENDPROC(NG4_retl_o2)
107ENTRY(NG4_retl_o2_plus_1)
108 ba,pt %xcc, __restore_asi
109 add %o2, 1, %o0
110ENDPROC(NG4_retl_o2_plus_1)
111ENTRY(NG4_retl_o2_plus_4)
112 ba,pt %xcc, __restore_asi
113 add %o2, 4, %o0
114ENDPROC(NG4_retl_o2_plus_4)
115ENTRY(NG4_retl_o2_plus_o5)
116 ba,pt %xcc, __restore_asi
117 add %o2, %o5, %o0
118ENDPROC(NG4_retl_o2_plus_o5)
119ENTRY(NG4_retl_o2_plus_o5_plus_4)
120 add %o5, 4, %o5
121 ba,pt %xcc, __restore_asi
122 add %o2, %o5, %o0
123ENDPROC(NG4_retl_o2_plus_o5_plus_4)
124ENTRY(NG4_retl_o2_plus_o5_plus_8)
125 add %o5, 8, %o5
126 ba,pt %xcc, __restore_asi
127 add %o2, %o5, %o0
128ENDPROC(NG4_retl_o2_plus_o5_plus_8)
129ENTRY(NG4_retl_o2_plus_o5_plus_16)
130 add %o5, 16, %o5
131 ba,pt %xcc, __restore_asi
132 add %o2, %o5, %o0
133ENDPROC(NG4_retl_o2_plus_o5_plus_16)
134ENTRY(NG4_retl_o2_plus_o5_plus_24)
135 add %o5, 24, %o5
136 ba,pt %xcc, __restore_asi
137 add %o2, %o5, %o0
138ENDPROC(NG4_retl_o2_plus_o5_plus_24)
139ENTRY(NG4_retl_o2_plus_o5_plus_32)
140 add %o5, 32, %o5
141 ba,pt %xcc, __restore_asi
142 add %o2, %o5, %o0
143ENDPROC(NG4_retl_o2_plus_o5_plus_32)
144ENTRY(NG4_retl_o2_plus_g1)
145 ba,pt %xcc, __restore_asi
146 add %o2, %g1, %o0
147ENDPROC(NG4_retl_o2_plus_g1)
148ENTRY(NG4_retl_o2_plus_g1_plus_1)
149 add %g1, 1, %g1
150 ba,pt %xcc, __restore_asi
151 add %o2, %g1, %o0
152ENDPROC(NG4_retl_o2_plus_g1_plus_1)
153ENTRY(NG4_retl_o2_plus_g1_plus_8)
154 add %g1, 8, %g1
155 ba,pt %xcc, __restore_asi
156 add %o2, %g1, %o0
157ENDPROC(NG4_retl_o2_plus_g1_plus_8)
158ENTRY(NG4_retl_o2_plus_o4)
159 ba,pt %xcc, __restore_asi
160 add %o2, %o4, %o0
161ENDPROC(NG4_retl_o2_plus_o4)
162ENTRY(NG4_retl_o2_plus_o4_plus_8)
163 add %o4, 8, %o4
164 ba,pt %xcc, __restore_asi
165 add %o2, %o4, %o0
166ENDPROC(NG4_retl_o2_plus_o4_plus_8)
167ENTRY(NG4_retl_o2_plus_o4_plus_16)
168 add %o4, 16, %o4
169 ba,pt %xcc, __restore_asi
170 add %o2, %o4, %o0
171ENDPROC(NG4_retl_o2_plus_o4_plus_16)
172ENTRY(NG4_retl_o2_plus_o4_plus_24)
173 add %o4, 24, %o4
174 ba,pt %xcc, __restore_asi
175 add %o2, %o4, %o0
176ENDPROC(NG4_retl_o2_plus_o4_plus_24)
177ENTRY(NG4_retl_o2_plus_o4_plus_32)
178 add %o4, 32, %o4
179 ba,pt %xcc, __restore_asi
180 add %o2, %o4, %o0
181ENDPROC(NG4_retl_o2_plus_o4_plus_32)
182ENTRY(NG4_retl_o2_plus_o4_plus_40)
183 add %o4, 40, %o4
184 ba,pt %xcc, __restore_asi
185 add %o2, %o4, %o0
186ENDPROC(NG4_retl_o2_plus_o4_plus_40)
187ENTRY(NG4_retl_o2_plus_o4_plus_48)
188 add %o4, 48, %o4
189 ba,pt %xcc, __restore_asi
190 add %o2, %o4, %o0
191ENDPROC(NG4_retl_o2_plus_o4_plus_48)
192ENTRY(NG4_retl_o2_plus_o4_plus_56)
193 add %o4, 56, %o4
194 ba,pt %xcc, __restore_asi
195 add %o2, %o4, %o0
196ENDPROC(NG4_retl_o2_plus_o4_plus_56)
197ENTRY(NG4_retl_o2_plus_o4_plus_64)
198 add %o4, 64, %o4
199 ba,pt %xcc, __restore_asi
200 add %o2, %o4, %o0
201ENDPROC(NG4_retl_o2_plus_o4_plus_64)
202ENTRY(NG4_retl_o2_plus_o4_fp)
203 ba,pt %xcc, __restore_asi_fp
204 add %o2, %o4, %o0
205ENDPROC(NG4_retl_o2_plus_o4_fp)
206ENTRY(NG4_retl_o2_plus_o4_plus_8_fp)
207 add %o4, 8, %o4
208 ba,pt %xcc, __restore_asi_fp
209 add %o2, %o4, %o0
210ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp)
211ENTRY(NG4_retl_o2_plus_o4_plus_16_fp)
212 add %o4, 16, %o4
213 ba,pt %xcc, __restore_asi_fp
214 add %o2, %o4, %o0
215ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp)
216ENTRY(NG4_retl_o2_plus_o4_plus_24_fp)
217 add %o4, 24, %o4
218 ba,pt %xcc, __restore_asi_fp
219 add %o2, %o4, %o0
220ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp)
221ENTRY(NG4_retl_o2_plus_o4_plus_32_fp)
222 add %o4, 32, %o4
223 ba,pt %xcc, __restore_asi_fp
224 add %o2, %o4, %o0
225ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp)
226ENTRY(NG4_retl_o2_plus_o4_plus_40_fp)
227 add %o4, 40, %o4
228 ba,pt %xcc, __restore_asi_fp
229 add %o2, %o4, %o0
230ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp)
231ENTRY(NG4_retl_o2_plus_o4_plus_48_fp)
232 add %o4, 48, %o4
233 ba,pt %xcc, __restore_asi_fp
234 add %o2, %o4, %o0
235ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp)
236ENTRY(NG4_retl_o2_plus_o4_plus_56_fp)
237 add %o4, 56, %o4
238 ba,pt %xcc, __restore_asi_fp
239 add %o2, %o4, %o0
240ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp)
241ENTRY(NG4_retl_o2_plus_o4_plus_64_fp)
242 add %o4, 64, %o4
243 ba,pt %xcc, __restore_asi_fp
244 add %o2, %o4, %o0
245ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp)
246#endif
97 .align 64 247 .align 64
98 248
99 .globl FUNC_NAME 249 .globl FUNC_NAME
@@ -124,12 +274,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
124 brz,pt %g1, 51f 274 brz,pt %g1, 51f
125 sub %o2, %g1, %o2 275 sub %o2, %g1, %o2
126 276
1271: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) 277
2781: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
128 add %o1, 1, %o1 279 add %o1, 1, %o1
129 subcc %g1, 1, %g1 280 subcc %g1, 1, %g1
130 add %o0, 1, %o0 281 add %o0, 1, %o0
131 bne,pt %icc, 1b 282 bne,pt %icc, 1b
132 EX_ST(STORE(stb, %g2, %o0 - 0x01)) 283 EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
133 284
13451: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) 28551: LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
135 LOAD(prefetch, %o1 + 0x080, #n_reads_strong) 286 LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
@@ -154,43 +305,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
154 brz,pt %g1, .Llarge_aligned 305 brz,pt %g1, .Llarge_aligned
155 sub %o2, %g1, %o2 306 sub %o2, %g1, %o2
156 307
1571: EX_LD(LOAD(ldx, %o1 + 0x00, %g2)) 3081: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
158 add %o1, 8, %o1 309 add %o1, 8, %o1
159 subcc %g1, 8, %g1 310 subcc %g1, 8, %g1
160 add %o0, 8, %o0 311 add %o0, 8, %o0
161 bne,pt %icc, 1b 312 bne,pt %icc, 1b
162 EX_ST(STORE(stx, %g2, %o0 - 0x08)) 313 EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8)
163 314
164.Llarge_aligned: 315.Llarge_aligned:
165 /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ 316 /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
166 andn %o2, 0x3f, %o4 317 andn %o2, 0x3f, %o4
167 sub %o2, %o4, %o2 318 sub %o2, %o4, %o2
168 319
1691: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) 3201: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4)
170 add %o1, 0x40, %o1 321 add %o1, 0x40, %o1
171 EX_LD(LOAD(ldx, %o1 - 0x38, %g2)) 322 EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4)
172 subcc %o4, 0x40, %o4 323 subcc %o4, 0x40, %o4
173 EX_LD(LOAD(ldx, %o1 - 0x30, %g3)) 324 EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64)
174 EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE)) 325 EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64)
175 EX_LD(LOAD(ldx, %o1 - 0x20, %o5)) 326 EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64)
176 EX_ST(STORE_INIT(%g1, %o0)) 327 EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64)
177 add %o0, 0x08, %o0 328 add %o0, 0x08, %o0
178 EX_ST(STORE_INIT(%g2, %o0)) 329 EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56)
179 add %o0, 0x08, %o0 330 add %o0, 0x08, %o0
180 EX_LD(LOAD(ldx, %o1 - 0x18, %g2)) 331 EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48)
181 EX_ST(STORE_INIT(%g3, %o0)) 332 EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48)
182 add %o0, 0x08, %o0 333 add %o0, 0x08, %o0
183 EX_LD(LOAD(ldx, %o1 - 0x10, %g3)) 334 EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40)
184 EX_ST(STORE_INIT(GLOBAL_SPARE, %o0)) 335 EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40)
185 add %o0, 0x08, %o0 336 add %o0, 0x08, %o0
186 EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE)) 337 EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32)
187 EX_ST(STORE_INIT(%o5, %o0)) 338 EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32)
188 add %o0, 0x08, %o0 339 add %o0, 0x08, %o0
189 EX_ST(STORE_INIT(%g2, %o0)) 340 EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24)
190 add %o0, 0x08, %o0 341 add %o0, 0x08, %o0
191 EX_ST(STORE_INIT(%g3, %o0)) 342 EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16)
192 add %o0, 0x08, %o0 343 add %o0, 0x08, %o0
193 EX_ST(STORE_INIT(GLOBAL_SPARE, %o0)) 344 EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8)
194 add %o0, 0x08, %o0 345 add %o0, 0x08, %o0
195 bne,pt %icc, 1b 346 bne,pt %icc, 1b
196 LOAD(prefetch, %o1 + 0x200, #n_reads_strong) 347 LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
@@ -216,17 +367,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
216 sub %o2, %o4, %o2 367 sub %o2, %o4, %o2
217 alignaddr %o1, %g0, %g1 368 alignaddr %o1, %g0, %g1
218 add %o1, %o4, %o1 369 add %o1, %o4, %o1
219 EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0)) 370 EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4)
2201: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2)) 3711: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4)
221 subcc %o4, 0x40, %o4 372 subcc %o4, 0x40, %o4
222 EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4)) 373 EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64)
223 EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6)) 374 EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64)
224 EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8)) 375 EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64)
225 EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10)) 376 EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64)
226 EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12)) 377 EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64)
227 EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14)) 378 EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64)
228 faligndata %f0, %f2, %f16 379 faligndata %f0, %f2, %f16
229 EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0)) 380 EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64)
230 faligndata %f2, %f4, %f18 381 faligndata %f2, %f4, %f18
231 add %g1, 0x40, %g1 382 add %g1, 0x40, %g1
232 faligndata %f4, %f6, %f20 383 faligndata %f4, %f6, %f20
@@ -235,14 +386,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
235 faligndata %f10, %f12, %f26 386 faligndata %f10, %f12, %f26
236 faligndata %f12, %f14, %f28 387 faligndata %f12, %f14, %f28
237 faligndata %f14, %f0, %f30 388 faligndata %f14, %f0, %f30
238 EX_ST_FP(STORE(std, %f16, %o0 + 0x00)) 389 EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64)
239 EX_ST_FP(STORE(std, %f18, %o0 + 0x08)) 390 EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56)
240 EX_ST_FP(STORE(std, %f20, %o0 + 0x10)) 391 EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48)
241 EX_ST_FP(STORE(std, %f22, %o0 + 0x18)) 392 EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40)
242 EX_ST_FP(STORE(std, %f24, %o0 + 0x20)) 393 EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32)
243 EX_ST_FP(STORE(std, %f26, %o0 + 0x28)) 394 EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24)
244 EX_ST_FP(STORE(std, %f28, %o0 + 0x30)) 395 EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16)
245 EX_ST_FP(STORE(std, %f30, %o0 + 0x38)) 396 EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8)
246 add %o0, 0x40, %o0 397 add %o0, 0x40, %o0
247 bne,pt %icc, 1b 398 bne,pt %icc, 1b
248 LOAD(prefetch, %g1 + 0x200, #n_reads_strong) 399 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
@@ -270,37 +421,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
270 andncc %o2, 0x20 - 1, %o5 421 andncc %o2, 0x20 - 1, %o5
271 be,pn %icc, 2f 422 be,pn %icc, 2f
272 sub %o2, %o5, %o2 423 sub %o2, %o5, %o2
2731: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) 4241: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
274 EX_LD(LOAD(ldx, %o1 + 0x08, %g2)) 425 EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5)
275 EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE)) 426 EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5)
276 EX_LD(LOAD(ldx, %o1 + 0x18, %o4)) 427 EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5)
277 add %o1, 0x20, %o1 428 add %o1, 0x20, %o1
278 subcc %o5, 0x20, %o5 429 subcc %o5, 0x20, %o5
279 EX_ST(STORE(stx, %g1, %o0 + 0x00)) 430 EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32)
280 EX_ST(STORE(stx, %g2, %o0 + 0x08)) 431 EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24)
281 EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10)) 432 EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24)
282 EX_ST(STORE(stx, %o4, %o0 + 0x18)) 433 EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8)
283 bne,pt %icc, 1b 434 bne,pt %icc, 1b
284 add %o0, 0x20, %o0 435 add %o0, 0x20, %o0
2852: andcc %o2, 0x18, %o5 4362: andcc %o2, 0x18, %o5
286 be,pt %icc, 3f 437 be,pt %icc, 3f
287 sub %o2, %o5, %o2 438 sub %o2, %o5, %o2
2881: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) 439
4401: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
289 add %o1, 0x08, %o1 441 add %o1, 0x08, %o1
290 add %o0, 0x08, %o0 442 add %o0, 0x08, %o0
291 subcc %o5, 0x08, %o5 443 subcc %o5, 0x08, %o5
292 bne,pt %icc, 1b 444 bne,pt %icc, 1b
293 EX_ST(STORE(stx, %g1, %o0 - 0x08)) 445 EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8)
2943: brz,pt %o2, .Lexit 4463: brz,pt %o2, .Lexit
295 cmp %o2, 0x04 447 cmp %o2, 0x04
296 bl,pn %icc, .Ltiny 448 bl,pn %icc, .Ltiny
297 nop 449 nop
298 EX_LD(LOAD(lduw, %o1 + 0x00, %g1)) 450 EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2)
299 add %o1, 0x04, %o1 451 add %o1, 0x04, %o1
300 add %o0, 0x04, %o0 452 add %o0, 0x04, %o0
301 subcc %o2, 0x04, %o2 453 subcc %o2, 0x04, %o2
302 bne,pn %icc, .Ltiny 454 bne,pn %icc, .Ltiny
303 EX_ST(STORE(stw, %g1, %o0 - 0x04)) 455 EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4)
304 ba,a,pt %icc, .Lexit 456 ba,a,pt %icc, .Lexit
305.Lmedium_unaligned: 457.Lmedium_unaligned:
306 /* First get dest 8 byte aligned. */ 458 /* First get dest 8 byte aligned. */
@@ -309,12 +461,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
309 brz,pt %g1, 2f 461 brz,pt %g1, 2f
310 sub %o2, %g1, %o2 462 sub %o2, %g1, %o2
311 463
3121: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) 4641: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
313 add %o1, 1, %o1 465 add %o1, 1, %o1
314 subcc %g1, 1, %g1 466 subcc %g1, 1, %g1
315 add %o0, 1, %o0 467 add %o0, 1, %o0
316 bne,pt %icc, 1b 468 bne,pt %icc, 1b
317 EX_ST(STORE(stb, %g2, %o0 - 0x01)) 469 EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
3182: 4702:
319 and %o1, 0x7, %g1 471 and %o1, 0x7, %g1
320 brz,pn %g1, .Lmedium_noprefetch 472 brz,pn %g1, .Lmedium_noprefetch
@@ -322,16 +474,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
322 mov 64, %g2 474 mov 64, %g2
323 sub %g2, %g1, %g2 475 sub %g2, %g1, %g2
324 andn %o1, 0x7, %o1 476 andn %o1, 0x7, %o1
325 EX_LD(LOAD(ldx, %o1 + 0x00, %o4)) 477 EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2)
326 sllx %o4, %g1, %o4 478 sllx %o4, %g1, %o4
327 andn %o2, 0x08 - 1, %o5 479 andn %o2, 0x08 - 1, %o5
328 sub %o2, %o5, %o2 480 sub %o2, %o5, %o2
3291: EX_LD(LOAD(ldx, %o1 + 0x08, %g3)) 4811: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5)
330 add %o1, 0x08, %o1 482 add %o1, 0x08, %o1
331 subcc %o5, 0x08, %o5 483 subcc %o5, 0x08, %o5
332 srlx %g3, %g2, GLOBAL_SPARE 484 srlx %g3, %g2, GLOBAL_SPARE
333 or GLOBAL_SPARE, %o4, GLOBAL_SPARE 485 or GLOBAL_SPARE, %o4, GLOBAL_SPARE
334 EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00)) 486 EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8)
335 add %o0, 0x08, %o0 487 add %o0, 0x08, %o0
336 bne,pt %icc, 1b 488 bne,pt %icc, 1b
337 sllx %g3, %g1, %o4 489 sllx %g3, %g1, %o4
@@ -342,17 +494,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
342 ba,pt %icc, .Lsmall_unaligned 494 ba,pt %icc, .Lsmall_unaligned
343 495
344.Ltiny: 496.Ltiny:
345 EX_LD(LOAD(ldub, %o1 + 0x00, %g1)) 497 EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
346 subcc %o2, 1, %o2 498 subcc %o2, 1, %o2
347 be,pn %icc, .Lexit 499 be,pn %icc, .Lexit
348 EX_ST(STORE(stb, %g1, %o0 + 0x00)) 500 EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1)
349 EX_LD(LOAD(ldub, %o1 + 0x01, %g1)) 501 EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2)
350 subcc %o2, 1, %o2 502 subcc %o2, 1, %o2
351 be,pn %icc, .Lexit 503 be,pn %icc, .Lexit
352 EX_ST(STORE(stb, %g1, %o0 + 0x01)) 504 EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1)
353 EX_LD(LOAD(ldub, %o1 + 0x02, %g1)) 505 EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2)
354 ba,pt %icc, .Lexit 506 ba,pt %icc, .Lexit
355 EX_ST(STORE(stb, %g1, %o0 + 0x02)) 507 EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2)
356 508
357.Lsmall: 509.Lsmall:
358 andcc %g2, 0x3, %g0 510 andcc %g2, 0x3, %g0
@@ -360,22 +512,22 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
360 andn %o2, 0x4 - 1, %o5 512 andn %o2, 0x4 - 1, %o5
361 sub %o2, %o5, %o2 513 sub %o2, %o5, %o2
3621: 5141:
363 EX_LD(LOAD(lduw, %o1 + 0x00, %g1)) 515 EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
364 add %o1, 0x04, %o1 516 add %o1, 0x04, %o1
365 subcc %o5, 0x04, %o5 517 subcc %o5, 0x04, %o5
366 add %o0, 0x04, %o0 518 add %o0, 0x04, %o0
367 bne,pt %icc, 1b 519 bne,pt %icc, 1b
368 EX_ST(STORE(stw, %g1, %o0 - 0x04)) 520 EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4)
369 brz,pt %o2, .Lexit 521 brz,pt %o2, .Lexit
370 nop 522 nop
371 ba,a,pt %icc, .Ltiny 523 ba,a,pt %icc, .Ltiny
372 524
373.Lsmall_unaligned: 525.Lsmall_unaligned:
3741: EX_LD(LOAD(ldub, %o1 + 0x00, %g1)) 5261: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
375 add %o1, 1, %o1 527 add %o1, 1, %o1
376 add %o0, 1, %o0 528 add %o0, 1, %o0
377 subcc %o2, 1, %o2 529 subcc %o2, 1, %o2
378 bne,pt %icc, 1b 530 bne,pt %icc, 1b
379 EX_ST(STORE(stb, %g1, %o0 - 0x01)) 531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
380 ba,a,pt %icc, .Lexit 532 ba,a,pt %icc, .Lexit
381 .size FUNC_NAME, .-FUNC_NAME 533 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
index 5d1e4d1ac21e..9cd42fcbc781 100644
--- a/arch/sparc/lib/NGcopy_from_user.S
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -3,11 +3,11 @@
3 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define EX_LD(x) \ 6#define EX_LD(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __ret_one_asi;\ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
index ff630dcb273c..5c358afd464e 100644
--- a/arch/sparc/lib/NGcopy_to_user.S
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -3,11 +3,11 @@
3 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#define EX_ST(x) \ 6#define EX_ST(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __ret_one_asi;\ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index 96a14caf6966..d88c4ed50a00 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7#include <linux/linkage.h>
7#include <asm/asi.h> 8#include <asm/asi.h>
8#include <asm/thread_info.h> 9#include <asm/thread_info.h>
9#define GLOBAL_SPARE %g7 10#define GLOBAL_SPARE %g7
@@ -27,15 +28,11 @@
27#endif 28#endif
28 29
29#ifndef EX_LD 30#ifndef EX_LD
30#define EX_LD(x) x 31#define EX_LD(x,y) x
31#endif 32#endif
32 33
33#ifndef EX_ST 34#ifndef EX_ST
34#define EX_ST(x) x 35#define EX_ST(x,y) x
35#endif
36
37#ifndef EX_RETVAL
38#define EX_RETVAL(x) x
39#endif 36#endif
40 37
41#ifndef LOAD 38#ifndef LOAD
@@ -79,6 +76,92 @@
79 .register %g3,#scratch 76 .register %g3,#scratch
80 77
81 .text 78 .text
79#ifndef EX_RETVAL
80#define EX_RETVAL(x) x
81__restore_asi:
82 ret
83 wr %g0, ASI_AIUS, %asi
84 restore
85ENTRY(NG_ret_i2_plus_i4_plus_1)
86 ba,pt %xcc, __restore_asi
87 add %i2, %i5, %i0
88ENDPROC(NG_ret_i2_plus_i4_plus_1)
89ENTRY(NG_ret_i2_plus_g1)
90 ba,pt %xcc, __restore_asi
91 add %i2, %g1, %i0
92ENDPROC(NG_ret_i2_plus_g1)
93ENTRY(NG_ret_i2_plus_g1_minus_8)
94 sub %g1, 8, %g1
95 ba,pt %xcc, __restore_asi
96 add %i2, %g1, %i0
97ENDPROC(NG_ret_i2_plus_g1_minus_8)
98ENTRY(NG_ret_i2_plus_g1_minus_16)
99 sub %g1, 16, %g1
100 ba,pt %xcc, __restore_asi
101 add %i2, %g1, %i0
102ENDPROC(NG_ret_i2_plus_g1_minus_16)
103ENTRY(NG_ret_i2_plus_g1_minus_24)
104 sub %g1, 24, %g1
105 ba,pt %xcc, __restore_asi
106 add %i2, %g1, %i0
107ENDPROC(NG_ret_i2_plus_g1_minus_24)
108ENTRY(NG_ret_i2_plus_g1_minus_32)
109 sub %g1, 32, %g1
110 ba,pt %xcc, __restore_asi
111 add %i2, %g1, %i0
112ENDPROC(NG_ret_i2_plus_g1_minus_32)
113ENTRY(NG_ret_i2_plus_g1_minus_40)
114 sub %g1, 40, %g1
115 ba,pt %xcc, __restore_asi
116 add %i2, %g1, %i0
117ENDPROC(NG_ret_i2_plus_g1_minus_40)
118ENTRY(NG_ret_i2_plus_g1_minus_48)
119 sub %g1, 48, %g1
120 ba,pt %xcc, __restore_asi
121 add %i2, %g1, %i0
122ENDPROC(NG_ret_i2_plus_g1_minus_48)
123ENTRY(NG_ret_i2_plus_g1_minus_56)
124 sub %g1, 56, %g1
125 ba,pt %xcc, __restore_asi
126 add %i2, %g1, %i0
127ENDPROC(NG_ret_i2_plus_g1_minus_56)
128ENTRY(NG_ret_i2_plus_i4)
129 ba,pt %xcc, __restore_asi
130 add %i2, %i4, %i0
131ENDPROC(NG_ret_i2_plus_i4)
132ENTRY(NG_ret_i2_plus_i4_minus_8)
133 sub %i4, 8, %i4
134 ba,pt %xcc, __restore_asi
135 add %i2, %i4, %i0
136ENDPROC(NG_ret_i2_plus_i4_minus_8)
137ENTRY(NG_ret_i2_plus_8)
138 ba,pt %xcc, __restore_asi
139 add %i2, 8, %i0
140ENDPROC(NG_ret_i2_plus_8)
141ENTRY(NG_ret_i2_plus_4)
142 ba,pt %xcc, __restore_asi
143 add %i2, 4, %i0
144ENDPROC(NG_ret_i2_plus_4)
145ENTRY(NG_ret_i2_plus_1)
146 ba,pt %xcc, __restore_asi
147 add %i2, 1, %i0
148ENDPROC(NG_ret_i2_plus_1)
149ENTRY(NG_ret_i2_plus_g1_plus_1)
150 add %g1, 1, %g1
151 ba,pt %xcc, __restore_asi
152 add %i2, %g1, %i0
153ENDPROC(NG_ret_i2_plus_g1_plus_1)
154ENTRY(NG_ret_i2)
155 ba,pt %xcc, __restore_asi
156 mov %i2, %i0
157ENDPROC(NG_ret_i2)
158ENTRY(NG_ret_i2_and_7_plus_i4)
159 and %i2, 7, %i2
160 ba,pt %xcc, __restore_asi
161 add %i2, %i4, %i0
162ENDPROC(NG_ret_i2_and_7_plus_i4)
163#endif
164
82 .align 64 165 .align 64
83 166
84 .globl FUNC_NAME 167 .globl FUNC_NAME
@@ -126,8 +209,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
126 sub %g0, %i4, %i4 ! bytes to align dst 209 sub %g0, %i4, %i4 ! bytes to align dst
127 sub %i2, %i4, %i2 210 sub %i2, %i4, %i2
1281: subcc %i4, 1, %i4 2111: subcc %i4, 1, %i4
129 EX_LD(LOAD(ldub, %i1, %g1)) 212 EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1)
130 EX_ST(STORE(stb, %g1, %o0)) 213 EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1)
131 add %i1, 1, %i1 214 add %i1, 1, %i1
132 bne,pt %XCC, 1b 215 bne,pt %XCC, 1b
133 add %o0, 1, %o0 216 add %o0, 1, %o0
@@ -160,7 +243,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
160 and %i4, 0x7, GLOBAL_SPARE 243 and %i4, 0x7, GLOBAL_SPARE
161 sll GLOBAL_SPARE, 3, GLOBAL_SPARE 244 sll GLOBAL_SPARE, 3, GLOBAL_SPARE
162 mov 64, %i5 245 mov 64, %i5
163 EX_LD(LOAD_TWIN(%i1, %g2, %g3)) 246 EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1)
164 sub %i5, GLOBAL_SPARE, %i5 247 sub %i5, GLOBAL_SPARE, %i5
165 mov 16, %o4 248 mov 16, %o4
166 mov 32, %o5 249 mov 32, %o5
@@ -178,31 +261,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
178 srlx WORD3, PRE_SHIFT, TMP; \ 261 srlx WORD3, PRE_SHIFT, TMP; \
179 or WORD2, TMP, WORD2; 262 or WORD2, TMP, WORD2;
180 263
1818: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) 2648: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
182 MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) 265 MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
183 LOAD(prefetch, %i1 + %i3, #one_read) 266 LOAD(prefetch, %i1 + %i3, #one_read)
184 267
185 EX_ST(STORE_INIT(%g2, %o0 + 0x00)) 268 EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1)
186 EX_ST(STORE_INIT(%g3, %o0 + 0x08)) 269 EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
187 270
188 EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) 271 EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
189 MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) 272 MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
190 273
191 EX_ST(STORE_INIT(%o2, %o0 + 0x10)) 274 EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
192 EX_ST(STORE_INIT(%o3, %o0 + 0x18)) 275 EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
193 276
194 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) 277 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
195 MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) 278 MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
196 279
197 EX_ST(STORE_INIT(%g2, %o0 + 0x20)) 280 EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
198 EX_ST(STORE_INIT(%g3, %o0 + 0x28)) 281 EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
199 282
200 EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) 283 EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
201 add %i1, 64, %i1 284 add %i1, 64, %i1
202 MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) 285 MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
203 286
204 EX_ST(STORE_INIT(%o2, %o0 + 0x30)) 287 EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
205 EX_ST(STORE_INIT(%o3, %o0 + 0x38)) 288 EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
206 289
207 subcc %g1, 64, %g1 290 subcc %g1, 64, %g1
208 bne,pt %XCC, 8b 291 bne,pt %XCC, 8b
@@ -211,31 +294,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
211 ba,pt %XCC, 60f 294 ba,pt %XCC, 60f
212 add %i1, %i4, %i1 295 add %i1, %i4, %i1
213 296
2149: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) 2979: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
215 MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) 298 MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
216 LOAD(prefetch, %i1 + %i3, #one_read) 299 LOAD(prefetch, %i1 + %i3, #one_read)
217 300
218 EX_ST(STORE_INIT(%g3, %o0 + 0x00)) 301 EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1)
219 EX_ST(STORE_INIT(%o2, %o0 + 0x08)) 302 EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
220 303
221 EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) 304 EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
222 MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) 305 MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
223 306
224 EX_ST(STORE_INIT(%o3, %o0 + 0x10)) 307 EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
225 EX_ST(STORE_INIT(%g2, %o0 + 0x18)) 308 EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
226 309
227 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) 310 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
228 MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) 311 MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
229 312
230 EX_ST(STORE_INIT(%g3, %o0 + 0x20)) 313 EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
231 EX_ST(STORE_INIT(%o2, %o0 + 0x28)) 314 EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
232 315
233 EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) 316 EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
234 add %i1, 64, %i1 317 add %i1, 64, %i1
235 MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) 318 MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
236 319
237 EX_ST(STORE_INIT(%o3, %o0 + 0x30)) 320 EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
238 EX_ST(STORE_INIT(%g2, %o0 + 0x38)) 321 EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
239 322
240 subcc %g1, 64, %g1 323 subcc %g1, 64, %g1
241 bne,pt %XCC, 9b 324 bne,pt %XCC, 9b
@@ -249,25 +332,25 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
249 * one twin load ahead, then add 8 back into source when 332 * one twin load ahead, then add 8 back into source when
250 * we finish the loop. 333 * we finish the loop.
251 */ 334 */
252 EX_LD(LOAD_TWIN(%i1, %o4, %o5)) 335 EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1)
253 mov 16, %o7 336 mov 16, %o7
254 mov 32, %g2 337 mov 32, %g2
255 mov 48, %g3 338 mov 48, %g3
256 mov 64, %o1 339 mov 64, %o1
2571: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) 3401: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
258 LOAD(prefetch, %i1 + %o1, #one_read) 341 LOAD(prefetch, %i1 + %o1, #one_read)
259 EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line 342 EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
260 EX_ST(STORE_INIT(%o2, %o0 + 0x08)) 343 EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
261 EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) 344 EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
262 EX_ST(STORE_INIT(%o3, %o0 + 0x10)) 345 EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
263 EX_ST(STORE_INIT(%o4, %o0 + 0x18)) 346 EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
264 EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) 347 EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
265 EX_ST(STORE_INIT(%o5, %o0 + 0x20)) 348 EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
266 EX_ST(STORE_INIT(%o2, %o0 + 0x28)) 349 EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
267 EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5)) 350 EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48)
268 add %i1, 64, %i1 351 add %i1, 64, %i1
269 EX_ST(STORE_INIT(%o3, %o0 + 0x30)) 352 EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
270 EX_ST(STORE_INIT(%o4, %o0 + 0x38)) 353 EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
271 subcc %g1, 64, %g1 354 subcc %g1, 64, %g1
272 bne,pt %XCC, 1b 355 bne,pt %XCC, 1b
273 add %o0, 64, %o0 356 add %o0, 64, %o0
@@ -282,20 +365,20 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
282 mov 32, %g2 365 mov 32, %g2
283 mov 48, %g3 366 mov 48, %g3
284 mov 64, %o1 367 mov 64, %o1
2851: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5)) 3681: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1)
286 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) 369 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
287 LOAD(prefetch, %i1 + %o1, #one_read) 370 LOAD(prefetch, %i1 + %o1, #one_read)
288 EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line 371 EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
289 EX_ST(STORE_INIT(%o5, %o0 + 0x08)) 372 EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
290 EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) 373 EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
291 EX_ST(STORE_INIT(%o2, %o0 + 0x10)) 374 EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
292 EX_ST(STORE_INIT(%o3, %o0 + 0x18)) 375 EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
293 EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) 376 EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
294 add %i1, 64, %i1 377 add %i1, 64, %i1
295 EX_ST(STORE_INIT(%o4, %o0 + 0x20)) 378 EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
296 EX_ST(STORE_INIT(%o5, %o0 + 0x28)) 379 EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
297 EX_ST(STORE_INIT(%o2, %o0 + 0x30)) 380 EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
298 EX_ST(STORE_INIT(%o3, %o0 + 0x38)) 381 EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
299 subcc %g1, 64, %g1 382 subcc %g1, 64, %g1
300 bne,pt %XCC, 1b 383 bne,pt %XCC, 1b
301 add %o0, 64, %o0 384 add %o0, 64, %o0
@@ -321,28 +404,28 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
321 andn %i2, 0xf, %i4 404 andn %i2, 0xf, %i4
322 and %i2, 0xf, %i2 405 and %i2, 0xf, %i2
3231: subcc %i4, 0x10, %i4 4061: subcc %i4, 0x10, %i4
324 EX_LD(LOAD(ldx, %i1, %o4)) 407 EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
325 add %i1, 0x08, %i1 408 add %i1, 0x08, %i1
326 EX_LD(LOAD(ldx, %i1, %g1)) 409 EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
327 sub %i1, 0x08, %i1 410 sub %i1, 0x08, %i1
328 EX_ST(STORE(stx, %o4, %i1 + %i3)) 411 EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
329 add %i1, 0x8, %i1 412 add %i1, 0x8, %i1
330 EX_ST(STORE(stx, %g1, %i1 + %i3)) 413 EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
331 bgu,pt %XCC, 1b 414 bgu,pt %XCC, 1b
332 add %i1, 0x8, %i1 415 add %i1, 0x8, %i1
33373: andcc %i2, 0x8, %g0 41673: andcc %i2, 0x8, %g0
334 be,pt %XCC, 1f 417 be,pt %XCC, 1f
335 nop 418 nop
336 sub %i2, 0x8, %i2 419 sub %i2, 0x8, %i2
337 EX_LD(LOAD(ldx, %i1, %o4)) 420 EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8)
338 EX_ST(STORE(stx, %o4, %i1 + %i3)) 421 EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8)
339 add %i1, 0x8, %i1 422 add %i1, 0x8, %i1
3401: andcc %i2, 0x4, %g0 4231: andcc %i2, 0x4, %g0
341 be,pt %XCC, 1f 424 be,pt %XCC, 1f
342 nop 425 nop
343 sub %i2, 0x4, %i2 426 sub %i2, 0x4, %i2
344 EX_LD(LOAD(lduw, %i1, %i5)) 427 EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4)
345 EX_ST(STORE(stw, %i5, %i1 + %i3)) 428 EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4)
346 add %i1, 0x4, %i1 429 add %i1, 0x4, %i1
3471: cmp %i2, 0 4301: cmp %i2, 0
348 be,pt %XCC, 85f 431 be,pt %XCC, 85f
@@ -358,8 +441,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
358 sub %i2, %g1, %i2 441 sub %i2, %g1, %i2
359 442
3601: subcc %g1, 1, %g1 4431: subcc %g1, 1, %g1
361 EX_LD(LOAD(ldub, %i1, %i5)) 444 EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1)
362 EX_ST(STORE(stb, %i5, %i1 + %i3)) 445 EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1)
363 bgu,pt %icc, 1b 446 bgu,pt %icc, 1b
364 add %i1, 1, %i1 447 add %i1, 1, %i1
365 448
@@ -375,16 +458,16 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
375 458
3768: mov 64, %i3 4598: mov 64, %i3
377 andn %i1, 0x7, %i1 460 andn %i1, 0x7, %i1
378 EX_LD(LOAD(ldx, %i1, %g2)) 461 EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2)
379 sub %i3, %g1, %i3 462 sub %i3, %g1, %i3
380 andn %i2, 0x7, %i4 463 andn %i2, 0x7, %i4
381 sllx %g2, %g1, %g2 464 sllx %g2, %g1, %g2
3821: add %i1, 0x8, %i1 4651: add %i1, 0x8, %i1
383 EX_LD(LOAD(ldx, %i1, %g3)) 466 EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4)
384 subcc %i4, 0x8, %i4 467 subcc %i4, 0x8, %i4
385 srlx %g3, %i3, %i5 468 srlx %g3, %i3, %i5
386 or %i5, %g2, %i5 469 or %i5, %g2, %i5
387 EX_ST(STORE(stx, %i5, %o0)) 470 EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
388 add %o0, 0x8, %o0 471 add %o0, 0x8, %o0
389 bgu,pt %icc, 1b 472 bgu,pt %icc, 1b
390 sllx %g3, %g1, %g2 473 sllx %g3, %g1, %g2
@@ -404,8 +487,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
404 487
4051: 4881:
406 subcc %i2, 4, %i2 489 subcc %i2, 4, %i2
407 EX_LD(LOAD(lduw, %i1, %g1)) 490 EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4)
408 EX_ST(STORE(stw, %g1, %i1 + %i3)) 491 EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4)
409 bgu,pt %XCC, 1b 492 bgu,pt %XCC, 1b
410 add %i1, 4, %i1 493 add %i1, 4, %i1
411 494
@@ -415,8 +498,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
415 .align 32 498 .align 32
41690: 49990:
417 subcc %i2, 1, %i2 500 subcc %i2, 1, %i2
418 EX_LD(LOAD(ldub, %i1, %g1)) 501 EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1)
419 EX_ST(STORE(stb, %g1, %i1 + %i3)) 502 EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1)
420 bgu,pt %XCC, 90b 503 bgu,pt %XCC, 90b
421 add %i1, 1, %i1 504 add %i1, 1, %i1
422 ret 505 ret
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index ecc5692fa2b4..bb6ff73229e3 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -3,19 +3,19 @@
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */ 4 */
5 5
6#define EX_LD(x) \ 6#define EX_LD(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one; \ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_LD_FP(x) \ 14#define EX_LD_FP(x,y) \
1598: x; \ 1598: x; \
16 .section __ex_table,"a";\ 16 .section __ex_table,"a";\
17 .align 4; \ 17 .align 4; \
18 .word 98b, __retl_one_fp;\ 18 .word 98b, y; \
19 .text; \ 19 .text; \
20 .align 4; 20 .align 4;
21 21
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index 9eea392e44d4..ed92ce739558 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -3,19 +3,19 @@
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */ 4 */
5 5
6#define EX_ST(x) \ 6#define EX_ST(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one; \ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_ST_FP(x) \ 14#define EX_ST_FP(x,y) \
1598: x; \ 1598: x; \
16 .section __ex_table,"a";\ 16 .section __ex_table,"a";\
17 .align 4; \ 17 .align 4; \
18 .word 98b, __retl_one_fp;\ 18 .word 98b, y; \
19 .text; \ 19 .text; \
20 .align 4; 20 .align 4;
21 21
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index 97e1b211090c..4f0d50b33a72 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#ifdef __KERNEL__ 7#ifdef __KERNEL__
8#include <linux/linkage.h>
8#include <asm/visasm.h> 9#include <asm/visasm.h>
9#include <asm/asi.h> 10#include <asm/asi.h>
10#include <asm/export.h> 11#include <asm/export.h>
@@ -24,21 +25,17 @@
24#endif 25#endif
25 26
26#ifndef EX_LD 27#ifndef EX_LD
27#define EX_LD(x) x 28#define EX_LD(x,y) x
28#endif 29#endif
29#ifndef EX_LD_FP 30#ifndef EX_LD_FP
30#define EX_LD_FP(x) x 31#define EX_LD_FP(x,y) x
31#endif 32#endif
32 33
33#ifndef EX_ST 34#ifndef EX_ST
34#define EX_ST(x) x 35#define EX_ST(x,y) x
35#endif 36#endif
36#ifndef EX_ST_FP 37#ifndef EX_ST_FP
37#define EX_ST_FP(x) x 38#define EX_ST_FP(x,y) x
38#endif
39
40#ifndef EX_RETVAL
41#define EX_RETVAL(x) x
42#endif 39#endif
43 40
44#ifndef LOAD 41#ifndef LOAD
@@ -79,53 +76,169 @@
79 faligndata %f7, %f8, %f60; \ 76 faligndata %f7, %f8, %f60; \
80 faligndata %f8, %f9, %f62; 77 faligndata %f8, %f9, %f62;
81 78
82#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \ 79#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \
83 EX_LD_FP(LOAD_BLK(%src, %fdest)); \ 80 EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \
84 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ 81 EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
85 add %src, 0x40, %src; \ 82 add %src, 0x40, %src; \
86 subcc %len, 0x40, %len; \ 83 subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \
87 be,pn %xcc, jmptgt; \ 84 be,pn %xcc, jmptgt; \
88 add %dest, 0x40, %dest; \ 85 add %dest, 0x40, %dest; \
89 86
90#define LOOP_CHUNK1(src, dest, len, branch_dest) \ 87#define LOOP_CHUNK1(src, dest, branch_dest) \
91 MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest) 88 MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest)
92#define LOOP_CHUNK2(src, dest, len, branch_dest) \ 89#define LOOP_CHUNK2(src, dest, branch_dest) \
93 MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest) 90 MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
94#define LOOP_CHUNK3(src, dest, len, branch_dest) \ 91#define LOOP_CHUNK3(src, dest, branch_dest) \
95 MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest) 92 MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)
96 93
97#define DO_SYNC membar #Sync; 94#define DO_SYNC membar #Sync;
98#define STORE_SYNC(dest, fsrc) \ 95#define STORE_SYNC(dest, fsrc) \
99 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ 96 EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
100 add %dest, 0x40, %dest; \ 97 add %dest, 0x40, %dest; \
101 DO_SYNC 98 DO_SYNC
102 99
103#define STORE_JUMP(dest, fsrc, target) \ 100#define STORE_JUMP(dest, fsrc, target) \
104 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ 101 EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \
105 add %dest, 0x40, %dest; \ 102 add %dest, 0x40, %dest; \
106 ba,pt %xcc, target; \ 103 ba,pt %xcc, target; \
107 nop; 104 nop;
108 105
109#define FINISH_VISCHUNK(dest, f0, f1, left) \ 106#define FINISH_VISCHUNK(dest, f0, f1) \
110 subcc %left, 8, %left;\ 107 subcc %g3, 8, %g3; \
111 bl,pn %xcc, 95f; \ 108 bl,pn %xcc, 95f; \
112 faligndata %f0, %f1, %f48; \ 109 faligndata %f0, %f1, %f48; \
113 EX_ST_FP(STORE(std, %f48, %dest)); \ 110 EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \
114 add %dest, 8, %dest; 111 add %dest, 8, %dest;
115 112
116#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ 113#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
117 subcc %left, 8, %left; \ 114 subcc %g3, 8, %g3; \
118 bl,pn %xcc, 95f; \ 115 bl,pn %xcc, 95f; \
119 fsrc2 %f0, %f1; 116 fsrc2 %f0, %f1;
120 117
121#define UNEVEN_VISCHUNK(dest, f0, f1, left) \ 118#define UNEVEN_VISCHUNK(dest, f0, f1) \
122 UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ 119 UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
123 ba,a,pt %xcc, 93f; 120 ba,a,pt %xcc, 93f;
124 121
125 .register %g2,#scratch 122 .register %g2,#scratch
126 .register %g3,#scratch 123 .register %g3,#scratch
127 124
128 .text 125 .text
126#ifndef EX_RETVAL
127#define EX_RETVAL(x) x
128ENTRY(U1_g1_1_fp)
129 VISExitHalf
130 add %g1, 1, %g1
131 add %g1, %g2, %g1
132 retl
133 add %g1, %o2, %o0
134ENDPROC(U1_g1_1_fp)
135ENTRY(U1_g2_0_fp)
136 VISExitHalf
137 retl
138 add %g2, %o2, %o0
139ENDPROC(U1_g2_0_fp)
140ENTRY(U1_g2_8_fp)
141 VISExitHalf
142 add %g2, 8, %g2
143 retl
144 add %g2, %o2, %o0
145ENDPROC(U1_g2_8_fp)
146ENTRY(U1_gs_0_fp)
147 VISExitHalf
148 add %GLOBAL_SPARE, %g3, %o0
149 retl
150 add %o0, %o2, %o0
151ENDPROC(U1_gs_0_fp)
152ENTRY(U1_gs_80_fp)
153 VISExitHalf
154 add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
155 add %GLOBAL_SPARE, %g3, %o0
156 retl
157 add %o0, %o2, %o0
158ENDPROC(U1_gs_80_fp)
159ENTRY(U1_gs_40_fp)
160 VISExitHalf
161 add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
162 add %GLOBAL_SPARE, %g3, %o0
163 retl
164 add %o0, %o2, %o0
165ENDPROC(U1_gs_40_fp)
166ENTRY(U1_g3_0_fp)
167 VISExitHalf
168 retl
169 add %g3, %o2, %o0
170ENDPROC(U1_g3_0_fp)
171ENTRY(U1_g3_8_fp)
172 VISExitHalf
173 add %g3, 8, %g3
174 retl
175 add %g3, %o2, %o0
176ENDPROC(U1_g3_8_fp)
177ENTRY(U1_o2_0_fp)
178 VISExitHalf
179 retl
180 mov %o2, %o0
181ENDPROC(U1_o2_0_fp)
182ENTRY(U1_o2_1_fp)
183 VISExitHalf
184 retl
185 add %o2, 1, %o0
186ENDPROC(U1_o2_1_fp)
187ENTRY(U1_gs_0)
188 VISExitHalf
189 retl
190 add %GLOBAL_SPARE, %o2, %o0
191ENDPROC(U1_gs_0)
192ENTRY(U1_gs_8)
193 VISExitHalf
194 add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
195 retl
196 add %GLOBAL_SPARE, 0x8, %o0
197ENDPROC(U1_gs_8)
198ENTRY(U1_gs_10)
199 VISExitHalf
200 add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
201 retl
202 add %GLOBAL_SPARE, 0x10, %o0
203ENDPROC(U1_gs_10)
204ENTRY(U1_o2_0)
205 retl
206 mov %o2, %o0
207ENDPROC(U1_o2_0)
208ENTRY(U1_o2_8)
209 retl
210 add %o2, 8, %o0
211ENDPROC(U1_o2_8)
212ENTRY(U1_o2_4)
213 retl
214 add %o2, 4, %o0
215ENDPROC(U1_o2_4)
216ENTRY(U1_o2_1)
217 retl
218 add %o2, 1, %o0
219ENDPROC(U1_o2_1)
220ENTRY(U1_g1_0)
221 retl
222 add %g1, %o2, %o0
223ENDPROC(U1_g1_0)
224ENTRY(U1_g1_1)
225 add %g1, 1, %g1
226 retl
227 add %g1, %o2, %o0
228ENDPROC(U1_g1_1)
229ENTRY(U1_gs_0_o2_adj)
230 and %o2, 7, %o2
231 retl
232 add %GLOBAL_SPARE, %o2, %o0
233ENDPROC(U1_gs_0_o2_adj)
234ENTRY(U1_gs_8_o2_adj)
235 and %o2, 7, %o2
236 add %GLOBAL_SPARE, 8, %GLOBAL_SPARE
237 retl
238 add %GLOBAL_SPARE, %o2, %o0
239ENDPROC(U1_gs_8_o2_adj)
240#endif
241
129 .align 64 242 .align 64
130 243
131 .globl FUNC_NAME 244 .globl FUNC_NAME
@@ -167,8 +280,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
167 and %g2, 0x38, %g2 280 and %g2, 0x38, %g2
168 281
1691: subcc %g1, 0x1, %g1 2821: subcc %g1, 0x1, %g1
170 EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) 283 EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
171 EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) 284 EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)
172 bgu,pt %XCC, 1b 285 bgu,pt %XCC, 1b
173 add %o1, 0x1, %o1 286 add %o1, 0x1, %o1
174 287
@@ -179,20 +292,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
179 be,pt %icc, 3f 292 be,pt %icc, 3f
180 alignaddr %o1, %g0, %o1 293 alignaddr %o1, %g0, %o1
181 294
182 EX_LD_FP(LOAD(ldd, %o1, %f4)) 295 EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
1831: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) 2961: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
184 add %o1, 0x8, %o1 297 add %o1, 0x8, %o1
185 subcc %g2, 0x8, %g2 298 subcc %g2, 0x8, %g2
186 faligndata %f4, %f6, %f0 299 faligndata %f4, %f6, %f0
187 EX_ST_FP(STORE(std, %f0, %o0)) 300 EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
188 be,pn %icc, 3f 301 be,pn %icc, 3f
189 add %o0, 0x8, %o0 302 add %o0, 0x8, %o0
190 303
191 EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) 304 EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
192 add %o1, 0x8, %o1 305 add %o1, 0x8, %o1
193 subcc %g2, 0x8, %g2 306 subcc %g2, 0x8, %g2
194 faligndata %f6, %f4, %f0 307 faligndata %f6, %f4, %f0
195 EX_ST_FP(STORE(std, %f0, %o0)) 308 EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
196 bne,pt %icc, 1b 309 bne,pt %icc, 1b
197 add %o0, 0x8, %o0 310 add %o0, 0x8, %o0
198 311
@@ -215,13 +328,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
215 add %g1, %GLOBAL_SPARE, %g1 328 add %g1, %GLOBAL_SPARE, %g1
216 subcc %o2, %g3, %o2 329 subcc %o2, %g3, %o2
217 330
218 EX_LD_FP(LOAD_BLK(%o1, %f0)) 331 EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
219 add %o1, 0x40, %o1 332 add %o1, 0x40, %o1
220 add %g1, %g3, %g1 333 add %g1, %g3, %g1
221 EX_LD_FP(LOAD_BLK(%o1, %f16)) 334 EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
222 add %o1, 0x40, %o1 335 add %o1, 0x40, %o1
223 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE 336 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
224 EX_LD_FP(LOAD_BLK(%o1, %f32)) 337 EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
225 add %o1, 0x40, %o1 338 add %o1, 0x40, %o1
226 339
227 /* There are 8 instances of the unrolled loop, 340 /* There are 8 instances of the unrolled loop,
@@ -241,11 +354,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
241 354
242 .align 64 355 .align 64
2431: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) 3561: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
244 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) 357 LOOP_CHUNK1(o1, o0, 1f)
245 FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) 358 FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
246 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) 359 LOOP_CHUNK2(o1, o0, 2f)
247 FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) 360 FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
248 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) 361 LOOP_CHUNK3(o1, o0, 3f)
249 ba,pt %xcc, 1b+4 362 ba,pt %xcc, 1b+4
250 faligndata %f0, %f2, %f48 363 faligndata %f0, %f2, %f48
2511: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) 3641: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
@@ -262,11 +375,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
262 STORE_JUMP(o0, f48, 56f) 375 STORE_JUMP(o0, f48, 56f)
263 376
2641: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) 3771: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
265 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) 378 LOOP_CHUNK1(o1, o0, 1f)
266 FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) 379 FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
267 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) 380 LOOP_CHUNK2(o1, o0, 2f)
268 FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) 381 FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
269 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) 382 LOOP_CHUNK3(o1, o0, 3f)
270 ba,pt %xcc, 1b+4 383 ba,pt %xcc, 1b+4
271 faligndata %f2, %f4, %f48 384 faligndata %f2, %f4, %f48
2721: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) 3851: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
@@ -283,11 +396,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
283 STORE_JUMP(o0, f48, 57f) 396 STORE_JUMP(o0, f48, 57f)
284 397
2851: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) 3981: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
286 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) 399 LOOP_CHUNK1(o1, o0, 1f)
287 FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) 400 FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
288 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) 401 LOOP_CHUNK2(o1, o0, 2f)
289 FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) 402 FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
290 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) 403 LOOP_CHUNK3(o1, o0, 3f)
291 ba,pt %xcc, 1b+4 404 ba,pt %xcc, 1b+4
292 faligndata %f4, %f6, %f48 405 faligndata %f4, %f6, %f48
2931: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) 4061: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
@@ -304,11 +417,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
304 STORE_JUMP(o0, f48, 58f) 417 STORE_JUMP(o0, f48, 58f)
305 418
3061: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) 4191: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
307 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) 420 LOOP_CHUNK1(o1, o0, 1f)
308 FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) 421 FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
309 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) 422 LOOP_CHUNK2(o1, o0, 2f)
310 FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) 423 FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
311 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) 424 LOOP_CHUNK3(o1, o0, 3f)
312 ba,pt %xcc, 1b+4 425 ba,pt %xcc, 1b+4
313 faligndata %f6, %f8, %f48 426 faligndata %f6, %f8, %f48
3141: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) 4271: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
@@ -325,11 +438,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
325 STORE_JUMP(o0, f48, 59f) 438 STORE_JUMP(o0, f48, 59f)
326 439
3271: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) 4401: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
328 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) 441 LOOP_CHUNK1(o1, o0, 1f)
329 FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) 442 FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
330 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) 443 LOOP_CHUNK2(o1, o0, 2f)
331 FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) 444 FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
332 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) 445 LOOP_CHUNK3(o1, o0, 3f)
333 ba,pt %xcc, 1b+4 446 ba,pt %xcc, 1b+4
334 faligndata %f8, %f10, %f48 447 faligndata %f8, %f10, %f48
3351: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) 4481: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
@@ -346,11 +459,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
346 STORE_JUMP(o0, f48, 60f) 459 STORE_JUMP(o0, f48, 60f)
347 460
3481: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) 4611: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
349 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) 462 LOOP_CHUNK1(o1, o0, 1f)
350 FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) 463 FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
351 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) 464 LOOP_CHUNK2(o1, o0, 2f)
352 FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) 465 FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
353 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) 466 LOOP_CHUNK3(o1, o0, 3f)
354 ba,pt %xcc, 1b+4 467 ba,pt %xcc, 1b+4
355 faligndata %f10, %f12, %f48 468 faligndata %f10, %f12, %f48
3561: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) 4691: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
@@ -367,11 +480,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
367 STORE_JUMP(o0, f48, 61f) 480 STORE_JUMP(o0, f48, 61f)
368 481
3691: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) 4821: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
370 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) 483 LOOP_CHUNK1(o1, o0, 1f)
371 FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) 484 FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
372 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) 485 LOOP_CHUNK2(o1, o0, 2f)
373 FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) 486 FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
374 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) 487 LOOP_CHUNK3(o1, o0, 3f)
375 ba,pt %xcc, 1b+4 488 ba,pt %xcc, 1b+4
376 faligndata %f12, %f14, %f48 489 faligndata %f12, %f14, %f48
3771: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) 4901: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
@@ -388,11 +501,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
388 STORE_JUMP(o0, f48, 62f) 501 STORE_JUMP(o0, f48, 62f)
389 502
3901: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) 5031: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
391 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) 504 LOOP_CHUNK1(o1, o0, 1f)
392 FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) 505 FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
393 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) 506 LOOP_CHUNK2(o1, o0, 2f)
394 FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) 507 FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
395 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) 508 LOOP_CHUNK3(o1, o0, 3f)
396 ba,pt %xcc, 1b+4 509 ba,pt %xcc, 1b+4
397 faligndata %f14, %f16, %f48 510 faligndata %f14, %f16, %f48
3981: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) 5111: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
@@ -408,53 +521,53 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
408 FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) 521 FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
409 STORE_JUMP(o0, f48, 63f) 522 STORE_JUMP(o0, f48, 63f)
410 523
41140: FINISH_VISCHUNK(o0, f0, f2, g3) 52440: FINISH_VISCHUNK(o0, f0, f2)
41241: FINISH_VISCHUNK(o0, f2, f4, g3) 52541: FINISH_VISCHUNK(o0, f2, f4)
41342: FINISH_VISCHUNK(o0, f4, f6, g3) 52642: FINISH_VISCHUNK(o0, f4, f6)
41443: FINISH_VISCHUNK(o0, f6, f8, g3) 52743: FINISH_VISCHUNK(o0, f6, f8)
41544: FINISH_VISCHUNK(o0, f8, f10, g3) 52844: FINISH_VISCHUNK(o0, f8, f10)
41645: FINISH_VISCHUNK(o0, f10, f12, g3) 52945: FINISH_VISCHUNK(o0, f10, f12)
41746: FINISH_VISCHUNK(o0, f12, f14, g3) 53046: FINISH_VISCHUNK(o0, f12, f14)
41847: UNEVEN_VISCHUNK(o0, f14, f0, g3) 53147: UNEVEN_VISCHUNK(o0, f14, f0)
41948: FINISH_VISCHUNK(o0, f16, f18, g3) 53248: FINISH_VISCHUNK(o0, f16, f18)
42049: FINISH_VISCHUNK(o0, f18, f20, g3) 53349: FINISH_VISCHUNK(o0, f18, f20)
42150: FINISH_VISCHUNK(o0, f20, f22, g3) 53450: FINISH_VISCHUNK(o0, f20, f22)
42251: FINISH_VISCHUNK(o0, f22, f24, g3) 53551: FINISH_VISCHUNK(o0, f22, f24)
42352: FINISH_VISCHUNK(o0, f24, f26, g3) 53652: FINISH_VISCHUNK(o0, f24, f26)
42453: FINISH_VISCHUNK(o0, f26, f28, g3) 53753: FINISH_VISCHUNK(o0, f26, f28)
42554: FINISH_VISCHUNK(o0, f28, f30, g3) 53854: FINISH_VISCHUNK(o0, f28, f30)
42655: UNEVEN_VISCHUNK(o0, f30, f0, g3) 53955: UNEVEN_VISCHUNK(o0, f30, f0)
42756: FINISH_VISCHUNK(o0, f32, f34, g3) 54056: FINISH_VISCHUNK(o0, f32, f34)
42857: FINISH_VISCHUNK(o0, f34, f36, g3) 54157: FINISH_VISCHUNK(o0, f34, f36)
42958: FINISH_VISCHUNK(o0, f36, f38, g3) 54258: FINISH_VISCHUNK(o0, f36, f38)
43059: FINISH_VISCHUNK(o0, f38, f40, g3) 54359: FINISH_VISCHUNK(o0, f38, f40)
43160: FINISH_VISCHUNK(o0, f40, f42, g3) 54460: FINISH_VISCHUNK(o0, f40, f42)
43261: FINISH_VISCHUNK(o0, f42, f44, g3) 54561: FINISH_VISCHUNK(o0, f42, f44)
43362: FINISH_VISCHUNK(o0, f44, f46, g3) 54662: FINISH_VISCHUNK(o0, f44, f46)
43463: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3) 54763: UNEVEN_VISCHUNK_LAST(o0, f46, f0)
435 548
43693: EX_LD_FP(LOAD(ldd, %o1, %f2)) 54993: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
437 add %o1, 8, %o1 550 add %o1, 8, %o1
438 subcc %g3, 8, %g3 551 subcc %g3, 8, %g3
439 faligndata %f0, %f2, %f8 552 faligndata %f0, %f2, %f8
440 EX_ST_FP(STORE(std, %f8, %o0)) 553 EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
441 bl,pn %xcc, 95f 554 bl,pn %xcc, 95f
442 add %o0, 8, %o0 555 add %o0, 8, %o0
443 EX_LD_FP(LOAD(ldd, %o1, %f0)) 556 EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
444 add %o1, 8, %o1 557 add %o1, 8, %o1
445 subcc %g3, 8, %g3 558 subcc %g3, 8, %g3
446 faligndata %f2, %f0, %f8 559 faligndata %f2, %f0, %f8
447 EX_ST_FP(STORE(std, %f8, %o0)) 560 EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
448 bge,pt %xcc, 93b 561 bge,pt %xcc, 93b
449 add %o0, 8, %o0 562 add %o0, 8, %o0
450 563
45195: brz,pt %o2, 2f 56495: brz,pt %o2, 2f
452 mov %g1, %o1 565 mov %g1, %o1
453 566
4541: EX_LD_FP(LOAD(ldub, %o1, %o3)) 5671: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
455 add %o1, 1, %o1 568 add %o1, 1, %o1
456 subcc %o2, 1, %o2 569 subcc %o2, 1, %o2
457 EX_ST_FP(STORE(stb, %o3, %o0)) 570 EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)
458 bne,pt %xcc, 1b 571 bne,pt %xcc, 1b
459 add %o0, 1, %o0 572 add %o0, 1, %o0
460 573
@@ -470,27 +583,27 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
470 583
47172: andn %o2, 0xf, %GLOBAL_SPARE 58472: andn %o2, 0xf, %GLOBAL_SPARE
472 and %o2, 0xf, %o2 585 and %o2, 0xf, %o2
4731: EX_LD(LOAD(ldx, %o1 + 0x00, %o5)) 5861: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
474 EX_LD(LOAD(ldx, %o1 + 0x08, %g1)) 587 EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
475 subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE 588 subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
476 EX_ST(STORE(stx, %o5, %o1 + %o3)) 589 EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
477 add %o1, 0x8, %o1 590 add %o1, 0x8, %o1
478 EX_ST(STORE(stx, %g1, %o1 + %o3)) 591 EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)
479 bgu,pt %XCC, 1b 592 bgu,pt %XCC, 1b
480 add %o1, 0x8, %o1 593 add %o1, 0x8, %o1
48173: andcc %o2, 0x8, %g0 59473: andcc %o2, 0x8, %g0
482 be,pt %XCC, 1f 595 be,pt %XCC, 1f
483 nop 596 nop
484 EX_LD(LOAD(ldx, %o1, %o5)) 597 EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
485 sub %o2, 0x8, %o2 598 sub %o2, 0x8, %o2
486 EX_ST(STORE(stx, %o5, %o1 + %o3)) 599 EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
487 add %o1, 0x8, %o1 600 add %o1, 0x8, %o1
4881: andcc %o2, 0x4, %g0 6011: andcc %o2, 0x4, %g0
489 be,pt %XCC, 1f 602 be,pt %XCC, 1f
490 nop 603 nop
491 EX_LD(LOAD(lduw, %o1, %o5)) 604 EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
492 sub %o2, 0x4, %o2 605 sub %o2, 0x4, %o2
493 EX_ST(STORE(stw, %o5, %o1 + %o3)) 606 EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)
494 add %o1, 0x4, %o1 607 add %o1, 0x4, %o1
4951: cmp %o2, 0 6081: cmp %o2, 0
496 be,pt %XCC, 85f 609 be,pt %XCC, 85f
@@ -504,9 +617,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
504 sub %g0, %g1, %g1 617 sub %g0, %g1, %g1
505 sub %o2, %g1, %o2 618 sub %o2, %g1, %o2
506 619
5071: EX_LD(LOAD(ldub, %o1, %o5)) 6201: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
508 subcc %g1, 1, %g1 621 subcc %g1, 1, %g1
509 EX_ST(STORE(stb, %o5, %o1 + %o3)) 622 EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)
510 bgu,pt %icc, 1b 623 bgu,pt %icc, 1b
511 add %o1, 1, %o1 624 add %o1, 1, %o1
512 625
@@ -522,16 +635,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
522 635
5238: mov 64, %o3 6368: mov 64, %o3
524 andn %o1, 0x7, %o1 637 andn %o1, 0x7, %o1
525 EX_LD(LOAD(ldx, %o1, %g2)) 638 EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
526 sub %o3, %g1, %o3 639 sub %o3, %g1, %o3
527 andn %o2, 0x7, %GLOBAL_SPARE 640 andn %o2, 0x7, %GLOBAL_SPARE
528 sllx %g2, %g1, %g2 641 sllx %g2, %g1, %g2
5291: EX_LD(LOAD(ldx, %o1 + 0x8, %g3)) 6421: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
530 subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE 643 subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
531 add %o1, 0x8, %o1 644 add %o1, 0x8, %o1
532 srlx %g3, %o3, %o5 645 srlx %g3, %o3, %o5
533 or %o5, %g2, %o5 646 or %o5, %g2, %o5
534 EX_ST(STORE(stx, %o5, %o0)) 647 EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)
535 add %o0, 0x8, %o0 648 add %o0, 0x8, %o0
536 bgu,pt %icc, 1b 649 bgu,pt %icc, 1b
537 sllx %g3, %g1, %g2 650 sllx %g3, %g1, %g2
@@ -549,9 +662,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
549 bne,pn %XCC, 90f 662 bne,pn %XCC, 90f
550 sub %o0, %o1, %o3 663 sub %o0, %o1, %o3
551 664
5521: EX_LD(LOAD(lduw, %o1, %g1)) 6651: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
553 subcc %o2, 4, %o2 666 subcc %o2, 4, %o2
554 EX_ST(STORE(stw, %g1, %o1 + %o3)) 667 EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
555 bgu,pt %XCC, 1b 668 bgu,pt %XCC, 1b
556 add %o1, 4, %o1 669 add %o1, 4, %o1
557 670
@@ -559,9 +672,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
559 mov EX_RETVAL(%o4), %o0 672 mov EX_RETVAL(%o4), %o0
560 673
561 .align 32 674 .align 32
56290: EX_LD(LOAD(ldub, %o1, %g1)) 67590: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
563 subcc %o2, 1, %o2 676 subcc %o2, 1, %o2
564 EX_ST(STORE(stb, %g1, %o1 + %o3)) 677 EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
565 bgu,pt %XCC, 90b 678 bgu,pt %XCC, 90b
566 add %o1, 1, %o1 679 add %o1, 1, %o1
567 retl 680 retl
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index 88ad73d86fe4..db73010a1af8 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -3,19 +3,19 @@
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */ 4 */
5 5
6#define EX_LD(x) \ 6#define EX_LD(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one; \ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_LD_FP(x) \ 14#define EX_LD_FP(x,y) \
1598: x; \ 1598: x; \
16 .section __ex_table,"a";\ 16 .section __ex_table,"a";\
17 .align 4; \ 17 .align 4; \
18 .word 98b, __retl_one_fp;\ 18 .word 98b, y##_fp; \
19 .text; \ 19 .text; \
20 .align 4; 20 .align 4;
21 21
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index 845139d75537..c4ee858e352a 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -3,19 +3,19 @@
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */ 4 */
5 5
6#define EX_ST(x) \ 6#define EX_ST(x,y) \
798: x; \ 798: x; \
8 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
9 .align 4; \ 9 .align 4; \
10 .word 98b, __retl_one; \ 10 .word 98b, y; \
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_ST_FP(x) \ 14#define EX_ST_FP(x,y) \
1598: x; \ 1598: x; \
16 .section __ex_table,"a";\ 16 .section __ex_table,"a";\
17 .align 4; \ 17 .align 4; \
18 .word 98b, __retl_one_fp;\ 18 .word 98b, y##_fp; \
19 .text; \ 19 .text; \
20 .align 4; 20 .align 4;
21 21
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 491ee69e4995..54f98706b03b 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7#include <linux/linkage.h>
7#include <asm/visasm.h> 8#include <asm/visasm.h>
8#include <asm/asi.h> 9#include <asm/asi.h>
9#define GLOBAL_SPARE %g7 10#define GLOBAL_SPARE %g7
@@ -22,21 +23,17 @@
22#endif 23#endif
23 24
24#ifndef EX_LD 25#ifndef EX_LD
25#define EX_LD(x) x 26#define EX_LD(x,y) x
26#endif 27#endif
27#ifndef EX_LD_FP 28#ifndef EX_LD_FP
28#define EX_LD_FP(x) x 29#define EX_LD_FP(x,y) x
29#endif 30#endif
30 31
31#ifndef EX_ST 32#ifndef EX_ST
32#define EX_ST(x) x 33#define EX_ST(x,y) x
33#endif 34#endif
34#ifndef EX_ST_FP 35#ifndef EX_ST_FP
35#define EX_ST_FP(x) x 36#define EX_ST_FP(x,y) x
36#endif
37
38#ifndef EX_RETVAL
39#define EX_RETVAL(x) x
40#endif 37#endif
41 38
42#ifndef LOAD 39#ifndef LOAD
@@ -77,6 +74,87 @@
77 */ 74 */
78 75
79 .text 76 .text
77#ifndef EX_RETVAL
78#define EX_RETVAL(x) x
79__restore_fp:
80 VISExitHalf
81 retl
82 nop
83ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
84 add %g1, 1, %g1
85 add %g2, %g1, %g2
86 ba,pt %xcc, __restore_fp
87 add %o2, %g2, %o0
88ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
89ENTRY(U3_retl_o2_plus_g2_fp)
90 ba,pt %xcc, __restore_fp
91 add %o2, %g2, %o0
92ENDPROC(U3_retl_o2_plus_g2_fp)
93ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
94 add %g2, 8, %g2
95 ba,pt %xcc, __restore_fp
96 add %o2, %g2, %o0
97ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
98ENTRY(U3_retl_o2)
99 retl
100 mov %o2, %o0
101ENDPROC(U3_retl_o2)
102ENTRY(U3_retl_o2_plus_1)
103 retl
104 add %o2, 1, %o0
105ENDPROC(U3_retl_o2_plus_1)
106ENTRY(U3_retl_o2_plus_4)
107 retl
108 add %o2, 4, %o0
109ENDPROC(U3_retl_o2_plus_4)
110ENTRY(U3_retl_o2_plus_8)
111 retl
112 add %o2, 8, %o0
113ENDPROC(U3_retl_o2_plus_8)
114ENTRY(U3_retl_o2_plus_g1_plus_1)
115 add %g1, 1, %g1
116 retl
117 add %o2, %g1, %o0
118ENDPROC(U3_retl_o2_plus_g1_plus_1)
119ENTRY(U3_retl_o2_fp)
120 ba,pt %xcc, __restore_fp
121 mov %o2, %o0
122ENDPROC(U3_retl_o2_fp)
123ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
124 sll %o3, 6, %o3
125 add %o3, 0x80, %o3
126 ba,pt %xcc, __restore_fp
127 add %o2, %o3, %o0
128ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
129ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
130 sll %o3, 6, %o3
131 add %o3, 0x40, %o3
132 ba,pt %xcc, __restore_fp
133 add %o2, %o3, %o0
134ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
135ENTRY(U3_retl_o2_plus_GS_plus_0x10)
136 add GLOBAL_SPARE, 0x10, GLOBAL_SPARE
137 retl
138 add %o2, GLOBAL_SPARE, %o0
139ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
140ENTRY(U3_retl_o2_plus_GS_plus_0x08)
141 add GLOBAL_SPARE, 0x08, GLOBAL_SPARE
142 retl
143 add %o2, GLOBAL_SPARE, %o0
144ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
145ENTRY(U3_retl_o2_and_7_plus_GS)
146 and %o2, 7, %o2
147 retl
148 add %o2, GLOBAL_SPARE, %o2
149ENDPROC(U3_retl_o2_and_7_plus_GS)
150ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
151 add GLOBAL_SPARE, 8, GLOBAL_SPARE
152 and %o2, 7, %o2
153 retl
154 add %o2, GLOBAL_SPARE, %o2
155ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
156#endif
157
80 .align 64 158 .align 64
81 159
82 /* The cheetah's flexible spine, oversized liver, enlarged heart, 160 /* The cheetah's flexible spine, oversized liver, enlarged heart,
@@ -126,8 +204,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
126 and %g2, 0x38, %g2 204 and %g2, 0x38, %g2
127 205
1281: subcc %g1, 0x1, %g1 2061: subcc %g1, 0x1, %g1
129 EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) 207 EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1)
130 EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) 208 EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1)
131 bgu,pt %XCC, 1b 209 bgu,pt %XCC, 1b
132 add %o1, 0x1, %o1 210 add %o1, 0x1, %o1
133 211
@@ -138,20 +216,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
138 be,pt %icc, 3f 216 be,pt %icc, 3f
139 alignaddr %o1, %g0, %o1 217 alignaddr %o1, %g0, %o1
140 218
141 EX_LD_FP(LOAD(ldd, %o1, %f4)) 219 EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2)
1421: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) 2201: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2)
143 add %o1, 0x8, %o1 221 add %o1, 0x8, %o1
144 subcc %g2, 0x8, %g2 222 subcc %g2, 0x8, %g2
145 faligndata %f4, %f6, %f0 223 faligndata %f4, %f6, %f0
146 EX_ST_FP(STORE(std, %f0, %o0)) 224 EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8)
147 be,pn %icc, 3f 225 be,pn %icc, 3f
148 add %o0, 0x8, %o0 226 add %o0, 0x8, %o0
149 227
150 EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) 228 EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2)
151 add %o1, 0x8, %o1 229 add %o1, 0x8, %o1
152 subcc %g2, 0x8, %g2 230 subcc %g2, 0x8, %g2
153 faligndata %f6, %f4, %f2 231 faligndata %f6, %f4, %f2
154 EX_ST_FP(STORE(std, %f2, %o0)) 232 EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8)
155 bne,pt %icc, 1b 233 bne,pt %icc, 1b
156 add %o0, 0x8, %o0 234 add %o0, 0x8, %o0
157 235
@@ -161,25 +239,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
161 LOAD(prefetch, %o1 + 0x080, #one_read) 239 LOAD(prefetch, %o1 + 0x080, #one_read)
162 LOAD(prefetch, %o1 + 0x0c0, #one_read) 240 LOAD(prefetch, %o1 + 0x0c0, #one_read)
163 LOAD(prefetch, %o1 + 0x100, #one_read) 241 LOAD(prefetch, %o1 + 0x100, #one_read)
164 EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0)) 242 EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2)
165 LOAD(prefetch, %o1 + 0x140, #one_read) 243 LOAD(prefetch, %o1 + 0x140, #one_read)
166 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) 244 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2)
167 LOAD(prefetch, %o1 + 0x180, #one_read) 245 LOAD(prefetch, %o1 + 0x180, #one_read)
168 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) 246 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2)
169 LOAD(prefetch, %o1 + 0x1c0, #one_read) 247 LOAD(prefetch, %o1 + 0x1c0, #one_read)
170 faligndata %f0, %f2, %f16 248 faligndata %f0, %f2, %f16
171 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) 249 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2)
172 faligndata %f2, %f4, %f18 250 faligndata %f2, %f4, %f18
173 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) 251 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2)
174 faligndata %f4, %f6, %f20 252 faligndata %f4, %f6, %f20
175 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) 253 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2)
176 faligndata %f6, %f8, %f22 254 faligndata %f6, %f8, %f22
177 255
178 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) 256 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2)
179 faligndata %f8, %f10, %f24 257 faligndata %f8, %f10, %f24
180 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) 258 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2)
181 faligndata %f10, %f12, %f26 259 faligndata %f10, %f12, %f26
182 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) 260 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
183 261
184 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE 262 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
185 add %o1, 0x40, %o1 263 add %o1, 0x40, %o1
@@ -190,26 +268,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
190 268
191 .align 64 269 .align 64
1921: 2701:
193 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) 271 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
194 faligndata %f12, %f14, %f28 272 faligndata %f12, %f14, %f28
195 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) 273 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
196 faligndata %f14, %f0, %f30 274 faligndata %f14, %f0, %f30
197 EX_ST_FP(STORE_BLK(%f16, %o0)) 275 EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
198 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) 276 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
199 faligndata %f0, %f2, %f16 277 faligndata %f0, %f2, %f16
200 add %o0, 0x40, %o0 278 add %o0, 0x40, %o0
201 279
202 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) 280 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
203 faligndata %f2, %f4, %f18 281 faligndata %f2, %f4, %f18
204 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) 282 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
205 faligndata %f4, %f6, %f20 283 faligndata %f4, %f6, %f20
206 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) 284 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
207 subcc %o3, 0x01, %o3 285 subcc %o3, 0x01, %o3
208 faligndata %f6, %f8, %f22 286 faligndata %f6, %f8, %f22
209 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) 287 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80)
210 288
211 faligndata %f8, %f10, %f24 289 faligndata %f8, %f10, %f24
212 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) 290 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
213 LOAD(prefetch, %o1 + 0x1c0, #one_read) 291 LOAD(prefetch, %o1 + 0x1c0, #one_read)
214 faligndata %f10, %f12, %f26 292 faligndata %f10, %f12, %f26
215 bg,pt %XCC, 1b 293 bg,pt %XCC, 1b
@@ -217,29 +295,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
217 295
218 /* Finally we copy the last full 64-byte block. */ 296 /* Finally we copy the last full 64-byte block. */
2192: 2972:
220 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) 298 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
221 faligndata %f12, %f14, %f28 299 faligndata %f12, %f14, %f28
222 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) 300 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
223 faligndata %f14, %f0, %f30 301 faligndata %f14, %f0, %f30
224 EX_ST_FP(STORE_BLK(%f16, %o0)) 302 EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
225 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) 303 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
226 faligndata %f0, %f2, %f16 304 faligndata %f0, %f2, %f16
227 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) 305 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
228 faligndata %f2, %f4, %f18 306 faligndata %f2, %f4, %f18
229 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) 307 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
230 faligndata %f4, %f6, %f20 308 faligndata %f4, %f6, %f20
231 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) 309 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
232 faligndata %f6, %f8, %f22 310 faligndata %f6, %f8, %f22
233 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) 311 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40)
234 faligndata %f8, %f10, %f24 312 faligndata %f8, %f10, %f24
235 cmp %g1, 0 313 cmp %g1, 0
236 be,pt %XCC, 1f 314 be,pt %XCC, 1f
237 add %o0, 0x40, %o0 315 add %o0, 0x40, %o0
238 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) 316 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
2391: faligndata %f10, %f12, %f26 3171: faligndata %f10, %f12, %f26
240 faligndata %f12, %f14, %f28 318 faligndata %f12, %f14, %f28
241 faligndata %f14, %f0, %f30 319 faligndata %f14, %f0, %f30
242 EX_ST_FP(STORE_BLK(%f16, %o0)) 320 EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
243 add %o0, 0x40, %o0 321 add %o0, 0x40, %o0
244 add %o1, 0x40, %o1 322 add %o1, 0x40, %o1
245 membar #Sync 323 membar #Sync
@@ -259,20 +337,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
259 337
260 sub %o2, %g2, %o2 338 sub %o2, %g2, %o2
261 be,a,pt %XCC, 1f 339 be,a,pt %XCC, 1f
262 EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0)) 340 EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2)
263 341
2641: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2)) 3421: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2)
265 add %o1, 0x8, %o1 343 add %o1, 0x8, %o1
266 subcc %g2, 0x8, %g2 344 subcc %g2, 0x8, %g2
267 faligndata %f0, %f2, %f8 345 faligndata %f0, %f2, %f8
268 EX_ST_FP(STORE(std, %f8, %o0)) 346 EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
269 be,pn %XCC, 2f 347 be,pn %XCC, 2f
270 add %o0, 0x8, %o0 348 add %o0, 0x8, %o0
271 EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0)) 349 EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2)
272 add %o1, 0x8, %o1 350 add %o1, 0x8, %o1
273 subcc %g2, 0x8, %g2 351 subcc %g2, 0x8, %g2
274 faligndata %f2, %f0, %f8 352 faligndata %f2, %f0, %f8
275 EX_ST_FP(STORE(std, %f8, %o0)) 353 EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
276 bne,pn %XCC, 1b 354 bne,pn %XCC, 1b
277 add %o0, 0x8, %o0 355 add %o0, 0x8, %o0
278 356
@@ -292,30 +370,33 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
292 andcc %o2, 0x8, %g0 370 andcc %o2, 0x8, %g0
293 be,pt %icc, 1f 371 be,pt %icc, 1f
294 nop 372 nop
295 EX_LD(LOAD(ldx, %o1, %o5)) 373 EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
296 EX_ST(STORE(stx, %o5, %o1 + %o3)) 374 EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
297 add %o1, 0x8, %o1 375 add %o1, 0x8, %o1
376 sub %o2, 8, %o2
298 377
2991: andcc %o2, 0x4, %g0 3781: andcc %o2, 0x4, %g0
300 be,pt %icc, 1f 379 be,pt %icc, 1f
301 nop 380 nop
302 EX_LD(LOAD(lduw, %o1, %o5)) 381 EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
303 EX_ST(STORE(stw, %o5, %o1 + %o3)) 382 EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
304 add %o1, 0x4, %o1 383 add %o1, 0x4, %o1
384 sub %o2, 4, %o2
305 385
3061: andcc %o2, 0x2, %g0 3861: andcc %o2, 0x2, %g0
307 be,pt %icc, 1f 387 be,pt %icc, 1f
308 nop 388 nop
309 EX_LD(LOAD(lduh, %o1, %o5)) 389 EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
310 EX_ST(STORE(sth, %o5, %o1 + %o3)) 390 EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
311 add %o1, 0x2, %o1 391 add %o1, 0x2, %o1
392 sub %o2, 2, %o2
312 393
3131: andcc %o2, 0x1, %g0 3941: andcc %o2, 0x1, %g0
314 be,pt %icc, 85f 395 be,pt %icc, 85f
315 nop 396 nop
316 EX_LD(LOAD(ldub, %o1, %o5)) 397 EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
317 ba,pt %xcc, 85f 398 ba,pt %xcc, 85f
318 EX_ST(STORE(stb, %o5, %o1 + %o3)) 399 EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
319 400
320 .align 64 401 .align 64
32170: /* 16 < len <= 64 */ 40270: /* 16 < len <= 64 */
@@ -326,26 +407,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
326 andn %o2, 0xf, GLOBAL_SPARE 407 andn %o2, 0xf, GLOBAL_SPARE
327 and %o2, 0xf, %o2 408 and %o2, 0xf, %o2
3281: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE 4091: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE
329 EX_LD(LOAD(ldx, %o1 + 0x00, %o5)) 410 EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
330 EX_LD(LOAD(ldx, %o1 + 0x08, %g1)) 411 EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
331 EX_ST(STORE(stx, %o5, %o1 + %o3)) 412 EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
332 add %o1, 0x8, %o1 413 add %o1, 0x8, %o1
333 EX_ST(STORE(stx, %g1, %o1 + %o3)) 414 EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
334 bgu,pt %XCC, 1b 415 bgu,pt %XCC, 1b
335 add %o1, 0x8, %o1 416 add %o1, 0x8, %o1
33673: andcc %o2, 0x8, %g0 41773: andcc %o2, 0x8, %g0
337 be,pt %XCC, 1f 418 be,pt %XCC, 1f
338 nop 419 nop
339 sub %o2, 0x8, %o2 420 sub %o2, 0x8, %o2
340 EX_LD(LOAD(ldx, %o1, %o5)) 421 EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
341 EX_ST(STORE(stx, %o5, %o1 + %o3)) 422 EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
342 add %o1, 0x8, %o1 423 add %o1, 0x8, %o1
3431: andcc %o2, 0x4, %g0 4241: andcc %o2, 0x4, %g0
344 be,pt %XCC, 1f 425 be,pt %XCC, 1f
345 nop 426 nop
346 sub %o2, 0x4, %o2 427 sub %o2, 0x4, %o2
347 EX_LD(LOAD(lduw, %o1, %o5)) 428 EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
348 EX_ST(STORE(stw, %o5, %o1 + %o3)) 429 EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
349 add %o1, 0x4, %o1 430 add %o1, 0x4, %o1
3501: cmp %o2, 0 4311: cmp %o2, 0
351 be,pt %XCC, 85f 432 be,pt %XCC, 85f
@@ -361,8 +442,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
361 sub %o2, %g1, %o2 442 sub %o2, %g1, %o2
362 443
3631: subcc %g1, 1, %g1 4441: subcc %g1, 1, %g1
364 EX_LD(LOAD(ldub, %o1, %o5)) 445 EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
365 EX_ST(STORE(stb, %o5, %o1 + %o3)) 446 EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
366 bgu,pt %icc, 1b 447 bgu,pt %icc, 1b
367 add %o1, 1, %o1 448 add %o1, 1, %o1
368 449
@@ -378,16 +459,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
378 459
3798: mov 64, %o3 4608: mov 64, %o3
380 andn %o1, 0x7, %o1 461 andn %o1, 0x7, %o1
381 EX_LD(LOAD(ldx, %o1, %g2)) 462 EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
382 sub %o3, %g1, %o3 463 sub %o3, %g1, %o3
383 andn %o2, 0x7, GLOBAL_SPARE 464 andn %o2, 0x7, GLOBAL_SPARE
384 sllx %g2, %g1, %g2 465 sllx %g2, %g1, %g2
3851: EX_LD(LOAD(ldx, %o1 + 0x8, %g3)) 4661: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
386 subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE 467 subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE
387 add %o1, 0x8, %o1 468 add %o1, 0x8, %o1
388 srlx %g3, %o3, %o5 469 srlx %g3, %o3, %o5
389 or %o5, %g2, %o5 470 or %o5, %g2, %o5
390 EX_ST(STORE(stx, %o5, %o0)) 471 EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
391 add %o0, 0x8, %o0 472 add %o0, 0x8, %o0
392 bgu,pt %icc, 1b 473 bgu,pt %icc, 1b
393 sllx %g3, %g1, %g2 474 sllx %g3, %g1, %g2
@@ -407,8 +488,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
407 488
4081: 4891:
409 subcc %o2, 4, %o2 490 subcc %o2, 4, %o2
410 EX_LD(LOAD(lduw, %o1, %g1)) 491 EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
411 EX_ST(STORE(stw, %g1, %o1 + %o3)) 492 EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
412 bgu,pt %XCC, 1b 493 bgu,pt %XCC, 1b
413 add %o1, 4, %o1 494 add %o1, 4, %o1
414 495
@@ -418,8 +499,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
418 .align 32 499 .align 32
41990: 50090:
420 subcc %o2, 1, %o2 501 subcc %o2, 1, %o2
421 EX_LD(LOAD(ldub, %o1, %g1)) 502 EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
422 EX_ST(STORE(stb, %g1, %o1 + %o3)) 503 EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
423 bgu,pt %XCC, 90b 504 bgu,pt %XCC, 90b
424 add %o1, 1, %o1 505 add %o1, 1, %o1
425 retl 506 retl
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 482de093bdae..0252b218de45 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -9,18 +9,33 @@
9 9
10#define XCC xcc 10#define XCC xcc
11 11
12#define EX(x,y) \ 12#define EX(x,y,z) \
1398: x,y; \ 1398: x,y; \
14 .section __ex_table,"a";\ 14 .section __ex_table,"a";\
15 .align 4; \ 15 .align 4; \
16 .word 98b, __retl_one; \ 16 .word 98b, z; \
17 .text; \ 17 .text; \
18 .align 4; 18 .align 4;
19 19
20#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8)
21#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4)
22#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1)
23
20 .register %g2,#scratch 24 .register %g2,#scratch
21 .register %g3,#scratch 25 .register %g3,#scratch
22 26
23 .text 27 .text
28__retl_o4_plus_8:
29 add %o4, %o2, %o4
30 retl
31 add %o4, 8, %o0
32__retl_o2_plus_4:
33 retl
34 add %o2, 4, %o0
35__retl_o2_plus_1:
36 retl
37 add %o2, 1, %o0
38
24 .align 32 39 .align 32
25 40
26 /* Don't try to get too fancy here, just nice and 41 /* Don't try to get too fancy here, just nice and
@@ -45,8 +60,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
45 andn %o2, 0x7, %o4 60 andn %o2, 0x7, %o4
46 and %o2, 0x7, %o2 61 and %o2, 0x7, %o2
471: subcc %o4, 0x8, %o4 621: subcc %o4, 0x8, %o4
48 EX(ldxa [%o1] %asi, %o5) 63 EX_O4(ldxa [%o1] %asi, %o5)
49 EX(stxa %o5, [%o0] %asi) 64 EX_O4(stxa %o5, [%o0] %asi)
50 add %o1, 0x8, %o1 65 add %o1, 0x8, %o1
51 bgu,pt %XCC, 1b 66 bgu,pt %XCC, 1b
52 add %o0, 0x8, %o0 67 add %o0, 0x8, %o0
@@ -54,8 +69,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
54 be,pt %XCC, 1f 69 be,pt %XCC, 1f
55 nop 70 nop
56 sub %o2, 0x4, %o2 71 sub %o2, 0x4, %o2
57 EX(lduwa [%o1] %asi, %o5) 72 EX_O2_4(lduwa [%o1] %asi, %o5)
58 EX(stwa %o5, [%o0] %asi) 73 EX_O2_4(stwa %o5, [%o0] %asi)
59 add %o1, 0x4, %o1 74 add %o1, 0x4, %o1
60 add %o0, 0x4, %o0 75 add %o0, 0x4, %o0
611: cmp %o2, 0 761: cmp %o2, 0
@@ -71,8 +86,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
71 86
7282: 8782:
73 subcc %o2, 4, %o2 88 subcc %o2, 4, %o2
74 EX(lduwa [%o1] %asi, %g1) 89 EX_O2_4(lduwa [%o1] %asi, %g1)
75 EX(stwa %g1, [%o0] %asi) 90 EX_O2_4(stwa %g1, [%o0] %asi)
76 add %o1, 4, %o1 91 add %o1, 4, %o1
77 bgu,pt %XCC, 82b 92 bgu,pt %XCC, 82b
78 add %o0, 4, %o0 93 add %o0, 4, %o0
@@ -83,8 +98,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
83 .align 32 98 .align 32
8490: 9990:
85 subcc %o2, 1, %o2 100 subcc %o2, 1, %o2
86 EX(lduba [%o1] %asi, %g1) 101 EX_O2_1(lduba [%o1] %asi, %g1)
87 EX(stba %g1, [%o0] %asi) 102 EX_O2_1(stba %g1, [%o0] %asi)
88 add %o1, 1, %o1 103 add %o1, 1, %o1
89 bgu,pt %XCC, 90b 104 bgu,pt %XCC, 90b
90 add %o0, 1, %o0 105 add %o0, 1, %o0
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
deleted file mode 100644
index ac96ae236709..000000000000
--- a/arch/sparc/lib/user_fixup.c
+++ /dev/null
@@ -1,71 +0,0 @@
1/* user_fixup.c: Fix up user copy faults.
2 *
3 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
4 */
5
6#include <linux/compiler.h>
7#include <linux/kernel.h>
8#include <linux/string.h>
9#include <linux/errno.h>
10#include <linux/module.h>
11
12#include <asm/uaccess.h>
13
14/* Calculating the exact fault address when using
15 * block loads and stores can be very complicated.
16 *
17 * Instead of trying to be clever and handling all
18 * of the cases, just fix things up simply here.
19 */
20
21static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
22{
23 unsigned long fault_addr = current_thread_info()->fault_address;
24 unsigned long end = start + size;
25
26 if (fault_addr < start || fault_addr >= end) {
27 *offset = 0;
28 } else {
29 *offset = fault_addr - start;
30 size = end - fault_addr;
31 }
32 return size;
33}
34
35unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
36{
37 unsigned long offset;
38
39 size = compute_size((unsigned long) from, size, &offset);
40 if (likely(size))
41 memset(to + offset, 0, size);
42
43 return size;
44}
45EXPORT_SYMBOL(copy_from_user_fixup);
46
47unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
48{
49 unsigned long offset;
50
51 return compute_size((unsigned long) to, size, &offset);
52}
53EXPORT_SYMBOL(copy_to_user_fixup);
54
55unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
56{
57 unsigned long fault_addr = current_thread_info()->fault_address;
58 unsigned long start = (unsigned long) to;
59 unsigned long end = start + size;
60
61 if (fault_addr >= start && fault_addr < end)
62 return end - fault_addr;
63
64 start = (unsigned long) from;
65 end = start + size;
66 if (fault_addr >= start && fault_addr < end)
67 return end - fault_addr;
68
69 return size;
70}
71EXPORT_SYMBOL(copy_in_user_fixup);
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f2b77112e9d8..e20fbbafb0b0 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
27 return (tag == (vaddr >> 22)); 27 return (tag == (vaddr >> 22));
28} 28}
29 29
30static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
31{
32 unsigned long idx;
33
34 for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
35 struct tsb *ent = &swapper_tsb[idx];
36 unsigned long match = idx << 13;
37
38 match |= (ent->tag << 22);
39 if (match >= start && match < end)
40 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
41 }
42}
43
30/* TSB flushes need only occur on the processor initiating the address 44/* TSB flushes need only occur on the processor initiating the address
31 * space modification, not on each cpu the address space has run on. 45 * space modification, not on each cpu the address space has run on.
32 * Only the TLB flush needs that treatment. 46 * Only the TLB flush needs that treatment.
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
36{ 50{
37 unsigned long v; 51 unsigned long v;
38 52
53 if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
54 return flush_tsb_kernel_range_scan(start, end);
55
39 for (v = start; v < end; v += PAGE_SIZE) { 56 for (v = start; v < end; v += PAGE_SIZE) {
40 unsigned long hash = tsb_hash(v, PAGE_SHIFT, 57 unsigned long hash = tsb_hash(v, PAGE_SHIFT,
41 KERNEL_TSB_NENTRIES); 58 KERNEL_TSB_NENTRIES);
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index b4f4733abc6e..5d2fd6cd3189 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -30,7 +30,7 @@
30 .text 30 .text
31 .align 32 31 .align 32
32 .globl __flush_tlb_mm 32 .globl __flush_tlb_mm
33__flush_tlb_mm: /* 18 insns */ 33__flush_tlb_mm: /* 19 insns */
34 /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ 34 /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
35 ldxa [%o1] ASI_DMMU, %g2 35 ldxa [%o1] ASI_DMMU, %g2
36 cmp %g2, %o0 36 cmp %g2, %o0
@@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */
81 81
82 .align 32 82 .align 32
83 .globl __flush_tlb_pending 83 .globl __flush_tlb_pending
84__flush_tlb_pending: /* 26 insns */ 84__flush_tlb_pending: /* 27 insns */
85 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ 85 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
86 rdpr %pstate, %g7 86 rdpr %pstate, %g7
87 sllx %o1, 3, %o1 87 sllx %o1, 3, %o1
@@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */
113 113
114 .align 32 114 .align 32
115 .globl __flush_tlb_kernel_range 115 .globl __flush_tlb_kernel_range
116__flush_tlb_kernel_range: /* 16 insns */ 116__flush_tlb_kernel_range: /* 31 insns */
117 /* %o0=start, %o1=end */ 117 /* %o0=start, %o1=end */
118 cmp %o0, %o1 118 cmp %o0, %o1
119 be,pn %xcc, 2f 119 be,pn %xcc, 2f
120 sub %o1, %o0, %o3
121 srlx %o3, 18, %o4
122 brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow
120 sethi %hi(PAGE_SIZE), %o4 123 sethi %hi(PAGE_SIZE), %o4
121 sub %o1, %o0, %o3
122 sub %o3, %o4, %o3 124 sub %o3, %o4, %o3
123 or %o0, 0x20, %o0 ! Nucleus 125 or %o0, 0x20, %o0 ! Nucleus
1241: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP 1261: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
@@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */
131 retl 133 retl
132 nop 134 nop
133 nop 135 nop
136 nop
137 nop
138 nop
139 nop
140 nop
141 nop
142 nop
143 nop
144 nop
145 nop
146 nop
147 nop
148 nop
149
150__spitfire_flush_tlb_kernel_range_slow:
151 mov 63 * 8, %o4
1521: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3
153 andcc %o3, 0x40, %g0 /* _PAGE_L_4U */
154 bne,pn %xcc, 2f
155 mov TLB_TAG_ACCESS, %o3
156 stxa %g0, [%o3] ASI_IMMU
157 stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS
158 membar #Sync
1592: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3
160 andcc %o3, 0x40, %g0
161 bne,pn %xcc, 2f
162 mov TLB_TAG_ACCESS, %o3
163 stxa %g0, [%o3] ASI_DMMU
164 stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS
165 membar #Sync
1662: sub %o4, 8, %o4
167 brgez,pt %o4, 1b
168 nop
169 retl
170 nop
134 171
135__spitfire_flush_tlb_mm_slow: 172__spitfire_flush_tlb_mm_slow:
136 rdpr %pstate, %g1 173 rdpr %pstate, %g1
@@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */
285 retl 322 retl
286 wrpr %g7, 0x0, %pstate 323 wrpr %g7, 0x0, %pstate
287 324
325__cheetah_flush_tlb_kernel_range: /* 31 insns */
326 /* %o0=start, %o1=end */
327 cmp %o0, %o1
328 be,pn %xcc, 2f
329 sub %o1, %o0, %o3
330 srlx %o3, 18, %o4
331 brnz,pn %o4, 3f
332 sethi %hi(PAGE_SIZE), %o4
333 sub %o3, %o4, %o3
334 or %o0, 0x20, %o0 ! Nucleus
3351: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
336 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
337 membar #Sync
338 brnz,pt %o3, 1b
339 sub %o3, %o4, %o3
3402: sethi %hi(KERNBASE), %o3
341 flush %o3
342 retl
343 nop
3443: mov 0x80, %o4
345 stxa %g0, [%o4] ASI_DMMU_DEMAP
346 membar #Sync
347 stxa %g0, [%o4] ASI_IMMU_DEMAP
348 membar #Sync
349 retl
350 nop
351 nop
352 nop
353 nop
354 nop
355 nop
356 nop
357 nop
358
288#ifdef DCACHE_ALIASING_POSSIBLE 359#ifdef DCACHE_ALIASING_POSSIBLE
289__cheetah_flush_dcache_page: /* 11 insns */ 360__cheetah_flush_dcache_page: /* 11 insns */
290 sethi %hi(PAGE_OFFSET), %g1 361 sethi %hi(PAGE_OFFSET), %g1
@@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error:
309 ret 380 ret
310 restore 381 restore
311 382
312__hypervisor_flush_tlb_mm: /* 10 insns */ 383__hypervisor_flush_tlb_mm: /* 19 insns */
313 mov %o0, %o2 /* ARG2: mmu context */ 384 mov %o0, %o2 /* ARG2: mmu context */
314 mov 0, %o0 /* ARG0: CPU lists unimplemented */ 385 mov 0, %o0 /* ARG0: CPU lists unimplemented */
315 mov 0, %o1 /* ARG1: CPU lists unimplemented */ 386 mov 0, %o1 /* ARG1: CPU lists unimplemented */
316 mov HV_MMU_ALL, %o3 /* ARG3: flags */ 387 mov HV_MMU_ALL, %o3 /* ARG3: flags */
317 mov HV_FAST_MMU_DEMAP_CTX, %o5 388 mov HV_FAST_MMU_DEMAP_CTX, %o5
318 ta HV_FAST_TRAP 389 ta HV_FAST_TRAP
319 brnz,pn %o0, __hypervisor_tlb_tl0_error 390 brnz,pn %o0, 1f
320 mov HV_FAST_MMU_DEMAP_CTX, %o1 391 mov HV_FAST_MMU_DEMAP_CTX, %o1
321 retl 392 retl
322 nop 393 nop
3941: sethi %hi(__hypervisor_tlb_tl0_error), %o5
395 jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0
396 nop
397 nop
398 nop
399 nop
400 nop
401 nop
402 nop
323 403
324__hypervisor_flush_tlb_page: /* 11 insns */ 404__hypervisor_flush_tlb_page: /* 22 insns */
325 /* %o0 = context, %o1 = vaddr */ 405 /* %o0 = context, %o1 = vaddr */
326 mov %o0, %g2 406 mov %o0, %g2
327 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ 407 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
@@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */
330 srlx %o0, PAGE_SHIFT, %o0 410 srlx %o0, PAGE_SHIFT, %o0
331 sllx %o0, PAGE_SHIFT, %o0 411 sllx %o0, PAGE_SHIFT, %o0
332 ta HV_MMU_UNMAP_ADDR_TRAP 412 ta HV_MMU_UNMAP_ADDR_TRAP
333 brnz,pn %o0, __hypervisor_tlb_tl0_error 413 brnz,pn %o0, 1f
334 mov HV_MMU_UNMAP_ADDR_TRAP, %o1 414 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
335 retl 415 retl
336 nop 416 nop
4171: sethi %hi(__hypervisor_tlb_tl0_error), %o2
418 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
419 nop
420 nop
421 nop
422 nop
423 nop
424 nop
425 nop
426 nop
427 nop
337 428
338__hypervisor_flush_tlb_pending: /* 16 insns */ 429__hypervisor_flush_tlb_pending: /* 27 insns */
339 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ 430 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
340 sllx %o1, 3, %g1 431 sllx %o1, 3, %g1
341 mov %o2, %g2 432 mov %o2, %g2
@@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */
347 srlx %o0, PAGE_SHIFT, %o0 438 srlx %o0, PAGE_SHIFT, %o0
348 sllx %o0, PAGE_SHIFT, %o0 439 sllx %o0, PAGE_SHIFT, %o0
349 ta HV_MMU_UNMAP_ADDR_TRAP 440 ta HV_MMU_UNMAP_ADDR_TRAP
350 brnz,pn %o0, __hypervisor_tlb_tl0_error 441 brnz,pn %o0, 1f
351 mov HV_MMU_UNMAP_ADDR_TRAP, %o1 442 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
352 brnz,pt %g1, 1b 443 brnz,pt %g1, 1b
353 nop 444 nop
354 retl 445 retl
355 nop 446 nop
4471: sethi %hi(__hypervisor_tlb_tl0_error), %o2
448 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
449 nop
450 nop
451 nop
452 nop
453 nop
454 nop
455 nop
456 nop
457 nop
356 458
357__hypervisor_flush_tlb_kernel_range: /* 16 insns */ 459__hypervisor_flush_tlb_kernel_range: /* 31 insns */
358 /* %o0=start, %o1=end */ 460 /* %o0=start, %o1=end */
359 cmp %o0, %o1 461 cmp %o0, %o1
360 be,pn %xcc, 2f 462 be,pn %xcc, 2f
361 sethi %hi(PAGE_SIZE), %g3 463 sub %o1, %o0, %g2
362 mov %o0, %g1 464 srlx %g2, 18, %g3
363 sub %o1, %g1, %g2 465 brnz,pn %g3, 4f
466 mov %o0, %g1
467 sethi %hi(PAGE_SIZE), %g3
364 sub %g2, %g3, %g2 468 sub %g2, %g3, %g2
3651: add %g1, %g2, %o0 /* ARG0: virtual address */ 4691: add %g1, %g2, %o0 /* ARG0: virtual address */
366 mov 0, %o1 /* ARG1: mmu context */ 470 mov 0, %o1 /* ARG1: mmu context */
367 mov HV_MMU_ALL, %o2 /* ARG2: flags */ 471 mov HV_MMU_ALL, %o2 /* ARG2: flags */
368 ta HV_MMU_UNMAP_ADDR_TRAP 472 ta HV_MMU_UNMAP_ADDR_TRAP
369 brnz,pn %o0, __hypervisor_tlb_tl0_error 473 brnz,pn %o0, 3f
370 mov HV_MMU_UNMAP_ADDR_TRAP, %o1 474 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
371 brnz,pt %g2, 1b 475 brnz,pt %g2, 1b
372 sub %g2, %g3, %g2 476 sub %g2, %g3, %g2
3732: retl 4772: retl
374 nop 478 nop
4793: sethi %hi(__hypervisor_tlb_tl0_error), %o2
480 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
481 nop
4824: mov 0, %o0 /* ARG0: CPU lists unimplemented */
483 mov 0, %o1 /* ARG1: CPU lists unimplemented */
484 mov 0, %o2 /* ARG2: mmu context == nucleus */
485 mov HV_MMU_ALL, %o3 /* ARG3: flags */
486 mov HV_FAST_MMU_DEMAP_CTX, %o5
487 ta HV_FAST_TRAP
488 brnz,pn %o0, 3b
489 mov HV_FAST_MMU_DEMAP_CTX, %o1
490 retl
491 nop
375 492
376#ifdef DCACHE_ALIASING_POSSIBLE 493#ifdef DCACHE_ALIASING_POSSIBLE
377 /* XXX Niagara and friends have an 8K cache, so no aliasing is 494 /* XXX Niagara and friends have an 8K cache, so no aliasing is
@@ -394,43 +511,6 @@ tlb_patch_one:
394 retl 511 retl
395 nop 512 nop
396 513
397 .globl cheetah_patch_cachetlbops
398cheetah_patch_cachetlbops:
399 save %sp, -128, %sp
400
401 sethi %hi(__flush_tlb_mm), %o0
402 or %o0, %lo(__flush_tlb_mm), %o0
403 sethi %hi(__cheetah_flush_tlb_mm), %o1
404 or %o1, %lo(__cheetah_flush_tlb_mm), %o1
405 call tlb_patch_one
406 mov 19, %o2
407
408 sethi %hi(__flush_tlb_page), %o0
409 or %o0, %lo(__flush_tlb_page), %o0
410 sethi %hi(__cheetah_flush_tlb_page), %o1
411 or %o1, %lo(__cheetah_flush_tlb_page), %o1
412 call tlb_patch_one
413 mov 22, %o2
414
415 sethi %hi(__flush_tlb_pending), %o0
416 or %o0, %lo(__flush_tlb_pending), %o0
417 sethi %hi(__cheetah_flush_tlb_pending), %o1
418 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
419 call tlb_patch_one
420 mov 27, %o2
421
422#ifdef DCACHE_ALIASING_POSSIBLE
423 sethi %hi(__flush_dcache_page), %o0
424 or %o0, %lo(__flush_dcache_page), %o0
425 sethi %hi(__cheetah_flush_dcache_page), %o1
426 or %o1, %lo(__cheetah_flush_dcache_page), %o1
427 call tlb_patch_one
428 mov 11, %o2
429#endif /* DCACHE_ALIASING_POSSIBLE */
430
431 ret
432 restore
433
434#ifdef CONFIG_SMP 514#ifdef CONFIG_SMP
435 /* These are all called by the slaves of a cross call, at 515 /* These are all called by the slaves of a cross call, at
436 * trap level 1, with interrupts fully disabled. 516 * trap level 1, with interrupts fully disabled.
@@ -447,7 +527,7 @@ cheetah_patch_cachetlbops:
447 */ 527 */
448 .align 32 528 .align 32
449 .globl xcall_flush_tlb_mm 529 .globl xcall_flush_tlb_mm
450xcall_flush_tlb_mm: /* 21 insns */ 530xcall_flush_tlb_mm: /* 24 insns */
451 mov PRIMARY_CONTEXT, %g2 531 mov PRIMARY_CONTEXT, %g2
452 ldxa [%g2] ASI_DMMU, %g3 532 ldxa [%g2] ASI_DMMU, %g3
453 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 533 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */
469 nop 549 nop
470 nop 550 nop
471 nop 551 nop
552 nop
553 nop
554 nop
472 555
473 .globl xcall_flush_tlb_page 556 .globl xcall_flush_tlb_page
474xcall_flush_tlb_page: /* 17 insns */ 557xcall_flush_tlb_page: /* 20 insns */
475 /* %g5=context, %g1=vaddr */ 558 /* %g5=context, %g1=vaddr */
476 mov PRIMARY_CONTEXT, %g4 559 mov PRIMARY_CONTEXT, %g4
477 ldxa [%g4] ASI_DMMU, %g2 560 ldxa [%g4] ASI_DMMU, %g2
@@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */
490 retry 573 retry
491 nop 574 nop
492 nop 575 nop
576 nop
577 nop
578 nop
493 579
494 .globl xcall_flush_tlb_kernel_range 580 .globl xcall_flush_tlb_kernel_range
495xcall_flush_tlb_kernel_range: /* 25 insns */ 581xcall_flush_tlb_kernel_range: /* 44 insns */
496 sethi %hi(PAGE_SIZE - 1), %g2 582 sethi %hi(PAGE_SIZE - 1), %g2
497 or %g2, %lo(PAGE_SIZE - 1), %g2 583 or %g2, %lo(PAGE_SIZE - 1), %g2
498 andn %g1, %g2, %g1 584 andn %g1, %g2, %g1
499 andn %g7, %g2, %g7 585 andn %g7, %g2, %g7
500 sub %g7, %g1, %g3 586 sub %g7, %g1, %g3
501 add %g2, 1, %g2 587 srlx %g3, 18, %g2
588 brnz,pn %g2, 2f
589 add %g2, 1, %g2
502 sub %g3, %g2, %g3 590 sub %g3, %g2, %g3
503 or %g1, 0x20, %g1 ! Nucleus 591 or %g1, 0x20, %g1 ! Nucleus
5041: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP 5921: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */
507 brnz,pt %g3, 1b 595 brnz,pt %g3, 1b
508 sub %g3, %g2, %g3 596 sub %g3, %g2, %g3
509 retry 597 retry
510 nop 5982: mov 63 * 8, %g1
511 nop 5991: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2
600 andcc %g2, 0x40, %g0 /* _PAGE_L_4U */
601 bne,pn %xcc, 2f
602 mov TLB_TAG_ACCESS, %g2
603 stxa %g0, [%g2] ASI_IMMU
604 stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS
605 membar #Sync
6062: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2
607 andcc %g2, 0x40, %g0
608 bne,pn %xcc, 2f
609 mov TLB_TAG_ACCESS, %g2
610 stxa %g0, [%g2] ASI_DMMU
611 stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS
612 membar #Sync
6132: sub %g1, 8, %g1
614 brgez,pt %g1, 1b
615 nop
616 retry
512 nop 617 nop
513 nop 618 nop
514 nop 619 nop
@@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4:
637 742
638 retry 743 retry
639 744
745__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
746 sethi %hi(PAGE_SIZE - 1), %g2
747 or %g2, %lo(PAGE_SIZE - 1), %g2
748 andn %g1, %g2, %g1
749 andn %g7, %g2, %g7
750 sub %g7, %g1, %g3
751 srlx %g3, 18, %g2
752 brnz,pn %g2, 2f
753 add %g2, 1, %g2
754 sub %g3, %g2, %g3
755 or %g1, 0x20, %g1 ! Nucleus
7561: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
757 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
758 membar #Sync
759 brnz,pt %g3, 1b
760 sub %g3, %g2, %g3
761 retry
7622: mov 0x80, %g2
763 stxa %g0, [%g2] ASI_DMMU_DEMAP
764 membar #Sync
765 stxa %g0, [%g2] ASI_IMMU_DEMAP
766 membar #Sync
767 retry
768 nop
769 nop
770 nop
771 nop
772 nop
773 nop
774 nop
775 nop
776 nop
777 nop
778 nop
779 nop
780 nop
781 nop
782 nop
783 nop
784 nop
785 nop
786 nop
787 nop
788 nop
789 nop
790
640#ifdef DCACHE_ALIASING_POSSIBLE 791#ifdef DCACHE_ALIASING_POSSIBLE
641 .align 32 792 .align 32
642 .globl xcall_flush_dcache_page_cheetah 793 .globl xcall_flush_dcache_page_cheetah
@@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error:
700 ba,a,pt %xcc, rtrap 851 ba,a,pt %xcc, rtrap
701 852
702 .globl __hypervisor_xcall_flush_tlb_mm 853 .globl __hypervisor_xcall_flush_tlb_mm
703__hypervisor_xcall_flush_tlb_mm: /* 21 insns */ 854__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
704 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ 855 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
705 mov %o0, %g2 856 mov %o0, %g2
706 mov %o1, %g3 857 mov %o1, %g3
@@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
714 mov HV_FAST_MMU_DEMAP_CTX, %o5 865 mov HV_FAST_MMU_DEMAP_CTX, %o5
715 ta HV_FAST_TRAP 866 ta HV_FAST_TRAP
716 mov HV_FAST_MMU_DEMAP_CTX, %g6 867 mov HV_FAST_MMU_DEMAP_CTX, %g6
717 brnz,pn %o0, __hypervisor_tlb_xcall_error 868 brnz,pn %o0, 1f
718 mov %o0, %g5 869 mov %o0, %g5
719 mov %g2, %o0 870 mov %g2, %o0
720 mov %g3, %o1 871 mov %g3, %o1
@@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
723 mov %g7, %o5 874 mov %g7, %o5
724 membar #Sync 875 membar #Sync
725 retry 876 retry
8771: sethi %hi(__hypervisor_tlb_xcall_error), %g4
878 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
879 nop
726 880
727 .globl __hypervisor_xcall_flush_tlb_page 881 .globl __hypervisor_xcall_flush_tlb_page
728__hypervisor_xcall_flush_tlb_page: /* 17 insns */ 882__hypervisor_xcall_flush_tlb_page: /* 20 insns */
729 /* %g5=ctx, %g1=vaddr */ 883 /* %g5=ctx, %g1=vaddr */
730 mov %o0, %g2 884 mov %o0, %g2
731 mov %o1, %g3 885 mov %o1, %g3
@@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
737 sllx %o0, PAGE_SHIFT, %o0 891 sllx %o0, PAGE_SHIFT, %o0
738 ta HV_MMU_UNMAP_ADDR_TRAP 892 ta HV_MMU_UNMAP_ADDR_TRAP
739 mov HV_MMU_UNMAP_ADDR_TRAP, %g6 893 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
740 brnz,a,pn %o0, __hypervisor_tlb_xcall_error 894 brnz,a,pn %o0, 1f
741 mov %o0, %g5 895 mov %o0, %g5
742 mov %g2, %o0 896 mov %g2, %o0
743 mov %g3, %o1 897 mov %g3, %o1
744 mov %g4, %o2 898 mov %g4, %o2
745 membar #Sync 899 membar #Sync
746 retry 900 retry
9011: sethi %hi(__hypervisor_tlb_xcall_error), %g4
902 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
903 nop
747 904
748 .globl __hypervisor_xcall_flush_tlb_kernel_range 905 .globl __hypervisor_xcall_flush_tlb_kernel_range
749__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ 906__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
750 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ 907 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
751 sethi %hi(PAGE_SIZE - 1), %g2 908 sethi %hi(PAGE_SIZE - 1), %g2
752 or %g2, %lo(PAGE_SIZE - 1), %g2 909 or %g2, %lo(PAGE_SIZE - 1), %g2
753 andn %g1, %g2, %g1 910 andn %g1, %g2, %g1
754 andn %g7, %g2, %g7 911 andn %g7, %g2, %g7
755 sub %g7, %g1, %g3 912 sub %g7, %g1, %g3
913 srlx %g3, 18, %g7
756 add %g2, 1, %g2 914 add %g2, 1, %g2
757 sub %g3, %g2, %g3 915 sub %g3, %g2, %g3
758 mov %o0, %g2 916 mov %o0, %g2
759 mov %o1, %g4 917 mov %o1, %g4
760 mov %o2, %g7 918 brnz,pn %g7, 2f
919 mov %o2, %g7
7611: add %g1, %g3, %o0 /* ARG0: virtual address */ 9201: add %g1, %g3, %o0 /* ARG0: virtual address */
762 mov 0, %o1 /* ARG1: mmu context */ 921 mov 0, %o1 /* ARG1: mmu context */
763 mov HV_MMU_ALL, %o2 /* ARG2: flags */ 922 mov HV_MMU_ALL, %o2 /* ARG2: flags */
764 ta HV_MMU_UNMAP_ADDR_TRAP 923 ta HV_MMU_UNMAP_ADDR_TRAP
765 mov HV_MMU_UNMAP_ADDR_TRAP, %g6 924 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
766 brnz,pn %o0, __hypervisor_tlb_xcall_error 925 brnz,pn %o0, 1f
767 mov %o0, %g5 926 mov %o0, %g5
768 sethi %hi(PAGE_SIZE), %o2 927 sethi %hi(PAGE_SIZE), %o2
769 brnz,pt %g3, 1b 928 brnz,pt %g3, 1b
770 sub %g3, %o2, %g3 929 sub %g3, %o2, %g3
771 mov %g2, %o0 9305: mov %g2, %o0
772 mov %g4, %o1 931 mov %g4, %o1
773 mov %g7, %o2 932 mov %g7, %o2
774 membar #Sync 933 membar #Sync
775 retry 934 retry
9351: sethi %hi(__hypervisor_tlb_xcall_error), %g4
936 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
937 nop
9382: mov %o3, %g1
939 mov %o5, %g3
940 mov 0, %o0 /* ARG0: CPU lists unimplemented */
941 mov 0, %o1 /* ARG1: CPU lists unimplemented */
942 mov 0, %o2 /* ARG2: mmu context == nucleus */
943 mov HV_MMU_ALL, %o3 /* ARG3: flags */
944 mov HV_FAST_MMU_DEMAP_CTX, %o5
945 ta HV_FAST_TRAP
946 mov %g1, %o3
947 brz,pt %o0, 5b
948 mov %g3, %o5
949 mov HV_FAST_MMU_DEMAP_CTX, %g6
950 ba,pt %xcc, 1b
951 clr %g5
776 952
777 /* These just get rescheduled to PIL vectors. */ 953 /* These just get rescheduled to PIL vectors. */
778 .globl xcall_call_function 954 .globl xcall_call_function
@@ -809,6 +985,58 @@ xcall_kgdb_capture:
809 985
810#endif /* CONFIG_SMP */ 986#endif /* CONFIG_SMP */
811 987
988 .globl cheetah_patch_cachetlbops
989cheetah_patch_cachetlbops:
990 save %sp, -128, %sp
991
992 sethi %hi(__flush_tlb_mm), %o0
993 or %o0, %lo(__flush_tlb_mm), %o0
994 sethi %hi(__cheetah_flush_tlb_mm), %o1
995 or %o1, %lo(__cheetah_flush_tlb_mm), %o1
996 call tlb_patch_one
997 mov 19, %o2
998
999 sethi %hi(__flush_tlb_page), %o0
1000 or %o0, %lo(__flush_tlb_page), %o0
1001 sethi %hi(__cheetah_flush_tlb_page), %o1
1002 or %o1, %lo(__cheetah_flush_tlb_page), %o1
1003 call tlb_patch_one
1004 mov 22, %o2
1005
1006 sethi %hi(__flush_tlb_pending), %o0
1007 or %o0, %lo(__flush_tlb_pending), %o0
1008 sethi %hi(__cheetah_flush_tlb_pending), %o1
1009 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
1010 call tlb_patch_one
1011 mov 27, %o2
1012
1013 sethi %hi(__flush_tlb_kernel_range), %o0
1014 or %o0, %lo(__flush_tlb_kernel_range), %o0
1015 sethi %hi(__cheetah_flush_tlb_kernel_range), %o1
1016 or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
1017 call tlb_patch_one
1018 mov 31, %o2
1019
1020#ifdef DCACHE_ALIASING_POSSIBLE
1021 sethi %hi(__flush_dcache_page), %o0
1022 or %o0, %lo(__flush_dcache_page), %o0
1023 sethi %hi(__cheetah_flush_dcache_page), %o1
1024 or %o1, %lo(__cheetah_flush_dcache_page), %o1
1025 call tlb_patch_one
1026 mov 11, %o2
1027#endif /* DCACHE_ALIASING_POSSIBLE */
1028
1029#ifdef CONFIG_SMP
1030 sethi %hi(xcall_flush_tlb_kernel_range), %o0
1031 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
1032 sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
1033 or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
1034 call tlb_patch_one
1035 mov 44, %o2
1036#endif /* CONFIG_SMP */
1037
1038 ret
1039 restore
812 1040
813 .globl hypervisor_patch_cachetlbops 1041 .globl hypervisor_patch_cachetlbops
814hypervisor_patch_cachetlbops: 1042hypervisor_patch_cachetlbops:
@@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops:
819 sethi %hi(__hypervisor_flush_tlb_mm), %o1 1047 sethi %hi(__hypervisor_flush_tlb_mm), %o1
820 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 1048 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
821 call tlb_patch_one 1049 call tlb_patch_one
822 mov 10, %o2 1050 mov 19, %o2
823 1051
824 sethi %hi(__flush_tlb_page), %o0 1052 sethi %hi(__flush_tlb_page), %o0
825 or %o0, %lo(__flush_tlb_page), %o0 1053 or %o0, %lo(__flush_tlb_page), %o0
826 sethi %hi(__hypervisor_flush_tlb_page), %o1 1054 sethi %hi(__hypervisor_flush_tlb_page), %o1
827 or %o1, %lo(__hypervisor_flush_tlb_page), %o1 1055 or %o1, %lo(__hypervisor_flush_tlb_page), %o1
828 call tlb_patch_one 1056 call tlb_patch_one
829 mov 11, %o2 1057 mov 22, %o2
830 1058
831 sethi %hi(__flush_tlb_pending), %o0 1059 sethi %hi(__flush_tlb_pending), %o0
832 or %o0, %lo(__flush_tlb_pending), %o0 1060 or %o0, %lo(__flush_tlb_pending), %o0
833 sethi %hi(__hypervisor_flush_tlb_pending), %o1 1061 sethi %hi(__hypervisor_flush_tlb_pending), %o1
834 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 1062 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
835 call tlb_patch_one 1063 call tlb_patch_one
836 mov 16, %o2 1064 mov 27, %o2
837 1065
838 sethi %hi(__flush_tlb_kernel_range), %o0 1066 sethi %hi(__flush_tlb_kernel_range), %o0
839 or %o0, %lo(__flush_tlb_kernel_range), %o0 1067 or %o0, %lo(__flush_tlb_kernel_range), %o0
840 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 1068 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
841 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 1069 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
842 call tlb_patch_one 1070 call tlb_patch_one
843 mov 16, %o2 1071 mov 31, %o2
844 1072
845#ifdef DCACHE_ALIASING_POSSIBLE 1073#ifdef DCACHE_ALIASING_POSSIBLE
846 sethi %hi(__flush_dcache_page), %o0 1074 sethi %hi(__flush_dcache_page), %o0
@@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops:
857 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 1085 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
858 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 1086 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
859 call tlb_patch_one 1087 call tlb_patch_one
860 mov 21, %o2 1088 mov 24, %o2
861 1089
862 sethi %hi(xcall_flush_tlb_page), %o0 1090 sethi %hi(xcall_flush_tlb_page), %o0
863 or %o0, %lo(xcall_flush_tlb_page), %o0 1091 or %o0, %lo(xcall_flush_tlb_page), %o0
864 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 1092 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
865 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 1093 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
866 call tlb_patch_one 1094 call tlb_patch_one
867 mov 17, %o2 1095 mov 20, %o2
868 1096
869 sethi %hi(xcall_flush_tlb_kernel_range), %o0 1097 sethi %hi(xcall_flush_tlb_kernel_range), %o0
870 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 1098 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
871 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 1099 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
872 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 1100 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
873 call tlb_patch_one 1101 call tlb_patch_one
874 mov 25, %o2 1102 mov 44, %o2
875#endif /* CONFIG_SMP */ 1103#endif /* CONFIG_SMP */
876 1104
877 ret 1105 ret
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 6160761d5f61..4810e48dbbbf 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -61,4 +61,7 @@
61 */ 61 */
62#define __write_once __read_mostly 62#define __write_once __read_mostly
63 63
64/* __ro_after_init is the generic name for the tile arch __write_once. */
65#define __ro_after_init __read_mostly
66
64#endif /* _ASM_TILE_CACHE_H */ 67#endif /* _ASM_TILE_CACHE_H */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 0ab5ee1c26af..aa8b0672f87a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -888,7 +888,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
888 unsigned long auth_tag_len = crypto_aead_authsize(tfm); 888 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
889 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 889 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
890 struct scatter_walk src_sg_walk; 890 struct scatter_walk src_sg_walk;
891 struct scatter_walk dst_sg_walk; 891 struct scatter_walk dst_sg_walk = {};
892 unsigned int i; 892 unsigned int i;
893 893
894 /* Assuming we are supporting rfc4106 64-bit extended */ 894 /* Assuming we are supporting rfc4106 64-bit extended */
@@ -968,7 +968,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
968 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 968 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
969 u8 authTag[16]; 969 u8 authTag[16];
970 struct scatter_walk src_sg_walk; 970 struct scatter_walk src_sg_walk;
971 struct scatter_walk dst_sg_walk; 971 struct scatter_walk dst_sg_walk = {};
972 unsigned int i; 972 unsigned int i;
973 973
974 if (unlikely(req->assoclen != 16 && req->assoclen != 20)) 974 if (unlikely(req->assoclen != 16 && req->assoclen != 20))
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 5f845eef9a4d..81195cca7eae 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -8,8 +8,12 @@
8#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 8#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
9#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 9#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
10#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 10#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
11#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f 11#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
12#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c 12#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
13#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
14#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
15#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
16#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
13 17
14/* SNB event control */ 18/* SNB event control */
15#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff 19#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -616,13 +620,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
616 620
617static const struct pci_device_id skl_uncore_pci_ids[] = { 621static const struct pci_device_id skl_uncore_pci_ids[] = {
618 { /* IMC */ 622 { /* IMC */
619 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC), 623 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
620 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 624 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
621 }, 625 },
622 { /* IMC */ 626 { /* IMC */
623 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), 627 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
624 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 628 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
625 }, 629 },
630 { /* IMC */
631 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
632 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
633 },
634 { /* IMC */
635 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
636 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
637 },
638 { /* IMC */
639 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
640 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
641 },
642 { /* IMC */
643 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
644 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
645 },
626 646
627 { /* end: all zeroes */ }, 647 { /* end: all zeroes */ },
628}; 648};
@@ -666,8 +686,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
666 IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ 686 IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
667 IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ 687 IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
668 IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ 688 IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */
669 IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */ 689 IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */
670 IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ 690 IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */
691 IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
692 IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
693 IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
694 IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
671 { /* end marker */ } 695 { /* end marker */ }
672}; 696};
673 697
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 5b6753d1f7f4..49da9f497b90 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -17,6 +17,7 @@
17 17
18extern int intel_mid_pci_init(void); 18extern int intel_mid_pci_init(void);
19extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); 19extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
20extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev);
20 21
21extern void intel_mid_pwr_power_off(void); 22extern void intel_mid_pwr_power_off(void);
22 23
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4b20f7304b9c..bdde80731f49 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -948,7 +948,6 @@ struct kvm_x86_ops {
948 int (*get_lpage_level)(void); 948 int (*get_lpage_level)(void);
949 bool (*rdtscp_supported)(void); 949 bool (*rdtscp_supported)(void);
950 bool (*invpcid_supported)(void); 950 bool (*invpcid_supported)(void);
951 void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
952 951
953 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); 952 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
954 953
@@ -958,8 +957,6 @@ struct kvm_x86_ops {
958 957
959 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); 958 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
960 959
961 u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
962
963 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); 960 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
964 961
965 int (*check_intercept)(struct kvm_vcpu *vcpu, 962 int (*check_intercept)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index c7364bd633e1..51287cd90bf6 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
1042 1042
1043 if (apm_info.get_power_status_broken) 1043 if (apm_info.get_power_status_broken)
1044 return APM_32_UNSUPPORTED; 1044 return APM_32_UNSUPPORTED;
1045 if (apm_bios_call(&call)) 1045 if (apm_bios_call(&call)) {
1046 if (!call.err)
1047 return APM_NO_ERROR;
1046 return call.err; 1048 return call.err;
1049 }
1047 *status = call.ebx; 1050 *status = call.ebx;
1048 *bat = call.ecx; 1051 *bat = call.ecx;
1049 if (apm_info.get_power_status_swabinminutes) { 1052 if (apm_info.get_power_status_swabinminutes) {
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b81fe2d63e15..1e81a37c034e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
347#ifdef CONFIG_SMP 347#ifdef CONFIG_SMP
348 unsigned bits; 348 unsigned bits;
349 int cpu = smp_processor_id(); 349 int cpu = smp_processor_id();
350 unsigned int socket_id, core_complex_id;
351 350
352 bits = c->x86_coreid_bits; 351 bits = c->x86_coreid_bits;
353 /* Low order bits define the core id (index of core in socket) */ 352 /* Low order bits define the core id (index of core in socket) */
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
365 if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) 364 if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
366 return; 365 return;
367 366
368 socket_id = (c->apicid >> bits) - 1; 367 per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
369 core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
370
371 per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
372#endif 368#endif
373} 369}
374 370
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9bd910a7dd0a..cc9e980c68ec 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -979,6 +979,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
979} 979}
980 980
981/* 981/*
982 * The physical to logical package id mapping is initialized from the
983 * acpi/mptables information. Make sure that CPUID actually agrees with
984 * that.
985 */
986static void sanitize_package_id(struct cpuinfo_x86 *c)
987{
988#ifdef CONFIG_SMP
989 unsigned int pkg, apicid, cpu = smp_processor_id();
990
991 apicid = apic->cpu_present_to_apicid(cpu);
992 pkg = apicid >> boot_cpu_data.x86_coreid_bits;
993
994 if (apicid != c->initial_apicid) {
995 pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
996 cpu, apicid, c->initial_apicid);
997 c->initial_apicid = apicid;
998 }
999 if (pkg != c->phys_proc_id) {
1000 pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
1001 cpu, pkg, c->phys_proc_id);
1002 c->phys_proc_id = pkg;
1003 }
1004 c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
1005#else
1006 c->logical_proc_id = 0;
1007#endif
1008}
1009
1010/*
982 * This does the hard work of actually picking apart the CPU stuff... 1011 * This does the hard work of actually picking apart the CPU stuff...
983 */ 1012 */
984static void identify_cpu(struct cpuinfo_x86 *c) 1013static void identify_cpu(struct cpuinfo_x86 *c)
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
1103#ifdef CONFIG_NUMA 1132#ifdef CONFIG_NUMA
1104 numa_add_cpu(smp_processor_id()); 1133 numa_add_cpu(smp_processor_id());
1105#endif 1134#endif
1106 /* The boot/hotplug time assigment got cleared, restore it */ 1135 sanitize_package_id(c);
1107 c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
1108} 1136}
1109 1137
1110/* 1138/*
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4e95d3eb2955..cbd7b92585bb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5045,7 +5045,7 @@ done_prefixes:
5045 /* Decode and fetch the destination operand: register or memory. */ 5045 /* Decode and fetch the destination operand: register or memory. */
5046 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); 5046 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5047 5047
5048 if (ctxt->rip_relative) 5048 if (ctxt->rip_relative && likely(ctxt->memopp))
5049 ctxt->memopp->addr.mem.ea = address_mask(ctxt, 5049 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5050 ctxt->memopp->addr.mem.ea + ctxt->_eip); 5050 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5051 5051
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f8157a36ab09..8ca1eca5038d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1138,21 +1138,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1138 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 1138 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1139} 1139}
1140 1140
1141static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
1142{
1143 struct vcpu_svm *svm = to_svm(vcpu);
1144
1145 svm->vmcb->control.tsc_offset += adjustment;
1146 if (is_guest_mode(vcpu))
1147 svm->nested.hsave->control.tsc_offset += adjustment;
1148 else
1149 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1150 svm->vmcb->control.tsc_offset - adjustment,
1151 svm->vmcb->control.tsc_offset);
1152
1153 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1154}
1155
1156static void avic_init_vmcb(struct vcpu_svm *svm) 1141static void avic_init_vmcb(struct vcpu_svm *svm)
1157{ 1142{
1158 struct vmcb *vmcb = svm->vmcb; 1143 struct vmcb *vmcb = svm->vmcb;
@@ -3449,12 +3434,6 @@ static int cr8_write_interception(struct vcpu_svm *svm)
3449 return 0; 3434 return 0;
3450} 3435}
3451 3436
3452static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
3453{
3454 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
3455 return vmcb->control.tsc_offset + host_tsc;
3456}
3457
3458static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3437static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3459{ 3438{
3460 struct vcpu_svm *svm = to_svm(vcpu); 3439 struct vcpu_svm *svm = to_svm(vcpu);
@@ -5422,8 +5401,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
5422 .has_wbinvd_exit = svm_has_wbinvd_exit, 5401 .has_wbinvd_exit = svm_has_wbinvd_exit,
5423 5402
5424 .write_tsc_offset = svm_write_tsc_offset, 5403 .write_tsc_offset = svm_write_tsc_offset,
5425 .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
5426 .read_l1_tsc = svm_read_l1_tsc,
5427 5404
5428 .set_tdp_cr3 = set_tdp_cr3, 5405 .set_tdp_cr3 = set_tdp_cr3,
5429 5406
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cf1b16dbc98a..5382b82462fc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -187,6 +187,7 @@ struct vmcs {
187 */ 187 */
188struct loaded_vmcs { 188struct loaded_vmcs {
189 struct vmcs *vmcs; 189 struct vmcs *vmcs;
190 struct vmcs *shadow_vmcs;
190 int cpu; 191 int cpu;
191 int launched; 192 int launched;
192 struct list_head loaded_vmcss_on_cpu_link; 193 struct list_head loaded_vmcss_on_cpu_link;
@@ -411,7 +412,6 @@ struct nested_vmx {
411 * memory during VMXOFF, VMCLEAR, VMPTRLD. 412 * memory during VMXOFF, VMCLEAR, VMPTRLD.
412 */ 413 */
413 struct vmcs12 *cached_vmcs12; 414 struct vmcs12 *cached_vmcs12;
414 struct vmcs *current_shadow_vmcs;
415 /* 415 /*
416 * Indicates if the shadow vmcs must be updated with the 416 * Indicates if the shadow vmcs must be updated with the
417 * data hold by vmcs12 417 * data hold by vmcs12
@@ -421,7 +421,6 @@ struct nested_vmx {
421 /* vmcs02_list cache of VMCSs recently used to run L2 guests */ 421 /* vmcs02_list cache of VMCSs recently used to run L2 guests */
422 struct list_head vmcs02_pool; 422 struct list_head vmcs02_pool;
423 int vmcs02_num; 423 int vmcs02_num;
424 u64 vmcs01_tsc_offset;
425 bool change_vmcs01_virtual_x2apic_mode; 424 bool change_vmcs01_virtual_x2apic_mode;
426 /* L2 must run next, and mustn't decide to exit to L1. */ 425 /* L2 must run next, and mustn't decide to exit to L1. */
427 bool nested_run_pending; 426 bool nested_run_pending;
@@ -1419,6 +1418,8 @@ static void vmcs_clear(struct vmcs *vmcs)
1419static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) 1418static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1420{ 1419{
1421 vmcs_clear(loaded_vmcs->vmcs); 1420 vmcs_clear(loaded_vmcs->vmcs);
1421 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1422 vmcs_clear(loaded_vmcs->shadow_vmcs);
1422 loaded_vmcs->cpu = -1; 1423 loaded_vmcs->cpu = -1;
1423 loaded_vmcs->launched = 0; 1424 loaded_vmcs->launched = 0;
1424} 1425}
@@ -2605,20 +2606,6 @@ static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2605} 2606}
2606 2607
2607/* 2608/*
2608 * Like guest_read_tsc, but always returns L1's notion of the timestamp
2609 * counter, even if a nested guest (L2) is currently running.
2610 */
2611static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2612{
2613 u64 tsc_offset;
2614
2615 tsc_offset = is_guest_mode(vcpu) ?
2616 to_vmx(vcpu)->nested.vmcs01_tsc_offset :
2617 vmcs_read64(TSC_OFFSET);
2618 return host_tsc + tsc_offset;
2619}
2620
2621/*
2622 * writes 'offset' into guest's timestamp counter offset register 2609 * writes 'offset' into guest's timestamp counter offset register
2623 */ 2610 */
2624static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 2611static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
@@ -2631,7 +2618,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2631 * to the newly set TSC to get L2's TSC. 2618 * to the newly set TSC to get L2's TSC.
2632 */ 2619 */
2633 struct vmcs12 *vmcs12; 2620 struct vmcs12 *vmcs12;
2634 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
2635 /* recalculate vmcs02.TSC_OFFSET: */ 2621 /* recalculate vmcs02.TSC_OFFSET: */
2636 vmcs12 = get_vmcs12(vcpu); 2622 vmcs12 = get_vmcs12(vcpu);
2637 vmcs_write64(TSC_OFFSET, offset + 2623 vmcs_write64(TSC_OFFSET, offset +
@@ -2644,19 +2630,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2644 } 2630 }
2645} 2631}
2646 2632
2647static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
2648{
2649 u64 offset = vmcs_read64(TSC_OFFSET);
2650
2651 vmcs_write64(TSC_OFFSET, offset + adjustment);
2652 if (is_guest_mode(vcpu)) {
2653 /* Even when running L2, the adjustment needs to apply to L1 */
2654 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
2655 } else
2656 trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
2657 offset + adjustment);
2658}
2659
2660static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) 2633static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
2661{ 2634{
2662 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); 2635 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -3562,6 +3535,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3562 loaded_vmcs_clear(loaded_vmcs); 3535 loaded_vmcs_clear(loaded_vmcs);
3563 free_vmcs(loaded_vmcs->vmcs); 3536 free_vmcs(loaded_vmcs->vmcs);
3564 loaded_vmcs->vmcs = NULL; 3537 loaded_vmcs->vmcs = NULL;
3538 WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
3565} 3539}
3566 3540
3567static void free_kvm_area(void) 3541static void free_kvm_area(void)
@@ -6696,6 +6670,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
6696 if (!item) 6670 if (!item)
6697 return NULL; 6671 return NULL;
6698 item->vmcs02.vmcs = alloc_vmcs(); 6672 item->vmcs02.vmcs = alloc_vmcs();
6673 item->vmcs02.shadow_vmcs = NULL;
6699 if (!item->vmcs02.vmcs) { 6674 if (!item->vmcs02.vmcs) {
6700 kfree(item); 6675 kfree(item);
6701 return NULL; 6676 return NULL;
@@ -7072,7 +7047,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
7072 shadow_vmcs->revision_id |= (1u << 31); 7047 shadow_vmcs->revision_id |= (1u << 31);
7073 /* init shadow vmcs */ 7048 /* init shadow vmcs */
7074 vmcs_clear(shadow_vmcs); 7049 vmcs_clear(shadow_vmcs);
7075 vmx->nested.current_shadow_vmcs = shadow_vmcs; 7050 vmx->vmcs01.shadow_vmcs = shadow_vmcs;
7076 } 7051 }
7077 7052
7078 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); 7053 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -7174,8 +7149,11 @@ static void free_nested(struct vcpu_vmx *vmx)
7174 free_page((unsigned long)vmx->nested.msr_bitmap); 7149 free_page((unsigned long)vmx->nested.msr_bitmap);
7175 vmx->nested.msr_bitmap = NULL; 7150 vmx->nested.msr_bitmap = NULL;
7176 } 7151 }
7177 if (enable_shadow_vmcs) 7152 if (enable_shadow_vmcs) {
7178 free_vmcs(vmx->nested.current_shadow_vmcs); 7153 vmcs_clear(vmx->vmcs01.shadow_vmcs);
7154 free_vmcs(vmx->vmcs01.shadow_vmcs);
7155 vmx->vmcs01.shadow_vmcs = NULL;
7156 }
7179 kfree(vmx->nested.cached_vmcs12); 7157 kfree(vmx->nested.cached_vmcs12);
7180 /* Unpin physical memory we referred to in current vmcs02 */ 7158 /* Unpin physical memory we referred to in current vmcs02 */
7181 if (vmx->nested.apic_access_page) { 7159 if (vmx->nested.apic_access_page) {
@@ -7352,7 +7330,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
7352 int i; 7330 int i;
7353 unsigned long field; 7331 unsigned long field;
7354 u64 field_value; 7332 u64 field_value;
7355 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; 7333 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
7356 const unsigned long *fields = shadow_read_write_fields; 7334 const unsigned long *fields = shadow_read_write_fields;
7357 const int num_fields = max_shadow_read_write_fields; 7335 const int num_fields = max_shadow_read_write_fields;
7358 7336
@@ -7401,7 +7379,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
7401 int i, q; 7379 int i, q;
7402 unsigned long field; 7380 unsigned long field;
7403 u64 field_value = 0; 7381 u64 field_value = 0;
7404 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; 7382 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
7405 7383
7406 vmcs_load(shadow_vmcs); 7384 vmcs_load(shadow_vmcs);
7407 7385
@@ -7591,7 +7569,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
7591 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 7569 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
7592 SECONDARY_EXEC_SHADOW_VMCS); 7570 SECONDARY_EXEC_SHADOW_VMCS);
7593 vmcs_write64(VMCS_LINK_POINTER, 7571 vmcs_write64(VMCS_LINK_POINTER,
7594 __pa(vmx->nested.current_shadow_vmcs)); 7572 __pa(vmx->vmcs01.shadow_vmcs));
7595 vmx->nested.sync_shadow_vmcs = true; 7573 vmx->nested.sync_shadow_vmcs = true;
7596 } 7574 }
7597 } 7575 }
@@ -7659,7 +7637,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
7659 7637
7660 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 7638 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
7661 7639
7662 if (!(types & (1UL << type))) { 7640 if (type >= 32 || !(types & (1 << type))) {
7663 nested_vmx_failValid(vcpu, 7641 nested_vmx_failValid(vcpu,
7664 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7642 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7665 skip_emulated_instruction(vcpu); 7643 skip_emulated_instruction(vcpu);
@@ -7722,7 +7700,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
7722 7700
7723 types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7; 7701 types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
7724 7702
7725 if (!(types & (1UL << type))) { 7703 if (type >= 32 || !(types & (1 << type))) {
7726 nested_vmx_failValid(vcpu, 7704 nested_vmx_failValid(vcpu,
7727 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7705 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7728 skip_emulated_instruction(vcpu); 7706 skip_emulated_instruction(vcpu);
@@ -9156,6 +9134,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
9156 9134
9157 vmx->loaded_vmcs = &vmx->vmcs01; 9135 vmx->loaded_vmcs = &vmx->vmcs01;
9158 vmx->loaded_vmcs->vmcs = alloc_vmcs(); 9136 vmx->loaded_vmcs->vmcs = alloc_vmcs();
9137 vmx->loaded_vmcs->shadow_vmcs = NULL;
9159 if (!vmx->loaded_vmcs->vmcs) 9138 if (!vmx->loaded_vmcs->vmcs)
9160 goto free_msrs; 9139 goto free_msrs;
9161 if (!vmm_exclusive) 9140 if (!vmm_exclusive)
@@ -10061,9 +10040,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
10061 10040
10062 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 10041 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
10063 vmcs_write64(TSC_OFFSET, 10042 vmcs_write64(TSC_OFFSET,
10064 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); 10043 vcpu->arch.tsc_offset + vmcs12->tsc_offset);
10065 else 10044 else
10066 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); 10045 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
10067 if (kvm_has_tsc_control) 10046 if (kvm_has_tsc_control)
10068 decache_tsc_multiplier(vmx); 10047 decache_tsc_multiplier(vmx);
10069 10048
@@ -10293,8 +10272,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10293 10272
10294 enter_guest_mode(vcpu); 10273 enter_guest_mode(vcpu);
10295 10274
10296 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
10297
10298 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 10275 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
10299 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 10276 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
10300 10277
@@ -10818,7 +10795,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
10818 load_vmcs12_host_state(vcpu, vmcs12); 10795 load_vmcs12_host_state(vcpu, vmcs12);
10819 10796
10820 /* Update any VMCS fields that might have changed while L2 ran */ 10797 /* Update any VMCS fields that might have changed while L2 ran */
10821 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); 10798 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
10822 if (vmx->hv_deadline_tsc == -1) 10799 if (vmx->hv_deadline_tsc == -1)
10823 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, 10800 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
10824 PIN_BASED_VMX_PREEMPTION_TIMER); 10801 PIN_BASED_VMX_PREEMPTION_TIMER);
@@ -11339,8 +11316,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
11339 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 11316 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
11340 11317
11341 .write_tsc_offset = vmx_write_tsc_offset, 11318 .write_tsc_offset = vmx_write_tsc_offset,
11342 .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
11343 .read_l1_tsc = vmx_read_l1_tsc,
11344 11319
11345 .set_tdp_cr3 = vmx_set_cr3, 11320 .set_tdp_cr3 = vmx_set_cr3,
11346 11321
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e375235d81c9..3017de0431bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1409,7 +1409,7 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1409 1409
1410u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 1410u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1411{ 1411{
1412 return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc)); 1412 return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
1413} 1413}
1414EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 1414EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1415 1415
@@ -1547,7 +1547,7 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
1547static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 1547static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
1548 s64 adjustment) 1548 s64 adjustment)
1549{ 1549{
1550 kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); 1550 kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
1551} 1551}
1552 1552
1553static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 1553static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -1555,7 +1555,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
1555 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) 1555 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
1556 WARN_ON(adjustment < 0); 1556 WARN_ON(adjustment < 0);
1557 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment); 1557 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
1558 kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); 1558 adjust_tsc_offset_guest(vcpu, adjustment);
1559} 1559}
1560 1560
1561#ifdef CONFIG_X86_64 1561#ifdef CONFIG_X86_64
@@ -2262,7 +2262,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2262 /* Drop writes to this legacy MSR -- see rdmsr 2262 /* Drop writes to this legacy MSR -- see rdmsr
2263 * counterpart for further detail. 2263 * counterpart for further detail.
2264 */ 2264 */
2265 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); 2265 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
2266 break; 2266 break;
2267 case MSR_AMD64_OSVW_ID_LENGTH: 2267 case MSR_AMD64_OSVW_ID_LENGTH:
2268 if (!guest_cpuid_has_osvw(vcpu)) 2268 if (!guest_cpuid_has_osvw(vcpu))
@@ -2280,11 +2280,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2280 if (kvm_pmu_is_valid_msr(vcpu, msr)) 2280 if (kvm_pmu_is_valid_msr(vcpu, msr))
2281 return kvm_pmu_set_msr(vcpu, msr_info); 2281 return kvm_pmu_set_msr(vcpu, msr_info);
2282 if (!ignore_msrs) { 2282 if (!ignore_msrs) {
2283 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", 2283 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
2284 msr, data); 2284 msr, data);
2285 return 1; 2285 return 1;
2286 } else { 2286 } else {
2287 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", 2287 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
2288 msr, data); 2288 msr, data);
2289 break; 2289 break;
2290 } 2290 }
@@ -7410,10 +7410,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7410 7410
7411void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 7411void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
7412{ 7412{
7413 void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
7414
7413 kvmclock_reset(vcpu); 7415 kvmclock_reset(vcpu);
7414 7416
7415 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
7416 kvm_x86_ops->vcpu_free(vcpu); 7417 kvm_x86_ops->vcpu_free(vcpu);
7418 free_cpumask_var(wbinvd_dirty_mask);
7417} 7419}
7418 7420
7419struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 7421struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index bf99aa7005eb..936a488d6cf6 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void)
861 int count = 0, pg_shift = 0; 861 int count = 0, pg_shift = 0;
862 void *new_memmap = NULL; 862 void *new_memmap = NULL;
863 efi_status_t status; 863 efi_status_t status;
864 phys_addr_t pa; 864 unsigned long pa;
865 865
866 efi.systab = NULL; 866 efi.systab = NULL;
867 867
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 58b0f801f66f..319148bd4b05 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -31,6 +31,7 @@
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/reboot.h> 32#include <linux/reboot.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/ucs2_string.h>
34 35
35#include <asm/setup.h> 36#include <asm/setup.h>
36#include <asm/page.h> 37#include <asm/page.h>
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void)
211 memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); 212 memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
212} 213}
213 214
215/*
216 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
217 */
218static inline phys_addr_t
219virt_to_phys_or_null_size(void *va, unsigned long size)
220{
221 bool bad_size;
222
223 if (!va)
224 return 0;
225
226 if (virt_addr_valid(va))
227 return virt_to_phys(va);
228
229 /*
230 * A fully aligned variable on the stack is guaranteed not to
231 * cross a page bounary. Try to catch strings on the stack by
232 * checking that 'size' is a power of two.
233 */
234 bad_size = size > PAGE_SIZE || !is_power_of_2(size);
235
236 WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
237
238 return slow_virt_to_phys(va);
239}
240
241#define virt_to_phys_or_null(addr) \
242 virt_to_phys_or_null_size((addr), sizeof(*(addr)))
243
214int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) 244int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
215{ 245{
216 unsigned long pfn, text; 246 unsigned long pfn, text;
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
494 524
495 spin_lock(&rtc_lock); 525 spin_lock(&rtc_lock);
496 526
497 phys_tm = virt_to_phys(tm); 527 phys_tm = virt_to_phys_or_null(tm);
498 phys_tc = virt_to_phys(tc); 528 phys_tc = virt_to_phys_or_null(tc);
499 529
500 status = efi_thunk(get_time, phys_tm, phys_tc); 530 status = efi_thunk(get_time, phys_tm, phys_tc);
501 531
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
511 541
512 spin_lock(&rtc_lock); 542 spin_lock(&rtc_lock);
513 543
514 phys_tm = virt_to_phys(tm); 544 phys_tm = virt_to_phys_or_null(tm);
515 545
516 status = efi_thunk(set_time, phys_tm); 546 status = efi_thunk(set_time, phys_tm);
517 547
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
529 559
530 spin_lock(&rtc_lock); 560 spin_lock(&rtc_lock);
531 561
532 phys_enabled = virt_to_phys(enabled); 562 phys_enabled = virt_to_phys_or_null(enabled);
533 phys_pending = virt_to_phys(pending); 563 phys_pending = virt_to_phys_or_null(pending);
534 phys_tm = virt_to_phys(tm); 564 phys_tm = virt_to_phys_or_null(tm);
535 565
536 status = efi_thunk(get_wakeup_time, phys_enabled, 566 status = efi_thunk(get_wakeup_time, phys_enabled,
537 phys_pending, phys_tm); 567 phys_pending, phys_tm);
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
549 579
550 spin_lock(&rtc_lock); 580 spin_lock(&rtc_lock);
551 581
552 phys_tm = virt_to_phys(tm); 582 phys_tm = virt_to_phys_or_null(tm);
553 583
554 status = efi_thunk(set_wakeup_time, enabled, phys_tm); 584 status = efi_thunk(set_wakeup_time, enabled, phys_tm);
555 585
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
558 return status; 588 return status;
559} 589}
560 590
591static unsigned long efi_name_size(efi_char16_t *name)
592{
593 return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
594}
561 595
562static efi_status_t 596static efi_status_t
563efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, 597efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
567 u32 phys_name, phys_vendor, phys_attr; 601 u32 phys_name, phys_vendor, phys_attr;
568 u32 phys_data_size, phys_data; 602 u32 phys_data_size, phys_data;
569 603
570 phys_data_size = virt_to_phys(data_size); 604 phys_data_size = virt_to_phys_or_null(data_size);
571 phys_vendor = virt_to_phys(vendor); 605 phys_vendor = virt_to_phys_or_null(vendor);
572 phys_name = virt_to_phys(name); 606 phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
573 phys_attr = virt_to_phys(attr); 607 phys_attr = virt_to_phys_or_null(attr);
574 phys_data = virt_to_phys(data); 608 phys_data = virt_to_phys_or_null_size(data, *data_size);
575 609
576 status = efi_thunk(get_variable, phys_name, phys_vendor, 610 status = efi_thunk(get_variable, phys_name, phys_vendor,
577 phys_attr, phys_data_size, phys_data); 611 phys_attr, phys_data_size, phys_data);
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
586 u32 phys_name, phys_vendor, phys_data; 620 u32 phys_name, phys_vendor, phys_data;
587 efi_status_t status; 621 efi_status_t status;
588 622
589 phys_name = virt_to_phys(name); 623 phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
590 phys_vendor = virt_to_phys(vendor); 624 phys_vendor = virt_to_phys_or_null(vendor);
591 phys_data = virt_to_phys(data); 625 phys_data = virt_to_phys_or_null_size(data, data_size);
592 626
593 /* If data_size is > sizeof(u32) we've got problems */ 627 /* If data_size is > sizeof(u32) we've got problems */
594 status = efi_thunk(set_variable, phys_name, phys_vendor, 628 status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
605 efi_status_t status; 639 efi_status_t status;
606 u32 phys_name_size, phys_name, phys_vendor; 640 u32 phys_name_size, phys_name, phys_vendor;
607 641
608 phys_name_size = virt_to_phys(name_size); 642 phys_name_size = virt_to_phys_or_null(name_size);
609 phys_vendor = virt_to_phys(vendor); 643 phys_vendor = virt_to_phys_or_null(vendor);
610 phys_name = virt_to_phys(name); 644 phys_name = virt_to_phys_or_null_size(name, *name_size);
611 645
612 status = efi_thunk(get_next_variable, phys_name_size, 646 status = efi_thunk(get_next_variable, phys_name_size,
613 phys_name, phys_vendor); 647 phys_name, phys_vendor);
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count)
621 efi_status_t status; 655 efi_status_t status;
622 u32 phys_count; 656 u32 phys_count;
623 657
624 phys_count = virt_to_phys(count); 658 phys_count = virt_to_phys_or_null(count);
625 status = efi_thunk(get_next_high_mono_count, phys_count); 659 status = efi_thunk(get_next_high_mono_count, phys_count);
626 660
627 return status; 661 return status;
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
633{ 667{
634 u32 phys_data; 668 u32 phys_data;
635 669
636 phys_data = virt_to_phys(data); 670 phys_data = virt_to_phys_or_null_size(data, data_size);
637 671
638 efi_thunk(reset_system, reset_type, status, data_size, phys_data); 672 efi_thunk(reset_system, reset_type, status, data_size, phys_data);
639} 673}
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
661 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 695 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
662 return EFI_UNSUPPORTED; 696 return EFI_UNSUPPORTED;
663 697
664 phys_storage = virt_to_phys(storage_space); 698 phys_storage = virt_to_phys_or_null(storage_space);
665 phys_remaining = virt_to_phys(remaining_space); 699 phys_remaining = virt_to_phys_or_null(remaining_space);
666 phys_max = virt_to_phys(max_variable_size); 700 phys_max = virt_to_phys_or_null(max_variable_size);
667 701
668 status = efi_thunk(query_variable_info, attr, phys_storage, 702 status = efi_thunk(query_variable_info, attr, phys_storage,
669 phys_remaining, phys_max); 703 phys_remaining, phys_max);
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index 5d3b45ad1c03..67375dda451c 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
272} 272}
273EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state); 273EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
274 274
275pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
276{
277 struct mid_pwr *pwr = midpwr;
278 int id, reg, bit;
279 u32 power;
280
281 if (!pwr || !pwr->available)
282 return PCI_UNKNOWN;
283
284 id = intel_mid_pwr_get_lss_id(pdev);
285 if (id < 0)
286 return PCI_UNKNOWN;
287
288 reg = (id * LSS_PWS_BITS) / 32;
289 bit = (id * LSS_PWS_BITS) % 32;
290 power = mid_pwr_get_state(pwr, reg);
291 return (__force pci_power_t)((power >> bit) & 3);
292}
293
275void intel_mid_pwr_power_off(void) 294void intel_mid_pwr_power_off(void)
276{ 295{
277 struct mid_pwr *pwr = midpwr; 296 struct mid_pwr *pwr = midpwr;
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d58fbf7f04e6..7dd70927991e 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -122,7 +122,7 @@ static int acpi_apd_create_device(struct acpi_device *adev,
122 int ret; 122 int ret;
123 123
124 if (!dev_desc) { 124 if (!dev_desc) {
125 pdev = acpi_create_platform_device(adev); 125 pdev = acpi_create_platform_device(adev, NULL);
126 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; 126 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
127 } 127 }
128 128
@@ -139,14 +139,8 @@ static int acpi_apd_create_device(struct acpi_device *adev,
139 goto err_out; 139 goto err_out;
140 } 140 }
141 141
142 if (dev_desc->properties) {
143 ret = device_add_properties(&adev->dev, dev_desc->properties);
144 if (ret)
145 goto err_out;
146 }
147
148 adev->driver_data = pdata; 142 adev->driver_data = pdata;
149 pdev = acpi_create_platform_device(adev); 143 pdev = acpi_create_platform_device(adev, dev_desc->properties);
150 if (!IS_ERR_OR_NULL(pdev)) 144 if (!IS_ERR_OR_NULL(pdev))
151 return 1; 145 return 1;
152 146
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 552010288135..373657f7e35a 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
395 395
396 dev_desc = (const struct lpss_device_desc *)id->driver_data; 396 dev_desc = (const struct lpss_device_desc *)id->driver_data;
397 if (!dev_desc) { 397 if (!dev_desc) {
398 pdev = acpi_create_platform_device(adev); 398 pdev = acpi_create_platform_device(adev, NULL);
399 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; 399 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
400 } 400 }
401 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 401 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
451 goto err_out; 451 goto err_out;
452 } 452 }
453 453
454 if (dev_desc->properties) {
455 ret = device_add_properties(&adev->dev, dev_desc->properties);
456 if (ret)
457 goto err_out;
458 }
459
460 adev->driver_data = pdata; 454 adev->driver_data = pdata;
461 pdev = acpi_create_platform_device(adev); 455 pdev = acpi_create_platform_device(adev, dev_desc->properties);
462 if (!IS_ERR_OR_NULL(pdev)) { 456 if (!IS_ERR_OR_NULL(pdev)) {
463 return 1; 457 return 1;
464 } 458 }
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b200ae1f3c6f..b4c1a6a51da4 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
50/** 50/**
51 * acpi_create_platform_device - Create platform device for ACPI device node 51 * acpi_create_platform_device - Create platform device for ACPI device node
52 * @adev: ACPI device node to create a platform device for. 52 * @adev: ACPI device node to create a platform device for.
53 * @properties: Optional collection of build-in properties.
53 * 54 *
54 * Check if the given @adev can be represented as a platform device and, if 55 * Check if the given @adev can be represented as a platform device and, if
55 * that's the case, create and register a platform device, populate its common 56 * that's the case, create and register a platform device, populate its common
@@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
57 * 58 *
58 * Name of the platform device will be the same as @adev's. 59 * Name of the platform device will be the same as @adev's.
59 */ 60 */
60struct platform_device *acpi_create_platform_device(struct acpi_device *adev) 61struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
62 struct property_entry *properties)
61{ 63{
62 struct platform_device *pdev = NULL; 64 struct platform_device *pdev = NULL;
63 struct platform_device_info pdevinfo; 65 struct platform_device_info pdevinfo;
@@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
106 pdevinfo.res = resources; 108 pdevinfo.res = resources;
107 pdevinfo.num_res = count; 109 pdevinfo.num_res = count;
108 pdevinfo.fwnode = acpi_fwnode_handle(adev); 110 pdevinfo.fwnode = acpi_fwnode_handle(adev);
111 pdevinfo.properties = properties;
109 112
110 if (acpi_dma_supported(adev)) 113 if (acpi_dma_supported(adev))
111 pdevinfo.dma_mask = DMA_BIT_MASK(32); 114 pdevinfo.dma_mask = DMA_BIT_MASK(32);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 33505c651f62..86364097e236 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
34 const struct acpi_device_id *id) 34 const struct acpi_device_id *id)
35{ 35{
36 if (IS_ENABLED(CONFIG_INT340X_THERMAL)) 36 if (IS_ENABLED(CONFIG_INT340X_THERMAL))
37 acpi_create_platform_device(adev); 37 acpi_create_platform_device(adev, NULL);
38 /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ 38 /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
39 else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && 39 else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
40 id->driver_data == INT3401_DEVICE) 40 id->driver_data == INT3401_DEVICE)
41 acpi_create_platform_device(adev); 41 acpi_create_platform_device(adev, NULL);
42 return 1; 42 return 1;
43} 43}
44 44
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 035ac646d8db..3d1856f1f4d0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device)
1734 &is_spi_i2c_slave); 1734 &is_spi_i2c_slave);
1735 acpi_dev_free_resource_list(&resource_list); 1735 acpi_dev_free_resource_list(&resource_list);
1736 if (!is_spi_i2c_slave) { 1736 if (!is_spi_i2c_slave) {
1737 acpi_create_platform_device(device); 1737 acpi_create_platform_device(device, NULL);
1738 acpi_device_set_enumerated(device); 1738 acpi_device_set_enumerated(device);
1739 } else { 1739 } else {
1740 blocking_notifier_call_chain(&acpi_reconfig_chain, 1740 blocking_notifier_call_chain(&acpi_reconfig_chain,
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d22a7260f42b..d76cd97a98b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -324,7 +324,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
324{ 324{
325 int ret = -EPROBE_DEFER; 325 int ret = -EPROBE_DEFER;
326 int local_trigger_count = atomic_read(&deferred_trigger_count); 326 int local_trigger_count = atomic_read(&deferred_trigger_count);
327 bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE); 327 bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
328 !drv->suppress_bind_attrs;
328 329
329 if (defer_all_probes) { 330 if (defer_all_probes) {
330 /* 331 /*
@@ -383,7 +384,7 @@ re_probe:
383 if (test_remove) { 384 if (test_remove) {
384 test_remove = false; 385 test_remove = false;
385 386
386 if (dev->bus && dev->bus->remove) 387 if (dev->bus->remove)
387 dev->bus->remove(dev); 388 dev->bus->remove(dev);
388 else if (drv->remove) 389 else if (drv->remove)
389 drv->remove(dev); 390 drv->remove(dev);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e44944f4be77..2932a5bd892f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
1027 TRACE_DEVICE(dev); 1027 TRACE_DEVICE(dev);
1028 TRACE_SUSPEND(0); 1028 TRACE_SUSPEND(0);
1029 1029
1030 dpm_wait_for_children(dev, async);
1031
1030 if (async_error) 1032 if (async_error)
1031 goto Complete; 1033 goto Complete;
1032 1034
@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
1038 if (dev->power.syscore || dev->power.direct_complete) 1040 if (dev->power.syscore || dev->power.direct_complete)
1039 goto Complete; 1041 goto Complete;
1040 1042
1041 dpm_wait_for_children(dev, async);
1042
1043 if (dev->pm_domain) { 1043 if (dev->pm_domain) {
1044 info = "noirq power domain "; 1044 info = "noirq power domain ";
1045 callback = pm_noirq_op(&dev->pm_domain->ops, state); 1045 callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
1174 1174
1175 __pm_runtime_disable(dev, false); 1175 __pm_runtime_disable(dev, false);
1176 1176
1177 dpm_wait_for_children(dev, async);
1178
1177 if (async_error) 1179 if (async_error)
1178 goto Complete; 1180 goto Complete;
1179 1181
@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
1185 if (dev->power.syscore || dev->power.direct_complete) 1187 if (dev->power.syscore || dev->power.direct_complete)
1186 goto Complete; 1188 goto Complete;
1187 1189
1188 dpm_wait_for_children(dev, async);
1189
1190 if (dev->pm_domain) { 1190 if (dev->pm_domain) {
1191 info = "late power domain "; 1191 info = "late power domain ";
1192 callback = pm_late_early_op(&dev->pm_domain->ops, state); 1192 callback = pm_late_early_op(&dev->pm_domain->ops, state);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ab19adb07a12..3c606c09fd5a 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -853,45 +853,6 @@ rqbiocnt(struct request *r)
853 return n; 853 return n;
854} 854}
855 855
856/* This can be removed if we are certain that no users of the block
857 * layer will ever use zero-count pages in bios. Otherwise we have to
858 * protect against the put_page sometimes done by the network layer.
859 *
860 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
861 * discussion.
862 *
863 * We cannot use get_page in the workaround, because it insists on a
864 * positive page count as a precondition. So we use _refcount directly.
865 */
866static void
867bio_pageinc(struct bio *bio)
868{
869 struct bio_vec bv;
870 struct page *page;
871 struct bvec_iter iter;
872
873 bio_for_each_segment(bv, bio, iter) {
874 /* Non-zero page count for non-head members of
875 * compound pages is no longer allowed by the kernel.
876 */
877 page = compound_head(bv.bv_page);
878 page_ref_inc(page);
879 }
880}
881
882static void
883bio_pagedec(struct bio *bio)
884{
885 struct page *page;
886 struct bio_vec bv;
887 struct bvec_iter iter;
888
889 bio_for_each_segment(bv, bio, iter) {
890 page = compound_head(bv.bv_page);
891 page_ref_dec(page);
892 }
893}
894
895static void 856static void
896bufinit(struct buf *buf, struct request *rq, struct bio *bio) 857bufinit(struct buf *buf, struct request *rq, struct bio *bio)
897{ 858{
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
899 buf->rq = rq; 860 buf->rq = rq;
900 buf->bio = bio; 861 buf->bio = bio;
901 buf->iter = bio->bi_iter; 862 buf->iter = bio->bi_iter;
902 bio_pageinc(bio);
903} 863}
904 864
905static struct buf * 865static struct buf *
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf)
1127 if (buf == d->ip.buf) 1087 if (buf == d->ip.buf)
1128 d->ip.buf = NULL; 1088 d->ip.buf = NULL;
1129 rq = buf->rq; 1089 rq = buf->rq;
1130 bio_pagedec(buf->bio);
1131 mempool_free(buf, d->bufpool); 1090 mempool_free(buf, d->bufpool);
1132 n = (unsigned long) rq->special; 1091 n = (unsigned long) rq->special;
1133 rq->special = (void *) --n; 1092 rq->special = (void *) --n;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 100be556e613..83482721bc01 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
1871 drbd_update_congested(connection); 1871 drbd_update_congested(connection);
1872 } 1872 }
1873 do { 1873 do {
1874 rv = kernel_sendmsg(sock, &msg, &iov, 1, size); 1874 rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
1875 if (rv == -EAGAIN) { 1875 if (rv == -EAGAIN) {
1876 if (we_should_drop_the_connection(connection, sock)) 1876 if (we_should_drop_the_connection(connection, sock))
1877 break; 1877 break;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 19a16b2dbb91..7a1048755914 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -599,7 +599,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
599 return -EINVAL; 599 return -EINVAL;
600 600
601 sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0); 601 sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
602 if (!sreq) 602 if (IS_ERR(sreq))
603 return -ENOMEM; 603 return -ENOMEM;
604 604
605 mutex_unlock(&nbd->tx_lock); 605 mutex_unlock(&nbd->tx_lock);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2dc5c96c186a..5545a679abd8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
376 376
377static int init_vq(struct virtio_blk *vblk) 377static int init_vq(struct virtio_blk *vblk)
378{ 378{
379 int err = 0; 379 int err;
380 int i; 380 int i;
381 vq_callback_t **callbacks; 381 vq_callback_t **callbacks;
382 const char **names; 382 const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
390 if (err) 390 if (err)
391 num_vqs = 1; 391 num_vqs = 1;
392 392
393 vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); 393 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
394 if (!vblk->vqs) 394 if (!vblk->vqs)
395 return -ENOMEM; 395 return -ENOMEM;
396 396
397 names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); 397 names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
398 callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); 398 callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
399 vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); 399 vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
400 if (!names || !callbacks || !vqs) { 400 if (!names || !callbacks || !vqs) {
401 err = -ENOMEM; 401 err = -ENOMEM;
402 goto out; 402 goto out;
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d23368874710..6af1ce04b3da 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -748,10 +748,7 @@ static int pp_release(struct inode *inode, struct file *file)
748 } 748 }
749 749
750 if (pp->pdev) { 750 if (pp->pdev) {
751 const char *name = pp->pdev->name;
752
753 parport_unregister_device(pp->pdev); 751 parport_unregister_device(pp->pdev);
754 kfree(name);
755 pp->pdev = NULL; 752 pp->pdev = NULL;
756 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); 753 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
757 } 754 }
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 8de61876f633..3a9149cf0110 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -813,9 +813,6 @@ int tpm_do_selftest(struct tpm_chip *chip)
813 continue; 813 continue;
814 } 814 }
815 815
816 if (rc < TPM_HEADER_SIZE)
817 return -EFAULT;
818
819 if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { 816 if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
820 dev_info(&chip->dev, 817 dev_info(&chip->dev,
821 "TPM is disabled/deactivated (0x%X)\n", rc); 818 "TPM is disabled/deactivated (0x%X)\n", rc);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d433b1db1fdd..5649234b7316 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1539,19 +1539,29 @@ static void remove_port_data(struct port *port)
1539 spin_lock_irq(&port->inbuf_lock); 1539 spin_lock_irq(&port->inbuf_lock);
1540 /* Remove unused data this port might have received. */ 1540 /* Remove unused data this port might have received. */
1541 discard_port_data(port); 1541 discard_port_data(port);
1542 spin_unlock_irq(&port->inbuf_lock);
1542 1543
1543 /* Remove buffers we queued up for the Host to send us data in. */ 1544 /* Remove buffers we queued up for the Host to send us data in. */
1544 while ((buf = virtqueue_detach_unused_buf(port->in_vq))) 1545 do {
1545 free_buf(buf, true); 1546 spin_lock_irq(&port->inbuf_lock);
1546 spin_unlock_irq(&port->inbuf_lock); 1547 buf = virtqueue_detach_unused_buf(port->in_vq);
1548 spin_unlock_irq(&port->inbuf_lock);
1549 if (buf)
1550 free_buf(buf, true);
1551 } while (buf);
1547 1552
1548 spin_lock_irq(&port->outvq_lock); 1553 spin_lock_irq(&port->outvq_lock);
1549 reclaim_consumed_buffers(port); 1554 reclaim_consumed_buffers(port);
1555 spin_unlock_irq(&port->outvq_lock);
1550 1556
1551 /* Free pending buffers from the out-queue. */ 1557 /* Free pending buffers from the out-queue. */
1552 while ((buf = virtqueue_detach_unused_buf(port->out_vq))) 1558 do {
1553 free_buf(buf, true); 1559 spin_lock_irq(&port->outvq_lock);
1554 spin_unlock_irq(&port->outvq_lock); 1560 buf = virtqueue_detach_unused_buf(port->out_vq);
1561 spin_unlock_irq(&port->outvq_lock);
1562 if (buf)
1563 free_buf(buf, true);
1564 } while (buf);
1555} 1565}
1556 1566
1557/* 1567/*
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 20b105584f82..80ae2a51452d 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
700 struct mux_hwclock *hwc, 700 struct mux_hwclock *hwc,
701 const struct clk_ops *ops, 701 const struct clk_ops *ops,
702 unsigned long min_rate, 702 unsigned long min_rate,
703 unsigned long max_rate,
703 unsigned long pct80_rate, 704 unsigned long pct80_rate,
704 const char *fmt, int idx) 705 const char *fmt, int idx)
705{ 706{
@@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
728 continue; 729 continue;
729 if (rate < min_rate) 730 if (rate < min_rate)
730 continue; 731 continue;
732 if (rate > max_rate)
733 continue;
731 734
732 parent_names[j] = div->name; 735 parent_names[j] = div->name;
733 hwc->parent_to_clksel[j] = i; 736 hwc->parent_to_clksel[j] = i;
@@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
759 struct mux_hwclock *hwc; 762 struct mux_hwclock *hwc;
760 const struct clockgen_pll_div *div; 763 const struct clockgen_pll_div *div;
761 unsigned long plat_rate, min_rate; 764 unsigned long plat_rate, min_rate;
762 u64 pct80_rate; 765 u64 max_rate, pct80_rate;
763 u32 clksel; 766 u32 clksel;
764 767
765 hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); 768 hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
@@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
787 return NULL; 790 return NULL;
788 } 791 }
789 792
790 pct80_rate = clk_get_rate(div->clk); 793 max_rate = clk_get_rate(div->clk);
791 pct80_rate *= 8; 794 pct80_rate = max_rate * 8;
792 do_div(pct80_rate, 10); 795 do_div(pct80_rate, 10);
793 796
794 plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); 797 plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
@@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
798 else 801 else
799 min_rate = plat_rate / 2; 802 min_rate = plat_rate / 2;
800 803
801 return create_mux_common(cg, hwc, &cmux_ops, min_rate, 804 return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
802 pct80_rate, "cg-cmux%d", idx); 805 pct80_rate, "cg-cmux%d", idx);
803} 806}
804 807
@@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
813 hwc->reg = cg->regs + 0x20 * idx + 0x10; 816 hwc->reg = cg->regs + 0x20 * idx + 0x10;
814 hwc->info = cg->info.hwaccel[idx]; 817 hwc->info = cg->info.hwaccel[idx];
815 818
816 return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, 819 return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
817 "cg-hwaccel%d", idx); 820 "cg-hwaccel%d", idx);
818} 821}
819 822
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 5daddf5ecc4b..bc37030e38ba 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw)
463 struct xgene_clk *pclk = to_xgene_clk(hw); 463 struct xgene_clk *pclk = to_xgene_clk(hw);
464 unsigned long flags = 0; 464 unsigned long flags = 0;
465 u32 data; 465 u32 data;
466 phys_addr_t reg;
467 466
468 if (pclk->lock) 467 if (pclk->lock)
469 spin_lock_irqsave(pclk->lock, flags); 468 spin_lock_irqsave(pclk->lock, flags);
470 469
471 if (pclk->param.csr_reg != NULL) { 470 if (pclk->param.csr_reg != NULL) {
472 pr_debug("%s clock enabled\n", clk_hw_get_name(hw)); 471 pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
473 reg = __pa(pclk->param.csr_reg);
474 /* First enable the clock */ 472 /* First enable the clock */
475 data = xgene_clk_read(pclk->param.csr_reg + 473 data = xgene_clk_read(pclk->param.csr_reg +
476 pclk->param.reg_clk_offset); 474 pclk->param.reg_clk_offset);
477 data |= pclk->param.reg_clk_mask; 475 data |= pclk->param.reg_clk_mask;
478 xgene_clk_write(data, pclk->param.csr_reg + 476 xgene_clk_write(data, pclk->param.csr_reg +
479 pclk->param.reg_clk_offset); 477 pclk->param.reg_clk_offset);
480 pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n", 478 pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
481 clk_hw_get_name(hw), &reg, 479 clk_hw_get_name(hw),
482 pclk->param.reg_clk_offset, pclk->param.reg_clk_mask, 480 pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
483 data); 481 data);
484 482
@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
488 data &= ~pclk->param.reg_csr_mask; 486 data &= ~pclk->param.reg_csr_mask;
489 xgene_clk_write(data, pclk->param.csr_reg + 487 xgene_clk_write(data, pclk->param.csr_reg +
490 pclk->param.reg_csr_offset); 488 pclk->param.reg_csr_offset);
491 pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n", 489 pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
492 clk_hw_get_name(hw), &reg, 490 clk_hw_get_name(hw),
493 pclk->param.reg_csr_offset, pclk->param.reg_csr_mask, 491 pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
494 data); 492 data);
495 } 493 }
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 19f9b622981a..7a6acc3e4a92 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
223 temp64 *= mfn; 223 temp64 *= mfn;
224 do_div(temp64, mfd); 224 do_div(temp64, mfd);
225 225
226 return (parent_rate * div) + (u32)temp64; 226 return parent_rate * div + (unsigned long)temp64;
227} 227}
228 228
229static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, 229static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
247 do_div(temp64, parent_rate); 247 do_div(temp64, parent_rate);
248 mfn = temp64; 248 mfn = temp64;
249 249
250 return parent_rate * div + parent_rate * mfn / mfd; 250 temp64 = (u64)parent_rate;
251 temp64 *= mfn;
252 do_div(temp64, mfd);
253
254 return parent_rate * div + (unsigned long)temp64;
251} 255}
252 256
253static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate, 257static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 3a51fff1b0e7..9adaf48aea23 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
313 } 313 }
314 314
315 pxa_unit->apmu_base = of_iomap(np, 1); 315 pxa_unit->apmu_base = of_iomap(np, 1);
316 if (!pxa_unit->mpmu_base) { 316 if (!pxa_unit->apmu_base) {
317 pr_err("failed to map apmu registers\n"); 317 pr_err("failed to map apmu registers\n");
318 return; 318 return;
319 } 319 }
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 87f2317b2a00..f110c02e83cb 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
262 } 262 }
263 263
264 pxa_unit->apmu_base = of_iomap(np, 1); 264 pxa_unit->apmu_base = of_iomap(np, 1);
265 if (!pxa_unit->mpmu_base) { 265 if (!pxa_unit->apmu_base) {
266 pr_err("failed to map apmu registers\n"); 266 pr_err("failed to map apmu registers\n");
267 return; 267 return;
268 } 268 }
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index e22a67f76d93..64d1ef49caeb 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
282 } 282 }
283 283
284 pxa_unit->apmu_base = of_iomap(np, 1); 284 pxa_unit->apmu_base = of_iomap(np, 1);
285 if (!pxa_unit->mpmu_base) { 285 if (!pxa_unit->apmu_base) {
286 pr_err("failed to map apmu registers\n"); 286 pr_err("failed to map apmu registers\n");
287 return; 287 return;
288 } 288 }
@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
294 } 294 }
295 295
296 pxa_unit->apbcp_base = of_iomap(np, 3); 296 pxa_unit->apbcp_base = of_iomap(np, 3);
297 if (!pxa_unit->mpmu_base) { 297 if (!pxa_unit->apbcp_base) {
298 pr_err("failed to map apbcp registers\n"); 298 pr_err("failed to map apbcp registers\n");
299 return; 299 return;
300 } 300 }
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 8feba93672c5..e8075359366b 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
144 ddrclk->ddr_flag = ddr_flag; 144 ddrclk->ddr_flag = ddr_flag;
145 145
146 clk = clk_register(NULL, &ddrclk->hw); 146 clk = clk_register(NULL, &ddrclk->hw);
147 if (IS_ERR(clk)) { 147 if (IS_ERR(clk))
148 pr_err("%s: could not register ddrclk %s\n", __func__, name);
149 kfree(ddrclk); 148 kfree(ddrclk);
150 return NULL;
151 }
152 149
153 return clk; 150 return clk;
154} 151}
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 96fab6cfb202..6c6afb87b4ce 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -132,28 +132,34 @@ free_clkout:
132 pr_err("%s: failed to register clkout clock\n", __func__); 132 pr_err("%s: failed to register clkout clock\n", __func__);
133} 133}
134 134
135/*
136 * We use CLK_OF_DECLARE_DRIVER initialization method to avoid setting
137 * the OF_POPULATED flag on the pmu device tree node, so later the
138 * Exynos PMU platform device can be properly probed with PMU driver.
139 */
140
135static void __init exynos4_clkout_init(struct device_node *node) 141static void __init exynos4_clkout_init(struct device_node *node)
136{ 142{
137 exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK); 143 exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
138} 144}
139CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu", 145CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
140 exynos4_clkout_init); 146 exynos4_clkout_init);
141CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu", 147CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
142 exynos4_clkout_init); 148 exynos4_clkout_init);
143CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu", 149CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
144 exynos4_clkout_init); 150 exynos4_clkout_init);
145CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu", 151CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
146 exynos4_clkout_init); 152 exynos4_clkout_init);
147 153
148static void __init exynos5_clkout_init(struct device_node *node) 154static void __init exynos5_clkout_init(struct device_node *node)
149{ 155{
150 exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK); 156 exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
151} 157}
152CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu", 158CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
153 exynos5_clkout_init); 159 exynos5_clkout_init);
154CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu", 160CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
155 exynos5_clkout_init); 161 exynos5_clkout_init);
156CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu", 162CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
157 exynos5_clkout_init); 163 exynos5_clkout_init);
158CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu", 164CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
159 exynos5_clkout_init); 165 exynos5_clkout_init);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 363fc5ec1a4e..5d3640264f2d 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -73,13 +73,13 @@ struct rfc2734_header {
73 73
74#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) 74#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
75#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) 75#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
76#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) 76#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
77#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) 77#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
78#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) 78#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
79 79
80#define fwnet_set_hdr_lf(lf) ((lf) << 30) 80#define fwnet_set_hdr_lf(lf) ((lf) << 30)
81#define fwnet_set_hdr_ether_type(et) (et) 81#define fwnet_set_hdr_ether_type(et) (et)
82#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) 82#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
83#define fwnet_set_hdr_fg_off(fgo) (fgo) 83#define fwnet_set_hdr_fg_off(fgo) (fgo)
84 84
85#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) 85#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
578 int retval; 578 int retval;
579 u16 ether_type; 579 u16 ether_type;
580 580
581 if (len <= RFC2374_UNFRAG_HDR_SIZE)
582 return 0;
583
581 hdr.w0 = be32_to_cpu(buf[0]); 584 hdr.w0 = be32_to_cpu(buf[0]);
582 lf = fwnet_get_hdr_lf(&hdr); 585 lf = fwnet_get_hdr_lf(&hdr);
583 if (lf == RFC2374_HDR_UNFRAG) { 586 if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
602 return fwnet_finish_incoming_packet(net, skb, source_node_id, 605 return fwnet_finish_incoming_packet(net, skb, source_node_id,
603 is_broadcast, ether_type); 606 is_broadcast, ether_type);
604 } 607 }
608
605 /* A datagram fragment has been received, now the fun begins. */ 609 /* A datagram fragment has been received, now the fun begins. */
610
611 if (len <= RFC2374_FRAG_HDR_SIZE)
612 return 0;
613
606 hdr.w1 = ntohl(buf[1]); 614 hdr.w1 = ntohl(buf[1]);
607 buf += 2; 615 buf += 2;
608 len -= RFC2374_FRAG_HDR_SIZE; 616 len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
614 fg_off = fwnet_get_hdr_fg_off(&hdr); 622 fg_off = fwnet_get_hdr_fg_off(&hdr);
615 } 623 }
616 datagram_label = fwnet_get_hdr_dgl(&hdr); 624 datagram_label = fwnet_get_hdr_dgl(&hdr);
617 dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ 625 dg_size = fwnet_get_hdr_dg_size(&hdr);
626
627 if (fg_off + len > dg_size)
628 return 0;
618 629
619 spin_lock_irqsave(&dev->lock, flags); 630 spin_lock_irqsave(&dev->lock, flags);
620 631
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
722 fw_send_response(card, r, rcode); 733 fw_send_response(card, r, rcode);
723} 734}
724 735
736static int gasp_source_id(__be32 *p)
737{
738 return be32_to_cpu(p[0]) >> 16;
739}
740
741static u32 gasp_specifier_id(__be32 *p)
742{
743 return (be32_to_cpu(p[0]) & 0xffff) << 8 |
744 (be32_to_cpu(p[1]) & 0xff000000) >> 24;
745}
746
747static u32 gasp_version(__be32 *p)
748{
749 return be32_to_cpu(p[1]) & 0xffffff;
750}
751
725static void fwnet_receive_broadcast(struct fw_iso_context *context, 752static void fwnet_receive_broadcast(struct fw_iso_context *context,
726 u32 cycle, size_t header_length, void *header, void *data) 753 u32 cycle, size_t header_length, void *header, void *data)
727{ 754{
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
731 __be32 *buf_ptr; 758 __be32 *buf_ptr;
732 int retval; 759 int retval;
733 u32 length; 760 u32 length;
734 u16 source_node_id;
735 u32 specifier_id;
736 u32 ver;
737 unsigned long offset; 761 unsigned long offset;
738 unsigned long flags; 762 unsigned long flags;
739 763
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
750 774
751 spin_unlock_irqrestore(&dev->lock, flags); 775 spin_unlock_irqrestore(&dev->lock, flags);
752 776
753 specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 777 if (length > IEEE1394_GASP_HDR_SIZE &&
754 | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; 778 gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
755 ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; 779 (gasp_version(buf_ptr) == RFC2734_SW_VERSION
756 source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
757
758 if (specifier_id == IANA_SPECIFIER_ID &&
759 (ver == RFC2734_SW_VERSION
760#if IS_ENABLED(CONFIG_IPV6) 780#if IS_ENABLED(CONFIG_IPV6)
761 || ver == RFC3146_SW_VERSION 781 || gasp_version(buf_ptr) == RFC3146_SW_VERSION
762#endif 782#endif
763 )) { 783 ))
764 buf_ptr += 2; 784 fwnet_incoming_packet(dev, buf_ptr + 2,
765 length -= IEEE1394_GASP_HDR_SIZE; 785 length - IEEE1394_GASP_HDR_SIZE,
766 fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, 786 gasp_source_id(buf_ptr),
767 context->card->generation, true); 787 context->card->generation, true);
768 }
769 788
770 packet.payload_length = dev->rcv_buffer_size; 789 packet.payload_length = dev->rcv_buffer_size;
771 packet.interrupt = 1; 790 packet.interrupt = 1;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index cd5dc27320a2..1ed6132b993c 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
293{ 293{
294 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 294 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
295 struct mvebu_gpio_chip *mvchip = gc->private; 295 struct mvebu_gpio_chip *mvchip = gc->private;
296 u32 mask = ~(1 << (d->irq - gc->irq_base)); 296 u32 mask = d->mask;
297 297
298 irq_gc_lock(gc); 298 irq_gc_lock(gc);
299 writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip)); 299 writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
300 irq_gc_unlock(gc); 300 irq_gc_unlock(gc);
301} 301}
302 302
@@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
305 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 305 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
306 struct mvebu_gpio_chip *mvchip = gc->private; 306 struct mvebu_gpio_chip *mvchip = gc->private;
307 struct irq_chip_type *ct = irq_data_get_chip_type(d); 307 struct irq_chip_type *ct = irq_data_get_chip_type(d);
308 u32 mask = 1 << (d->irq - gc->irq_base); 308 u32 mask = d->mask;
309 309
310 irq_gc_lock(gc); 310 irq_gc_lock(gc);
311 ct->mask_cache_priv &= ~mask; 311 ct->mask_cache_priv &= ~mask;
@@ -319,8 +319,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
319 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 319 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
320 struct mvebu_gpio_chip *mvchip = gc->private; 320 struct mvebu_gpio_chip *mvchip = gc->private;
321 struct irq_chip_type *ct = irq_data_get_chip_type(d); 321 struct irq_chip_type *ct = irq_data_get_chip_type(d);
322 322 u32 mask = d->mask;
323 u32 mask = 1 << (d->irq - gc->irq_base);
324 323
325 irq_gc_lock(gc); 324 irq_gc_lock(gc);
326 ct->mask_cache_priv |= mask; 325 ct->mask_cache_priv |= mask;
@@ -333,8 +332,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
333 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 332 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
334 struct mvebu_gpio_chip *mvchip = gc->private; 333 struct mvebu_gpio_chip *mvchip = gc->private;
335 struct irq_chip_type *ct = irq_data_get_chip_type(d); 334 struct irq_chip_type *ct = irq_data_get_chip_type(d);
336 335 u32 mask = d->mask;
337 u32 mask = 1 << (d->irq - gc->irq_base);
338 336
339 irq_gc_lock(gc); 337 irq_gc_lock(gc);
340 ct->mask_cache_priv &= ~mask; 338 ct->mask_cache_priv &= ~mask;
@@ -347,8 +345,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
347 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 345 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
348 struct mvebu_gpio_chip *mvchip = gc->private; 346 struct mvebu_gpio_chip *mvchip = gc->private;
349 struct irq_chip_type *ct = irq_data_get_chip_type(d); 347 struct irq_chip_type *ct = irq_data_get_chip_type(d);
350 348 u32 mask = d->mask;
351 u32 mask = 1 << (d->irq - gc->irq_base);
352 349
353 irq_gc_lock(gc); 350 irq_gc_lock(gc);
354 ct->mask_cache_priv |= mask; 351 ct->mask_cache_priv |= mask;
@@ -462,7 +459,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
462 for (i = 0; i < mvchip->chip.ngpio; i++) { 459 for (i = 0; i < mvchip->chip.ngpio; i++) {
463 int irq; 460 int irq;
464 461
465 irq = mvchip->irqbase + i; 462 irq = irq_find_mapping(mvchip->domain, i);
466 463
467 if (!(cause & (1 << i))) 464 if (!(cause & (1 << i)))
468 continue; 465 continue;
@@ -655,6 +652,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
655 struct irq_chip_type *ct; 652 struct irq_chip_type *ct;
656 struct clk *clk; 653 struct clk *clk;
657 unsigned int ngpios; 654 unsigned int ngpios;
655 bool have_irqs;
658 int soc_variant; 656 int soc_variant;
659 int i, cpu, id; 657 int i, cpu, id;
660 int err; 658 int err;
@@ -665,6 +663,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
665 else 663 else
666 soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; 664 soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
667 665
666 /* Some gpio controllers do not provide irq support */
667 have_irqs = of_irq_count(np) != 0;
668
668 mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), 669 mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
669 GFP_KERNEL); 670 GFP_KERNEL);
670 if (!mvchip) 671 if (!mvchip)
@@ -697,7 +698,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
697 mvchip->chip.get = mvebu_gpio_get; 698 mvchip->chip.get = mvebu_gpio_get;
698 mvchip->chip.direction_output = mvebu_gpio_direction_output; 699 mvchip->chip.direction_output = mvebu_gpio_direction_output;
699 mvchip->chip.set = mvebu_gpio_set; 700 mvchip->chip.set = mvebu_gpio_set;
700 mvchip->chip.to_irq = mvebu_gpio_to_irq; 701 if (have_irqs)
702 mvchip->chip.to_irq = mvebu_gpio_to_irq;
701 mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK; 703 mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
702 mvchip->chip.ngpio = ngpios; 704 mvchip->chip.ngpio = ngpios;
703 mvchip->chip.can_sleep = false; 705 mvchip->chip.can_sleep = false;
@@ -758,34 +760,30 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
758 devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); 760 devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
759 761
760 /* Some gpio controllers do not provide irq support */ 762 /* Some gpio controllers do not provide irq support */
761 if (!of_irq_count(np)) 763 if (!have_irqs)
762 return 0; 764 return 0;
763 765
764 /* Setup the interrupt handlers. Each chip can have up to 4 766 mvchip->domain =
765 * interrupt handlers, with each handler dealing with 8 GPIO 767 irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
766 * pins. */ 768 if (!mvchip->domain) {
767 for (i = 0; i < 4; i++) { 769 dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
768 int irq = platform_get_irq(pdev, i); 770 mvchip->chip.label);
769 771 return -ENODEV;
770 if (irq < 0)
771 continue;
772 irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
773 mvchip);
774 }
775
776 mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
777 if (mvchip->irqbase < 0) {
778 dev_err(&pdev->dev, "no irqs\n");
779 return mvchip->irqbase;
780 } 772 }
781 773
782 gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase, 774 err = irq_alloc_domain_generic_chips(
783 mvchip->membase, handle_level_irq); 775 mvchip->domain, ngpios, 2, np->name, handle_level_irq,
784 if (!gc) { 776 IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
785 dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); 777 if (err) {
786 return -ENOMEM; 778 dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
779 mvchip->chip.label);
780 goto err_domain;
787 } 781 }
788 782
783 /* NOTE: The common accessors cannot be used because of the percpu
784 * access to the mask registers
785 */
786 gc = irq_get_domain_generic_chip(mvchip->domain, 0);
789 gc->private = mvchip; 787 gc->private = mvchip;
790 ct = &gc->chip_types[0]; 788 ct = &gc->chip_types[0];
791 ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; 789 ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
@@ -803,27 +801,23 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
803 ct->handler = handle_edge_irq; 801 ct->handler = handle_edge_irq;
804 ct->chip.name = mvchip->chip.label; 802 ct->chip.name = mvchip->chip.label;
805 803
806 irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0, 804 /* Setup the interrupt handlers. Each chip can have up to 4
807 IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); 805 * interrupt handlers, with each handler dealing with 8 GPIO
806 * pins.
807 */
808 for (i = 0; i < 4; i++) {
809 int irq = platform_get_irq(pdev, i);
808 810
809 /* Setup irq domain on top of the generic chip. */ 811 if (irq < 0)
810 mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio, 812 continue;
811 mvchip->irqbase, 813 irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
812 &irq_domain_simple_ops, 814 mvchip);
813 mvchip);
814 if (!mvchip->domain) {
815 dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
816 mvchip->chip.label);
817 err = -ENODEV;
818 goto err_generic_chip;
819 } 815 }
820 816
821 return 0; 817 return 0;
822 818
823err_generic_chip: 819err_domain:
824 irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, 820 irq_domain_remove(mvchip->domain);
825 IRQ_LEVEL | IRQ_NOPROBE);
826 kfree(gc);
827 821
828 return err; 822 return err;
829} 823}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ecad3f0e3b77..193f15d50bba 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -26,14 +26,18 @@
26 26
27#include "gpiolib.h" 27#include "gpiolib.h"
28 28
29static int of_gpiochip_match_node(struct gpio_chip *chip, void *data) 29static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
30{ 30{
31 return chip->gpiodev->dev.of_node == data; 31 struct of_phandle_args *gpiospec = data;
32
33 return chip->gpiodev->dev.of_node == gpiospec->np &&
34 chip->of_xlate(chip, gpiospec, NULL) >= 0;
32} 35}
33 36
34static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np) 37static struct gpio_chip *of_find_gpiochip_by_xlate(
38 struct of_phandle_args *gpiospec)
35{ 39{
36 return gpiochip_find(np, of_gpiochip_match_node); 40 return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
37} 41}
38 42
39static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip, 43static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
@@ -79,7 +83,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
79 return ERR_PTR(ret); 83 return ERR_PTR(ret);
80 } 84 }
81 85
82 chip = of_find_gpiochip_by_node(gpiospec.np); 86 chip = of_find_gpiochip_by_xlate(&gpiospec);
83 if (!chip) { 87 if (!chip) {
84 desc = ERR_PTR(-EPROBE_DEFER); 88 desc = ERR_PTR(-EPROBE_DEFER);
85 goto out; 89 goto out;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 20e09b7c2de3..93ed0e00c578 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -21,6 +21,7 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/compat.h> 22#include <linux/compat.h>
23#include <linux/anon_inodes.h> 23#include <linux/anon_inodes.h>
24#include <linux/file.h>
24#include <linux/kfifo.h> 25#include <linux/kfifo.h>
25#include <linux/poll.h> 26#include <linux/poll.h>
26#include <linux/timekeeping.h> 27#include <linux/timekeeping.h>
@@ -423,6 +424,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
423{ 424{
424 struct gpiohandle_request handlereq; 425 struct gpiohandle_request handlereq;
425 struct linehandle_state *lh; 426 struct linehandle_state *lh;
427 struct file *file;
426 int fd, i, ret; 428 int fd, i, ret;
427 429
428 if (copy_from_user(&handlereq, ip, sizeof(handlereq))) 430 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -499,26 +501,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
499 i--; 501 i--;
500 lh->numdescs = handlereq.lines; 502 lh->numdescs = handlereq.lines;
501 503
502 fd = anon_inode_getfd("gpio-linehandle", 504 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
503 &linehandle_fileops,
504 lh,
505 O_RDONLY | O_CLOEXEC);
506 if (fd < 0) { 505 if (fd < 0) {
507 ret = fd; 506 ret = fd;
508 goto out_free_descs; 507 goto out_free_descs;
509 } 508 }
510 509
510 file = anon_inode_getfile("gpio-linehandle",
511 &linehandle_fileops,
512 lh,
513 O_RDONLY | O_CLOEXEC);
514 if (IS_ERR(file)) {
515 ret = PTR_ERR(file);
516 goto out_put_unused_fd;
517 }
518
511 handlereq.fd = fd; 519 handlereq.fd = fd;
512 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) { 520 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
513 ret = -EFAULT; 521 /*
514 goto out_free_descs; 522 * fput() will trigger the release() callback, so do not go onto
523 * the regular error cleanup path here.
524 */
525 fput(file);
526 put_unused_fd(fd);
527 return -EFAULT;
515 } 528 }
516 529
530 fd_install(fd, file);
531
517 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", 532 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
518 lh->numdescs); 533 lh->numdescs);
519 534
520 return 0; 535 return 0;
521 536
537out_put_unused_fd:
538 put_unused_fd(fd);
522out_free_descs: 539out_free_descs:
523 for (; i >= 0; i--) 540 for (; i >= 0; i--)
524 gpiod_free(lh->descs[i]); 541 gpiod_free(lh->descs[i]);
@@ -721,6 +738,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
721 struct gpioevent_request eventreq; 738 struct gpioevent_request eventreq;
722 struct lineevent_state *le; 739 struct lineevent_state *le;
723 struct gpio_desc *desc; 740 struct gpio_desc *desc;
741 struct file *file;
724 u32 offset; 742 u32 offset;
725 u32 lflags; 743 u32 lflags;
726 u32 eflags; 744 u32 eflags;
@@ -815,23 +833,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
815 if (ret) 833 if (ret)
816 goto out_free_desc; 834 goto out_free_desc;
817 835
818 fd = anon_inode_getfd("gpio-event", 836 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
819 &lineevent_fileops,
820 le,
821 O_RDONLY | O_CLOEXEC);
822 if (fd < 0) { 837 if (fd < 0) {
823 ret = fd; 838 ret = fd;
824 goto out_free_irq; 839 goto out_free_irq;
825 } 840 }
826 841
842 file = anon_inode_getfile("gpio-event",
843 &lineevent_fileops,
844 le,
845 O_RDONLY | O_CLOEXEC);
846 if (IS_ERR(file)) {
847 ret = PTR_ERR(file);
848 goto out_put_unused_fd;
849 }
850
827 eventreq.fd = fd; 851 eventreq.fd = fd;
828 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) { 852 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
829 ret = -EFAULT; 853 /*
830 goto out_free_irq; 854 * fput() will trigger the release() callback, so do not go onto
855 * the regular error cleanup path here.
856 */
857 fput(file);
858 put_unused_fd(fd);
859 return -EFAULT;
831 } 860 }
832 861
862 fd_install(fd, file);
863
833 return 0; 864 return 0;
834 865
866out_put_unused_fd:
867 put_unused_fd(fd);
835out_free_irq: 868out_free_irq:
836 free_irq(le->irq, le); 869 free_irq(le->irq, le);
837out_free_desc: 870out_free_desc:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60fb225b..2057683f7b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle)
395{ 395{
396 int i, ret; 396 int i, ret;
397 struct device *dev; 397 struct device *dev;
398
399 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 398 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
400 399
400 /* return early if no ACP */
401 if (!adev->acp.acp_genpd)
402 return 0;
403
401 for (i = 0; i < ACP_DEVS ; i++) { 404 for (i = 0; i < ACP_DEVS ; i++) {
402 dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); 405 dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
403 ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); 406 ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a8bfa34682f..662976292535 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -795,10 +795,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
795 if (!adev->pm.fw) { 795 if (!adev->pm.fw) {
796 switch (adev->asic_type) { 796 switch (adev->asic_type) {
797 case CHIP_TOPAZ: 797 case CHIP_TOPAZ:
798 strcpy(fw_name, "amdgpu/topaz_smc.bin"); 798 if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
799 ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
800 ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
801 strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
802 else
803 strcpy(fw_name, "amdgpu/topaz_smc.bin");
799 break; 804 break;
800 case CHIP_TONGA: 805 case CHIP_TONGA:
801 strcpy(fw_name, "amdgpu/tonga_smc.bin"); 806 if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
807 ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
808 strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
809 else
810 strcpy(fw_name, "amdgpu/tonga_smc.bin");
802 break; 811 break;
803 case CHIP_FIJI: 812 case CHIP_FIJI:
804 strcpy(fw_name, "amdgpu/fiji_smc.bin"); 813 strcpy(fw_name, "amdgpu/fiji_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index e3281d4e3e41..086aa5c9c634 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector)
769{ 769{
770 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 770 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
771 771
772 if (amdgpu_connector->ddc_bus->has_aux) { 772 if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
773 drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); 773 drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
774 amdgpu_connector->ddc_bus->has_aux = false; 774 amdgpu_connector->ddc_bus->has_aux = false;
775 } 775 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b0f6e6957536..82dc8d20e28a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -519,7 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
519 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, 519 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
520 &duplicates); 520 &duplicates);
521 if (unlikely(r != 0)) { 521 if (unlikely(r != 0)) {
522 DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); 522 if (r != -ERESTARTSYS)
523 DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
523 goto error_free_pages; 524 goto error_free_pages;
524 } 525 }
525 526
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a9239069..7ca07e7b25c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1959,6 +1959,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
1959 /* evict remaining vram memory */ 1959 /* evict remaining vram memory */
1960 amdgpu_bo_evict_vram(adev); 1960 amdgpu_bo_evict_vram(adev);
1961 1961
1962 amdgpu_atombios_scratch_regs_save(adev);
1962 pci_save_state(dev->pdev); 1963 pci_save_state(dev->pdev);
1963 if (suspend) { 1964 if (suspend) {
1964 /* Shut down the device */ 1965 /* Shut down the device */
@@ -2010,6 +2011,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2010 return r; 2011 return r;
2011 } 2012 }
2012 } 2013 }
2014 amdgpu_atombios_scratch_regs_restore(adev);
2013 2015
2014 /* post card */ 2016 /* post card */
2015 if (!amdgpu_card_posted(adev) || !resume) { 2017 if (!amdgpu_card_posted(adev) || !resume) {
@@ -2268,8 +2270,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
2268 } 2270 }
2269 2271
2270 if (need_full_reset) { 2272 if (need_full_reset) {
2271 /* save scratch */
2272 amdgpu_atombios_scratch_regs_save(adev);
2273 r = amdgpu_suspend(adev); 2273 r = amdgpu_suspend(adev);
2274 2274
2275retry: 2275retry:
@@ -2279,8 +2279,9 @@ retry:
2279 amdgpu_display_stop_mc_access(adev, &save); 2279 amdgpu_display_stop_mc_access(adev, &save);
2280 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); 2280 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2281 } 2281 }
2282 2282 amdgpu_atombios_scratch_regs_save(adev);
2283 r = amdgpu_asic_reset(adev); 2283 r = amdgpu_asic_reset(adev);
2284 amdgpu_atombios_scratch_regs_restore(adev);
2284 /* post card */ 2285 /* post card */
2285 amdgpu_atom_asic_init(adev->mode_info.atom_context); 2286 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2286 2287
@@ -2288,8 +2289,6 @@ retry:
2288 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); 2289 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2289 r = amdgpu_resume(adev); 2290 r = amdgpu_resume(adev);
2290 } 2291 }
2291 /* restore scratch */
2292 amdgpu_atombios_scratch_regs_restore(adev);
2293 } 2292 }
2294 if (!r) { 2293 if (!r) {
2295 amdgpu_irq_gpu_reset_resume_helper(adev); 2294 amdgpu_irq_gpu_reset_resume_helper(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71ed27eb3dde..02ff0747197c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -735,8 +735,20 @@ static struct pci_driver amdgpu_kms_pci_driver = {
735 735
736static int __init amdgpu_init(void) 736static int __init amdgpu_init(void)
737{ 737{
738 amdgpu_sync_init(); 738 int r;
739 amdgpu_fence_slab_init(); 739
740 r = amdgpu_sync_init();
741 if (r)
742 goto error_sync;
743
744 r = amdgpu_fence_slab_init();
745 if (r)
746 goto error_fence;
747
748 r = amd_sched_fence_slab_init();
749 if (r)
750 goto error_sched;
751
740 if (vgacon_text_force()) { 752 if (vgacon_text_force()) {
741 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); 753 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
742 return -EINVAL; 754 return -EINVAL;
@@ -748,6 +760,15 @@ static int __init amdgpu_init(void)
748 amdgpu_register_atpx_handler(); 760 amdgpu_register_atpx_handler();
749 /* let modprobe override vga console setting */ 761 /* let modprobe override vga console setting */
750 return drm_pci_init(driver, pdriver); 762 return drm_pci_init(driver, pdriver);
763
764error_sched:
765 amdgpu_fence_slab_fini();
766
767error_fence:
768 amdgpu_sync_fini();
769
770error_sync:
771 return r;
751} 772}
752 773
753static void __exit amdgpu_exit(void) 774static void __exit amdgpu_exit(void)
@@ -756,6 +777,7 @@ static void __exit amdgpu_exit(void)
756 drm_pci_exit(driver, pdriver); 777 drm_pci_exit(driver, pdriver);
757 amdgpu_unregister_atpx_handler(); 778 amdgpu_unregister_atpx_handler();
758 amdgpu_sync_fini(); 779 amdgpu_sync_fini();
780 amd_sched_fence_slab_fini();
759 amdgpu_fence_slab_fini(); 781 amdgpu_fence_slab_fini();
760} 782}
761 783
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..77b34ec92632 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -68,6 +68,7 @@ int amdgpu_fence_slab_init(void)
68 68
69void amdgpu_fence_slab_fini(void) 69void amdgpu_fence_slab_fini(void)
70{ 70{
71 rcu_barrier();
71 kmem_cache_destroy(amdgpu_fence_slab); 72 kmem_cache_destroy(amdgpu_fence_slab);
72} 73}
73/* 74/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 278708f5a744..9fa809876339 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
239 if (r) { 239 if (r) {
240 adev->irq.installed = false; 240 adev->irq.installed = false;
241 flush_work(&adev->hotplug_work); 241 flush_work(&adev->hotplug_work);
242 cancel_work_sync(&adev->reset_work);
242 return r; 243 return r;
243 } 244 }
244 245
@@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
264 if (adev->irq.msi_enabled) 265 if (adev->irq.msi_enabled)
265 pci_disable_msi(adev->pdev); 266 pci_disable_msi(adev->pdev);
266 flush_work(&adev->hotplug_work); 267 flush_work(&adev->hotplug_work);
268 cancel_work_sync(&adev->reset_work);
267 } 269 }
268 270
269 for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { 271 for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb140338..3938fca1ea8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
99 99
100 if ((amdgpu_runtime_pm != 0) && 100 if ((amdgpu_runtime_pm != 0) &&
101 amdgpu_has_atpx() && 101 amdgpu_has_atpx() &&
102 (amdgpu_is_atpx_hybrid() ||
103 amdgpu_has_atpx_dgpu_power_cntl()) &&
102 ((flags & AMD_IS_APU) == 0)) 104 ((flags & AMD_IS_APU) == 0))
103 flags |= AMD_IS_PX; 105 flags |= AMD_IS_PX;
104 106
@@ -459,10 +461,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
459 /* return all clocks in KHz */ 461 /* return all clocks in KHz */
460 dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; 462 dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
461 if (adev->pm.dpm_enabled) { 463 if (adev->pm.dpm_enabled) {
462 dev_info.max_engine_clock = 464 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
463 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; 465 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
464 dev_info.max_memory_clock =
465 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
466 } else { 466 } else {
467 dev_info.max_engine_clock = adev->pm.default_sclk * 10; 467 dev_info.max_engine_clock = adev->pm.default_sclk * 10;
468 dev_info.max_memory_clock = adev->pm.default_mclk * 10; 468 dev_info.max_memory_clock = adev->pm.default_mclk * 10;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06f24322e7c3..968c4260d7a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1758,5 +1758,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1758 fence_put(adev->vm_manager.ids[i].first); 1758 fence_put(adev->vm_manager.ids[i].first);
1759 amdgpu_sync_free(&adev->vm_manager.ids[i].active); 1759 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1760 fence_put(id->flushed_updates); 1760 fence_put(id->flushed_updates);
1761 fence_put(id->last_flush);
1761 } 1762 }
1762} 1763}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1d8c375a3561..5be788b269e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -4075,7 +4075,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4075 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 4075 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4076 } 4076 }
4077 } else { 4077 } else {
4078 if (pi->last_mclk_dpm_enable_mask & 0x1) { 4078 if (pi->uvd_enabled) {
4079 pi->uvd_enabled = false; 4079 pi->uvd_enabled = false;
4080 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; 4080 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4081 amdgpu_ci_send_msg_to_smc_with_parameter(adev, 4081 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
@@ -6236,6 +6236,8 @@ static int ci_dpm_sw_fini(void *handle)
6236{ 6236{
6237 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6237 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6238 6238
6239 flush_work(&adev->pm.dpm.thermal.work);
6240
6239 mutex_lock(&adev->pm.mutex); 6241 mutex_lock(&adev->pm.mutex);
6240 amdgpu_pm_sysfs_fini(adev); 6242 amdgpu_pm_sysfs_fini(adev);
6241 ci_dpm_fini(adev); 6243 ci_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4108c686aa7c..9260caef74fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3151,10 +3151,6 @@ static int dce_v10_0_hw_fini(void *handle)
3151 3151
3152static int dce_v10_0_suspend(void *handle) 3152static int dce_v10_0_suspend(void *handle)
3153{ 3153{
3154 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3155
3156 amdgpu_atombios_scratch_regs_save(adev);
3157
3158 return dce_v10_0_hw_fini(handle); 3154 return dce_v10_0_hw_fini(handle);
3159} 3155}
3160 3156
@@ -3165,8 +3161,6 @@ static int dce_v10_0_resume(void *handle)
3165 3161
3166 ret = dce_v10_0_hw_init(handle); 3162 ret = dce_v10_0_hw_init(handle);
3167 3163
3168 amdgpu_atombios_scratch_regs_restore(adev);
3169
3170 /* turn on the BL */ 3164 /* turn on the BL */
3171 if (adev->mode_info.bl_encoder) { 3165 if (adev->mode_info.bl_encoder) {
3172 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3166 u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f264b8f17ad1..367739bd1927 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3215,10 +3215,6 @@ static int dce_v11_0_hw_fini(void *handle)
3215 3215
3216static int dce_v11_0_suspend(void *handle) 3216static int dce_v11_0_suspend(void *handle)
3217{ 3217{
3218 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3219
3220 amdgpu_atombios_scratch_regs_save(adev);
3221
3222 return dce_v11_0_hw_fini(handle); 3218 return dce_v11_0_hw_fini(handle);
3223} 3219}
3224 3220
@@ -3229,8 +3225,6 @@ static int dce_v11_0_resume(void *handle)
3229 3225
3230 ret = dce_v11_0_hw_init(handle); 3226 ret = dce_v11_0_hw_init(handle);
3231 3227
3232 amdgpu_atombios_scratch_regs_restore(adev);
3233
3234 /* turn on the BL */ 3228 /* turn on the BL */
3235 if (adev->mode_info.bl_encoder) { 3229 if (adev->mode_info.bl_encoder) {
3236 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3230 u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b948d6cb1399..15f9fc0514b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2482,10 +2482,6 @@ static int dce_v6_0_hw_fini(void *handle)
2482 2482
2483static int dce_v6_0_suspend(void *handle) 2483static int dce_v6_0_suspend(void *handle)
2484{ 2484{
2485 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2486
2487 amdgpu_atombios_scratch_regs_save(adev);
2488
2489 return dce_v6_0_hw_fini(handle); 2485 return dce_v6_0_hw_fini(handle);
2490} 2486}
2491 2487
@@ -2496,8 +2492,6 @@ static int dce_v6_0_resume(void *handle)
2496 2492
2497 ret = dce_v6_0_hw_init(handle); 2493 ret = dce_v6_0_hw_init(handle);
2498 2494
2499 amdgpu_atombios_scratch_regs_restore(adev);
2500
2501 /* turn on the BL */ 2495 /* turn on the BL */
2502 if (adev->mode_info.bl_encoder) { 2496 if (adev->mode_info.bl_encoder) {
2503 u8 bl_level = amdgpu_display_backlight_get_level(adev, 2497 u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5966166ec94c..8c4d808db0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3033,10 +3033,6 @@ static int dce_v8_0_hw_fini(void *handle)
3033 3033
3034static int dce_v8_0_suspend(void *handle) 3034static int dce_v8_0_suspend(void *handle)
3035{ 3035{
3036 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3037
3038 amdgpu_atombios_scratch_regs_save(adev);
3039
3040 return dce_v8_0_hw_fini(handle); 3036 return dce_v8_0_hw_fini(handle);
3041} 3037}
3042 3038
@@ -3047,8 +3043,6 @@ static int dce_v8_0_resume(void *handle)
3047 3043
3048 ret = dce_v8_0_hw_init(handle); 3044 ret = dce_v8_0_hw_init(handle);
3049 3045
3050 amdgpu_atombios_scratch_regs_restore(adev);
3051
3052 /* turn on the BL */ 3046 /* turn on the BL */
3053 if (adev->mode_info.bl_encoder) { 3047 if (adev->mode_info.bl_encoder) {
3054 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3048 u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee6a48a09214..bb97182dc749 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -640,7 +640,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
640 mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201, 640 mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
641 mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201, 641 mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
642 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, 642 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
643 mmATC_MISC_CG, 0xffffffff, 0x000c0200,
644}; 643};
645 644
646static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); 645static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c22ef140a542..a16b2201d52c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] =
100 100
101static const u32 stoney_mgcg_cgcg_init[] = 101static const u32 stoney_mgcg_cgcg_init[] =
102{ 102{
103 mmATC_MISC_CG, 0xffffffff, 0x000c0200,
103 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 104 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
104}; 105};
105 106
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f8618a3881a8..71d2856222fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -3063,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle)
3063{ 3063{
3064 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3064 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3065 3065
3066 flush_work(&adev->pm.dpm.thermal.work);
3067
3066 mutex_lock(&adev->pm.mutex); 3068 mutex_lock(&adev->pm.mutex);
3067 amdgpu_pm_sysfs_fini(adev); 3069 amdgpu_pm_sysfs_fini(adev);
3068 kv_dpm_fini(adev); 3070 kv_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3de7bca5854b..d6f85b1a0b93 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3477,6 +3477,49 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3477 int i; 3477 int i;
3478 struct si_dpm_quirk *p = si_dpm_quirk_list; 3478 struct si_dpm_quirk *p = si_dpm_quirk_list;
3479 3479
3480 /* limit all SI kickers */
3481 if (adev->asic_type == CHIP_PITCAIRN) {
3482 if ((adev->pdev->revision == 0x81) ||
3483 (adev->pdev->device == 0x6810) ||
3484 (adev->pdev->device == 0x6811) ||
3485 (adev->pdev->device == 0x6816) ||
3486 (adev->pdev->device == 0x6817) ||
3487 (adev->pdev->device == 0x6806))
3488 max_mclk = 120000;
3489 } else if (adev->asic_type == CHIP_VERDE) {
3490 if ((adev->pdev->revision == 0x81) ||
3491 (adev->pdev->revision == 0x83) ||
3492 (adev->pdev->revision == 0x87) ||
3493 (adev->pdev->device == 0x6820) ||
3494 (adev->pdev->device == 0x6821) ||
3495 (adev->pdev->device == 0x6822) ||
3496 (adev->pdev->device == 0x6823) ||
3497 (adev->pdev->device == 0x682A) ||
3498 (adev->pdev->device == 0x682B)) {
3499 max_sclk = 75000;
3500 max_mclk = 80000;
3501 }
3502 } else if (adev->asic_type == CHIP_OLAND) {
3503 if ((adev->pdev->revision == 0xC7) ||
3504 (adev->pdev->revision == 0x80) ||
3505 (adev->pdev->revision == 0x81) ||
3506 (adev->pdev->revision == 0x83) ||
3507 (adev->pdev->device == 0x6604) ||
3508 (adev->pdev->device == 0x6605)) {
3509 max_sclk = 75000;
3510 max_mclk = 80000;
3511 }
3512 } else if (adev->asic_type == CHIP_HAINAN) {
3513 if ((adev->pdev->revision == 0x81) ||
3514 (adev->pdev->revision == 0x83) ||
3515 (adev->pdev->revision == 0xC3) ||
3516 (adev->pdev->device == 0x6664) ||
3517 (adev->pdev->device == 0x6665) ||
3518 (adev->pdev->device == 0x6667)) {
3519 max_sclk = 75000;
3520 max_mclk = 80000;
3521 }
3522 }
3480 /* Apply dpm quirks */ 3523 /* Apply dpm quirks */
3481 while (p && p->chip_device != 0) { 3524 while (p && p->chip_device != 0) {
3482 if (adev->pdev->vendor == p->chip_vendor && 3525 if (adev->pdev->vendor == p->chip_vendor &&
@@ -3489,22 +3532,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3489 } 3532 }
3490 ++p; 3533 ++p;
3491 } 3534 }
3492 /* limit mclk on all R7 370 parts for stability */
3493 if (adev->pdev->device == 0x6811 &&
3494 adev->pdev->revision == 0x81)
3495 max_mclk = 120000;
3496 /* limit sclk/mclk on Jet parts for stability */
3497 if (adev->pdev->device == 0x6665 &&
3498 adev->pdev->revision == 0xc3) {
3499 max_sclk = 75000;
3500 max_mclk = 80000;
3501 }
3502 /* Limit clocks for some HD8600 parts */
3503 if (adev->pdev->device == 0x6660 &&
3504 adev->pdev->revision == 0x83) {
3505 max_sclk = 75000;
3506 max_mclk = 80000;
3507 }
3508 3535
3509 if (rps->vce_active) { 3536 if (rps->vce_active) {
3510 rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; 3537 rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
@@ -7777,6 +7804,8 @@ static int si_dpm_sw_fini(void *handle)
7777{ 7804{
7778 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7805 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7779 7806
7807 flush_work(&adev->pm.dpm.thermal.work);
7808
7780 mutex_lock(&adev->pm.mutex); 7809 mutex_lock(&adev->pm.mutex);
7781 amdgpu_pm_sysfs_fini(adev); 7810 amdgpu_pm_sysfs_fini(adev);
7782 si_dpm_fini(adev); 7811 si_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8533269ec160..6feed726e299 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -52,6 +52,8 @@
52#define VCE_V3_0_STACK_SIZE (64 * 1024) 52#define VCE_V3_0_STACK_SIZE (64 * 1024)
53#define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) 53#define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
54 54
55#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
56
55static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 57static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
56static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); 58static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
57static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); 59static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -382,6 +384,10 @@ static int vce_v3_0_sw_init(void *handle)
382 if (r) 384 if (r)
383 return r; 385 return r;
384 386
387 /* 52.8.3 required for 3 ring support */
388 if (adev->vce.fw_version < FW_52_8_3)
389 adev->vce.num_rings = 2;
390
385 r = amdgpu_vce_resume(adev); 391 r = amdgpu_vce_resume(adev);
386 if (r) 392 if (r)
387 return r; 393 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index c0d9aad7126f..f62f1a74f890 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -80,7 +80,9 @@
80#include "dce_virtual.h" 80#include "dce_virtual.h"
81 81
82MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); 82MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
83MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
83MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); 84MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
85MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
84MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); 86MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
85MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); 87MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
86MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); 88MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
@@ -1651,7 +1653,7 @@ static int vi_common_early_init(void *handle)
1651 AMD_CG_SUPPORT_SDMA_MGCG | 1653 AMD_CG_SUPPORT_SDMA_MGCG |
1652 AMD_CG_SUPPORT_SDMA_LS | 1654 AMD_CG_SUPPORT_SDMA_LS |
1653 AMD_CG_SUPPORT_VCE_MGCG; 1655 AMD_CG_SUPPORT_VCE_MGCG;
1654 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | 1656 adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
1655 AMD_PG_SUPPORT_GFX_SMG | 1657 AMD_PG_SUPPORT_GFX_SMG |
1656 AMD_PG_SUPPORT_GFX_PIPELINE | 1658 AMD_PG_SUPPORT_GFX_PIPELINE |
1657 AMD_PG_SUPPORT_UVD | 1659 AMD_PG_SUPPORT_UVD |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 14f8c1f4da3d..0723758ed065 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw
272 PHM_FUNC_CHECK(hwmgr); 272 PHM_FUNC_CHECK(hwmgr);
273 273
274 if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) 274 if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
275 return -EINVAL; 275 return false;
276 276
277 return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr); 277 return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
278} 278}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 1167205057b3..e03dcb6ea9c1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -710,13 +710,15 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
710 uint32_t vol; 710 uint32_t vol;
711 int ret = 0; 711 int ret = 0;
712 712
713 if (hwmgr->chip_id < CHIP_POLARIS10) { 713 if (hwmgr->chip_id < CHIP_TONGA) {
714 atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); 714 ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
715 } else if (hwmgr->chip_id < CHIP_POLARIS10) {
716 ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
715 if (*voltage >= 2000 || *voltage == 0) 717 if (*voltage >= 2000 || *voltage == 0)
716 *voltage = 1150; 718 *voltage = 1150;
717 } else { 719 } else {
718 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol); 720 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
719 *voltage = (uint16_t)vol/100; 721 *voltage = (uint16_t)(vol/100);
720 } 722 }
721 return ret; 723 return ret;
722} 724}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 1126bd4f74dc..0894527d932f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1320,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
1320 if (0 != result) 1320 if (0 != result)
1321 return result; 1321 return result;
1322 1322
1323 *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); 1323 *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
1324 (&get_voltage_info_param_space))->ulVoltageLevel);
1324 1325
1325 return result; 1326 return result;
1326} 1327}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 7de701d8a450..4477c55a58e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1201,12 +1201,15 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
1201static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr) 1201static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
1202{ 1202{
1203 const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); 1203 const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
1204 const ATOM_Tonga_VCE_State_Table *vce_state_table = 1204 const ATOM_Tonga_VCE_State_Table *vce_state_table;
1205 (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
1206 1205
1207 if (vce_state_table == NULL) 1206
1207 if (pp_table == NULL)
1208 return 0; 1208 return 0;
1209 1209
1210 vce_state_table = (void *)pp_table +
1211 le16_to_cpu(pp_table->usVCEStateTableOffset);
1212
1210 return vce_state_table->ucNumEntries; 1213 return vce_state_table->ucNumEntries;
1211} 1214}
1212 1215
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 609996c84ad5..b0c929dd8beb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1168,8 +1168,8 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1168 1168
1169 tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1; 1169 tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
1170 PP_ASSERT_WITH_CODE(tmp_result == 0, 1170 PP_ASSERT_WITH_CODE(tmp_result == 0,
1171 "DPM is already running right now, no need to enable DPM!", 1171 "DPM is already running",
1172 return 0); 1172 );
1173 1173
1174 if (smu7_voltage_control(hwmgr)) { 1174 if (smu7_voltage_control(hwmgr)) {
1175 tmp_result = smu7_enable_voltage_control(hwmgr); 1175 tmp_result = smu7_enable_voltage_control(hwmgr);
@@ -1460,19 +1460,19 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1460 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; 1460 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1461 1461
1462 1462
1463 if (table_info == NULL)
1464 return -EINVAL;
1465
1466 sclk_table = table_info->vdd_dep_on_sclk;
1467
1468 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { 1463 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1469 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 1464 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1470 1465
1471 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1466 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1472 if (0 == phm_get_sclk_for_voltage_evv(hwmgr, 1467 if ((hwmgr->pp_table_version == PP_TABLE_V1)
1468 && !phm_get_sclk_for_voltage_evv(hwmgr,
1473 table_info->vddgfx_lookup_table, vv_id, &sclk)) { 1469 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1474 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1470 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1475 PHM_PlatformCaps_ClockStretcher)) { 1471 PHM_PlatformCaps_ClockStretcher)) {
1472 if (table_info == NULL)
1473 return -EINVAL;
1474 sclk_table = table_info->vdd_dep_on_sclk;
1475
1476 for (j = 1; j < sclk_table->count; j++) { 1476 for (j = 1; j < sclk_table->count; j++) {
1477 if (sclk_table->entries[j].clk == sclk && 1477 if (sclk_table->entries[j].clk == sclk &&
1478 sclk_table->entries[j].cks_enable == 0) { 1478 sclk_table->entries[j].cks_enable == 0) {
@@ -1498,12 +1498,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1498 } 1498 }
1499 } 1499 }
1500 } else { 1500 } else {
1501
1502 if ((hwmgr->pp_table_version == PP_TABLE_V0) 1501 if ((hwmgr->pp_table_version == PP_TABLE_V0)
1503 || !phm_get_sclk_for_voltage_evv(hwmgr, 1502 || !phm_get_sclk_for_voltage_evv(hwmgr,
1504 table_info->vddc_lookup_table, vv_id, &sclk)) { 1503 table_info->vddc_lookup_table, vv_id, &sclk)) {
1505 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1504 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1506 PHM_PlatformCaps_ClockStretcher)) { 1505 PHM_PlatformCaps_ClockStretcher)) {
1506 if (table_info == NULL)
1507 return -EINVAL;
1508 sclk_table = table_info->vdd_dep_on_sclk;
1509
1507 for (j = 1; j < sclk_table->count; j++) { 1510 for (j = 1; j < sclk_table->count; j++) {
1508 if (sclk_table->entries[j].clk == sclk && 1511 if (sclk_table->entries[j].clk == sclk &&
1509 sclk_table->entries[j].cks_enable == 0) { 1512 sclk_table->entries[j].cks_enable == 0) {
@@ -2127,15 +2130,20 @@ static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2127} 2130}
2128 2131
2129static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, 2132static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2130 struct phm_clock_and_voltage_limits *tab) 2133 struct phm_clock_and_voltage_limits *tab)
2131{ 2134{
2135 uint32_t vddc, vddci;
2132 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2136 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2133 2137
2134 if (tab) { 2138 if (tab) {
2135 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc, 2139 vddc = tab->vddc;
2136 &data->vddc_leakage); 2140 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2137 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci, 2141 &data->vddc_leakage);
2138 &data->vddci_leakage); 2142 tab->vddc = vddc;
2143 vddci = tab->vddci;
2144 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2145 &data->vddci_leakage);
2146 tab->vddci = vddci;
2139 } 2147 }
2140 2148
2141 return 0; 2149 return 0;
@@ -4225,18 +4233,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4225{ 4233{
4226 struct phm_ppt_v1_information *table_info = 4234 struct phm_ppt_v1_information *table_info =
4227 (struct phm_ppt_v1_information *)hwmgr->pptable; 4235 (struct phm_ppt_v1_information *)hwmgr->pptable;
4228 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; 4236 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4237 struct phm_clock_voltage_dependency_table *sclk_table;
4229 int i; 4238 int i;
4230 4239
4231 if (table_info == NULL) 4240 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4232 return -EINVAL; 4241 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4233 4242 return -EINVAL;
4234 dep_sclk_table = table_info->vdd_dep_on_sclk; 4243 dep_sclk_table = table_info->vdd_dep_on_sclk;
4235 4244 for (i = 0; i < dep_sclk_table->count; i++) {
4236 for (i = 0; i < dep_sclk_table->count; i++) { 4245 clocks->clock[i] = dep_sclk_table->entries[i].clk;
4237 clocks->clock[i] = dep_sclk_table->entries[i].clk; 4246 clocks->count++;
4238 clocks->count++; 4247 }
4248 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4249 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4250 for (i = 0; i < sclk_table->count; i++) {
4251 clocks->clock[i] = sclk_table->entries[i].clk;
4252 clocks->count++;
4253 }
4239 } 4254 }
4255
4240 return 0; 4256 return 0;
4241} 4257}
4242 4258
@@ -4258,17 +4274,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4258 (struct phm_ppt_v1_information *)hwmgr->pptable; 4274 (struct phm_ppt_v1_information *)hwmgr->pptable;
4259 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; 4275 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4260 int i; 4276 int i;
4277 struct phm_clock_voltage_dependency_table *mclk_table;
4261 4278
4262 if (table_info == NULL) 4279 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4263 return -EINVAL; 4280 if (table_info == NULL)
4264 4281 return -EINVAL;
4265 dep_mclk_table = table_info->vdd_dep_on_mclk; 4282 dep_mclk_table = table_info->vdd_dep_on_mclk;
4266 4283 for (i = 0; i < dep_mclk_table->count; i++) {
4267 for (i = 0; i < dep_mclk_table->count; i++) { 4284 clocks->clock[i] = dep_mclk_table->entries[i].clk;
4268 clocks->clock[i] = dep_mclk_table->entries[i].clk; 4285 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4269 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4270 dep_mclk_table->entries[i].clk); 4286 dep_mclk_table->entries[i].clk);
4271 clocks->count++; 4287 clocks->count++;
4288 }
4289 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4290 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4291 for (i = 0; i < mclk_table->count; i++) {
4292 clocks->clock[i] = mclk_table->entries[i].clk;
4293 clocks->count++;
4294 }
4272 } 4295 }
4273 return 0; 4296 return 0;
4274} 4297}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index fb6c6f6106d5..29d0319b22e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
30 struct phm_fan_speed_info *fan_speed_info) 30 struct phm_fan_speed_info *fan_speed_info)
31{ 31{
32 if (hwmgr->thermal_controller.fanInfo.bNoFan) 32 if (hwmgr->thermal_controller.fanInfo.bNoFan)
33 return 0; 33 return -ENODEV;
34 34
35 fan_speed_info->supports_percent_read = true; 35 fan_speed_info->supports_percent_read = true;
36 fan_speed_info->supports_percent_write = true; 36 fan_speed_info->supports_percent_write = true;
@@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
60 uint64_t tmp64; 60 uint64_t tmp64;
61 61
62 if (hwmgr->thermal_controller.fanInfo.bNoFan) 62 if (hwmgr->thermal_controller.fanInfo.bNoFan)
63 return 0; 63 return -ENODEV;
64 64
65 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 65 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
66 CG_FDO_CTRL1, FMAX_DUTY100); 66 CG_FDO_CTRL1, FMAX_DUTY100);
@@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
89 if (hwmgr->thermal_controller.fanInfo.bNoFan || 89 if (hwmgr->thermal_controller.fanInfo.bNoFan ||
90 (hwmgr->thermal_controller.fanInfo. 90 (hwmgr->thermal_controller.fanInfo.
91 ucTachometerPulsesPerRevolution == 0)) 91 ucTachometerPulsesPerRevolution == 0))
92 return 0; 92 return -ENODEV;
93 93
94 tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 94 tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
95 CG_TACH_STATUS, TACH_PERIOD); 95 CG_TACH_STATUS, TACH_PERIOD);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d46a93..ffe1f85ce300 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
35static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); 35static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
36 36
37struct kmem_cache *sched_fence_slab;
38atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
39
40/* Initialize a given run queue struct */ 37/* Initialize a given run queue struct */
41static void amd_sched_rq_init(struct amd_sched_rq *rq) 38static void amd_sched_rq_init(struct amd_sched_rq *rq)
42{ 39{
@@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
618 INIT_LIST_HEAD(&sched->ring_mirror_list); 615 INIT_LIST_HEAD(&sched->ring_mirror_list);
619 spin_lock_init(&sched->job_list_lock); 616 spin_lock_init(&sched->job_list_lock);
620 atomic_set(&sched->hw_rq_count, 0); 617 atomic_set(&sched->hw_rq_count, 0);
621 if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
622 sched_fence_slab = kmem_cache_create(
623 "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
624 SLAB_HWCACHE_ALIGN, NULL);
625 if (!sched_fence_slab)
626 return -ENOMEM;
627 }
628 618
629 /* Each scheduler will run on a seperate kernel thread */ 619 /* Each scheduler will run on a seperate kernel thread */
630 sched->thread = kthread_run(amd_sched_main, sched, sched->name); 620 sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -645,6 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
645{ 635{
646 if (sched->thread) 636 if (sched->thread)
647 kthread_stop(sched->thread); 637 kthread_stop(sched->thread);
648 if (atomic_dec_and_test(&sched_fence_slab_ref))
649 kmem_cache_destroy(sched_fence_slab);
650} 638}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..51068e6c3d9a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,9 +30,6 @@
30struct amd_gpu_scheduler; 30struct amd_gpu_scheduler;
31struct amd_sched_rq; 31struct amd_sched_rq;
32 32
33extern struct kmem_cache *sched_fence_slab;
34extern atomic_t sched_fence_slab_ref;
35
36/** 33/**
37 * A scheduler entity is a wrapper around a job queue or a group 34 * A scheduler entity is a wrapper around a job queue or a group
38 * of other entities. Entities take turns emitting jobs from their 35 * of other entities. Entities take turns emitting jobs from their
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
145 struct amd_sched_entity *entity); 142 struct amd_sched_entity *entity);
146void amd_sched_entity_push_job(struct amd_sched_job *sched_job); 143void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
147 144
145int amd_sched_fence_slab_init(void);
146void amd_sched_fence_slab_fini(void);
147
148struct amd_sched_fence *amd_sched_fence_create( 148struct amd_sched_fence *amd_sched_fence_create(
149 struct amd_sched_entity *s_entity, void *owner); 149 struct amd_sched_entity *s_entity, void *owner);
150void amd_sched_fence_scheduled(struct amd_sched_fence *fence); 150void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63beaf7574..88fc2d662579 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,6 +27,25 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "gpu_scheduler.h" 28#include "gpu_scheduler.h"
29 29
30static struct kmem_cache *sched_fence_slab;
31
32int amd_sched_fence_slab_init(void)
33{
34 sched_fence_slab = kmem_cache_create(
35 "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
36 SLAB_HWCACHE_ALIGN, NULL);
37 if (!sched_fence_slab)
38 return -ENOMEM;
39
40 return 0;
41}
42
43void amd_sched_fence_slab_fini(void)
44{
45 rcu_barrier();
46 kmem_cache_destroy(sched_fence_slab);
47}
48
30struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, 49struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
31 void *owner) 50 void *owner)
32{ 51{
@@ -103,7 +122,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
103} 122}
104 123
105/** 124/**
106 * amd_sched_fence_release - callback that fence can be freed 125 * amd_sched_fence_release_scheduled - callback that fence can be freed
107 * 126 *
108 * @fence: fence 127 * @fence: fence
109 * 128 *
@@ -118,7 +137,7 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
118} 137}
119 138
120/** 139/**
121 * amd_sched_fence_release_scheduled - drop extra reference 140 * amd_sched_fence_release_finished - drop extra reference
122 * 141 *
123 * @f: fence 142 * @f: fence
124 * 143 *
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 23739609427d..e6862a744210 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -420,18 +420,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
420 ssize_t expected_size, 420 ssize_t expected_size,
421 bool *replaced) 421 bool *replaced)
422{ 422{
423 struct drm_device *dev = crtc->dev;
424 struct drm_property_blob *new_blob = NULL; 423 struct drm_property_blob *new_blob = NULL;
425 424
426 if (blob_id != 0) { 425 if (blob_id != 0) {
427 new_blob = drm_property_lookup_blob(dev, blob_id); 426 new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
428 if (new_blob == NULL) 427 if (new_blob == NULL)
429 return -EINVAL; 428 return -EINVAL;
430 if (expected_size > 0 && expected_size != new_blob->length) 429
430 if (expected_size > 0 && expected_size != new_blob->length) {
431 drm_property_unreference_blob(new_blob);
431 return -EINVAL; 432 return -EINVAL;
433 }
432 } 434 }
433 435
434 drm_atomic_replace_property_blob(blob, new_blob, replaced); 436 drm_atomic_replace_property_blob(blob, new_blob, replaced);
437 drm_property_unreference_blob(new_blob);
435 438
436 return 0; 439 return 0;
437} 440}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c3f83476f996..21f992605541 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -594,10 +594,6 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
594 struct drm_plane_state *plane_state; 594 struct drm_plane_state *plane_state;
595 int i, ret = 0; 595 int i, ret = 0;
596 596
597 ret = drm_atomic_normalize_zpos(dev, state);
598 if (ret)
599 return ret;
600
601 for_each_plane_in_state(state, plane, plane_state, i) { 597 for_each_plane_in_state(state, plane, plane_state, i) {
602 const struct drm_plane_helper_funcs *funcs; 598 const struct drm_plane_helper_funcs *funcs;
603 599
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 04e457117980..aa644487749c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
914 /* no need to clean up vcpi 914 /* no need to clean up vcpi
915 * as if we have no connector we never setup a vcpi */ 915 * as if we have no connector we never setup a vcpi */
916 drm_dp_port_teardown_pdt(port, port->pdt); 916 drm_dp_port_teardown_pdt(port, port->pdt);
917 port->pdt = DP_PEER_DEVICE_NONE;
917 } 918 }
918 kfree(port); 919 kfree(port);
919} 920}
@@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1159 drm_dp_put_port(port); 1160 drm_dp_put_port(port);
1160 goto out; 1161 goto out;
1161 } 1162 }
1162 if (port->port_num >= DP_MST_LOGICAL_PORT_0) { 1163 if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1164 port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1165 port->port_num >= DP_MST_LOGICAL_PORT_0) {
1163 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1166 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1164 drm_mode_connector_set_tile_property(port->connector); 1167 drm_mode_connector_set_tile_property(port->connector);
1165 } 1168 }
@@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2919 mgr->cbs->destroy_connector(mgr, port->connector); 2922 mgr->cbs->destroy_connector(mgr, port->connector);
2920 2923
2921 drm_dp_port_teardown_pdt(port, port->pdt); 2924 drm_dp_port_teardown_pdt(port, port->pdt);
2925 port->pdt = DP_PEER_DEVICE_NONE;
2922 2926
2923 if (!port->input && port->vcpi.vcpi > 0) { 2927 if (!port->input && port->vcpi.vcpi > 0) {
2924 drm_dp_mst_reset_vcpi_slots(mgr, port); 2928 drm_dp_mst_reset_vcpi_slots(mgr, port);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 03414bde1f15..6c75e62c0b22 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -131,7 +131,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
131 return 0; 131 return 0;
132fail: 132fail:
133 for (i = 0; i < fb_helper->connector_count; i++) { 133 for (i = 0; i < fb_helper->connector_count; i++) {
134 kfree(fb_helper->connector_info[i]); 134 struct drm_fb_helper_connector *fb_helper_connector =
135 fb_helper->connector_info[i];
136
137 drm_connector_unreference(fb_helper_connector->connector);
138
139 kfree(fb_helper_connector);
135 fb_helper->connector_info[i] = NULL; 140 fb_helper->connector_info[i] = NULL;
136 } 141 }
137 fb_helper->connector_count = 0; 142 fb_helper->connector_count = 0;
@@ -603,6 +608,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
603} 608}
604EXPORT_SYMBOL(drm_fb_helper_blank); 609EXPORT_SYMBOL(drm_fb_helper_blank);
605 610
611static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
612 struct drm_mode_set *modeset)
613{
614 int i;
615
616 for (i = 0; i < modeset->num_connectors; i++) {
617 drm_connector_unreference(modeset->connectors[i]);
618 modeset->connectors[i] = NULL;
619 }
620 modeset->num_connectors = 0;
621
622 drm_mode_destroy(helper->dev, modeset->mode);
623 modeset->mode = NULL;
624
625 /* FIXME should hold a ref? */
626 modeset->fb = NULL;
627}
628
606static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) 629static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
607{ 630{
608 int i; 631 int i;
@@ -612,10 +635,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
612 kfree(helper->connector_info[i]); 635 kfree(helper->connector_info[i]);
613 } 636 }
614 kfree(helper->connector_info); 637 kfree(helper->connector_info);
638
615 for (i = 0; i < helper->crtc_count; i++) { 639 for (i = 0; i < helper->crtc_count; i++) {
616 kfree(helper->crtc_info[i].mode_set.connectors); 640 struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
617 if (helper->crtc_info[i].mode_set.mode) 641
618 drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode); 642 drm_fb_helper_modeset_release(helper, modeset);
643 kfree(modeset->connectors);
619 } 644 }
620 kfree(helper->crtc_info); 645 kfree(helper->crtc_info);
621} 646}
@@ -644,7 +669,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
644 clip->x2 = clip->y2 = 0; 669 clip->x2 = clip->y2 = 0;
645 spin_unlock_irqrestore(&helper->dirty_lock, flags); 670 spin_unlock_irqrestore(&helper->dirty_lock, flags);
646 671
647 helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); 672 /* call dirty callback only when it has been really touched */
673 if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
674 helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
648} 675}
649 676
650/** 677/**
@@ -2088,7 +2115,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
2088 struct drm_fb_helper_crtc **crtcs; 2115 struct drm_fb_helper_crtc **crtcs;
2089 struct drm_display_mode **modes; 2116 struct drm_display_mode **modes;
2090 struct drm_fb_offset *offsets; 2117 struct drm_fb_offset *offsets;
2091 struct drm_mode_set *modeset;
2092 bool *enabled; 2118 bool *enabled;
2093 int width, height; 2119 int width, height;
2094 int i; 2120 int i;
@@ -2136,45 +2162,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
2136 2162
2137 /* need to set the modesets up here for use later */ 2163 /* need to set the modesets up here for use later */
2138 /* fill out the connector<->crtc mappings into the modesets */ 2164 /* fill out the connector<->crtc mappings into the modesets */
2139 for (i = 0; i < fb_helper->crtc_count; i++) { 2165 for (i = 0; i < fb_helper->crtc_count; i++)
2140 modeset = &fb_helper->crtc_info[i].mode_set; 2166 drm_fb_helper_modeset_release(fb_helper,
2141 modeset->num_connectors = 0; 2167 &fb_helper->crtc_info[i].mode_set);
2142 modeset->fb = NULL;
2143 }
2144 2168
2145 for (i = 0; i < fb_helper->connector_count; i++) { 2169 for (i = 0; i < fb_helper->connector_count; i++) {
2146 struct drm_display_mode *mode = modes[i]; 2170 struct drm_display_mode *mode = modes[i];
2147 struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; 2171 struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
2148 struct drm_fb_offset *offset = &offsets[i]; 2172 struct drm_fb_offset *offset = &offsets[i];
2149 modeset = &fb_crtc->mode_set; 2173 struct drm_mode_set *modeset = &fb_crtc->mode_set;
2150 2174
2151 if (mode && fb_crtc) { 2175 if (mode && fb_crtc) {
2176 struct drm_connector *connector =
2177 fb_helper->connector_info[i]->connector;
2178
2152 DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n", 2179 DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
2153 mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y); 2180 mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
2181
2154 fb_crtc->desired_mode = mode; 2182 fb_crtc->desired_mode = mode;
2155 fb_crtc->x = offset->x; 2183 fb_crtc->x = offset->x;
2156 fb_crtc->y = offset->y; 2184 fb_crtc->y = offset->y;
2157 if (modeset->mode)
2158 drm_mode_destroy(dev, modeset->mode);
2159 modeset->mode = drm_mode_duplicate(dev, 2185 modeset->mode = drm_mode_duplicate(dev,
2160 fb_crtc->desired_mode); 2186 fb_crtc->desired_mode);
2161 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; 2187 drm_connector_reference(connector);
2188 modeset->connectors[modeset->num_connectors++] = connector;
2162 modeset->fb = fb_helper->fb; 2189 modeset->fb = fb_helper->fb;
2163 modeset->x = offset->x; 2190 modeset->x = offset->x;
2164 modeset->y = offset->y; 2191 modeset->y = offset->y;
2165 } 2192 }
2166 } 2193 }
2167
2168 /* Clear out any old modes if there are no more connected outputs. */
2169 for (i = 0; i < fb_helper->crtc_count; i++) {
2170 modeset = &fb_helper->crtc_info[i].mode_set;
2171 if (modeset->num_connectors == 0) {
2172 BUG_ON(modeset->fb);
2173 if (modeset->mode)
2174 drm_mode_destroy(dev, modeset->mode);
2175 modeset->mode = NULL;
2176 }
2177 }
2178out: 2194out:
2179 kfree(crtcs); 2195 kfree(crtcs);
2180 kfree(modes); 2196 kfree(modes);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index def78c8c1780..f86e7c846678 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -262,6 +262,26 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
262 return 0; 262 return 0;
263} 263}
264 264
265int exynos_atomic_check(struct drm_device *dev,
266 struct drm_atomic_state *state)
267{
268 int ret;
269
270 ret = drm_atomic_helper_check_modeset(dev, state);
271 if (ret)
272 return ret;
273
274 ret = drm_atomic_normalize_zpos(dev, state);
275 if (ret)
276 return ret;
277
278 ret = drm_atomic_helper_check_planes(dev, state);
279 if (ret)
280 return ret;
281
282 return ret;
283}
284
265static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 285static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
266{ 286{
267 struct drm_exynos_file_private *file_priv; 287 struct drm_exynos_file_private *file_priv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d215149e737b..80c4d5b81689 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -301,6 +301,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
301 301
302int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, 302int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
303 bool nonblock); 303 bool nonblock);
304int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
304 305
305 306
306extern struct platform_driver fimd_driver; 307extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 40ce841eb952..23cce0a3f5fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -190,7 +190,7 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
190static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 190static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
191 .fb_create = exynos_user_fb_create, 191 .fb_create = exynos_user_fb_create,
192 .output_poll_changed = exynos_drm_output_poll_changed, 192 .output_poll_changed = exynos_drm_output_poll_changed,
193 .atomic_check = drm_atomic_helper_check, 193 .atomic_check = exynos_atomic_check,
194 .atomic_commit = exynos_atomic_commit, 194 .atomic_commit = exynos_atomic_commit,
195}; 195};
196 196
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bfb2efd8d4d4..18dfdd5c1b3b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1447,8 +1447,6 @@ static int i915_drm_suspend(struct drm_device *dev)
1447 1447
1448 dev_priv->suspend_count++; 1448 dev_priv->suspend_count++;
1449 1449
1450 intel_display_set_init_power(dev_priv, false);
1451
1452 intel_csr_ucode_suspend(dev_priv); 1450 intel_csr_ucode_suspend(dev_priv);
1453 1451
1454out: 1452out:
@@ -1466,6 +1464,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1466 1464
1467 disable_rpm_wakeref_asserts(dev_priv); 1465 disable_rpm_wakeref_asserts(dev_priv);
1468 1466
1467 intel_display_set_init_power(dev_priv, false);
1468
1469 fw_csr = !IS_BROXTON(dev_priv) && 1469 fw_csr = !IS_BROXTON(dev_priv) &&
1470 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; 1470 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
1471 /* 1471 /*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8b9ee4e390c0..685e9e065287 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2883,6 +2883,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
2883extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2883extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2884 unsigned long arg); 2884 unsigned long arg);
2885#endif 2885#endif
2886extern const struct dev_pm_ops i915_pm_ops;
2887
2888extern int i915_driver_load(struct pci_dev *pdev,
2889 const struct pci_device_id *ent);
2890extern void i915_driver_unload(struct drm_device *dev);
2886extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); 2891extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2887extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); 2892extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
2888extern void i915_reset(struct drm_i915_private *dev_priv); 2893extern void i915_reset(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 947e82c2b175..91ab7e9d6d2e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1806,7 +1806,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1806 /* Use a partial view if it is bigger than available space */ 1806 /* Use a partial view if it is bigger than available space */
1807 chunk_size = MIN_CHUNK_PAGES; 1807 chunk_size = MIN_CHUNK_PAGES;
1808 if (i915_gem_object_is_tiled(obj)) 1808 if (i915_gem_object_is_tiled(obj))
1809 chunk_size = max(chunk_size, tile_row_pages(obj)); 1809 chunk_size = roundup(chunk_size, tile_row_pages(obj));
1810 1810
1811 memset(&view, 0, sizeof(view)); 1811 memset(&view, 0, sizeof(view));
1812 view.type = I915_GGTT_VIEW_PARTIAL; 1812 view.type = I915_GGTT_VIEW_PARTIAL;
@@ -3543,15 +3543,27 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3543 if (view->type == I915_GGTT_VIEW_NORMAL) 3543 if (view->type == I915_GGTT_VIEW_NORMAL)
3544 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 3544 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3545 PIN_MAPPABLE | PIN_NONBLOCK); 3545 PIN_MAPPABLE | PIN_NONBLOCK);
3546 if (IS_ERR(vma)) 3546 if (IS_ERR(vma)) {
3547 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0); 3547 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3548 unsigned int flags;
3549
3550 /* Valleyview is definitely limited to scanning out the first
3551 * 512MiB. Lets presume this behaviour was inherited from the
3552 * g4x display engine and that all earlier gen are similarly
3553 * limited. Testing suggests that it is a little more
3554 * complicated than this. For example, Cherryview appears quite
3555 * happy to scanout from anywhere within its global aperture.
3556 */
3557 flags = 0;
3558 if (HAS_GMCH_DISPLAY(i915))
3559 flags = PIN_MAPPABLE;
3560 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3561 }
3548 if (IS_ERR(vma)) 3562 if (IS_ERR(vma))
3549 goto err_unpin_display; 3563 goto err_unpin_display;
3550 3564
3551 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 3565 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3552 3566
3553 WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
3554
3555 i915_gem_object_flush_cpu_write_domain(obj); 3567 i915_gem_object_flush_cpu_write_domain(obj);
3556 3568
3557 old_write_domain = obj->base.write_domain; 3569 old_write_domain = obj->base.write_domain;
@@ -3588,7 +3600,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3588 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 3600 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3589 3601
3590 i915_vma_unpin(vma); 3602 i915_vma_unpin(vma);
3591 WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
3592} 3603}
3593 3604
3594/** 3605/**
@@ -3745,7 +3756,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3745 mappable = (vma->node.start + fence_size <= 3756 mappable = (vma->node.start + fence_size <=
3746 dev_priv->ggtt.mappable_end); 3757 dev_priv->ggtt.mappable_end);
3747 3758
3748 if (mappable && fenceable) 3759 /*
3760 * Explicitly disable for rotated VMA since the display does not
3761 * need the fence and the VMA is not accessible to other users.
3762 */
3763 if (mappable && fenceable &&
3764 vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
3749 vma->flags |= I915_VMA_CAN_FENCE; 3765 vma->flags |= I915_VMA_CAN_FENCE;
3750 else 3766 else
3751 vma->flags &= ~I915_VMA_CAN_FENCE; 3767 vma->flags &= ~I915_VMA_CAN_FENCE;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 8df1fa7234e8..2c7ba0ee127c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -290,6 +290,8 @@ i915_vma_put_fence(struct i915_vma *vma)
290{ 290{
291 struct drm_i915_fence_reg *fence = vma->fence; 291 struct drm_i915_fence_reg *fence = vma->fence;
292 292
293 assert_rpm_wakelock_held(to_i915(vma->vm->dev));
294
293 if (!fence) 295 if (!fence)
294 return 0; 296 return 0;
295 297
@@ -341,6 +343,8 @@ i915_vma_get_fence(struct i915_vma *vma)
341 struct drm_i915_fence_reg *fence; 343 struct drm_i915_fence_reg *fence;
342 struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; 344 struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
343 345
346 assert_rpm_wakelock_held(to_i915(vma->vm->dev));
347
344 /* Just update our place in the LRU if our fence is getting reused. */ 348 /* Just update our place in the LRU if our fence is getting reused. */
345 if (vma->fence) { 349 if (vma->fence) {
346 fence = vma->fence; 350 fence = vma->fence;
@@ -371,6 +375,12 @@ void i915_gem_restore_fences(struct drm_device *dev)
371 struct drm_i915_private *dev_priv = to_i915(dev); 375 struct drm_i915_private *dev_priv = to_i915(dev);
372 int i; 376 int i;
373 377
378 /* Note that this may be called outside of struct_mutex, by
379 * runtime suspend/resume. The barrier we require is enforced by
380 * rpm itself - all access to fences/GTT are only within an rpm
381 * wakeref, and to acquire that wakeref you must pass through here.
382 */
383
374 for (i = 0; i < dev_priv->num_fence_regs; i++) { 384 for (i = 0; i < dev_priv->num_fence_regs; i++) {
375 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 385 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
376 struct i915_vma *vma = reg->vma; 386 struct i915_vma *vma = reg->vma;
@@ -379,10 +389,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
379 * Commit delayed tiling changes if we have an object still 389 * Commit delayed tiling changes if we have an object still
380 * attached to the fence, otherwise just clear the fence. 390 * attached to the fence, otherwise just clear the fence.
381 */ 391 */
382 if (vma && !i915_gem_object_is_tiled(vma->obj)) 392 if (vma && !i915_gem_object_is_tiled(vma->obj)) {
393 GEM_BUG_ON(!reg->dirty);
394 GEM_BUG_ON(vma->obj->fault_mappable);
395
396 list_move(&reg->link, &dev_priv->mm.fence_list);
397 vma->fence = NULL;
383 vma = NULL; 398 vma = NULL;
399 }
384 400
385 fence_update(reg, vma); 401 fence_write(reg, vma);
402 reg->vma = vma;
386 } 403 }
387} 404}
388 405
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 687c768833b3..31e6edd08dd0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -431,9 +431,6 @@ static const struct pci_device_id pciidlist[] = {
431}; 431};
432MODULE_DEVICE_TABLE(pci, pciidlist); 432MODULE_DEVICE_TABLE(pci, pciidlist);
433 433
434extern int i915_driver_load(struct pci_dev *pdev,
435 const struct pci_device_id *ent);
436
437static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 434static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
438{ 435{
439 struct intel_device_info *intel_info = 436 struct intel_device_info *intel_info =
@@ -463,8 +460,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
463 return i915_driver_load(pdev, ent); 460 return i915_driver_load(pdev, ent);
464} 461}
465 462
466extern void i915_driver_unload(struct drm_device *dev);
467
468static void i915_pci_remove(struct pci_dev *pdev) 463static void i915_pci_remove(struct pci_dev *pdev)
469{ 464{
470 struct drm_device *dev = pci_get_drvdata(pdev); 465 struct drm_device *dev = pci_get_drvdata(pdev);
@@ -473,8 +468,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
473 drm_dev_unref(dev); 468 drm_dev_unref(dev);
474} 469}
475 470
476extern const struct dev_pm_ops i915_pm_ops;
477
478static struct pci_driver i915_pci_driver = { 471static struct pci_driver i915_pci_driver = {
479 .name = DRIVER_NAME, 472 .name = DRIVER_NAME,
480 .id_table = pciidlist, 473 .id_table = pciidlist,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c6e69e4cfa83..1f8af87c6294 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1031,6 +1031,77 @@ static u8 translate_iboost(u8 val)
1031 return mapping[val]; 1031 return mapping[val];
1032} 1032}
1033 1033
1034static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
1035 enum port port)
1036{
1037 const struct ddi_vbt_port_info *info =
1038 &dev_priv->vbt.ddi_port_info[port];
1039 enum port p;
1040
1041 if (!info->alternate_ddc_pin)
1042 return;
1043
1044 for_each_port_masked(p, (1 << port) - 1) {
1045 struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
1046
1047 if (info->alternate_ddc_pin != i->alternate_ddc_pin)
1048 continue;
1049
1050 DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
1051 "disabling port %c DVI/HDMI support\n",
1052 port_name(p), i->alternate_ddc_pin,
1053 port_name(port), port_name(p));
1054
1055 /*
1056 * If we have multiple ports supposedly sharing the
1057 * pin, then dvi/hdmi couldn't exist on the shared
1058 * port. Otherwise they share the same ddc bin and
1059 * system couldn't communicate with them separately.
1060 *
1061 * Due to parsing the ports in alphabetical order,
1062 * a higher port will always clobber a lower one.
1063 */
1064 i->supports_dvi = false;
1065 i->supports_hdmi = false;
1066 i->alternate_ddc_pin = 0;
1067 }
1068}
1069
1070static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
1071 enum port port)
1072{
1073 const struct ddi_vbt_port_info *info =
1074 &dev_priv->vbt.ddi_port_info[port];
1075 enum port p;
1076
1077 if (!info->alternate_aux_channel)
1078 return;
1079
1080 for_each_port_masked(p, (1 << port) - 1) {
1081 struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
1082
1083 if (info->alternate_aux_channel != i->alternate_aux_channel)
1084 continue;
1085
1086 DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
1087 "disabling port %c DP support\n",
1088 port_name(p), i->alternate_aux_channel,
1089 port_name(port), port_name(p));
1090
1091 /*
1092 * If we have multiple ports supposedlt sharing the
1093 * aux channel, then DP couldn't exist on the shared
1094 * port. Otherwise they share the same aux channel
1095 * and system couldn't communicate with them separately.
1096 *
1097 * Due to parsing the ports in alphabetical order,
1098 * a higher port will always clobber a lower one.
1099 */
1100 i->supports_dp = false;
1101 i->alternate_aux_channel = 0;
1102 }
1103}
1104
1034static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, 1105static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1035 const struct bdb_header *bdb) 1106 const struct bdb_header *bdb)
1036{ 1107{
@@ -1105,54 +1176,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1105 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); 1176 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
1106 1177
1107 if (is_dvi) { 1178 if (is_dvi) {
1108 if (port == PORT_E) { 1179 info->alternate_ddc_pin = ddc_pin;
1109 info->alternate_ddc_pin = ddc_pin; 1180
1110 /* if DDIE share ddc pin with other port, then 1181 sanitize_ddc_pin(dev_priv, port);
1111 * dvi/hdmi couldn't exist on the shared port.
1112 * Otherwise they share the same ddc bin and system
1113 * couldn't communicate with them seperately. */
1114 if (ddc_pin == DDC_PIN_B) {
1115 dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
1116 dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
1117 } else if (ddc_pin == DDC_PIN_C) {
1118 dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
1119 dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
1120 } else if (ddc_pin == DDC_PIN_D) {
1121 dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
1122 dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
1123 }
1124 } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
1125 DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
1126 else if (ddc_pin == DDC_PIN_C && port != PORT_C)
1127 DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
1128 else if (ddc_pin == DDC_PIN_D && port != PORT_D)
1129 DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
1130 } 1182 }
1131 1183
1132 if (is_dp) { 1184 if (is_dp) {
1133 if (port == PORT_E) { 1185 info->alternate_aux_channel = aux_channel;
1134 info->alternate_aux_channel = aux_channel; 1186
1135 /* if DDIE share aux channel with other port, then 1187 sanitize_aux_ch(dev_priv, port);
1136 * DP couldn't exist on the shared port. Otherwise
1137 * they share the same aux channel and system
1138 * couldn't communicate with them seperately. */
1139 if (aux_channel == DP_AUX_A)
1140 dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
1141 else if (aux_channel == DP_AUX_B)
1142 dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
1143 else if (aux_channel == DP_AUX_C)
1144 dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
1145 else if (aux_channel == DP_AUX_D)
1146 dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
1147 }
1148 else if (aux_channel == DP_AUX_A && port != PORT_A)
1149 DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
1150 else if (aux_channel == DP_AUX_B && port != PORT_B)
1151 DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
1152 else if (aux_channel == DP_AUX_C && port != PORT_C)
1153 DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
1154 else if (aux_channel == DP_AUX_D && port != PORT_D)
1155 DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
1156 } 1188 }
1157 1189
1158 if (bdb->version >= 158) { 1190 if (bdb->version >= 158) {
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 73b6858600ac..1b20e160bc1f 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -192,7 +192,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
192 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 192 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
193 const int s_max = 3, ss_max = 3, eu_max = 8; 193 const int s_max = 3, ss_max = 3, eu_max = 8;
194 int s, ss; 194 int s, ss;
195 u32 fuse2, eu_disable[s_max]; 195 u32 fuse2, eu_disable[3]; /* s_max */
196 196
197 fuse2 = I915_READ(GEN8_FUSE2); 197 fuse2 = I915_READ(GEN8_FUSE2);
198 sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; 198 sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fbcfed63a76e..81c11499bcf0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2978,7 +2978,8 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
2978 /* Rotate src coordinates to match rotated GTT view */ 2978 /* Rotate src coordinates to match rotated GTT view */
2979 if (intel_rotation_90_or_270(rotation)) 2979 if (intel_rotation_90_or_270(rotation))
2980 drm_rect_rotate(&plane_state->base.src, 2980 drm_rect_rotate(&plane_state->base.src,
2981 fb->width, fb->height, DRM_ROTATE_270); 2981 fb->width << 16, fb->height << 16,
2982 DRM_ROTATE_270);
2982 2983
2983 /* 2984 /*
2984 * Handle the AUX surface first since 2985 * Handle the AUX surface first since
@@ -10242,6 +10243,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
10242 bxt_set_cdclk(to_i915(dev), req_cdclk); 10243 bxt_set_cdclk(to_i915(dev), req_cdclk);
10243} 10244}
10244 10245
10246static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
10247 int pixel_rate)
10248{
10249 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
10250
10251 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
10252 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
10253 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
10254
10255 /* BSpec says "Do not use DisplayPort with CDCLK less than
10256 * 432 MHz, audio enabled, port width x4, and link rate
10257 * HBR2 (5.4 GHz), or else there may be audio corruption or
10258 * screen corruption."
10259 */
10260 if (intel_crtc_has_dp_encoder(crtc_state) &&
10261 crtc_state->has_audio &&
10262 crtc_state->port_clock >= 540000 &&
10263 crtc_state->lane_count == 4)
10264 pixel_rate = max(432000, pixel_rate);
10265
10266 return pixel_rate;
10267}
10268
10245/* compute the max rate for new configuration */ 10269/* compute the max rate for new configuration */
10246static int ilk_max_pixel_rate(struct drm_atomic_state *state) 10270static int ilk_max_pixel_rate(struct drm_atomic_state *state)
10247{ 10271{
@@ -10267,9 +10291,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
10267 10291
10268 pixel_rate = ilk_pipe_pixel_rate(crtc_state); 10292 pixel_rate = ilk_pipe_pixel_rate(crtc_state);
10269 10293
10270 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 10294 if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
10271 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) 10295 pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
10272 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); 10296 pixel_rate);
10273 10297
10274 intel_state->min_pixclk[i] = pixel_rate; 10298 intel_state->min_pixclk[i] = pixel_rate;
10275 } 10299 }
@@ -14310,7 +14334,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
14310 14334
14311 for_each_plane_in_state(state, plane, plane_state, i) { 14335 for_each_plane_in_state(state, plane, plane_state, i) {
14312 struct intel_plane_state *intel_plane_state = 14336 struct intel_plane_state *intel_plane_state =
14313 to_intel_plane_state(plane_state); 14337 to_intel_plane_state(plane->state);
14314 14338
14315 if (!intel_plane_state->wait_req) 14339 if (!intel_plane_state->wait_req)
14316 continue; 14340 continue;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 14a3cf0b7213..3581b5a7f716 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1108,6 +1108,44 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1108 return ret; 1108 return ret;
1109} 1109}
1110 1110
1111static enum port intel_aux_port(struct drm_i915_private *dev_priv,
1112 enum port port)
1113{
1114 const struct ddi_vbt_port_info *info =
1115 &dev_priv->vbt.ddi_port_info[port];
1116 enum port aux_port;
1117
1118 if (!info->alternate_aux_channel) {
1119 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1120 port_name(port), port_name(port));
1121 return port;
1122 }
1123
1124 switch (info->alternate_aux_channel) {
1125 case DP_AUX_A:
1126 aux_port = PORT_A;
1127 break;
1128 case DP_AUX_B:
1129 aux_port = PORT_B;
1130 break;
1131 case DP_AUX_C:
1132 aux_port = PORT_C;
1133 break;
1134 case DP_AUX_D:
1135 aux_port = PORT_D;
1136 break;
1137 default:
1138 MISSING_CASE(info->alternate_aux_channel);
1139 aux_port = PORT_A;
1140 break;
1141 }
1142
1143 DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1144 port_name(aux_port), port_name(port));
1145
1146 return aux_port;
1147}
1148
1111static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv, 1149static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1112 enum port port) 1150 enum port port)
1113{ 1151{
@@ -1168,36 +1206,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1168 } 1206 }
1169} 1207}
1170 1208
1171/*
1172 * On SKL we don't have Aux for port E so we rely
1173 * on VBT to set a proper alternate aux channel.
1174 */
1175static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1176{
1177 const struct ddi_vbt_port_info *info =
1178 &dev_priv->vbt.ddi_port_info[PORT_E];
1179
1180 switch (info->alternate_aux_channel) {
1181 case DP_AUX_A:
1182 return PORT_A;
1183 case DP_AUX_B:
1184 return PORT_B;
1185 case DP_AUX_C:
1186 return PORT_C;
1187 case DP_AUX_D:
1188 return PORT_D;
1189 default:
1190 MISSING_CASE(info->alternate_aux_channel);
1191 return PORT_A;
1192 }
1193}
1194
1195static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, 1209static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1196 enum port port) 1210 enum port port)
1197{ 1211{
1198 if (port == PORT_E)
1199 port = skl_porte_aux_port(dev_priv);
1200
1201 switch (port) { 1212 switch (port) {
1202 case PORT_A: 1213 case PORT_A:
1203 case PORT_B: 1214 case PORT_B:
@@ -1213,9 +1224,6 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1213static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv, 1224static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1214 enum port port, int index) 1225 enum port port, int index)
1215{ 1226{
1216 if (port == PORT_E)
1217 port = skl_porte_aux_port(dev_priv);
1218
1219 switch (port) { 1227 switch (port) {
1220 case PORT_A: 1228 case PORT_A:
1221 case PORT_B: 1229 case PORT_B:
@@ -1253,7 +1261,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1253static void intel_aux_reg_init(struct intel_dp *intel_dp) 1261static void intel_aux_reg_init(struct intel_dp *intel_dp)
1254{ 1262{
1255 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 1263 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1256 enum port port = dp_to_dig_port(intel_dp)->port; 1264 enum port port = intel_aux_port(dev_priv,
1265 dp_to_dig_port(intel_dp)->port);
1257 int i; 1266 int i;
1258 1267
1259 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port); 1268 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
@@ -3551,8 +3560,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
3551 /* Read the eDP Display control capabilities registers */ 3560 /* Read the eDP Display control capabilities registers */
3552 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && 3561 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3553 drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 3562 drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3554 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) == 3563 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3555 sizeof(intel_dp->edp_dpcd))) 3564 sizeof(intel_dp->edp_dpcd))
3556 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), 3565 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3557 intel_dp->edp_dpcd); 3566 intel_dp->edp_dpcd);
3558 3567
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index faa67624e1ed..c43dd9abce79 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
104 int lines; 104 int lines;
105 105
106 intel_fbc_get_plane_source_size(cache, NULL, &lines); 106 intel_fbc_get_plane_source_size(cache, NULL, &lines);
107 if (INTEL_INFO(dev_priv)->gen >= 7) 107 if (INTEL_GEN(dev_priv) == 7)
108 lines = min(lines, 2048); 108 lines = min(lines, 2048);
109 else if (INTEL_GEN(dev_priv) >= 8)
110 lines = min(lines, 2560);
109 111
110 /* Hardware needs the full buffer stride, not just the active area. */ 112 /* Hardware needs the full buffer stride, not just the active area. */
111 return lines * cache->fb.stride; 113 return lines * cache->fb.stride;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f40a35f2913a..13c306173f27 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1799,6 +1799,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
1799 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 1799 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
1800} 1800}
1801 1801
1802static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
1803 enum port port)
1804{
1805 const struct ddi_vbt_port_info *info =
1806 &dev_priv->vbt.ddi_port_info[port];
1807 u8 ddc_pin;
1808
1809 if (info->alternate_ddc_pin) {
1810 DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
1811 info->alternate_ddc_pin, port_name(port));
1812 return info->alternate_ddc_pin;
1813 }
1814
1815 switch (port) {
1816 case PORT_B:
1817 if (IS_BROXTON(dev_priv))
1818 ddc_pin = GMBUS_PIN_1_BXT;
1819 else
1820 ddc_pin = GMBUS_PIN_DPB;
1821 break;
1822 case PORT_C:
1823 if (IS_BROXTON(dev_priv))
1824 ddc_pin = GMBUS_PIN_2_BXT;
1825 else
1826 ddc_pin = GMBUS_PIN_DPC;
1827 break;
1828 case PORT_D:
1829 if (IS_CHERRYVIEW(dev_priv))
1830 ddc_pin = GMBUS_PIN_DPD_CHV;
1831 else
1832 ddc_pin = GMBUS_PIN_DPD;
1833 break;
1834 default:
1835 MISSING_CASE(port);
1836 ddc_pin = GMBUS_PIN_DPB;
1837 break;
1838 }
1839
1840 DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
1841 ddc_pin, port_name(port));
1842
1843 return ddc_pin;
1844}
1845
1802void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 1846void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1803 struct intel_connector *intel_connector) 1847 struct intel_connector *intel_connector)
1804{ 1848{
@@ -1808,7 +1852,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1808 struct drm_device *dev = intel_encoder->base.dev; 1852 struct drm_device *dev = intel_encoder->base.dev;
1809 struct drm_i915_private *dev_priv = to_i915(dev); 1853 struct drm_i915_private *dev_priv = to_i915(dev);
1810 enum port port = intel_dig_port->port; 1854 enum port port = intel_dig_port->port;
1811 uint8_t alternate_ddc_pin;
1812 1855
1813 DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", 1856 DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
1814 port_name(port)); 1857 port_name(port));
@@ -1826,12 +1869,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1826 connector->doublescan_allowed = 0; 1869 connector->doublescan_allowed = 0;
1827 connector->stereo_allowed = 1; 1870 connector->stereo_allowed = 1;
1828 1871
1872 intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
1873
1829 switch (port) { 1874 switch (port) {
1830 case PORT_B: 1875 case PORT_B:
1831 if (IS_BROXTON(dev_priv))
1832 intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
1833 else
1834 intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
1835 /* 1876 /*
1836 * On BXT A0/A1, sw needs to activate DDIA HPD logic and 1877 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
1837 * interrupts to check the external panel connection. 1878 * interrupts to check the external panel connection.
@@ -1842,46 +1883,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1842 intel_encoder->hpd_pin = HPD_PORT_B; 1883 intel_encoder->hpd_pin = HPD_PORT_B;
1843 break; 1884 break;
1844 case PORT_C: 1885 case PORT_C:
1845 if (IS_BROXTON(dev_priv))
1846 intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
1847 else
1848 intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
1849 intel_encoder->hpd_pin = HPD_PORT_C; 1886 intel_encoder->hpd_pin = HPD_PORT_C;
1850 break; 1887 break;
1851 case PORT_D: 1888 case PORT_D:
1852 if (WARN_ON(IS_BROXTON(dev_priv)))
1853 intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
1854 else if (IS_CHERRYVIEW(dev_priv))
1855 intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
1856 else
1857 intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
1858 intel_encoder->hpd_pin = HPD_PORT_D; 1889 intel_encoder->hpd_pin = HPD_PORT_D;
1859 break; 1890 break;
1860 case PORT_E: 1891 case PORT_E:
1861 /* On SKL PORT E doesn't have seperate GMBUS pin
1862 * We rely on VBT to set a proper alternate GMBUS pin. */
1863 alternate_ddc_pin =
1864 dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
1865 switch (alternate_ddc_pin) {
1866 case DDC_PIN_B:
1867 intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
1868 break;
1869 case DDC_PIN_C:
1870 intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
1871 break;
1872 case DDC_PIN_D:
1873 intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
1874 break;
1875 default:
1876 MISSING_CASE(alternate_ddc_pin);
1877 }
1878 intel_encoder->hpd_pin = HPD_PORT_E; 1892 intel_encoder->hpd_pin = HPD_PORT_E;
1879 break; 1893 break;
1880 case PORT_A:
1881 intel_encoder->hpd_pin = HPD_PORT_A;
1882 /* Internal port only for eDP. */
1883 default: 1894 default:
1884 BUG(); 1895 MISSING_CASE(port);
1896 return;
1885 } 1897 }
1886 1898
1887 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1899 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a2f751cd187a..db24f898853c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3362,13 +3362,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3362 int num_active; 3362 int num_active;
3363 int id, i; 3363 int id, i;
3364 3364
3365 /* Clear the partitioning for disabled planes. */
3366 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3367 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3368
3365 if (WARN_ON(!state)) 3369 if (WARN_ON(!state))
3366 return 0; 3370 return 0;
3367 3371
3368 if (!cstate->base.active) { 3372 if (!cstate->base.active) {
3369 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0; 3373 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3370 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3371 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3372 return 0; 3374 return 0;
3373 } 3375 }
3374 3376
@@ -3468,12 +3470,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3468 return 0; 3470 return 0;
3469} 3471}
3470 3472
3471static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
3472{
3473 /* TODO: Take into account the scalers once we support them */
3474 return config->base.adjusted_mode.crtc_clock;
3475}
3476
3477/* 3473/*
3478 * The max latency should be 257 (max the punit can code is 255 and we add 2us 3474 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3479 * for the read latency) and cpp should always be <= 8, so that 3475 * for the read latency) and cpp should always be <= 8, so that
@@ -3524,7 +3520,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
3524 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate 3520 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3525 * with additional adjustments for plane-specific scaling. 3521 * with additional adjustments for plane-specific scaling.
3526 */ 3522 */
3527 adjusted_pixel_rate = skl_pipe_pixel_rate(cstate); 3523 adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
3528 downscale_amount = skl_plane_downscale_amount(pstate); 3524 downscale_amount = skl_plane_downscale_amount(pstate);
3529 3525
3530 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; 3526 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
@@ -3736,11 +3732,11 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
3736 if (!cstate->base.active) 3732 if (!cstate->base.active)
3737 return 0; 3733 return 0;
3738 3734
3739 if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0)) 3735 if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
3740 return 0; 3736 return 0;
3741 3737
3742 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, 3738 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3743 skl_pipe_pixel_rate(cstate)); 3739 ilk_pipe_pixel_rate(cstate));
3744} 3740}
3745 3741
3746static void skl_compute_transition_wm(struct intel_crtc_state *cstate, 3742static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
@@ -4050,6 +4046,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
4050 intel_state->wm_results.dirty_pipes = ~0; 4046 intel_state->wm_results.dirty_pipes = ~0;
4051 } 4047 }
4052 4048
4049 /*
4050 * We're not recomputing for the pipes not included in the commit, so
4051 * make sure we start with the current state.
4052 */
4053 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
4054
4053 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { 4055 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
4054 struct intel_crtc_state *cstate; 4056 struct intel_crtc_state *cstate;
4055 4057
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6c11168facd6..a38c2fefe85a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1139,7 +1139,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1139 1139
1140 intel_power_sequencer_reset(dev_priv); 1140 intel_power_sequencer_reset(dev_priv);
1141 1141
1142 intel_hpd_poll_init(dev_priv); 1142 /* Prevent us from re-enabling polling on accident in late suspend */
1143 if (!dev_priv->drm.dev->power.is_suspended)
1144 intel_hpd_poll_init(dev_priv);
1143} 1145}
1144 1146
1145static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 1147static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 98df09c2b388..9672b579f950 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -357,8 +357,8 @@ static int imx_drm_bind(struct device *dev)
357 int ret; 357 int ret;
358 358
359 drm = drm_dev_alloc(&imx_drm_driver, dev); 359 drm = drm_dev_alloc(&imx_drm_driver, dev);
360 if (!drm) 360 if (IS_ERR(drm))
361 return -ENOMEM; 361 return PTR_ERR(drm);
362 362
363 imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL); 363 imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
364 if (!imxdrm) { 364 if (!imxdrm) {
@@ -436,9 +436,11 @@ static int imx_drm_bind(struct device *dev)
436 436
437err_fbhelper: 437err_fbhelper:
438 drm_kms_helper_poll_fini(drm); 438 drm_kms_helper_poll_fini(drm);
439#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
439 if (imxdrm->fbhelper) 440 if (imxdrm->fbhelper)
440 drm_fbdev_cma_fini(imxdrm->fbhelper); 441 drm_fbdev_cma_fini(imxdrm->fbhelper);
441err_unbind: 442err_unbind:
443#endif
442 component_unbind_all(drm->dev, drm); 444 component_unbind_all(drm->dev, drm);
443err_vblank: 445err_vblank:
444 drm_vblank_cleanup(drm); 446 drm_vblank_cleanup(drm);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 4e1ae3fc462d..6be515a9fb69 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
68 68
69 ipu_dc_disable_channel(ipu_crtc->dc); 69 ipu_dc_disable_channel(ipu_crtc->dc);
70 ipu_di_disable(ipu_crtc->di); 70 ipu_di_disable(ipu_crtc->di);
71 /*
72 * Planes must be disabled before DC clock is removed, as otherwise the
73 * attached IDMACs will be left in undefined state, possibly hanging
74 * the IPU or even system.
75 */
76 drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
71 ipu_dc_disable(ipu); 77 ipu_dc_disable(ipu);
72 78
73 spin_lock_irq(&crtc->dev->event_lock); 79 spin_lock_irq(&crtc->dev->event_lock);
@@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
77 } 83 }
78 spin_unlock_irq(&crtc->dev->event_lock); 84 spin_unlock_irq(&crtc->dev->event_lock);
79 85
80 /* always disable planes on the CRTC */
81 drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
82
83 drm_crtc_vblank_off(crtc); 86 drm_crtc_vblank_off(crtc);
84} 87}
85 88
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index ce22d0a0ddc8..d5864ed4d772 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -103,11 +103,11 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
103 (state->src_x >> 16) / 2 - eba; 103 (state->src_x >> 16) / 2 - eba;
104} 104}
105 105
106static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane, 106static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane)
107 struct drm_plane_state *old_state)
108{ 107{
109 struct drm_plane *plane = &ipu_plane->base; 108 struct drm_plane *plane = &ipu_plane->base;
110 struct drm_plane_state *state = plane->state; 109 struct drm_plane_state *state = plane->state;
110 struct drm_crtc_state *crtc_state = state->crtc->state;
111 struct drm_framebuffer *fb = state->fb; 111 struct drm_framebuffer *fb = state->fb;
112 unsigned long eba, ubo, vbo; 112 unsigned long eba, ubo, vbo;
113 int active; 113 int active;
@@ -117,7 +117,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
117 switch (fb->pixel_format) { 117 switch (fb->pixel_format) {
118 case DRM_FORMAT_YUV420: 118 case DRM_FORMAT_YUV420:
119 case DRM_FORMAT_YVU420: 119 case DRM_FORMAT_YVU420:
120 if (old_state->fb) 120 if (!drm_atomic_crtc_needs_modeset(crtc_state))
121 break; 121 break;
122 122
123 /* 123 /*
@@ -149,7 +149,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
149 break; 149 break;
150 } 150 }
151 151
152 if (old_state->fb) { 152 if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
153 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); 153 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
154 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); 154 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
155 ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); 155 ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -259,6 +259,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
259 struct drm_framebuffer *fb = state->fb; 259 struct drm_framebuffer *fb = state->fb;
260 struct drm_framebuffer *old_fb = old_state->fb; 260 struct drm_framebuffer *old_fb = old_state->fb;
261 unsigned long eba, ubo, vbo, old_ubo, old_vbo; 261 unsigned long eba, ubo, vbo, old_ubo, old_vbo;
262 int hsub, vsub;
262 263
263 /* Ok to disable */ 264 /* Ok to disable */
264 if (!fb) 265 if (!fb)
@@ -355,7 +356,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
355 if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) 356 if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
356 return -EINVAL; 357 return -EINVAL;
357 358
358 if (old_fb) { 359 if (old_fb &&
360 (old_fb->pixel_format == DRM_FORMAT_YUV420 ||
361 old_fb->pixel_format == DRM_FORMAT_YVU420)) {
359 old_ubo = drm_plane_state_to_ubo(old_state); 362 old_ubo = drm_plane_state_to_ubo(old_state);
360 old_vbo = drm_plane_state_to_vbo(old_state); 363 old_vbo = drm_plane_state_to_vbo(old_state);
361 if (ubo != old_ubo || vbo != old_vbo) 364 if (ubo != old_ubo || vbo != old_vbo)
@@ -370,6 +373,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
370 373
371 if (old_fb && old_fb->pitches[1] != fb->pitches[1]) 374 if (old_fb && old_fb->pitches[1] != fb->pitches[1])
372 crtc_state->mode_changed = true; 375 crtc_state->mode_changed = true;
376
377 /*
378 * The x/y offsets must be even in case of horizontal/vertical
379 * chroma subsampling.
380 */
381 hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
382 vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
383 if (((state->src_x >> 16) & (hsub - 1)) ||
384 ((state->src_y >> 16) & (vsub - 1)))
385 return -EINVAL;
373 } 386 }
374 387
375 return 0; 388 return 0;
@@ -392,7 +405,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
392 struct drm_crtc_state *crtc_state = state->crtc->state; 405 struct drm_crtc_state *crtc_state = state->crtc->state;
393 406
394 if (!drm_atomic_crtc_needs_modeset(crtc_state)) { 407 if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
395 ipu_plane_atomic_set_base(ipu_plane, old_state); 408 ipu_plane_atomic_set_base(ipu_plane);
396 return; 409 return;
397 } 410 }
398 } 411 }
@@ -424,6 +437,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
424 ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); 437 ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
425 break; 438 break;
426 default: 439 default:
440 ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
427 break; 441 break;
428 } 442 }
429 } 443 }
@@ -437,7 +451,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
437 ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); 451 ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
438 ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); 452 ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
439 ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]); 453 ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
440 ipu_plane_atomic_set_base(ipu_plane, old_state); 454 ipu_plane_atomic_set_base(ipu_plane);
441 ipu_plane_enable(ipu_plane); 455 ipu_plane_enable(ipu_plane);
442} 456}
443 457
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index f05ed0e1f3d6..6f240021705b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -139,6 +139,7 @@ struct msm_dsi_host {
139 139
140 u32 err_work_state; 140 u32 err_work_state;
141 struct work_struct err_work; 141 struct work_struct err_work;
142 struct work_struct hpd_work;
142 struct workqueue_struct *workqueue; 143 struct workqueue_struct *workqueue;
143 144
144 /* DSI 6G TX buffer*/ 145 /* DSI 6G TX buffer*/
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1294 wmb(); /* make sure dsi controller enabled again */ 1295 wmb(); /* make sure dsi controller enabled again */
1295} 1296}
1296 1297
1298static void dsi_hpd_worker(struct work_struct *work)
1299{
1300 struct msm_dsi_host *msm_host =
1301 container_of(work, struct msm_dsi_host, hpd_work);
1302
1303 drm_helper_hpd_irq_event(msm_host->dev);
1304}
1305
1297static void dsi_err_worker(struct work_struct *work) 1306static void dsi_err_worker(struct work_struct *work)
1298{ 1307{
1299 struct msm_dsi_host *msm_host = 1308 struct msm_dsi_host *msm_host =
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
1480 1489
1481 DBG("id=%d", msm_host->id); 1490 DBG("id=%d", msm_host->id);
1482 if (msm_host->dev) 1491 if (msm_host->dev)
1483 drm_helper_hpd_irq_event(msm_host->dev); 1492 queue_work(msm_host->workqueue, &msm_host->hpd_work);
1484 1493
1485 return 0; 1494 return 0;
1486} 1495}
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
1494 1503
1495 DBG("id=%d", msm_host->id); 1504 DBG("id=%d", msm_host->id);
1496 if (msm_host->dev) 1505 if (msm_host->dev)
1497 drm_helper_hpd_irq_event(msm_host->dev); 1506 queue_work(msm_host->workqueue, &msm_host->hpd_work);
1498 1507
1499 return 0; 1508 return 0;
1500} 1509}
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1748 /* setup workqueue */ 1757 /* setup workqueue */
1749 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); 1758 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1750 INIT_WORK(&msm_host->err_work, dsi_err_worker); 1759 INIT_WORK(&msm_host->err_work, dsi_err_worker);
1760 INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
1751 1761
1752 msm_dsi->host = &msm_host->base; 1762 msm_dsi->host = &msm_host->base;
1753 msm_dsi->id = msm_host->id; 1763 msm_dsi->id = msm_host->id;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 598fdaff0a41..26e3a01a99c2 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
521 .parent_names = (const char *[]){ "xo" }, 521 .parent_names = (const char *[]){ "xo" },
522 .num_parents = 1, 522 .num_parents = 1,
523 .name = vco_name, 523 .name = vco_name,
524 .flags = CLK_IGNORE_UNUSED,
524 .ops = &clk_ops_dsi_pll_28nm_vco, 525 .ops = &clk_ops_dsi_pll_28nm_vco,
525 }; 526 };
526 struct device *dev = &pll_28nm->pdev->dev; 527 struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 38c90e1eb002..49008451085b 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
412 struct clk_init_data vco_init = { 412 struct clk_init_data vco_init = {
413 .parent_names = (const char *[]){ "pxo" }, 413 .parent_names = (const char *[]){ "pxo" },
414 .num_parents = 1, 414 .num_parents = 1,
415 .flags = CLK_IGNORE_UNUSED,
415 .ops = &clk_ops_dsi_pll_28nm_vco, 416 .ops = &clk_ops_dsi_pll_28nm_vco,
416 }; 417 };
417 struct device *dev = &pll_28nm->pdev->dev; 418 struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index aa94a553794f..143eab46ba68 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = {
702 .ops = &hdmi_8996_pll_ops, 702 .ops = &hdmi_8996_pll_ops,
703 .parent_names = hdmi_pll_parents, 703 .parent_names = hdmi_pll_parents,
704 .num_parents = ARRAY_SIZE(hdmi_pll_parents), 704 .num_parents = ARRAY_SIZE(hdmi_pll_parents),
705 .flags = CLK_IGNORE_UNUSED,
705}; 706};
706 707
707int msm_hdmi_pll_8996_init(struct platform_device *pdev) 708int msm_hdmi_pll_8996_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 92da69aa6187..99590758c68b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = {
424 .ops = &hdmi_pll_ops, 424 .ops = &hdmi_pll_ops,
425 .parent_names = hdmi_pll_parents, 425 .parent_names = hdmi_pll_parents,
426 .num_parents = ARRAY_SIZE(hdmi_pll_parents), 426 .num_parents = ARRAY_SIZE(hdmi_pll_parents),
427 .flags = CLK_IGNORE_UNUSED,
427}; 428};
428 429
429int msm_hdmi_pll_8960_init(struct platform_device *pdev) 430int msm_hdmi_pll_8960_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index ac9e4cde1380..8b4e3004f451 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
272 .count = 2, 272 .count = 2,
273 .base = { 0x14000, 0x16000 }, 273 .base = { 0x14000, 0x16000 },
274 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 274 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
275 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 275 MDP_PIPE_CAP_DECIMATION,
276 }, 276 },
277 .pipe_dma = { 277 .pipe_dma = {
278 .count = 1, 278 .count = 1,
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
282 .lm = { 282 .lm = {
283 .count = 2, /* LM0 and LM3 */ 283 .count = 2, /* LM0 and LM3 */
284 .base = { 0x44000, 0x47000 }, 284 .base = { 0x44000, 0x47000 },
285 .nb_stages = 5, 285 .nb_stages = 8,
286 .max_width = 2048, 286 .max_width = 2048,
287 .max_height = 0xFFFF, 287 .max_height = 0xFFFF,
288 }, 288 },
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index fa2be7ce9468..c205c360e16d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -223,12 +223,7 @@ static void blend_setup(struct drm_crtc *crtc)
223 plane_cnt++; 223 plane_cnt++;
224 } 224 }
225 225
226 /* 226 if (!pstates[STAGE_BASE]) {
227 * If there is no base layer, enable border color.
228 * Although it's not possbile in current blend logic,
229 * put it here as a reminder.
230 */
231 if (!pstates[STAGE_BASE] && plane_cnt) {
232 ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; 227 ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
233 DBG("Border Color is enabled"); 228 DBG("Border Color is enabled");
234 } 229 }
@@ -365,6 +360,15 @@ static int pstate_cmp(const void *a, const void *b)
365 return pa->state->zpos - pb->state->zpos; 360 return pa->state->zpos - pb->state->zpos;
366} 361}
367 362
363/* is there a helper for this? */
364static bool is_fullscreen(struct drm_crtc_state *cstate,
365 struct drm_plane_state *pstate)
366{
367 return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
368 ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
369 ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
370}
371
368static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, 372static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
369 struct drm_crtc_state *state) 373 struct drm_crtc_state *state)
370{ 374{
@@ -375,21 +379,11 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
375 struct plane_state pstates[STAGE_MAX + 1]; 379 struct plane_state pstates[STAGE_MAX + 1];
376 const struct mdp5_cfg_hw *hw_cfg; 380 const struct mdp5_cfg_hw *hw_cfg;
377 const struct drm_plane_state *pstate; 381 const struct drm_plane_state *pstate;
378 int cnt = 0, i; 382 int cnt = 0, base = 0, i;
379 383
380 DBG("%s: check", mdp5_crtc->name); 384 DBG("%s: check", mdp5_crtc->name);
381 385
382 /* verify that there are not too many planes attached to crtc
383 * and that we don't have conflicting mixer stages:
384 */
385 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
386 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { 386 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
387 if (cnt >= (hw_cfg->lm.nb_stages)) {
388 dev_err(dev->dev, "too many planes!\n");
389 return -EINVAL;
390 }
391
392
393 pstates[cnt].plane = plane; 387 pstates[cnt].plane = plane;
394 pstates[cnt].state = to_mdp5_plane_state(pstate); 388 pstates[cnt].state = to_mdp5_plane_state(pstate);
395 389
@@ -399,8 +393,24 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
399 /* assign a stage based on sorted zpos property */ 393 /* assign a stage based on sorted zpos property */
400 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); 394 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
401 395
396 /* if the bottom-most layer is not fullscreen, we need to use
397 * it for solid-color:
398 */
399 if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
400 base++;
401
402 /* verify that there are not too many planes attached to crtc
403 * and that we don't have conflicting mixer stages:
404 */
405 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
406
407 if ((cnt + base) >= hw_cfg->lm.nb_stages) {
408 dev_err(dev->dev, "too many planes!\n");
409 return -EINVAL;
410 }
411
402 for (i = 0; i < cnt; i++) { 412 for (i = 0; i < cnt; i++) {
403 pstates[i].state->stage = STAGE_BASE + i; 413 pstates[i].state->stage = STAGE_BASE + i + base;
404 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, 414 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
405 pipe2name(mdp5_plane_pipe(pstates[i].plane)), 415 pipe2name(mdp5_plane_pipe(pstates[i].plane)),
406 pstates[i].state->stage); 416 pstates[i].state->stage);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 951c002b05df..83bf997dda03 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -292,8 +292,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
292 format = to_mdp_format(msm_framebuffer_format(state->fb)); 292 format = to_mdp_format(msm_framebuffer_format(state->fb));
293 if (MDP_FORMAT_IS_YUV(format) && 293 if (MDP_FORMAT_IS_YUV(format) &&
294 !pipe_supports_yuv(mdp5_plane->caps)) { 294 !pipe_supports_yuv(mdp5_plane->caps)) {
295 dev_err(plane->dev->dev, 295 DBG("Pipe doesn't support YUV\n");
296 "Pipe doesn't support YUV\n");
297 296
298 return -EINVAL; 297 return -EINVAL;
299 } 298 }
@@ -301,8 +300,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
301 if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && 300 if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
302 (((state->src_w >> 16) != state->crtc_w) || 301 (((state->src_w >> 16) != state->crtc_w) ||
303 ((state->src_h >> 16) != state->crtc_h))) { 302 ((state->src_h >> 16) != state->crtc_h))) {
304 dev_err(plane->dev->dev, 303 DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
305 "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
306 state->src_w >> 16, state->src_h >> 16, 304 state->src_w >> 16, state->src_h >> 16,
307 state->crtc_w, state->crtc_h); 305 state->crtc_w, state->crtc_h);
308 306
@@ -313,8 +311,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
313 vflip = !!(state->rotation & DRM_REFLECT_Y); 311 vflip = !!(state->rotation & DRM_REFLECT_Y);
314 if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || 312 if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
315 (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { 313 (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
316 dev_err(plane->dev->dev, 314 DBG("Pipe doesn't support flip\n");
317 "Pipe doesn't support flip\n");
318 315
319 return -EINVAL; 316 return -EINVAL;
320 } 317 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index fb5c0b0a7594..46568fc80848 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -228,7 +228,7 @@ static int msm_drm_uninit(struct device *dev)
228 flush_workqueue(priv->atomic_wq); 228 flush_workqueue(priv->atomic_wq);
229 destroy_workqueue(priv->atomic_wq); 229 destroy_workqueue(priv->atomic_wq);
230 230
231 if (kms) 231 if (kms && kms->funcs)
232 kms->funcs->destroy(kms); 232 kms->funcs->destroy(kms);
233 233
234 if (gpu) { 234 if (gpu) {
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 283d2841ba58..192b2d3a79cb 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
163void msm_gem_shrinker_cleanup(struct drm_device *dev) 163void msm_gem_shrinker_cleanup(struct drm_device *dev)
164{ 164{
165 struct msm_drm_private *priv = dev->dev_private; 165 struct msm_drm_private *priv = dev->dev_private;
166 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); 166
167 unregister_shrinker(&priv->shrinker); 167 if (priv->shrinker.nr_deferred) {
168 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
169 unregister_shrinker(&priv->shrinker);
170 }
168} 171}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dc57b628e074..193573d191e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
240 if (!parent_adev) 240 if (!parent_adev)
241 return false; 241 return false;
242 242
243 return acpi_has_method(parent_adev->handle, "_PR3"); 243 return parent_adev->power.flags.power_resources &&
244 acpi_has_method(parent_adev->handle, "_PR3");
244} 245}
245 246
246static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out, 247static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 103fc8650197..a0d4a0522fdc 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
1396void cayman_cp_int_cntl_setup(struct radeon_device *rdev, 1396void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1397 int ring, u32 cp_int_cntl) 1397 int ring, u32 cp_int_cntl)
1398{ 1398{
1399 u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; 1399 WREG32(SRBM_GFX_CNTL, RINGID(ring));
1400
1401 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1402 WREG32(CP_INT_CNTL, cp_int_cntl); 1400 WREG32(CP_INT_CNTL, cp_int_cntl);
1403} 1401}
1404 1402
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e18839d52e3e..27affbde058c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector)
931{ 931{
932 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 932 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
933 933
934 if (radeon_connector->ddc_bus->has_aux) { 934 if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
935 drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux); 935 drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
936 radeon_connector->ddc_bus->has_aux = false; 936 radeon_connector->ddc_bus->has_aux = false;
937 } 937 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb92aef46e3c..621af069a3d2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = {
104 "LAST", 104 "LAST",
105}; 105};
106 106
107#if defined(CONFIG_VGA_SWITCHEROO)
108bool radeon_has_atpx_dgpu_power_cntl(void);
109bool radeon_is_atpx_hybrid(void);
110#else
111static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
112static inline bool radeon_is_atpx_hybrid(void) { return false; }
113#endif
114
107#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 115#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
108#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) 116#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
109 117
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
160 168
161 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) 169 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
162 rdev->flags &= ~RADEON_IS_PX; 170 rdev->flags &= ~RADEON_IS_PX;
171
172 /* disable PX if the system doesn't support dGPU power control or hybrid gfx */
173 if (!radeon_is_atpx_hybrid() &&
174 !radeon_has_atpx_dgpu_power_cntl())
175 rdev->flags &= ~RADEON_IS_PX;
163} 176}
164 177
165/** 178/**
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 2d465648856a..474a8a1886f7 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
105 105
106 tmp &= AUX_HPD_SEL(0x7); 106 tmp &= AUX_HPD_SEL(0x7);
107 tmp |= AUX_HPD_SEL(chan->rec.hpd); 107 tmp |= AUX_HPD_SEL(chan->rec.hpd);
108 tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1); 108 tmp |= AUX_EN | AUX_LS_READ_EN;
109 109
110 WREG32(AUX_CONTROL + aux_offset[instance], tmp); 110 WREG32(AUX_CONTROL + aux_offset[instance], tmp);
111 111
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 89bdf20344ae..c49934527a87 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2999 int i; 2999 int i;
3000 struct si_dpm_quirk *p = si_dpm_quirk_list; 3000 struct si_dpm_quirk *p = si_dpm_quirk_list;
3001 3001
3002 /* limit all SI kickers */
3003 if (rdev->family == CHIP_PITCAIRN) {
3004 if ((rdev->pdev->revision == 0x81) ||
3005 (rdev->pdev->device == 0x6810) ||
3006 (rdev->pdev->device == 0x6811) ||
3007 (rdev->pdev->device == 0x6816) ||
3008 (rdev->pdev->device == 0x6817) ||
3009 (rdev->pdev->device == 0x6806))
3010 max_mclk = 120000;
3011 } else if (rdev->family == CHIP_VERDE) {
3012 if ((rdev->pdev->revision == 0x81) ||
3013 (rdev->pdev->revision == 0x83) ||
3014 (rdev->pdev->revision == 0x87) ||
3015 (rdev->pdev->device == 0x6820) ||
3016 (rdev->pdev->device == 0x6821) ||
3017 (rdev->pdev->device == 0x6822) ||
3018 (rdev->pdev->device == 0x6823) ||
3019 (rdev->pdev->device == 0x682A) ||
3020 (rdev->pdev->device == 0x682B)) {
3021 max_sclk = 75000;
3022 max_mclk = 80000;
3023 }
3024 } else if (rdev->family == CHIP_OLAND) {
3025 if ((rdev->pdev->revision == 0xC7) ||
3026 (rdev->pdev->revision == 0x80) ||
3027 (rdev->pdev->revision == 0x81) ||
3028 (rdev->pdev->revision == 0x83) ||
3029 (rdev->pdev->device == 0x6604) ||
3030 (rdev->pdev->device == 0x6605)) {
3031 max_sclk = 75000;
3032 max_mclk = 80000;
3033 }
3034 } else if (rdev->family == CHIP_HAINAN) {
3035 if ((rdev->pdev->revision == 0x81) ||
3036 (rdev->pdev->revision == 0x83) ||
3037 (rdev->pdev->revision == 0xC3) ||
3038 (rdev->pdev->device == 0x6664) ||
3039 (rdev->pdev->device == 0x6665) ||
3040 (rdev->pdev->device == 0x6667)) {
3041 max_sclk = 75000;
3042 max_mclk = 80000;
3043 }
3044 }
3002 /* Apply dpm quirks */ 3045 /* Apply dpm quirks */
3003 while (p && p->chip_device != 0) { 3046 while (p && p->chip_device != 0) {
3004 if (rdev->pdev->vendor == p->chip_vendor && 3047 if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3011 } 3054 }
3012 ++p; 3055 ++p;
3013 } 3056 }
3014 /* limit mclk on all R7 370 parts for stability */
3015 if (rdev->pdev->device == 0x6811 &&
3016 rdev->pdev->revision == 0x81)
3017 max_mclk = 120000;
3018 /* limit sclk/mclk on Jet parts for stability */
3019 if (rdev->pdev->device == 0x6665 &&
3020 rdev->pdev->revision == 0xc3) {
3021 max_sclk = 75000;
3022 max_mclk = 80000;
3023 }
3024 3057
3025 if (rps->vce_active) { 3058 if (rps->vce_active) {
3026 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; 3059 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index bd9c3bb9252c..392c7e6de042 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -231,8 +231,16 @@ static int rcar_du_atomic_check(struct drm_device *dev,
231 struct rcar_du_device *rcdu = dev->dev_private; 231 struct rcar_du_device *rcdu = dev->dev_private;
232 int ret; 232 int ret;
233 233
234 ret = drm_atomic_helper_check(dev, state); 234 ret = drm_atomic_helper_check_modeset(dev, state);
235 if (ret < 0) 235 if (ret)
236 return ret;
237
238 ret = drm_atomic_normalize_zpos(dev, state);
239 if (ret)
240 return ret;
241
242 ret = drm_atomic_helper_check_planes(dev, state);
243 if (ret)
236 return ret; 244 return ret;
237 245
238 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) 246 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 2784919a7366..9df308565f6c 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -195,6 +195,26 @@ static void sti_atomic_work(struct work_struct *work)
195 sti_atomic_complete(private, private->commit.state); 195 sti_atomic_complete(private, private->commit.state);
196} 196}
197 197
198static int sti_atomic_check(struct drm_device *dev,
199 struct drm_atomic_state *state)
200{
201 int ret;
202
203 ret = drm_atomic_helper_check_modeset(dev, state);
204 if (ret)
205 return ret;
206
207 ret = drm_atomic_normalize_zpos(dev, state);
208 if (ret)
209 return ret;
210
211 ret = drm_atomic_helper_check_planes(dev, state);
212 if (ret)
213 return ret;
214
215 return ret;
216}
217
198static int sti_atomic_commit(struct drm_device *drm, 218static int sti_atomic_commit(struct drm_device *drm,
199 struct drm_atomic_state *state, bool nonblock) 219 struct drm_atomic_state *state, bool nonblock)
200{ 220{
@@ -248,7 +268,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
248static const struct drm_mode_config_funcs sti_mode_config_funcs = { 268static const struct drm_mode_config_funcs sti_mode_config_funcs = {
249 .fb_create = drm_fb_cma_create, 269 .fb_create = drm_fb_cma_create,
250 .output_poll_changed = sti_output_poll_changed, 270 .output_poll_changed = sti_output_poll_changed,
251 .atomic_check = drm_atomic_helper_check, 271 .atomic_check = sti_atomic_check,
252 .atomic_commit = sti_atomic_commit, 272 .atomic_commit = sti_atomic_commit,
253}; 273};
254 274
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 29f0207fa677..873f010d9616 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -98,17 +98,23 @@ success:
98static int udl_select_std_channel(struct udl_device *udl) 98static int udl_select_std_channel(struct udl_device *udl)
99{ 99{
100 int ret; 100 int ret;
101 u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, 101 static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
102 0x1C, 0x88, 0x5E, 0x15, 102 0x1C, 0x88, 0x5E, 0x15,
103 0x60, 0xFE, 0xC6, 0x97, 103 0x60, 0xFE, 0xC6, 0x97,
104 0x16, 0x3D, 0x47, 0xF2}; 104 0x16, 0x3D, 0x47, 0xF2};
105 void *sendbuf;
106
107 sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
108 if (!sendbuf)
109 return -ENOMEM;
105 110
106 ret = usb_control_msg(udl->udev, 111 ret = usb_control_msg(udl->udev,
107 usb_sndctrlpipe(udl->udev, 0), 112 usb_sndctrlpipe(udl->udev, 0),
108 NR_USB_REQUEST_CHANNEL, 113 NR_USB_REQUEST_CHANNEL,
109 (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, 114 (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
110 set_def_chn, sizeof(set_def_chn), 115 sendbuf, sizeof(set_def_chn),
111 USB_CTRL_SET_TIMEOUT); 116 USB_CTRL_SET_TIMEOUT);
117 kfree(sendbuf);
112 return ret < 0 ? ret : 0; 118 return ret < 0 ? ret : 0;
113} 119}
114 120
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 7cf3678623c3..58048709c34e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -338,8 +338,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
338 338
339 drm_atomic_helper_commit_modeset_disables(dev, state); 339 drm_atomic_helper_commit_modeset_disables(dev, state);
340 drm_atomic_helper_commit_modeset_enables(dev, state); 340 drm_atomic_helper_commit_modeset_enables(dev, state);
341 drm_atomic_helper_commit_planes(dev, state, 341 drm_atomic_helper_commit_planes(dev, state, 0);
342 DRM_PLANE_COMMIT_ACTIVE_ONLY);
343 342
344 drm_atomic_helper_commit_hw_done(state); 343 drm_atomic_helper_commit_hw_done(state);
345 344
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 2ba7d437a2af..805b6fa7b5f4 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -1617,7 +1617,7 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1617 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode, 1617 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
1618 complete, complete_context); 1618 complete, complete_context);
1619 if (IS_ERR(ctx)) 1619 if (IS_ERR(ctx))
1620 return ERR_PTR(PTR_ERR(ctx)); 1620 return ERR_CAST(ctx);
1621 1621
1622 run = kzalloc(sizeof(*run), GFP_KERNEL); 1622 run = kzalloc(sizeof(*run), GFP_KERNEL);
1623 if (!run) { 1623 if (!run) {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6cfb5cacc253..575aa65436d1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -179,6 +179,7 @@
179#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 179#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
180#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 180#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
181#define USB_DEVICE_ID_ATEN_CS682 0x2213 181#define USB_DEVICE_ID_ATEN_CS682 0x2213
182#define USB_DEVICE_ID_ATEN_CS692 0x8021
182 183
183#define USB_VENDOR_ID_ATMEL 0x03eb 184#define USB_VENDOR_ID_ATMEL 0x03eb
184#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c 185#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 5614fee82347..3a84aaf1418b 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -292,11 +292,11 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
292 bool input = false; 292 bool input = false;
293 int value = 0; 293 int value = 0;
294 294
295 if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage, 295 if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
296 name) == 3) { 296 name) == 3) {
297 feature = true; 297 feature = true;
298 field_index = index + sensor_inst->input_field_count; 298 field_index = index + sensor_inst->input_field_count;
299 } else if (sscanf(attr->attr.name, "input-%d-%x-%s", &index, &usage, 299 } else if (sscanf(attr->attr.name, "input-%x-%x-%s", &index, &usage,
300 name) == 3) { 300 name) == 3) {
301 input = true; 301 input = true;
302 field_index = index; 302 field_index = index;
@@ -398,7 +398,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
398 char name[HID_CUSTOM_NAME_LENGTH]; 398 char name[HID_CUSTOM_NAME_LENGTH];
399 int value; 399 int value;
400 400
401 if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage, 401 if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
402 name) == 3) { 402 name) == 3) {
403 field_index = index + sensor_inst->input_field_count; 403 field_index = index + sensor_inst->input_field_count;
404 } else 404 } else
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 658a607dc6d9..c5c3d6111729 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -251,6 +251,9 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
251 struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev); 251 struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
252 int report_size; 252 int report_size;
253 int ret = 0; 253 int ret = 0;
254 u8 *val_ptr;
255 int buffer_index = 0;
256 int i;
254 257
255 mutex_lock(&data->mutex); 258 mutex_lock(&data->mutex);
256 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); 259 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
@@ -271,7 +274,17 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
271 goto done_proc; 274 goto done_proc;
272 } 275 }
273 ret = min(report_size, buffer_size); 276 ret = min(report_size, buffer_size);
274 memcpy(buffer, report->field[field_index]->value, ret); 277
278 val_ptr = (u8 *)report->field[field_index]->value;
279 for (i = 0; i < report->field[field_index]->report_count; ++i) {
280 if (buffer_index >= ret)
281 break;
282
283 memcpy(&((u8 *)buffer)[buffer_index], val_ptr,
284 report->field[field_index]->report_size / 8);
285 val_ptr += sizeof(__s32);
286 buffer_index += (report->field[field_index]->report_size / 8);
287 }
275 288
276done_proc: 289done_proc:
277 mutex_unlock(&data->mutex); 290 mutex_unlock(&data->mutex);
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index e2517c11e0ee..0c9ac4d5d850 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -638,6 +638,58 @@ eoi:
638} 638}
639 639
640/** 640/**
641 * ish_disable_dma() - disable dma communication between host and ISHFW
642 * @dev: ishtp device pointer
643 *
644 * Clear the dma enable bit and wait for dma inactive.
645 *
646 * Return: 0 for success else error code.
647 */
648static int ish_disable_dma(struct ishtp_device *dev)
649{
650 unsigned int dma_delay;
651
652 /* Clear the dma enable bit */
653 ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
654
655 /* wait for dma inactive */
656 for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
657 _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
658 dma_delay += 5)
659 mdelay(5);
660
661 if (dma_delay >= MAX_DMA_DELAY) {
662 dev_err(dev->devc,
663 "Wait for DMA inactive timeout\n");
664 return -EBUSY;
665 }
666
667 return 0;
668}
669
670/**
671 * ish_wakeup() - wakeup ishfw from waiting-for-host state
672 * @dev: ishtp device pointer
673 *
674 * Set the dma enable bit and send a void message to FW,
675 * it will wake up FW from waiting-for-host state.
676 */
677static void ish_wakeup(struct ishtp_device *dev)
678{
679 /* Set dma enable bit */
680 ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
681
682 /*
683 * Send 0 IPC message so that ISH FW wakes up if it was already
684 * asleep.
685 */
686 ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
687
688 /* Flush writes to doorbell and REMAP2 */
689 ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
690}
691
692/**
641 * _ish_hw_reset() - HW reset 693 * _ish_hw_reset() - HW reset
642 * @dev: ishtp device pointer 694 * @dev: ishtp device pointer
643 * 695 *
@@ -649,7 +701,6 @@ static int _ish_hw_reset(struct ishtp_device *dev)
649{ 701{
650 struct pci_dev *pdev = dev->pdev; 702 struct pci_dev *pdev = dev->pdev;
651 int rv; 703 int rv;
652 unsigned int dma_delay;
653 uint16_t csr; 704 uint16_t csr;
654 705
655 if (!pdev) 706 if (!pdev)
@@ -664,15 +715,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
664 return -EINVAL; 715 return -EINVAL;
665 } 716 }
666 717
667 /* Now trigger reset to FW */ 718 /* Disable dma communication between FW and host */
668 ish_reg_write(dev, IPC_REG_ISH_RMP2, 0); 719 if (ish_disable_dma(dev)) {
669
670 for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
671 _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
672 dma_delay += 5)
673 mdelay(5);
674
675 if (dma_delay >= MAX_DMA_DELAY) {
676 dev_err(&pdev->dev, 720 dev_err(&pdev->dev,
677 "Can't reset - stuck with DMA in-progress\n"); 721 "Can't reset - stuck with DMA in-progress\n");
678 return -EBUSY; 722 return -EBUSY;
@@ -690,16 +734,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
690 csr |= PCI_D0; 734 csr |= PCI_D0;
691 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr); 735 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
692 736
693 ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED); 737 /* Now we can enable ISH DMA operation and wakeup ISHFW */
694 738 ish_wakeup(dev);
695 /*
696 * Send 0 IPC message so that ISH FW wakes up if it was already
697 * asleep
698 */
699 ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
700
701 /* Flush writes to doorbell and REMAP2 */
702 ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
703 739
704 return 0; 740 return 0;
705} 741}
@@ -758,16 +794,9 @@ static int _ish_ipc_reset(struct ishtp_device *dev)
758int ish_hw_start(struct ishtp_device *dev) 794int ish_hw_start(struct ishtp_device *dev)
759{ 795{
760 ish_set_host_rdy(dev); 796 ish_set_host_rdy(dev);
761 /* After that we can enable ISH DMA operation */
762 ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
763 797
764 /* 798 /* After that we can enable ISH DMA operation and wakeup ISHFW */
765 * Send 0 IPC message so that ISH FW wakes up if it was already 799 ish_wakeup(dev);
766 * asleep
767 */
768 ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
769 /* Flush write to doorbell */
770 ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
771 800
772 set_host_ready(dev); 801 set_host_ready(dev);
773 802
@@ -876,6 +905,21 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
876 */ 905 */
877void ish_device_disable(struct ishtp_device *dev) 906void ish_device_disable(struct ishtp_device *dev)
878{ 907{
908 struct pci_dev *pdev = dev->pdev;
909
910 if (!pdev)
911 return;
912
913 /* Disable dma communication between FW and host */
914 if (ish_disable_dma(dev)) {
915 dev_err(&pdev->dev,
916 "Can't reset - stuck with DMA in-progress\n");
917 return;
918 }
919
920 /* Put ISH to D3hot state for power saving */
921 pci_set_power_state(pdev, PCI_D3hot);
922
879 dev->dev_state = ISHTP_DEV_DISABLED; 923 dev->dev_state = ISHTP_DEV_DISABLED;
880 ish_clr_host_rdy(dev); 924 ish_clr_host_rdy(dev);
881} 925}
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 42f0beeb09fd..20d647d2dd2c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -146,7 +146,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
146 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; 146 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
147 147
148 /* request and enable interrupt */ 148 /* request and enable interrupt */
149 ret = request_irq(pdev->irq, ish_irq_handler, IRQF_NO_SUSPEND, 149 ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
150 KBUILD_MODNAME, dev); 150 KBUILD_MODNAME, dev);
151 if (ret) { 151 if (ret) {
152 dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n", 152 dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
@@ -202,6 +202,7 @@ static void ish_remove(struct pci_dev *pdev)
202 kfree(ishtp_dev); 202 kfree(ishtp_dev);
203} 203}
204 204
205#ifdef CONFIG_PM
205static struct device *ish_resume_device; 206static struct device *ish_resume_device;
206 207
207/** 208/**
@@ -293,7 +294,6 @@ static int ish_resume(struct device *device)
293 return 0; 294 return 0;
294} 295}
295 296
296#ifdef CONFIG_PM
297static const struct dev_pm_ops ish_pm_ops = { 297static const struct dev_pm_ops ish_pm_ops = {
298 .suspend = ish_suspend, 298 .suspend = ish_suspend,
299 .resume = ish_resume, 299 .resume = ish_resume,
@@ -301,7 +301,7 @@ static const struct dev_pm_ops ish_pm_ops = {
301#define ISHTP_ISH_PM_OPS (&ish_pm_ops) 301#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
302#else 302#else
303#define ISHTP_ISH_PM_OPS NULL 303#define ISHTP_ISH_PM_OPS NULL
304#endif 304#endif /* CONFIG_PM */
305 305
306static struct pci_driver ish_driver = { 306static struct pci_driver ish_driver = {
307 .name = KBUILD_MODNAME, 307 .name = KBUILD_MODNAME,
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 354d49ea36dd..e6cfd323babc 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -63,6 +63,7 @@ static const struct hid_blacklist {
63 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, 63 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
64 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, 64 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
65 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET }, 65 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
66 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
66 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET }, 67 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
67 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, 68 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
68 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET }, 69 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a259e18d22d5..0276d2ef06ee 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -961,7 +961,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
961{ 961{
962 int ret = 0; 962 int ret = 0;
963 963
964 dev_set_name(&child_device_obj->device, "vmbus-%pUl", 964 dev_set_name(&child_device_obj->device, "%pUl",
965 child_device_obj->channel->offermsg.offer.if_instance.b); 965 child_device_obj->channel->offermsg.offer.if_instance.b);
966 966
967 child_device_obj->device.bus = &hv_bus; 967 child_device_obj->device.bus = &hv_bus;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index adae6848ffb2..a74c075a30ec 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -536,8 +536,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
536 536
537 hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups), 537 hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
538 GFP_KERNEL); 538 GFP_KERNEL);
539 if (!hwdev->groups) 539 if (!hwdev->groups) {
540 return ERR_PTR(-ENOMEM); 540 err = -ENOMEM;
541 goto free_hwmon;
542 }
541 543
542 attrs = __hwmon_create_attrs(dev, drvdata, chip); 544 attrs = __hwmon_create_attrs(dev, drvdata, chip);
543 if (IS_ERR(attrs)) { 545 if (IS_ERR(attrs)) {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 1704fc84d647..b432b64e307a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2179,6 +2179,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
2179 /* add the driver to the list of i2c drivers in the driver core */ 2179 /* add the driver to the list of i2c drivers in the driver core */
2180 driver->driver.owner = owner; 2180 driver->driver.owner = owner;
2181 driver->driver.bus = &i2c_bus_type; 2181 driver->driver.bus = &i2c_bus_type;
2182 INIT_LIST_HEAD(&driver->clients);
2182 2183
2183 /* When registration returns, the driver core 2184 /* When registration returns, the driver core
2184 * will have called probe() for all matching-but-unbound devices. 2185 * will have called probe() for all matching-but-unbound devices.
@@ -2189,7 +2190,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
2189 2190
2190 pr_debug("driver [%s] registered\n", driver->driver.name); 2191 pr_debug("driver [%s] registered\n", driver->driver.name);
2191 2192
2192 INIT_LIST_HEAD(&driver->clients);
2193 /* Walk the adapters that are already present */ 2193 /* Walk the adapters that are already present */
2194 i2c_for_each_dev(driver, __process_new_driver); 2194 i2c_for_each_dev(driver, __process_new_driver);
2195 2195
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index da3fb069ec5c..ce69048c88e9 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev,
743 743
744 return IIO_VAL_INT; 744 return IIO_VAL_INT;
745 case IIO_CHAN_INFO_SCALE: 745 case IIO_CHAN_INFO_SCALE:
746 *val = 0; 746 *val = adata->current_fullscale->gain / 1000000;
747 *val2 = adata->current_fullscale->gain; 747 *val2 = adata->current_fullscale->gain % 1000000;
748 return IIO_VAL_INT_PLUS_MICRO; 748 return IIO_VAL_INT_PLUS_MICRO;
749 case IIO_CHAN_INFO_SAMP_FREQ: 749 case IIO_CHAN_INFO_SAMP_FREQ:
750 *val = adata->odr; 750 *val = adata->odr;
@@ -763,9 +763,13 @@ static int st_accel_write_raw(struct iio_dev *indio_dev,
763 int err; 763 int err;
764 764
765 switch (mask) { 765 switch (mask) {
766 case IIO_CHAN_INFO_SCALE: 766 case IIO_CHAN_INFO_SCALE: {
767 err = st_sensors_set_fullscale_by_gain(indio_dev, val2); 767 int gain;
768
769 gain = val * 1000000 + val2;
770 err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
768 break; 771 break;
772 }
769 case IIO_CHAN_INFO_SAMP_FREQ: 773 case IIO_CHAN_INFO_SAMP_FREQ:
770 if (val2) 774 if (val2)
771 return -EINVAL; 775 return -EINVAL;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index dc33c1dd5191..b5beea53d6f6 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -30,26 +30,26 @@ static struct {
30 u32 usage_id; 30 u32 usage_id;
31 int unit; /* 0 for default others from HID sensor spec */ 31 int unit; /* 0 for default others from HID sensor spec */
32 int scale_val0; /* scale, whole number */ 32 int scale_val0; /* scale, whole number */
33 int scale_val1; /* scale, fraction in micros */ 33 int scale_val1; /* scale, fraction in nanos */
34} unit_conversion[] = { 34} unit_conversion[] = {
35 {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650}, 35 {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000},
36 {HID_USAGE_SENSOR_ACCEL_3D, 36 {HID_USAGE_SENSOR_ACCEL_3D,
37 HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0}, 37 HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
38 {HID_USAGE_SENSOR_ACCEL_3D, 38 {HID_USAGE_SENSOR_ACCEL_3D,
39 HID_USAGE_SENSOR_UNITS_G, 9, 806650}, 39 HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
40 40
41 {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453}, 41 {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293},
42 {HID_USAGE_SENSOR_GYRO_3D, 42 {HID_USAGE_SENSOR_GYRO_3D,
43 HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0}, 43 HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
44 {HID_USAGE_SENSOR_GYRO_3D, 44 {HID_USAGE_SENSOR_GYRO_3D,
45 HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453}, 45 HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293},
46 46
47 {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000}, 47 {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000},
48 {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0}, 48 {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0},
49 49
50 {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453}, 50 {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293},
51 {HID_USAGE_SENSOR_INCLINOMETER_3D, 51 {HID_USAGE_SENSOR_INCLINOMETER_3D,
52 HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453}, 52 HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
53 {HID_USAGE_SENSOR_INCLINOMETER_3D, 53 {HID_USAGE_SENSOR_INCLINOMETER_3D,
54 HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0}, 54 HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0},
55 55
@@ -57,7 +57,7 @@ static struct {
57 {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, 57 {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
58 58
59 {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, 59 {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
60 {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000}, 60 {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000},
61}; 61};
62 62
63static int pow_10(unsigned power) 63static int pow_10(unsigned power)
@@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
266/* 266/*
267 * This fuction applies the unit exponent to the scale. 267 * This fuction applies the unit exponent to the scale.
268 * For example: 268 * For example:
269 * 9.806650 ->exp:2-> val0[980]val1[665000] 269 * 9.806650000 ->exp:2-> val0[980]val1[665000000]
270 * 9.000806 ->exp:2-> val0[900]val1[80600] 270 * 9.000806000 ->exp:2-> val0[900]val1[80600000]
271 * 0.174535 ->exp:2-> val0[17]val1[453500] 271 * 0.174535293 ->exp:2-> val0[17]val1[453529300]
272 * 1.001745 ->exp:0-> val0[1]val1[1745] 272 * 1.001745329 ->exp:0-> val0[1]val1[1745329]
273 * 1.001745 ->exp:2-> val0[100]val1[174500] 273 * 1.001745329 ->exp:2-> val0[100]val1[174532900]
274 * 1.001745 ->exp:4-> val0[10017]val1[450000] 274 * 1.001745329 ->exp:4-> val0[10017]val1[453290000]
275 * 9.806650 ->exp:-2-> val0[0]val1[98066] 275 * 9.806650000 ->exp:-2-> val0[0]val1[98066500]
276 */ 276 */
277static void adjust_exponent_micro(int *val0, int *val1, int scale0, 277static void adjust_exponent_nano(int *val0, int *val1, int scale0,
278 int scale1, int exp) 278 int scale1, int exp)
279{ 279{
280 int i; 280 int i;
@@ -285,32 +285,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0,
285 if (exp > 0) { 285 if (exp > 0) {
286 *val0 = scale0 * pow_10(exp); 286 *val0 = scale0 * pow_10(exp);
287 res = 0; 287 res = 0;
288 if (exp > 6) { 288 if (exp > 9) {
289 *val1 = 0; 289 *val1 = 0;
290 return; 290 return;
291 } 291 }
292 for (i = 0; i < exp; ++i) { 292 for (i = 0; i < exp; ++i) {
293 x = scale1 / pow_10(5 - i); 293 x = scale1 / pow_10(8 - i);
294 res += (pow_10(exp - 1 - i) * x); 294 res += (pow_10(exp - 1 - i) * x);
295 scale1 = scale1 % pow_10(5 - i); 295 scale1 = scale1 % pow_10(8 - i);
296 } 296 }
297 *val0 += res; 297 *val0 += res;
298 *val1 = scale1 * pow_10(exp); 298 *val1 = scale1 * pow_10(exp);
299 } else if (exp < 0) { 299 } else if (exp < 0) {
300 exp = abs(exp); 300 exp = abs(exp);
301 if (exp > 6) { 301 if (exp > 9) {
302 *val0 = *val1 = 0; 302 *val0 = *val1 = 0;
303 return; 303 return;
304 } 304 }
305 *val0 = scale0 / pow_10(exp); 305 *val0 = scale0 / pow_10(exp);
306 rem = scale0 % pow_10(exp); 306 rem = scale0 % pow_10(exp);
307 res = 0; 307 res = 0;
308 for (i = 0; i < (6 - exp); ++i) { 308 for (i = 0; i < (9 - exp); ++i) {
309 x = scale1 / pow_10(5 - i); 309 x = scale1 / pow_10(8 - i);
310 res += (pow_10(5 - exp - i) * x); 310 res += (pow_10(8 - exp - i) * x);
311 scale1 = scale1 % pow_10(5 - i); 311 scale1 = scale1 % pow_10(8 - i);
312 } 312 }
313 *val1 = rem * pow_10(6 - exp) + res; 313 *val1 = rem * pow_10(9 - exp) + res;
314 } else { 314 } else {
315 *val0 = scale0; 315 *val0 = scale0;
316 *val1 = scale1; 316 *val1 = scale1;
@@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id,
332 unit_conversion[i].unit == attr_info->units) { 332 unit_conversion[i].unit == attr_info->units) {
333 exp = hid_sensor_convert_exponent( 333 exp = hid_sensor_convert_exponent(
334 attr_info->unit_expo); 334 attr_info->unit_expo);
335 adjust_exponent_micro(val0, val1, 335 adjust_exponent_nano(val0, val1,
336 unit_conversion[i].scale_val0, 336 unit_conversion[i].scale_val0,
337 unit_conversion[i].scale_val1, exp); 337 unit_conversion[i].scale_val1, exp);
338 break; 338 break;
339 } 339 }
340 } 340 }
341 341
342 return IIO_VAL_INT_PLUS_MICRO; 342 return IIO_VAL_INT_PLUS_NANO;
343} 343}
344EXPORT_SYMBOL(hid_sensor_format_scale); 344EXPORT_SYMBOL(hid_sensor_format_scale);
345 345
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 285a64a589d7..975a1f19f747 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -612,7 +612,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
612ssize_t st_sensors_sysfs_scale_avail(struct device *dev, 612ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
613 struct device_attribute *attr, char *buf) 613 struct device_attribute *attr, char *buf)
614{ 614{
615 int i, len = 0; 615 int i, len = 0, q, r;
616 struct iio_dev *indio_dev = dev_get_drvdata(dev); 616 struct iio_dev *indio_dev = dev_get_drvdata(dev);
617 struct st_sensor_data *sdata = iio_priv(indio_dev); 617 struct st_sensor_data *sdata = iio_priv(indio_dev);
618 618
@@ -621,8 +621,10 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
621 if (sdata->sensor_settings->fs.fs_avl[i].num == 0) 621 if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
622 break; 622 break;
623 623
624 len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", 624 q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000;
625 sdata->sensor_settings->fs.fs_avl[i].gain); 625 r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000;
626
627 len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
626 } 628 }
627 mutex_unlock(&indio_dev->mlock); 629 mutex_unlock(&indio_dev->mlock);
628 buf[len - 1] = '\n'; 630 buf[len - 1] = '\n';
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index b98b9d94d184..a97e802ca523 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = {
335 .id_table = hid_dev_rot_ids, 335 .id_table = hid_dev_rot_ids,
336 .driver = { 336 .driver = {
337 .name = KBUILD_MODNAME, 337 .name = KBUILD_MODNAME,
338 .pm = &hid_sensor_pm_ops,
338 }, 339 },
339 .probe = hid_dev_rot_probe, 340 .probe = hid_dev_rot_probe,
340 .remove = hid_dev_rot_remove, 341 .remove = hid_dev_rot_remove,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 066161a4bccd..f962f31a5eb2 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -136,6 +136,8 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
136 ret = spi_read(data->spi, (void *)&buf32, storage_bytes); 136 ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
137 *val = be32_to_cpu(buf32); 137 *val = be32_to_cpu(buf32);
138 break; 138 break;
139 default:
140 ret = -EINVAL;
139 } 141 }
140 142
141 if (ret) 143 if (ret)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 36bf50ebb187..89a6b0546804 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1094,47 +1094,47 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
1094 } 1094 }
1095} 1095}
1096 1096
1097static void cma_save_ip4_info(struct sockaddr *src_addr, 1097static void cma_save_ip4_info(struct sockaddr_in *src_addr,
1098 struct sockaddr *dst_addr, 1098 struct sockaddr_in *dst_addr,
1099 struct cma_hdr *hdr, 1099 struct cma_hdr *hdr,
1100 __be16 local_port) 1100 __be16 local_port)
1101{ 1101{
1102 struct sockaddr_in *ip4;
1103
1104 if (src_addr) { 1102 if (src_addr) {
1105 ip4 = (struct sockaddr_in *)src_addr; 1103 *src_addr = (struct sockaddr_in) {
1106 ip4->sin_family = AF_INET; 1104 .sin_family = AF_INET,
1107 ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; 1105 .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
1108 ip4->sin_port = local_port; 1106 .sin_port = local_port,
1107 };
1109 } 1108 }
1110 1109
1111 if (dst_addr) { 1110 if (dst_addr) {
1112 ip4 = (struct sockaddr_in *)dst_addr; 1111 *dst_addr = (struct sockaddr_in) {
1113 ip4->sin_family = AF_INET; 1112 .sin_family = AF_INET,
1114 ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; 1113 .sin_addr.s_addr = hdr->src_addr.ip4.addr,
1115 ip4->sin_port = hdr->port; 1114 .sin_port = hdr->port,
1115 };
1116 } 1116 }
1117} 1117}
1118 1118
1119static void cma_save_ip6_info(struct sockaddr *src_addr, 1119static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
1120 struct sockaddr *dst_addr, 1120 struct sockaddr_in6 *dst_addr,
1121 struct cma_hdr *hdr, 1121 struct cma_hdr *hdr,
1122 __be16 local_port) 1122 __be16 local_port)
1123{ 1123{
1124 struct sockaddr_in6 *ip6;
1125
1126 if (src_addr) { 1124 if (src_addr) {
1127 ip6 = (struct sockaddr_in6 *)src_addr; 1125 *src_addr = (struct sockaddr_in6) {
1128 ip6->sin6_family = AF_INET6; 1126 .sin6_family = AF_INET6,
1129 ip6->sin6_addr = hdr->dst_addr.ip6; 1127 .sin6_addr = hdr->dst_addr.ip6,
1130 ip6->sin6_port = local_port; 1128 .sin6_port = local_port,
1129 };
1131 } 1130 }
1132 1131
1133 if (dst_addr) { 1132 if (dst_addr) {
1134 ip6 = (struct sockaddr_in6 *)dst_addr; 1133 *dst_addr = (struct sockaddr_in6) {
1135 ip6->sin6_family = AF_INET6; 1134 .sin6_family = AF_INET6,
1136 ip6->sin6_addr = hdr->src_addr.ip6; 1135 .sin6_addr = hdr->src_addr.ip6,
1137 ip6->sin6_port = hdr->port; 1136 .sin6_port = hdr->port,
1137 };
1138 } 1138 }
1139} 1139}
1140 1140
@@ -1159,10 +1159,12 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
1159 1159
1160 switch (cma_get_ip_ver(hdr)) { 1160 switch (cma_get_ip_ver(hdr)) {
1161 case 4: 1161 case 4:
1162 cma_save_ip4_info(src_addr, dst_addr, hdr, port); 1162 cma_save_ip4_info((struct sockaddr_in *)src_addr,
1163 (struct sockaddr_in *)dst_addr, hdr, port);
1163 break; 1164 break;
1164 case 6: 1165 case 6:
1165 cma_save_ip6_info(src_addr, dst_addr, hdr, port); 1166 cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
1167 (struct sockaddr_in6 *)dst_addr, hdr, port);
1166 break; 1168 break;
1167 default: 1169 default:
1168 return -EAFNOSUPPORT; 1170 return -EAFNOSUPPORT;
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 54eceb30ede5..a7d39689bbfb 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -43,7 +43,7 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties)
43 43
44 if (set_properties) { 44 if (set_properties) {
45 psmouse->vendor = "FocalTech"; 45 psmouse->vendor = "FocalTech";
46 psmouse->name = "FocalTech Touchpad"; 46 psmouse->name = "Touchpad";
47 } 47 }
48 48
49 return 0; 49 return 0;
@@ -146,8 +146,8 @@ static void focaltech_report_state(struct psmouse *psmouse)
146 } 146 }
147 input_mt_report_pointer_emulation(dev, true); 147 input_mt_report_pointer_emulation(dev, true);
148 148
149 input_report_key(psmouse->dev, BTN_LEFT, state->pressed); 149 input_report_key(dev, BTN_LEFT, state->pressed);
150 input_sync(psmouse->dev); 150 input_sync(dev);
151} 151}
152 152
153static void focaltech_process_touch_packet(struct psmouse *psmouse, 153static void focaltech_process_touch_packet(struct psmouse *psmouse,
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index f4bfb4b2d50a..073246c7d163 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
877 DMI_MATCH(DMI_PRODUCT_NAME, "P34"), 877 DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
878 }, 878 },
879 }, 879 },
880 {
881 /* Schenker XMG C504 - Elantech touchpad */
882 .matches = {
883 DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
884 DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
885 },
886 },
880 { } 887 { }
881}; 888};
882 889
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 15c01c3cd540..e6f9b2d745ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2636,17 +2636,26 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2636 /* And we're up. Go go go! */ 2636 /* And we're up. Go go go! */
2637 of_iommu_set_ops(dev->of_node, &arm_smmu_ops); 2637 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
2638#ifdef CONFIG_PCI 2638#ifdef CONFIG_PCI
2639 pci_request_acs(); 2639 if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
2640 ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); 2640 pci_request_acs();
2641 if (ret) 2641 ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2642 return ret; 2642 if (ret)
2643 return ret;
2644 }
2643#endif 2645#endif
2644#ifdef CONFIG_ARM_AMBA 2646#ifdef CONFIG_ARM_AMBA
2645 ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); 2647 if (amba_bustype.iommu_ops != &arm_smmu_ops) {
2646 if (ret) 2648 ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2647 return ret; 2649 if (ret)
2650 return ret;
2651 }
2648#endif 2652#endif
2649 return bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 2653 if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
2654 ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2655 if (ret)
2656 return ret;
2657 }
2658 return 0;
2650} 2659}
2651 2660
2652static int arm_smmu_device_remove(struct platform_device *pdev) 2661static int arm_smmu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c841eb7a1a74..8f7281444551 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -324,8 +324,10 @@ struct arm_smmu_master_cfg {
324#define INVALID_SMENDX -1 324#define INVALID_SMENDX -1
325#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv) 325#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
326#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu) 326#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
327#define fwspec_smendx(fw, i) \
328 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
327#define for_each_cfg_sme(fw, i, idx) \ 329#define for_each_cfg_sme(fw, i, idx) \
328 for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i) 330 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
329 331
330struct arm_smmu_device { 332struct arm_smmu_device {
331 struct device *dev; 333 struct device *dev;
@@ -1228,6 +1230,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1228 return -ENXIO; 1230 return -ENXIO;
1229 } 1231 }
1230 1232
1233 /*
1234 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1235 * domains between of_xlate() and add_device() - we have no way to cope
1236 * with that, so until ARM gets converted to rely on groups and default
1237 * domains, just say no (but more politely than by dereferencing NULL).
1238 * This should be at least a WARN_ON once that's sorted.
1239 */
1240 if (!fwspec->iommu_priv)
1241 return -ENODEV;
1242
1231 smmu = fwspec_smmu(fwspec); 1243 smmu = fwspec_smmu(fwspec);
1232 /* Ensure that the domain is finalised */ 1244 /* Ensure that the domain is finalised */
1233 ret = arm_smmu_init_domain_context(domain, smmu); 1245 ret = arm_smmu_init_domain_context(domain, smmu);
@@ -1390,7 +1402,7 @@ static int arm_smmu_add_device(struct device *dev)
1390 fwspec = dev->iommu_fwspec; 1402 fwspec = dev->iommu_fwspec;
1391 if (ret) 1403 if (ret)
1392 goto out_free; 1404 goto out_free;
1393 } else if (fwspec) { 1405 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
1394 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); 1406 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
1395 } else { 1407 } else {
1396 return -ENODEV; 1408 return -ENODEV;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a4407eabf0e6..3965e73db51c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1711,6 +1711,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
1711 if (!iommu->domains || !iommu->domain_ids) 1711 if (!iommu->domains || !iommu->domain_ids)
1712 return; 1712 return;
1713 1713
1714again:
1714 spin_lock_irqsave(&device_domain_lock, flags); 1715 spin_lock_irqsave(&device_domain_lock, flags);
1715 list_for_each_entry_safe(info, tmp, &device_domain_list, global) { 1716 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1716 struct dmar_domain *domain; 1717 struct dmar_domain *domain;
@@ -1723,10 +1724,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
1723 1724
1724 domain = info->domain; 1725 domain = info->domain;
1725 1726
1726 dmar_remove_one_dev_info(domain, info->dev); 1727 __dmar_remove_one_dev_info(info);
1727 1728
1728 if (!domain_type_is_vm_or_si(domain)) 1729 if (!domain_type_is_vm_or_si(domain)) {
1730 /*
1731 * The domain_exit() function can't be called under
1732 * device_domain_lock, as it takes this lock itself.
1733 * So release the lock here and re-run the loop
1734 * afterwards.
1735 */
1736 spin_unlock_irqrestore(&device_domain_lock, flags);
1729 domain_exit(domain); 1737 domain_exit(domain);
1738 goto again;
1739 }
1730 } 1740 }
1731 spin_unlock_irqrestore(&device_domain_lock, flags); 1741 spin_unlock_irqrestore(&device_domain_lock, flags);
1732 1742
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eac84d8ff724..2089d46b0eb8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3887,10 +3887,10 @@ array_state_show(struct mddev *mddev, char *page)
3887 st = read_auto; 3887 st = read_auto;
3888 break; 3888 break;
3889 case 0: 3889 case 0:
3890 if (mddev->in_sync) 3890 if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3891 st = clean;
3892 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3893 st = write_pending; 3891 st = write_pending;
3892 else if (mddev->in_sync)
3893 st = clean;
3894 else if (mddev->safemode) 3894 else if (mddev->safemode)
3895 st = active_idle; 3895 st = active_idle;
3896 else 3896 else
@@ -8144,14 +8144,14 @@ void md_do_sync(struct md_thread *thread)
8144 8144
8145 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8145 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8146 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8146 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8147 mddev->curr_resync > 2) { 8147 mddev->curr_resync > 3) {
8148 mddev->curr_resync_completed = mddev->curr_resync; 8148 mddev->curr_resync_completed = mddev->curr_resync;
8149 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8149 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8150 } 8150 }
8151 mddev->pers->sync_request(mddev, max_sectors, &skipped); 8151 mddev->pers->sync_request(mddev, max_sectors, &skipped);
8152 8152
8153 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 8153 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8154 mddev->curr_resync > 2) { 8154 mddev->curr_resync > 3) {
8155 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8155 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8156 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8156 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8157 if (mddev->curr_resync >= mddev->recovery_cp) { 8157 if (mddev->curr_resync >= mddev->recovery_cp) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1961d827dbd1..29e2df5cd77b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -403,11 +403,14 @@ static void raid1_end_write_request(struct bio *bio)
403 struct bio *to_put = NULL; 403 struct bio *to_put = NULL;
404 int mirror = find_bio_disk(r1_bio, bio); 404 int mirror = find_bio_disk(r1_bio, bio);
405 struct md_rdev *rdev = conf->mirrors[mirror].rdev; 405 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
406 bool discard_error;
407
408 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
406 409
407 /* 410 /*
408 * 'one mirror IO has finished' event handler: 411 * 'one mirror IO has finished' event handler:
409 */ 412 */
410 if (bio->bi_error) { 413 if (bio->bi_error && !discard_error) {
411 set_bit(WriteErrorSeen, &rdev->flags); 414 set_bit(WriteErrorSeen, &rdev->flags);
412 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 415 if (!test_and_set_bit(WantReplacement, &rdev->flags))
413 set_bit(MD_RECOVERY_NEEDED, & 416 set_bit(MD_RECOVERY_NEEDED, &
@@ -444,7 +447,7 @@ static void raid1_end_write_request(struct bio *bio)
444 447
445 /* Maybe we can clear some bad blocks. */ 448 /* Maybe we can clear some bad blocks. */
446 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, 449 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
447 &first_bad, &bad_sectors)) { 450 &first_bad, &bad_sectors) && !discard_error) {
448 r1_bio->bios[mirror] = IO_MADE_GOOD; 451 r1_bio->bios[mirror] = IO_MADE_GOOD;
449 set_bit(R1BIO_MadeGood, &r1_bio->state); 452 set_bit(R1BIO_MadeGood, &r1_bio->state);
450 } 453 }
@@ -2294,17 +2297,23 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2294 * This is all done synchronously while the array is 2297 * This is all done synchronously while the array is
2295 * frozen 2298 * frozen
2296 */ 2299 */
2300
2301 bio = r1_bio->bios[r1_bio->read_disk];
2302 bdevname(bio->bi_bdev, b);
2303 bio_put(bio);
2304 r1_bio->bios[r1_bio->read_disk] = NULL;
2305
2297 if (mddev->ro == 0) { 2306 if (mddev->ro == 0) {
2298 freeze_array(conf, 1); 2307 freeze_array(conf, 1);
2299 fix_read_error(conf, r1_bio->read_disk, 2308 fix_read_error(conf, r1_bio->read_disk,
2300 r1_bio->sector, r1_bio->sectors); 2309 r1_bio->sector, r1_bio->sectors);
2301 unfreeze_array(conf); 2310 unfreeze_array(conf);
2302 } else 2311 } else {
2303 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); 2312 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2313 }
2314
2304 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); 2315 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
2305 2316
2306 bio = r1_bio->bios[r1_bio->read_disk];
2307 bdevname(bio->bi_bdev, b);
2308read_more: 2317read_more:
2309 disk = read_balance(conf, r1_bio, &max_sectors); 2318 disk = read_balance(conf, r1_bio, &max_sectors);
2310 if (disk == -1) { 2319 if (disk == -1) {
@@ -2315,11 +2324,6 @@ read_more:
2315 } else { 2324 } else {
2316 const unsigned long do_sync 2325 const unsigned long do_sync
2317 = r1_bio->master_bio->bi_opf & REQ_SYNC; 2326 = r1_bio->master_bio->bi_opf & REQ_SYNC;
2318 if (bio) {
2319 r1_bio->bios[r1_bio->read_disk] =
2320 mddev->ro ? IO_BLOCKED : NULL;
2321 bio_put(bio);
2322 }
2323 r1_bio->read_disk = disk; 2327 r1_bio->read_disk = disk;
2324 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); 2328 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2325 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, 2329 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index be1a9fca3b2d..39fddda2fef2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
447 struct r10conf *conf = r10_bio->mddev->private; 447 struct r10conf *conf = r10_bio->mddev->private;
448 int slot, repl; 448 int slot, repl;
449 struct md_rdev *rdev = NULL; 449 struct md_rdev *rdev = NULL;
450 bool discard_error;
451
452 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
450 453
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 454 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
452 455
@@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
460 /* 463 /*
461 * this branch is our 'one mirror IO has finished' event handler: 464 * this branch is our 'one mirror IO has finished' event handler:
462 */ 465 */
463 if (bio->bi_error) { 466 if (bio->bi_error && !discard_error) {
464 if (repl) 467 if (repl)
465 /* Never record new bad blocks to replacement, 468 /* Never record new bad blocks to replacement,
466 * just fail it. 469 * just fail it.
@@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
503 if (is_badblock(rdev, 506 if (is_badblock(rdev,
504 r10_bio->devs[slot].addr, 507 r10_bio->devs[slot].addr,
505 r10_bio->sectors, 508 r10_bio->sectors,
506 &first_bad, &bad_sectors)) { 509 &first_bad, &bad_sectors) && !discard_error) {
507 bio_put(bio); 510 bio_put(bio);
508 if (repl) 511 if (repl)
509 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; 512 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 1b1ab4a1d132..a227a9f3ee65 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1087,7 +1087,7 @@ static int r5l_recovery_log(struct r5l_log *log)
1087 * 1's seq + 10 and let superblock points to meta2. The same recovery will 1087 * 1's seq + 10 and let superblock points to meta2. The same recovery will
1088 * not think meta 3 is a valid meta, because its seq doesn't match 1088 * not think meta 3 is a valid meta, because its seq doesn't match
1089 */ 1089 */
1090 if (ctx.seq > log->last_cp_seq + 1) { 1090 if (ctx.seq > log->last_cp_seq) {
1091 int ret; 1091 int ret;
1092 1092
1093 ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); 1093 ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
@@ -1096,6 +1096,8 @@ static int r5l_recovery_log(struct r5l_log *log)
1096 log->seq = ctx.seq + 11; 1096 log->seq = ctx.seq + 11;
1097 log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); 1097 log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
1098 r5l_write_super(log, ctx.pos); 1098 r5l_write_super(log, ctx.pos);
1099 log->last_checkpoint = ctx.pos;
1100 log->next_checkpoint = ctx.pos;
1099 } else { 1101 } else {
1100 log->log_start = ctx.pos; 1102 log->log_start = ctx.pos;
1101 log->seq = ctx.seq; 1103 log->seq = ctx.seq;
@@ -1154,6 +1156,7 @@ create:
1154 if (create_super) { 1156 if (create_super) {
1155 log->last_cp_seq = prandom_u32(); 1157 log->last_cp_seq = prandom_u32();
1156 cp = 0; 1158 cp = 0;
1159 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
1157 /* 1160 /*
1158 * Make sure super points to correct address. Log might have 1161 * Make sure super points to correct address. Log might have
1159 * data very soon. If super hasn't correct log tail address, 1162 * data very soon. If super hasn't correct log tail address,
@@ -1168,6 +1171,7 @@ create:
1168 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) 1171 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
1169 log->max_free_space = RECLAIM_MAX_FREE_SPACE; 1172 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
1170 log->last_checkpoint = cp; 1173 log->last_checkpoint = cp;
1174 log->next_checkpoint = cp;
1171 1175
1172 __free_page(page); 1176 __free_page(page);
1173 1177
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 012225587c25..b71b747ee0ba 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -513,6 +513,11 @@ config DVB_AS102_FE
513 depends on DVB_CORE 513 depends on DVB_CORE
514 default DVB_AS102 514 default DVB_AS102
515 515
516config DVB_GP8PSK_FE
517 tristate
518 depends on DVB_CORE
519 default DVB_USB_GP8PSK
520
516comment "DVB-C (cable) frontends" 521comment "DVB-C (cable) frontends"
517 depends on DVB_CORE 522 depends on DVB_CORE
518 523
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index e90165ad361b..93921a4eaa27 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -121,6 +121,7 @@ obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
121obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o 121obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
122obj-$(CONFIG_DVB_AF9033) += af9033.o 122obj-$(CONFIG_DVB_AF9033) += af9033.o
123obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o 123obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
124obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
124obj-$(CONFIG_DVB_TC90522) += tc90522.o 125obj-$(CONFIG_DVB_TC90522) += tc90522.o
125obj-$(CONFIG_DVB_HORUS3A) += horus3a.o 126obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
126obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o 127obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
diff --git a/drivers/media/usb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index db6eb79cde07..93f59bfea092 100644
--- a/drivers/media/usb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -1,5 +1,5 @@
1/* DVB USB compliant Linux driver for the 1/*
2 * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module 2 * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
3 * 3 *
4 * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) 4 * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com)
5 * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) 5 * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com)
@@ -8,17 +8,31 @@
8 * 8 *
9 * This module is based off the vp7045 and vp702x modules 9 * This module is based off the vp7045 and vp702x modules
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free 12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation, version 2. 13 * Software Foundation, version 2.
14 *
15 * see Documentation/dvb/README.dvb-usb for more information
16 */ 14 */
17#include "gp8psk.h" 15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include "gp8psk-fe.h"
19#include "dvb_frontend.h"
20
21static int debug;
22module_param(debug, int, 0644);
23MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
24
25#define dprintk(fmt, arg...) do { \
26 if (debug) \
27 printk(KERN_DEBUG pr_fmt("%s: " fmt), \
28 __func__, ##arg); \
29} while (0)
18 30
19struct gp8psk_fe_state { 31struct gp8psk_fe_state {
20 struct dvb_frontend fe; 32 struct dvb_frontend fe;
21 struct dvb_usb_device *d; 33 void *priv;
34 const struct gp8psk_fe_ops *ops;
35 bool is_rev1;
22 u8 lock; 36 u8 lock;
23 u16 snr; 37 u16 snr;
24 unsigned long next_status_check; 38 unsigned long next_status_check;
@@ -29,22 +43,24 @@ static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe)
29{ 43{
30 struct gp8psk_fe_state *st = fe->demodulator_priv; 44 struct gp8psk_fe_state *st = fe->demodulator_priv;
31 u8 status; 45 u8 status;
32 gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1); 46
47 st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1);
33 return status & bmDCtuned; 48 return status & bmDCtuned;
34} 49}
35 50
36static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode) 51static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode)
37{ 52{
38 struct gp8psk_fe_state *state = fe->demodulator_priv; 53 struct gp8psk_fe_state *st = fe->demodulator_priv;
39 return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0); 54
55 return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0);
40} 56}
41 57
42static int gp8psk_fe_update_status(struct gp8psk_fe_state *st) 58static int gp8psk_fe_update_status(struct gp8psk_fe_state *st)
43{ 59{
44 u8 buf[6]; 60 u8 buf[6];
45 if (time_after(jiffies,st->next_status_check)) { 61 if (time_after(jiffies,st->next_status_check)) {
46 gp8psk_usb_in_op(st->d, GET_SIGNAL_LOCK, 0,0,&st->lock,1); 62 st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1);
47 gp8psk_usb_in_op(st->d, GET_SIGNAL_STRENGTH, 0,0,buf,6); 63 st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6);
48 st->snr = (buf[1]) << 8 | buf[0]; 64 st->snr = (buf[1]) << 8 | buf[0];
49 st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000; 65 st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000;
50 } 66 }
@@ -116,13 +132,12 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
116 132
117static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) 133static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
118{ 134{
119 struct gp8psk_fe_state *state = fe->demodulator_priv; 135 struct gp8psk_fe_state *st = fe->demodulator_priv;
120 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 136 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
121 u8 cmd[10]; 137 u8 cmd[10];
122 u32 freq = c->frequency * 1000; 138 u32 freq = c->frequency * 1000;
123 int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct);
124 139
125 deb_fe("%s()\n", __func__); 140 dprintk("%s()\n", __func__);
126 141
127 cmd[4] = freq & 0xff; 142 cmd[4] = freq & 0xff;
128 cmd[5] = (freq >> 8) & 0xff; 143 cmd[5] = (freq >> 8) & 0xff;
@@ -136,21 +151,21 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
136 switch (c->delivery_system) { 151 switch (c->delivery_system) {
137 case SYS_DVBS: 152 case SYS_DVBS:
138 if (c->modulation != QPSK) { 153 if (c->modulation != QPSK) {
139 deb_fe("%s: unsupported modulation selected (%d)\n", 154 dprintk("%s: unsupported modulation selected (%d)\n",
140 __func__, c->modulation); 155 __func__, c->modulation);
141 return -EOPNOTSUPP; 156 return -EOPNOTSUPP;
142 } 157 }
143 c->fec_inner = FEC_AUTO; 158 c->fec_inner = FEC_AUTO;
144 break; 159 break;
145 case SYS_DVBS2: /* kept for backwards compatibility */ 160 case SYS_DVBS2: /* kept for backwards compatibility */
146 deb_fe("%s: DVB-S2 delivery system selected\n", __func__); 161 dprintk("%s: DVB-S2 delivery system selected\n", __func__);
147 break; 162 break;
148 case SYS_TURBO: 163 case SYS_TURBO:
149 deb_fe("%s: Turbo-FEC delivery system selected\n", __func__); 164 dprintk("%s: Turbo-FEC delivery system selected\n", __func__);
150 break; 165 break;
151 166
152 default: 167 default:
153 deb_fe("%s: unsupported delivery system selected (%d)\n", 168 dprintk("%s: unsupported delivery system selected (%d)\n",
154 __func__, c->delivery_system); 169 __func__, c->delivery_system);
155 return -EOPNOTSUPP; 170 return -EOPNOTSUPP;
156 } 171 }
@@ -161,9 +176,9 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
161 cmd[3] = (c->symbol_rate >> 24) & 0xff; 176 cmd[3] = (c->symbol_rate >> 24) & 0xff;
162 switch (c->modulation) { 177 switch (c->modulation) {
163 case QPSK: 178 case QPSK:
164 if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) 179 if (st->is_rev1)
165 if (gp8psk_tuned_to_DCII(fe)) 180 if (gp8psk_tuned_to_DCII(fe))
166 gp8psk_bcm4500_reload(state->d); 181 st->ops->reload(st->priv);
167 switch (c->fec_inner) { 182 switch (c->fec_inner) {
168 case FEC_1_2: 183 case FEC_1_2:
169 cmd[9] = 0; break; 184 cmd[9] = 0; break;
@@ -207,18 +222,18 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
207 cmd[9] = 0; 222 cmd[9] = 0;
208 break; 223 break;
209 default: /* Unknown modulation */ 224 default: /* Unknown modulation */
210 deb_fe("%s: unsupported modulation selected (%d)\n", 225 dprintk("%s: unsupported modulation selected (%d)\n",
211 __func__, c->modulation); 226 __func__, c->modulation);
212 return -EOPNOTSUPP; 227 return -EOPNOTSUPP;
213 } 228 }
214 229
215 if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) 230 if (st->is_rev1)
216 gp8psk_set_tuner_mode(fe, 0); 231 gp8psk_set_tuner_mode(fe, 0);
217 gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10); 232 st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10);
218 233
219 state->lock = 0; 234 st->lock = 0;
220 state->next_status_check = jiffies; 235 st->next_status_check = jiffies;
221 state->status_check_interval = 200; 236 st->status_check_interval = 200;
222 237
223 return 0; 238 return 0;
224} 239}
@@ -228,9 +243,9 @@ static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe,
228{ 243{
229 struct gp8psk_fe_state *st = fe->demodulator_priv; 244 struct gp8psk_fe_state *st = fe->demodulator_priv;
230 245
231 deb_fe("%s\n",__func__); 246 dprintk("%s\n", __func__);
232 247
233 if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, m->msg[0], 0, 248 if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0,
234 m->msg, m->msg_len)) { 249 m->msg, m->msg_len)) {
235 return -EINVAL; 250 return -EINVAL;
236 } 251 }
@@ -243,12 +258,12 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
243 struct gp8psk_fe_state *st = fe->demodulator_priv; 258 struct gp8psk_fe_state *st = fe->demodulator_priv;
244 u8 cmd; 259 u8 cmd;
245 260
246 deb_fe("%s\n",__func__); 261 dprintk("%s\n", __func__);
247 262
248 /* These commands are certainly wrong */ 263 /* These commands are certainly wrong */
249 cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01; 264 cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01;
250 265
251 if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, cmd, 0, 266 if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0,
252 &cmd, 0)) { 267 &cmd, 0)) {
253 return -EINVAL; 268 return -EINVAL;
254 } 269 }
@@ -258,10 +273,10 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
258static int gp8psk_fe_set_tone(struct dvb_frontend *fe, 273static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
259 enum fe_sec_tone_mode tone) 274 enum fe_sec_tone_mode tone)
260{ 275{
261 struct gp8psk_fe_state* state = fe->demodulator_priv; 276 struct gp8psk_fe_state *st = fe->demodulator_priv;
262 277
263 if (gp8psk_usb_out_op(state->d,SET_22KHZ_TONE, 278 if (st->ops->out(st->priv, SET_22KHZ_TONE,
264 (tone == SEC_TONE_ON), 0, NULL, 0)) { 279 (tone == SEC_TONE_ON), 0, NULL, 0)) {
265 return -EINVAL; 280 return -EINVAL;
266 } 281 }
267 return 0; 282 return 0;
@@ -270,9 +285,9 @@ static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
270static int gp8psk_fe_set_voltage(struct dvb_frontend *fe, 285static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
271 enum fe_sec_voltage voltage) 286 enum fe_sec_voltage voltage)
272{ 287{
273 struct gp8psk_fe_state* state = fe->demodulator_priv; 288 struct gp8psk_fe_state *st = fe->demodulator_priv;
274 289
275 if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, 290 if (st->ops->out(st->priv, SET_LNB_VOLTAGE,
276 voltage == SEC_VOLTAGE_18, 0, NULL, 0)) { 291 voltage == SEC_VOLTAGE_18, 0, NULL, 0)) {
277 return -EINVAL; 292 return -EINVAL;
278 } 293 }
@@ -281,52 +296,60 @@ static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
281 296
282static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff) 297static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff)
283{ 298{
284 struct gp8psk_fe_state* state = fe->demodulator_priv; 299 struct gp8psk_fe_state *st = fe->demodulator_priv;
285 return gp8psk_usb_out_op(state->d, USE_EXTRA_VOLT, onoff, 0,NULL,0); 300
301 return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0);
286} 302}
287 303
288static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd) 304static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd)
289{ 305{
290 struct gp8psk_fe_state* state = fe->demodulator_priv; 306 struct gp8psk_fe_state *st = fe->demodulator_priv;
291 u8 cmd = sw_cmd & 0x7f; 307 u8 cmd = sw_cmd & 0x7f;
292 308
293 if (gp8psk_usb_out_op(state->d,SET_DN_SWITCH, cmd, 0, 309 if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0))
294 NULL, 0)) {
295 return -EINVAL; 310 return -EINVAL;
296 } 311
297 if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, !!(sw_cmd & 0x80), 312 if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
298 0, NULL, 0)) { 313 0, NULL, 0))
299 return -EINVAL; 314 return -EINVAL;
300 }
301 315
302 return 0; 316 return 0;
303} 317}
304 318
305static void gp8psk_fe_release(struct dvb_frontend* fe) 319static void gp8psk_fe_release(struct dvb_frontend* fe)
306{ 320{
307 struct gp8psk_fe_state *state = fe->demodulator_priv; 321 struct gp8psk_fe_state *st = fe->demodulator_priv;
308 kfree(state); 322
323 kfree(st);
309} 324}
310 325
311static struct dvb_frontend_ops gp8psk_fe_ops; 326static struct dvb_frontend_ops gp8psk_fe_ops;
312 327
313struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d) 328struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
329 void *priv, bool is_rev1)
314{ 330{
315 struct gp8psk_fe_state *s = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL); 331 struct gp8psk_fe_state *st;
316 if (s == NULL)
317 goto error;
318
319 s->d = d;
320 memcpy(&s->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
321 s->fe.demodulator_priv = s;
322
323 goto success;
324error:
325 return NULL;
326success:
327 return &s->fe;
328}
329 332
333 if (!ops || !ops->in || !ops->out || !ops->reload) {
334 pr_err("Error! gp8psk-fe ops not defined.\n");
335 return NULL;
336 }
337
338 st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
339 if (!st)
340 return NULL;
341
342 memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
343 st->fe.demodulator_priv = st;
344 st->ops = ops;
345 st->priv = priv;
346 st->is_rev1 = is_rev1;
347
348 pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : "");
349
350 return &st->fe;
351}
352EXPORT_SYMBOL_GPL(gp8psk_fe_attach);
330 353
331static struct dvb_frontend_ops gp8psk_fe_ops = { 354static struct dvb_frontend_ops gp8psk_fe_ops = {
332 .delsys = { SYS_DVBS }, 355 .delsys = { SYS_DVBS },
@@ -370,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
370 .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, 393 .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd,
371 .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage 394 .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage
372}; 395};
396
397MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
398MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S");
399MODULE_VERSION("1.1");
400MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.h b/drivers/media/dvb-frontends/gp8psk-fe.h
new file mode 100644
index 000000000000..6c7944b1ecd6
--- /dev/null
+++ b/drivers/media/dvb-frontends/gp8psk-fe.h
@@ -0,0 +1,82 @@
1/*
2 * gp8psk_fe driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef GP8PSK_FE_H
16#define GP8PSK_FE_H
17
18#include <linux/types.h>
19
20/* gp8psk commands */
21
22#define GET_8PSK_CONFIG 0x80 /* in */
23#define SET_8PSK_CONFIG 0x81
24#define I2C_WRITE 0x83
25#define I2C_READ 0x84
26#define ARM_TRANSFER 0x85
27#define TUNE_8PSK 0x86
28#define GET_SIGNAL_STRENGTH 0x87 /* in */
29#define LOAD_BCM4500 0x88
30#define BOOT_8PSK 0x89 /* in */
31#define START_INTERSIL 0x8A /* in */
32#define SET_LNB_VOLTAGE 0x8B
33#define SET_22KHZ_TONE 0x8C
34#define SEND_DISEQC_COMMAND 0x8D
35#define SET_DVB_MODE 0x8E
36#define SET_DN_SWITCH 0x8F
37#define GET_SIGNAL_LOCK 0x90 /* in */
38#define GET_FW_VERS 0x92
39#define GET_SERIAL_NUMBER 0x93 /* in */
40#define USE_EXTRA_VOLT 0x94
41#define GET_FPGA_VERS 0x95
42#define CW3K_INIT 0x9d
43
44/* PSK_configuration bits */
45#define bm8pskStarted 0x01
46#define bm8pskFW_Loaded 0x02
47#define bmIntersilOn 0x04
48#define bmDVBmode 0x08
49#define bm22kHz 0x10
50#define bmSEL18V 0x20
51#define bmDCtuned 0x40
52#define bmArmed 0x80
53
54/* Satellite modulation modes */
55#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
56#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
57#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
58#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
59
60#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
61#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
62#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
63#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
64#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
65#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
66
67/* firmware revision id's */
68#define GP8PSK_FW_REV1 0x020604
69#define GP8PSK_FW_REV2 0x020704
70#define GP8PSK_FW_VERS(_fw_vers) \
71 ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
72
73struct gp8psk_fe_ops {
74 int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
75 int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
76 int (*reload)(void *priv);
77};
78
79struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
80 void *priv, bool is_rev1);
81
82#endif
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index f95a6bc839d5..cede3975d04b 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -118,7 +118,7 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
118 *protocol = RC_TYPE_RC6_MCE; 118 *protocol = RC_TYPE_RC6_MCE;
119 dev &= 0x7f; 119 dev &= 0x7f;
120 dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n", 120 dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n",
121 toggle, vendor, dev, code); 121 *ptoggle, vendor, dev, code);
122 } else { 122 } else {
123 *ptoggle = 0; 123 *ptoggle = 0;
124 *protocol = RC_TYPE_RC6_6A_32; 124 *protocol = RC_TYPE_RC6_6A_32;
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index d4bdba60b0f7..52bc42da8a4c 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -73,23 +73,34 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI,
73 u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR; 73 u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR;
74 u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) | 74 u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) |
75 (read ? 0x80 : 0); 75 (read ? 0x80 : 0);
76 int ret;
77
78 mutex_lock(&fc_usb->data_mutex);
79 if (!read)
80 memcpy(fc_usb->data, val, sizeof(*val));
76 81
77 int len = usb_control_msg(fc_usb->udev, 82 ret = usb_control_msg(fc_usb->udev,
78 read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT, 83 read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT,
79 request, 84 request,
80 request_type, /* 0xc0 read or 0x40 write */ 85 request_type, /* 0xc0 read or 0x40 write */
81 wAddress, 86 wAddress,
82 0, 87 0,
83 val, 88 fc_usb->data,
84 sizeof(u32), 89 sizeof(u32),
85 B2C2_WAIT_FOR_OPERATION_RDW * HZ); 90 B2C2_WAIT_FOR_OPERATION_RDW * HZ);
86 91
87 if (len != sizeof(u32)) { 92 if (ret != sizeof(u32)) {
88 err("error while %s dword from %d (%d).", read ? "reading" : 93 err("error while %s dword from %d (%d).", read ? "reading" :
89 "writing", wAddress, wRegOffsPCI); 94 "writing", wAddress, wRegOffsPCI);
90 return -EIO; 95 if (ret >= 0)
96 ret = -EIO;
91 } 97 }
92 return 0; 98
99 if (read && ret >= 0)
100 memcpy(val, fc_usb->data, sizeof(*val));
101 mutex_unlock(&fc_usb->data_mutex);
102
103 return ret;
93} 104}
94/* 105/*
95 * DKT 010817 - add support for V8 memory read/write and flash update 106 * DKT 010817 - add support for V8 memory read/write and flash update
@@ -100,9 +111,14 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
100{ 111{
101 u8 request_type = USB_TYPE_VENDOR; 112 u8 request_type = USB_TYPE_VENDOR;
102 u16 wIndex; 113 u16 wIndex;
103 int nWaitTime, pipe, len; 114 int nWaitTime, pipe, ret;
104 wIndex = page << 8; 115 wIndex = page << 8;
105 116
117 if (buflen > sizeof(fc_usb->data)) {
118 err("Buffer size bigger than max URB control message\n");
119 return -EIO;
120 }
121
106 switch (req) { 122 switch (req) {
107 case B2C2_USB_READ_V8_MEM: 123 case B2C2_USB_READ_V8_MEM:
108 nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ; 124 nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ;
@@ -127,17 +143,32 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
127 deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req, 143 deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req,
128 wAddress, wIndex, buflen); 144 wAddress, wIndex, buflen);
129 145
130 len = usb_control_msg(fc_usb->udev, pipe, 146 mutex_lock(&fc_usb->data_mutex);
147
148 if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
149 memcpy(fc_usb->data, pbBuffer, buflen);
150
151 ret = usb_control_msg(fc_usb->udev, pipe,
131 req, 152 req,
132 request_type, 153 request_type,
133 wAddress, 154 wAddress,
134 wIndex, 155 wIndex,
135 pbBuffer, 156 fc_usb->data,
136 buflen, 157 buflen,
137 nWaitTime * HZ); 158 nWaitTime * HZ);
159 if (ret != buflen)
160 ret = -EIO;
161
162 if (ret >= 0) {
163 ret = 0;
164 if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
165 memcpy(pbBuffer, fc_usb->data, buflen);
166 }
138 167
139 debug_dump(pbBuffer, len, deb_v8); 168 mutex_unlock(&fc_usb->data_mutex);
140 return len == buflen ? 0 : -EIO; 169
170 debug_dump(pbBuffer, ret, deb_v8);
171 return ret;
141} 172}
142 173
143#define bytes_left_to_read_on_page(paddr,buflen) \ 174#define bytes_left_to_read_on_page(paddr,buflen) \
@@ -196,29 +227,6 @@ static int flexcop_usb_get_mac_addr(struct flexcop_device *fc, int extended)
196 fc->dvb_adapter.proposed_mac, 6); 227 fc->dvb_adapter.proposed_mac, 6);
197} 228}
198 229
199#if 0
200static int flexcop_usb_utility_req(struct flexcop_usb *fc_usb, int set,
201 flexcop_usb_utility_function_t func, u8 extra, u16 wIndex,
202 u16 buflen, u8 *pvBuffer)
203{
204 u16 wValue;
205 u8 request_type = (set ? USB_DIR_OUT : USB_DIR_IN) | USB_TYPE_VENDOR;
206 int nWaitTime = 2,
207 pipe = set ? B2C2_USB_CTRL_PIPE_OUT : B2C2_USB_CTRL_PIPE_IN, len;
208 wValue = (func << 8) | extra;
209
210 len = usb_control_msg(fc_usb->udev,pipe,
211 B2C2_USB_UTILITY,
212 request_type,
213 wValue,
214 wIndex,
215 pvBuffer,
216 buflen,
217 nWaitTime * HZ);
218 return len == buflen ? 0 : -EIO;
219}
220#endif
221
222/* usb i2c stuff */ 230/* usb i2c stuff */
223static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, 231static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
224 flexcop_usb_request_t req, flexcop_usb_i2c_function_t func, 232 flexcop_usb_request_t req, flexcop_usb_i2c_function_t func,
@@ -226,9 +234,14 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
226{ 234{
227 struct flexcop_usb *fc_usb = i2c->fc->bus_specific; 235 struct flexcop_usb *fc_usb = i2c->fc->bus_specific;
228 u16 wValue, wIndex; 236 u16 wValue, wIndex;
229 int nWaitTime,pipe,len; 237 int nWaitTime, pipe, ret;
230 u8 request_type = USB_TYPE_VENDOR; 238 u8 request_type = USB_TYPE_VENDOR;
231 239
240 if (buflen > sizeof(fc_usb->data)) {
241 err("Buffer size bigger than max URB control message\n");
242 return -EIO;
243 }
244
232 switch (func) { 245 switch (func) {
233 case USB_FUNC_I2C_WRITE: 246 case USB_FUNC_I2C_WRITE:
234 case USB_FUNC_I2C_MULTIWRITE: 247 case USB_FUNC_I2C_MULTIWRITE:
@@ -257,15 +270,32 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
257 wValue & 0xff, wValue >> 8, 270 wValue & 0xff, wValue >> 8,
258 wIndex & 0xff, wIndex >> 8); 271 wIndex & 0xff, wIndex >> 8);
259 272
260 len = usb_control_msg(fc_usb->udev,pipe, 273 mutex_lock(&fc_usb->data_mutex);
274
275 if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
276 memcpy(fc_usb->data, buf, buflen);
277
278 ret = usb_control_msg(fc_usb->udev, pipe,
261 req, 279 req,
262 request_type, 280 request_type,
263 wValue, 281 wValue,
264 wIndex, 282 wIndex,
265 buf, 283 fc_usb->data,
266 buflen, 284 buflen,
267 nWaitTime * HZ); 285 nWaitTime * HZ);
268 return len == buflen ? 0 : -EREMOTEIO; 286
287 if (ret != buflen)
288 ret = -EIO;
289
290 if (ret >= 0) {
291 ret = 0;
292 if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
293 memcpy(buf, fc_usb->data, buflen);
294 }
295
296 mutex_unlock(&fc_usb->data_mutex);
297
298 return 0;
269} 299}
270 300
271/* actual bus specific access functions, 301/* actual bus specific access functions,
@@ -516,6 +546,7 @@ static int flexcop_usb_probe(struct usb_interface *intf,
516 /* general flexcop init */ 546 /* general flexcop init */
517 fc_usb = fc->bus_specific; 547 fc_usb = fc->bus_specific;
518 fc_usb->fc_dev = fc; 548 fc_usb->fc_dev = fc;
549 mutex_init(&fc_usb->data_mutex);
519 550
520 fc->read_ibi_reg = flexcop_usb_read_ibi_reg; 551 fc->read_ibi_reg = flexcop_usb_read_ibi_reg;
521 fc->write_ibi_reg = flexcop_usb_write_ibi_reg; 552 fc->write_ibi_reg = flexcop_usb_write_ibi_reg;
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index 92529a9c4475..25ad43166e78 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -29,6 +29,10 @@ struct flexcop_usb {
29 29
30 u8 tmp_buffer[1023+190]; 30 u8 tmp_buffer[1023+190];
31 int tmp_buffer_length; 31 int tmp_buffer_length;
32
33 /* for URB control messages */
34 u8 data[80];
35 struct mutex data_mutex;
32}; 36};
33 37
34#if 0 38#if 0
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 13620cdf0599..e9100a235831 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -545,18 +545,30 @@ static void free_sbufs(struct camera_data *cam)
545static int write_packet(struct usb_device *udev, 545static int write_packet(struct usb_device *udev,
546 u8 request, u8 * registers, u16 start, size_t size) 546 u8 request, u8 * registers, u16 start, size_t size)
547{ 547{
548 unsigned char *buf;
549 int ret;
550
548 if (!registers || size <= 0) 551 if (!registers || size <= 0)
549 return -EINVAL; 552 return -EINVAL;
550 553
551 return usb_control_msg(udev, 554 buf = kmalloc(size, GFP_KERNEL);
555 if (!buf)
556 return -ENOMEM;
557
558 memcpy(buf, registers, size);
559
560 ret = usb_control_msg(udev,
552 usb_sndctrlpipe(udev, 0), 561 usb_sndctrlpipe(udev, 0),
553 request, 562 request,
554 USB_TYPE_VENDOR | USB_RECIP_DEVICE, 563 USB_TYPE_VENDOR | USB_RECIP_DEVICE,
555 start, /* value */ 564 start, /* value */
556 0, /* index */ 565 0, /* index */
557 registers, /* buffer */ 566 buf, /* buffer */
558 size, 567 size,
559 HZ); 568 HZ);
569
570 kfree(buf);
571 return ret;
560} 572}
561 573
562/**************************************************************************** 574/****************************************************************************
@@ -567,18 +579,32 @@ static int write_packet(struct usb_device *udev,
567static int read_packet(struct usb_device *udev, 579static int read_packet(struct usb_device *udev,
568 u8 request, u8 * registers, u16 start, size_t size) 580 u8 request, u8 * registers, u16 start, size_t size)
569{ 581{
582 unsigned char *buf;
583 int ret;
584
570 if (!registers || size <= 0) 585 if (!registers || size <= 0)
571 return -EINVAL; 586 return -EINVAL;
572 587
573 return usb_control_msg(udev, 588 buf = kmalloc(size, GFP_KERNEL);
589 if (!buf)
590 return -ENOMEM;
591
592 ret = usb_control_msg(udev,
574 usb_rcvctrlpipe(udev, 0), 593 usb_rcvctrlpipe(udev, 0),
575 request, 594 request,
576 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE, 595 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
577 start, /* value */ 596 start, /* value */
578 0, /* index */ 597 0, /* index */
579 registers, /* buffer */ 598 buf, /* buffer */
580 size, 599 size,
581 HZ); 600 HZ);
601
602 if (ret >= 0)
603 memcpy(registers, buf, size);
604
605 kfree(buf);
606
607 return ret;
582} 608}
583 609
584/****************************************************************************** 610/******************************************************************************
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 2a7b5a963acf..3b3f32b426d1 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_DVB_USB_VP7045) += dvb-usb-vp7045.o
8dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o 8dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o
9obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o 9obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o
10 10
11dvb-usb-gp8psk-objs := gp8psk.o gp8psk-fe.o 11dvb-usb-gp8psk-objs := gp8psk.o
12obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o 12obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o
13 13
14dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o 14dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index efa782ed6e2d..7853261906b1 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -52,17 +52,15 @@ u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
52struct af9005_device_state { 52struct af9005_device_state {
53 u8 sequence; 53 u8 sequence;
54 int led_state; 54 int led_state;
55 unsigned char data[256];
55}; 56};
56 57
57static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, 58static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
58 int readwrite, int type, u8 * values, int len) 59 int readwrite, int type, u8 * values, int len)
59{ 60{
60 struct af9005_device_state *st = d->priv; 61 struct af9005_device_state *st = d->priv;
61 u8 obuf[16] = { 0 }; 62 u8 command, seq;
62 u8 ibuf[17] = { 0 }; 63 int i, ret;
63 u8 command;
64 int i;
65 int ret;
66 64
67 if (len < 1) { 65 if (len < 1) {
68 err("generic read/write, less than 1 byte. Makes no sense."); 66 err("generic read/write, less than 1 byte. Makes no sense.");
@@ -73,16 +71,17 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
73 return -EINVAL; 71 return -EINVAL;
74 } 72 }
75 73
76 obuf[0] = 14; /* rest of buffer length low */ 74 mutex_lock(&d->data_mutex);
77 obuf[1] = 0; /* rest of buffer length high */ 75 st->data[0] = 14; /* rest of buffer length low */
76 st->data[1] = 0; /* rest of buffer length high */
78 77
79 obuf[2] = AF9005_REGISTER_RW; /* register operation */ 78 st->data[2] = AF9005_REGISTER_RW; /* register operation */
80 obuf[3] = 12; /* rest of buffer length */ 79 st->data[3] = 12; /* rest of buffer length */
81 80
82 obuf[4] = st->sequence++; /* sequence number */ 81 st->data[4] = seq = st->sequence++; /* sequence number */
83 82
84 obuf[5] = (u8) (reg >> 8); /* register address */ 83 st->data[5] = (u8) (reg >> 8); /* register address */
85 obuf[6] = (u8) (reg & 0xff); 84 st->data[6] = (u8) (reg & 0xff);
86 85
87 if (type == AF9005_OFDM_REG) { 86 if (type == AF9005_OFDM_REG) {
88 command = AF9005_CMD_OFDM_REG; 87 command = AF9005_CMD_OFDM_REG;
@@ -96,51 +95,52 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
96 command |= readwrite; 95 command |= readwrite;
97 if (readwrite == AF9005_CMD_WRITE) 96 if (readwrite == AF9005_CMD_WRITE)
98 for (i = 0; i < len; i++) 97 for (i = 0; i < len; i++)
99 obuf[8 + i] = values[i]; 98 st->data[8 + i] = values[i];
100 else if (type == AF9005_TUNER_REG) 99 else if (type == AF9005_TUNER_REG)
101 /* read command for tuner, the first byte contains the i2c address */ 100 /* read command for tuner, the first byte contains the i2c address */
102 obuf[8] = values[0]; 101 st->data[8] = values[0];
103 obuf[7] = command; 102 st->data[7] = command;
104 103
105 ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 17, 0); 104 ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 17, 0);
106 if (ret) 105 if (ret)
107 return ret; 106 goto ret;
108 107
109 /* sanity check */ 108 /* sanity check */
110 if (ibuf[2] != AF9005_REGISTER_RW_ACK) { 109 if (st->data[2] != AF9005_REGISTER_RW_ACK) {
111 err("generic read/write, wrong reply code."); 110 err("generic read/write, wrong reply code.");
112 return -EIO; 111 ret = -EIO;
112 goto ret;
113 } 113 }
114 if (ibuf[3] != 0x0d) { 114 if (st->data[3] != 0x0d) {
115 err("generic read/write, wrong length in reply."); 115 err("generic read/write, wrong length in reply.");
116 return -EIO; 116 ret = -EIO;
117 goto ret;
117 } 118 }
118 if (ibuf[4] != obuf[4]) { 119 if (st->data[4] != seq) {
119 err("generic read/write, wrong sequence in reply."); 120 err("generic read/write, wrong sequence in reply.");
120 return -EIO; 121 ret = -EIO;
122 goto ret;
121 } 123 }
122 /* 124 /*
123 Windows driver doesn't check these fields, in fact sometimes 125 * In thesis, both input and output buffers should have
124 the register in the reply is different that what has been sent 126 * identical values for st->data[5] to st->data[8].
125 127 * However, windows driver doesn't check these fields, in fact
126 if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) { 128 * sometimes the register in the reply is different that what
127 err("generic read/write, wrong register in reply."); 129 * has been sent
128 return -EIO;
129 }
130 if (ibuf[7] != command) {
131 err("generic read/write wrong command in reply.");
132 return -EIO;
133 }
134 */ 130 */
135 if (ibuf[16] != 0x01) { 131 if (st->data[16] != 0x01) {
136 err("generic read/write wrong status code in reply."); 132 err("generic read/write wrong status code in reply.");
137 return -EIO; 133 ret = -EIO;
134 goto ret;
138 } 135 }
136
139 if (readwrite == AF9005_CMD_READ) 137 if (readwrite == AF9005_CMD_READ)
140 for (i = 0; i < len; i++) 138 for (i = 0; i < len; i++)
141 values[i] = ibuf[8 + i]; 139 values[i] = st->data[8 + i];
142 140
143 return 0; 141ret:
142 mutex_unlock(&d->data_mutex);
143 return ret;
144 144
145} 145}
146 146
@@ -464,8 +464,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
464 struct af9005_device_state *st = d->priv; 464 struct af9005_device_state *st = d->priv;
465 465
466 int ret, i, packet_len; 466 int ret, i, packet_len;
467 u8 buf[64]; 467 u8 seq;
468 u8 ibuf[64];
469 468
470 if (wlen < 0) { 469 if (wlen < 0) {
471 err("send command, wlen less than 0 bytes. Makes no sense."); 470 err("send command, wlen less than 0 bytes. Makes no sense.");
@@ -480,94 +479,97 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
480 return -EINVAL; 479 return -EINVAL;
481 } 480 }
482 packet_len = wlen + 5; 481 packet_len = wlen + 5;
483 buf[0] = (u8) (packet_len & 0xff); 482
484 buf[1] = (u8) ((packet_len & 0xff00) >> 8); 483 mutex_lock(&d->data_mutex);
485 484
486 buf[2] = 0x26; /* packet type */ 485 st->data[0] = (u8) (packet_len & 0xff);
487 buf[3] = wlen + 3; 486 st->data[1] = (u8) ((packet_len & 0xff00) >> 8);
488 buf[4] = st->sequence++; 487
489 buf[5] = command; 488 st->data[2] = 0x26; /* packet type */
490 buf[6] = wlen; 489 st->data[3] = wlen + 3;
490 st->data[4] = seq = st->sequence++;
491 st->data[5] = command;
492 st->data[6] = wlen;
491 for (i = 0; i < wlen; i++) 493 for (i = 0; i < wlen; i++)
492 buf[7 + i] = wbuf[i]; 494 st->data[7 + i] = wbuf[i];
493 ret = dvb_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0); 495 ret = dvb_usb_generic_rw(d, st->data, wlen + 7, st->data, rlen + 7, 0);
494 if (ret) 496 if (st->data[2] != 0x27) {
495 return ret;
496 if (ibuf[2] != 0x27) {
497 err("send command, wrong reply code."); 497 err("send command, wrong reply code.");
498 return -EIO; 498 ret = -EIO;
499 } 499 } else if (st->data[4] != seq) {
500 if (ibuf[4] != buf[4]) {
501 err("send command, wrong sequence in reply."); 500 err("send command, wrong sequence in reply.");
502 return -EIO; 501 ret = -EIO;
503 } 502 } else if (st->data[5] != 0x01) {
504 if (ibuf[5] != 0x01) {
505 err("send command, wrong status code in reply."); 503 err("send command, wrong status code in reply.");
506 return -EIO; 504 ret = -EIO;
507 } 505 } else if (st->data[6] != rlen) {
508 if (ibuf[6] != rlen) {
509 err("send command, invalid data length in reply."); 506 err("send command, invalid data length in reply.");
510 return -EIO; 507 ret = -EIO;
511 } 508 }
512 for (i = 0; i < rlen; i++) 509 if (!ret) {
513 rbuf[i] = ibuf[i + 7]; 510 for (i = 0; i < rlen; i++)
514 return 0; 511 rbuf[i] = st->data[i + 7];
512 }
513
514 mutex_unlock(&d->data_mutex);
515 return ret;
515} 516}
516 517
517int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, 518int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
518 int len) 519 int len)
519{ 520{
520 struct af9005_device_state *st = d->priv; 521 struct af9005_device_state *st = d->priv;
521 u8 obuf[16], ibuf[14]; 522 u8 seq;
522 int ret, i; 523 int ret, i;
523 524
524 memset(obuf, 0, sizeof(obuf)); 525 mutex_lock(&d->data_mutex);
525 memset(ibuf, 0, sizeof(ibuf));
526 526
527 obuf[0] = 14; /* length of rest of packet low */ 527 memset(st->data, 0, sizeof(st->data));
528 obuf[1] = 0; /* length of rest of packer high */
529 528
530 obuf[2] = 0x2a; /* read/write eeprom */ 529 st->data[0] = 14; /* length of rest of packet low */
530 st->data[1] = 0; /* length of rest of packer high */
531 531
532 obuf[3] = 12; /* size */ 532 st->data[2] = 0x2a; /* read/write eeprom */
533 533
534 obuf[4] = st->sequence++; 534 st->data[3] = 12; /* size */
535 535
536 obuf[5] = 0; /* read */ 536 st->data[4] = seq = st->sequence++;
537 537
538 obuf[6] = len; 538 st->data[5] = 0; /* read */
539 obuf[7] = address; 539
540 ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 14, 0); 540 st->data[6] = len;
541 if (ret) 541 st->data[7] = address;
542 return ret; 542 ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 14, 0);
543 if (ibuf[2] != 0x2b) { 543 if (st->data[2] != 0x2b) {
544 err("Read eeprom, invalid reply code"); 544 err("Read eeprom, invalid reply code");
545 return -EIO; 545 ret = -EIO;
546 } 546 } else if (st->data[3] != 10) {
547 if (ibuf[3] != 10) {
548 err("Read eeprom, invalid reply length"); 547 err("Read eeprom, invalid reply length");
549 return -EIO; 548 ret = -EIO;
550 } 549 } else if (st->data[4] != seq) {
551 if (ibuf[4] != obuf[4]) {
552 err("Read eeprom, wrong sequence in reply "); 550 err("Read eeprom, wrong sequence in reply ");
553 return -EIO; 551 ret = -EIO;
554 } 552 } else if (st->data[5] != 1) {
555 if (ibuf[5] != 1) {
556 err("Read eeprom, wrong status in reply "); 553 err("Read eeprom, wrong status in reply ");
557 return -EIO; 554 ret = -EIO;
558 } 555 }
559 for (i = 0; i < len; i++) { 556
560 values[i] = ibuf[6 + i]; 557 if (!ret) {
558 for (i = 0; i < len; i++)
559 values[i] = st->data[6 + i];
561 } 560 }
562 return 0; 561 mutex_unlock(&d->data_mutex);
562
563 return ret;
563} 564}
564 565
565static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply) 566static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
567 u8 *buf, int size)
566{ 568{
567 u8 buf[FW_BULKOUT_SIZE + 2];
568 u16 checksum; 569 u16 checksum;
569 int act_len, i, ret; 570 int act_len, i, ret;
570 memset(buf, 0, sizeof(buf)); 571
572 memset(buf, 0, size);
571 buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); 573 buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
572 buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff); 574 buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
573 switch (type) { 575 switch (type) {
@@ -720,15 +722,21 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
720{ 722{
721 int i, packets, ret, act_len; 723 int i, packets, ret, act_len;
722 724
723 u8 buf[FW_BULKOUT_SIZE + 2]; 725 u8 *buf;
724 u8 reply; 726 u8 reply;
725 727
726 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); 728 buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
729 if (!buf)
730 return -ENOMEM;
731
732 ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
733 FW_BULKOUT_SIZE + 2);
727 if (ret) 734 if (ret)
728 return ret; 735 goto err;
729 if (reply != 0x01) { 736 if (reply != 0x01) {
730 err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply); 737 err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply);
731 return -EIO; 738 ret = -EIO;
739 goto err;
732 } 740 }
733 packets = fw->size / FW_BULKOUT_SIZE; 741 packets = fw->size / FW_BULKOUT_SIZE;
734 buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); 742 buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
@@ -743,28 +751,35 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
743 buf, FW_BULKOUT_SIZE + 2, &act_len, 1000); 751 buf, FW_BULKOUT_SIZE + 2, &act_len, 1000);
744 if (ret) { 752 if (ret) {
745 err("firmware download failed at packet %d with code %d", i, ret); 753 err("firmware download failed at packet %d with code %d", i, ret);
746 return ret; 754 goto err;
747 } 755 }
748 } 756 }
749 ret = af9005_boot_packet(udev, FW_CONFIRM, &reply); 757 ret = af9005_boot_packet(udev, FW_CONFIRM, &reply,
758 buf, FW_BULKOUT_SIZE + 2);
750 if (ret) 759 if (ret)
751 return ret; 760 goto err;
752 if (reply != (u8) (packets & 0xff)) { 761 if (reply != (u8) (packets & 0xff)) {
753 err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply); 762 err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply);
754 return -EIO; 763 ret = -EIO;
764 goto err;
755 } 765 }
756 ret = af9005_boot_packet(udev, FW_BOOT, &reply); 766 ret = af9005_boot_packet(udev, FW_BOOT, &reply, buf,
767 FW_BULKOUT_SIZE + 2);
757 if (ret) 768 if (ret)
758 return ret; 769 goto err;
759 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); 770 ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
771 FW_BULKOUT_SIZE + 2);
760 if (ret) 772 if (ret)
761 return ret; 773 goto err;
762 if (reply != 0x02) { 774 if (reply != 0x02) {
763 err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply); 775 err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply);
764 return -EIO; 776 ret = -EIO;
777 goto err;
765 } 778 }
766 779
767 return 0; 780err:
781 kfree(buf);
782 return ret;
768 783
769} 784}
770 785
@@ -823,53 +838,59 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
823{ 838{
824 struct af9005_device_state *st = d->priv; 839 struct af9005_device_state *st = d->priv;
825 int ret, len; 840 int ret, len;
826 841 u8 seq;
827 u8 obuf[5];
828 u8 ibuf[256];
829 842
830 *state = REMOTE_NO_KEY_PRESSED; 843 *state = REMOTE_NO_KEY_PRESSED;
831 if (rc_decode == NULL) { 844 if (rc_decode == NULL) {
832 /* it shouldn't never come here */ 845 /* it shouldn't never come here */
833 return 0; 846 return 0;
834 } 847 }
848
849 mutex_lock(&d->data_mutex);
850
835 /* deb_info("rc_query\n"); */ 851 /* deb_info("rc_query\n"); */
836 obuf[0] = 3; /* rest of packet length low */ 852 st->data[0] = 3; /* rest of packet length low */
837 obuf[1] = 0; /* rest of packet lentgh high */ 853 st->data[1] = 0; /* rest of packet lentgh high */
838 obuf[2] = 0x40; /* read remote */ 854 st->data[2] = 0x40; /* read remote */
839 obuf[3] = 1; /* rest of packet length */ 855 st->data[3] = 1; /* rest of packet length */
840 obuf[4] = st->sequence++; /* sequence number */ 856 st->data[4] = seq = st->sequence++; /* sequence number */
841 ret = dvb_usb_generic_rw(d, obuf, 5, ibuf, 256, 0); 857 ret = dvb_usb_generic_rw(d, st->data, 5, st->data, 256, 0);
842 if (ret) { 858 if (ret) {
843 err("rc query failed"); 859 err("rc query failed");
844 return ret; 860 goto ret;
845 } 861 }
846 if (ibuf[2] != 0x41) { 862 if (st->data[2] != 0x41) {
847 err("rc query bad header."); 863 err("rc query bad header.");
848 return -EIO; 864 ret = -EIO;
849 } 865 goto ret;
850 if (ibuf[4] != obuf[4]) { 866 } else if (st->data[4] != seq) {
851 err("rc query bad sequence."); 867 err("rc query bad sequence.");
852 return -EIO; 868 ret = -EIO;
869 goto ret;
853 } 870 }
854 len = ibuf[5]; 871 len = st->data[5];
855 if (len > 246) { 872 if (len > 246) {
856 err("rc query invalid length"); 873 err("rc query invalid length");
857 return -EIO; 874 ret = -EIO;
875 goto ret;
858 } 876 }
859 if (len > 0) { 877 if (len > 0) {
860 deb_rc("rc data (%d) ", len); 878 deb_rc("rc data (%d) ", len);
861 debug_dump((ibuf + 6), len, deb_rc); 879 debug_dump((st->data + 6), len, deb_rc);
862 ret = rc_decode(d, &ibuf[6], len, event, state); 880 ret = rc_decode(d, &st->data[6], len, event, state);
863 if (ret) { 881 if (ret) {
864 err("rc_decode failed"); 882 err("rc_decode failed");
865 return ret; 883 goto ret;
866 } else { 884 } else {
867 deb_rc("rc_decode state %x event %x\n", *state, *event); 885 deb_rc("rc_decode state %x event %x\n", *state, *event);
868 if (*state == REMOTE_KEY_REPEAT) 886 if (*state == REMOTE_KEY_REPEAT)
869 *event = d->last_event; 887 *event = d->last_event;
870 } 888 }
871 } 889 }
872 return 0; 890
891ret:
892 mutex_unlock(&d->data_mutex);
893 return ret;
873} 894}
874 895
875static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff) 896static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff)
@@ -953,10 +974,16 @@ static int af9005_identify_state(struct usb_device *udev,
953 int *cold) 974 int *cold)
954{ 975{
955 int ret; 976 int ret;
956 u8 reply; 977 u8 reply, *buf;
957 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); 978
979 buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
980 if (!buf)
981 return -ENOMEM;
982
983 ret = af9005_boot_packet(udev, FW_CONFIG, &reply,
984 buf, FW_BULKOUT_SIZE + 2);
958 if (ret) 985 if (ret)
959 return ret; 986 goto err;
960 deb_info("result of FW_CONFIG in identify state %d\n", reply); 987 deb_info("result of FW_CONFIG in identify state %d\n", reply);
961 if (reply == 0x01) 988 if (reply == 0x01)
962 *cold = 1; 989 *cold = 1;
@@ -965,7 +992,10 @@ static int af9005_identify_state(struct usb_device *udev,
965 else 992 else
966 return -EIO; 993 return -EIO;
967 deb_info("Identify state cold = %d\n", *cold); 994 deb_info("Identify state cold = %d\n", *cold);
968 return 0; 995
996err:
997 kfree(buf);
998 return ret;
969} 999}
970 1000
971static struct dvb_usb_device_properties af9005_properties; 1001static struct dvb_usb_device_properties af9005_properties;
@@ -974,7 +1004,7 @@ static int af9005_usb_probe(struct usb_interface *intf,
974 const struct usb_device_id *id) 1004 const struct usb_device_id *id)
975{ 1005{
976 return dvb_usb_device_init(intf, &af9005_properties, 1006 return dvb_usb_device_init(intf, &af9005_properties,
977 THIS_MODULE, NULL, adapter_nr); 1007 THIS_MODULE, NULL, adapter_nr);
978} 1008}
979 1009
980enum af9005_usb_table_entry { 1010enum af9005_usb_table_entry {
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 9fd1527494eb..290275bc7fde 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -41,6 +41,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
41 41
42struct cinergyt2_state { 42struct cinergyt2_state {
43 u8 rc_counter; 43 u8 rc_counter;
44 unsigned char data[64];
44}; 45};
45 46
46/* We are missing a release hook with usb_device data */ 47/* We are missing a release hook with usb_device data */
@@ -50,38 +51,57 @@ static struct dvb_usb_device_properties cinergyt2_properties;
50 51
51static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable) 52static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
52{ 53{
53 char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 }; 54 struct dvb_usb_device *d = adap->dev;
54 char result[64]; 55 struct cinergyt2_state *st = d->priv;
55 return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result, 56 int ret;
56 sizeof(result), 0); 57
58 mutex_lock(&d->data_mutex);
59 st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
60 st->data[1] = enable ? 1 : 0;
61
62 ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0);
63 mutex_unlock(&d->data_mutex);
64
65 return ret;
57} 66}
58 67
59static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable) 68static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
60{ 69{
61 char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 }; 70 struct cinergyt2_state *st = d->priv;
62 char state[3]; 71 int ret;
63 return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0); 72
73 mutex_lock(&d->data_mutex);
74 st->data[0] = CINERGYT2_EP1_SLEEP_MODE;
75 st->data[1] = enable ? 0 : 1;
76
77 ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0);
78 mutex_unlock(&d->data_mutex);
79
80 return ret;
64} 81}
65 82
66static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) 83static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
67{ 84{
68 char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION }; 85 struct dvb_usb_device *d = adap->dev;
69 char state[3]; 86 struct cinergyt2_state *st = d->priv;
70 int ret; 87 int ret;
71 88
72 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); 89 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
73 90
74 ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state, 91 mutex_lock(&d->data_mutex);
75 sizeof(state), 0); 92 st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
93
94 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
76 if (ret < 0) { 95 if (ret < 0) {
77 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " 96 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
78 "state info\n"); 97 "state info\n");
79 } 98 }
99 mutex_unlock(&d->data_mutex);
80 100
81 /* Copy this pointer as we are gonna need it in the release phase */ 101 /* Copy this pointer as we are gonna need it in the release phase */
82 cinergyt2_usb_device = adap->dev; 102 cinergyt2_usb_device = adap->dev;
83 103
84 return 0; 104 return ret;
85} 105}
86 106
87static struct rc_map_table rc_map_cinergyt2_table[] = { 107static struct rc_map_table rc_map_cinergyt2_table[] = {
@@ -141,13 +161,18 @@ static int repeatable_keys[] = {
141static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 161static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
142{ 162{
143 struct cinergyt2_state *st = d->priv; 163 struct cinergyt2_state *st = d->priv;
144 u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS; 164 int i, ret;
145 int i;
146 165
147 *state = REMOTE_NO_KEY_PRESSED; 166 *state = REMOTE_NO_KEY_PRESSED;
148 167
149 dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0); 168 mutex_lock(&d->data_mutex);
150 if (key[4] == 0xff) { 169 st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS;
170
171 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
172 if (ret < 0)
173 goto ret;
174
175 if (st->data[4] == 0xff) {
151 /* key repeat */ 176 /* key repeat */
152 st->rc_counter++; 177 st->rc_counter++;
153 if (st->rc_counter > RC_REPEAT_DELAY) { 178 if (st->rc_counter > RC_REPEAT_DELAY) {
@@ -157,34 +182,36 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
157 *event = d->last_event; 182 *event = d->last_event;
158 deb_rc("repeat key, event %x\n", 183 deb_rc("repeat key, event %x\n",
159 *event); 184 *event);
160 return 0; 185 goto ret;
161 } 186 }
162 } 187 }
163 deb_rc("repeated key (non repeatable)\n"); 188 deb_rc("repeated key (non repeatable)\n");
164 } 189 }
165 return 0; 190 goto ret;
166 } 191 }
167 192
168 /* hack to pass checksum on the custom field */ 193 /* hack to pass checksum on the custom field */
169 key[2] = ~key[1]; 194 st->data[2] = ~st->data[1];
170 dvb_usb_nec_rc_key_to_event(d, key, event, state); 195 dvb_usb_nec_rc_key_to_event(d, st->data, event, state);
171 if (key[0] != 0) { 196 if (st->data[0] != 0) {
172 if (*event != d->last_event) 197 if (*event != d->last_event)
173 st->rc_counter = 0; 198 st->rc_counter = 0;
174 199
175 deb_rc("key: %*ph\n", 5, key); 200 deb_rc("key: %*ph\n", 5, st->data);
176 } 201 }
177 return 0; 202
203ret:
204 mutex_unlock(&d->data_mutex);
205 return ret;
178} 206}
179 207
180static int cinergyt2_usb_probe(struct usb_interface *intf, 208static int cinergyt2_usb_probe(struct usb_interface *intf,
181 const struct usb_device_id *id) 209 const struct usb_device_id *id)
182{ 210{
183 return dvb_usb_device_init(intf, &cinergyt2_properties, 211 return dvb_usb_device_init(intf, &cinergyt2_properties,
184 THIS_MODULE, NULL, adapter_nr); 212 THIS_MODULE, NULL, adapter_nr);
185} 213}
186 214
187
188static struct usb_device_id cinergyt2_usb_table[] = { 215static struct usb_device_id cinergyt2_usb_table[] = {
189 { USB_DEVICE(USB_VID_TERRATEC, 0x0038) }, 216 { USB_DEVICE(USB_VID_TERRATEC, 0x0038) },
190 { 0 } 217 { 0 }
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index b3ec743a7a2e..2d29b4174dba 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -139,32 +139,42 @@ static uint16_t compute_tps(struct dtv_frontend_properties *op)
139struct cinergyt2_fe_state { 139struct cinergyt2_fe_state {
140 struct dvb_frontend fe; 140 struct dvb_frontend fe;
141 struct dvb_usb_device *d; 141 struct dvb_usb_device *d;
142
143 unsigned char data[64];
144 struct mutex data_mutex;
145
146 struct dvbt_get_status_msg status;
142}; 147};
143 148
144static int cinergyt2_fe_read_status(struct dvb_frontend *fe, 149static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
145 enum fe_status *status) 150 enum fe_status *status)
146{ 151{
147 struct cinergyt2_fe_state *state = fe->demodulator_priv; 152 struct cinergyt2_fe_state *state = fe->demodulator_priv;
148 struct dvbt_get_status_msg result;
149 u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
150 int ret; 153 int ret;
151 154
152 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result, 155 mutex_lock(&state->data_mutex);
153 sizeof(result), 0); 156 state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
157
158 ret = dvb_usb_generic_rw(state->d, state->data, 1,
159 state->data, sizeof(state->status), 0);
160 if (!ret)
161 memcpy(&state->status, state->data, sizeof(state->status));
162 mutex_unlock(&state->data_mutex);
163
154 if (ret < 0) 164 if (ret < 0)
155 return ret; 165 return ret;
156 166
157 *status = 0; 167 *status = 0;
158 168
159 if (0xffff - le16_to_cpu(result.gain) > 30) 169 if (0xffff - le16_to_cpu(state->status.gain) > 30)
160 *status |= FE_HAS_SIGNAL; 170 *status |= FE_HAS_SIGNAL;
161 if (result.lock_bits & (1 << 6)) 171 if (state->status.lock_bits & (1 << 6))
162 *status |= FE_HAS_LOCK; 172 *status |= FE_HAS_LOCK;
163 if (result.lock_bits & (1 << 5)) 173 if (state->status.lock_bits & (1 << 5))
164 *status |= FE_HAS_SYNC; 174 *status |= FE_HAS_SYNC;
165 if (result.lock_bits & (1 << 4)) 175 if (state->status.lock_bits & (1 << 4))
166 *status |= FE_HAS_CARRIER; 176 *status |= FE_HAS_CARRIER;
167 if (result.lock_bits & (1 << 1)) 177 if (state->status.lock_bits & (1 << 1))
168 *status |= FE_HAS_VITERBI; 178 *status |= FE_HAS_VITERBI;
169 179
170 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != 180 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
@@ -177,34 +187,16 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
177static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber) 187static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
178{ 188{
179 struct cinergyt2_fe_state *state = fe->demodulator_priv; 189 struct cinergyt2_fe_state *state = fe->demodulator_priv;
180 struct dvbt_get_status_msg status;
181 char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
182 int ret;
183
184 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
185 sizeof(status), 0);
186 if (ret < 0)
187 return ret;
188 190
189 *ber = le32_to_cpu(status.viterbi_error_rate); 191 *ber = le32_to_cpu(state->status.viterbi_error_rate);
190 return 0; 192 return 0;
191} 193}
192 194
193static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) 195static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
194{ 196{
195 struct cinergyt2_fe_state *state = fe->demodulator_priv; 197 struct cinergyt2_fe_state *state = fe->demodulator_priv;
196 struct dvbt_get_status_msg status;
197 u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
198 int ret;
199 198
200 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status, 199 *unc = le32_to_cpu(state->status.uncorrected_block_count);
201 sizeof(status), 0);
202 if (ret < 0) {
203 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
204 ret);
205 return ret;
206 }
207 *unc = le32_to_cpu(status.uncorrected_block_count);
208 return 0; 200 return 0;
209} 201}
210 202
@@ -212,35 +204,16 @@ static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
212 u16 *strength) 204 u16 *strength)
213{ 205{
214 struct cinergyt2_fe_state *state = fe->demodulator_priv; 206 struct cinergyt2_fe_state *state = fe->demodulator_priv;
215 struct dvbt_get_status_msg status;
216 char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
217 int ret;
218 207
219 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, 208 *strength = (0xffff - le16_to_cpu(state->status.gain));
220 sizeof(status), 0);
221 if (ret < 0) {
222 err("cinergyt2_fe_read_signal_strength() Failed!"
223 " (Error=%d)\n", ret);
224 return ret;
225 }
226 *strength = (0xffff - le16_to_cpu(status.gain));
227 return 0; 209 return 0;
228} 210}
229 211
230static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr) 212static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
231{ 213{
232 struct cinergyt2_fe_state *state = fe->demodulator_priv; 214 struct cinergyt2_fe_state *state = fe->demodulator_priv;
233 struct dvbt_get_status_msg status;
234 char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
235 int ret;
236 215
237 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, 216 *snr = (state->status.snr << 8) | state->status.snr;
238 sizeof(status), 0);
239 if (ret < 0) {
240 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
241 return ret;
242 }
243 *snr = (status.snr << 8) | status.snr;
244 return 0; 217 return 0;
245} 218}
246 219
@@ -266,34 +239,36 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
266{ 239{
267 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; 240 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
268 struct cinergyt2_fe_state *state = fe->demodulator_priv; 241 struct cinergyt2_fe_state *state = fe->demodulator_priv;
269 struct dvbt_set_parameters_msg param; 242 struct dvbt_set_parameters_msg *param;
270 char result[2];
271 int err; 243 int err;
272 244
273 param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; 245 mutex_lock(&state->data_mutex);
274 param.tps = cpu_to_le16(compute_tps(fep)); 246
275 param.freq = cpu_to_le32(fep->frequency / 1000); 247 param = (void *)state->data;
276 param.flags = 0; 248 param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
249 param->tps = cpu_to_le16(compute_tps(fep));
250 param->freq = cpu_to_le32(fep->frequency / 1000);
251 param->flags = 0;
277 252
278 switch (fep->bandwidth_hz) { 253 switch (fep->bandwidth_hz) {
279 default: 254 default:
280 case 8000000: 255 case 8000000:
281 param.bandwidth = 8; 256 param->bandwidth = 8;
282 break; 257 break;
283 case 7000000: 258 case 7000000:
284 param.bandwidth = 7; 259 param->bandwidth = 7;
285 break; 260 break;
286 case 6000000: 261 case 6000000:
287 param.bandwidth = 6; 262 param->bandwidth = 6;
288 break; 263 break;
289 } 264 }
290 265
291 err = dvb_usb_generic_rw(state->d, 266 err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param),
292 (char *)&param, sizeof(param), 267 state->data, 2, 0);
293 result, sizeof(result), 0);
294 if (err < 0) 268 if (err < 0)
295 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err); 269 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
296 270
271 mutex_unlock(&state->data_mutex);
297 return (err < 0) ? err : 0; 272 return (err < 0) ? err : 0;
298} 273}
299 274
@@ -315,6 +290,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
315 s->d = d; 290 s->d = d;
316 memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops)); 291 memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops));
317 s->fe.demodulator_priv = s; 292 s->fe.demodulator_priv = s;
293 mutex_init(&s->data_mutex);
318 return &s->fe; 294 return &s->fe;
319} 295}
320 296
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 907ac01ae297..243403081fa5 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -45,9 +45,6 @@
45#include "si2168.h" 45#include "si2168.h"
46#include "si2157.h" 46#include "si2157.h"
47 47
48/* Max transfer size done by I2C transfer functions */
49#define MAX_XFER_SIZE 80
50
51/* debug */ 48/* debug */
52static int dvb_usb_cxusb_debug; 49static int dvb_usb_cxusb_debug;
53module_param_named(debug, dvb_usb_cxusb_debug, int, 0644); 50module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
@@ -61,23 +58,27 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
61static int cxusb_ctrl_msg(struct dvb_usb_device *d, 58static int cxusb_ctrl_msg(struct dvb_usb_device *d,
62 u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) 59 u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
63{ 60{
64 int wo = (rbuf == NULL || rlen == 0); /* write-only */ 61 struct cxusb_state *st = d->priv;
65 u8 sndbuf[MAX_XFER_SIZE]; 62 int ret, wo;
66 63
67 if (1 + wlen > sizeof(sndbuf)) { 64 if (1 + wlen > MAX_XFER_SIZE) {
68 warn("i2c wr: len=%d is too big!\n", 65 warn("i2c wr: len=%d is too big!\n", wlen);
69 wlen);
70 return -EOPNOTSUPP; 66 return -EOPNOTSUPP;
71 } 67 }
72 68
73 memset(sndbuf, 0, 1+wlen); 69 wo = (rbuf == NULL || rlen == 0); /* write-only */
74 70
75 sndbuf[0] = cmd; 71 mutex_lock(&d->data_mutex);
76 memcpy(&sndbuf[1], wbuf, wlen); 72 st->data[0] = cmd;
73 memcpy(&st->data[1], wbuf, wlen);
77 if (wo) 74 if (wo)
78 return dvb_usb_generic_write(d, sndbuf, 1+wlen); 75 ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
79 else 76 else
80 return dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0); 77 ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
78 rbuf, rlen, 0);
79
80 mutex_unlock(&d->data_mutex);
81 return ret;
81} 82}
82 83
83/* GPIO */ 84/* GPIO */
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 527ff7905e15..18acda19527a 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,10 +28,15 @@
28#define CMD_ANALOG 0x50 28#define CMD_ANALOG 0x50
29#define CMD_DIGITAL 0x51 29#define CMD_DIGITAL 0x51
30 30
31/* Max transfer size done by I2C transfer functions */
32#define MAX_XFER_SIZE 80
33
31struct cxusb_state { 34struct cxusb_state {
32 u8 gpio_write_state[3]; 35 u8 gpio_write_state[3];
33 struct i2c_client *i2c_client_demod; 36 struct i2c_client *i2c_client_demod;
34 struct i2c_client *i2c_client_tuner; 37 struct i2c_client *i2c_client_tuner;
38
39 unsigned char data[MAX_XFER_SIZE];
35}; 40};
36 41
37#endif 42#endif
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index f3196658fb70..47ce9d5de4c6 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -213,7 +213,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
213 usb_rcvctrlpipe(d->udev, 0), 213 usb_rcvctrlpipe(d->udev, 0),
214 REQUEST_NEW_I2C_READ, 214 REQUEST_NEW_I2C_READ,
215 USB_TYPE_VENDOR | USB_DIR_IN, 215 USB_TYPE_VENDOR | USB_DIR_IN,
216 value, index, msg[i].buf, 216 value, index, st->buf,
217 msg[i].len, 217 msg[i].len,
218 USB_CTRL_GET_TIMEOUT); 218 USB_CTRL_GET_TIMEOUT);
219 if (result < 0) { 219 if (result < 0) {
@@ -221,6 +221,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
221 break; 221 break;
222 } 222 }
223 223
224 if (msg[i].len > sizeof(st->buf)) {
225 deb_info("buffer too small to fit %d bytes\n",
226 msg[i].len);
227 return -EIO;
228 }
229
230 memcpy(msg[i].buf, st->buf, msg[i].len);
231
224 deb_data("<<< "); 232 deb_data("<<< ");
225 debug_dump(msg[i].buf, msg[i].len, deb_data); 233 debug_dump(msg[i].buf, msg[i].len, deb_data);
226 234
@@ -238,6 +246,13 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
238 /* I2C ctrl + FE bus; */ 246 /* I2C ctrl + FE bus; */
239 st->buf[3] = ((gen_mode << 6) & 0xC0) | 247 st->buf[3] = ((gen_mode << 6) & 0xC0) |
240 ((bus_mode << 4) & 0x30); 248 ((bus_mode << 4) & 0x30);
249
250 if (msg[i].len > sizeof(st->buf) - 4) {
251 deb_info("i2c message to big: %d\n",
252 msg[i].len);
253 return -EIO;
254 }
255
241 /* The Actual i2c payload */ 256 /* The Actual i2c payload */
242 memcpy(&st->buf[4], msg[i].buf, msg[i].len); 257 memcpy(&st->buf[4], msg[i].buf, msg[i].len);
243 258
@@ -283,6 +298,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
283 /* fill in the address */ 298 /* fill in the address */
284 st->buf[1] = msg[i].addr << 1; 299 st->buf[1] = msg[i].addr << 1;
285 /* fill the buffer */ 300 /* fill the buffer */
301 if (msg[i].len > sizeof(st->buf) - 2) {
302 deb_info("i2c xfer to big: %d\n",
303 msg[i].len);
304 return -EIO;
305 }
286 memcpy(&st->buf[2], msg[i].buf, msg[i].len); 306 memcpy(&st->buf[2], msg[i].buf, msg[i].len);
287 307
288 /* write/read request */ 308 /* write/read request */
@@ -292,13 +312,20 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
292 312
293 /* special thing in the current firmware: when length is zero the read-failed */ 313 /* special thing in the current firmware: when length is zero the read-failed */
294 len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2, 314 len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2,
295 msg[i+1].buf, msg[i+1].len); 315 st->buf, msg[i + 1].len);
296 if (len <= 0) { 316 if (len <= 0) {
297 deb_info("I2C read failed on address 0x%02x\n", 317 deb_info("I2C read failed on address 0x%02x\n",
298 msg[i].addr); 318 msg[i].addr);
299 break; 319 break;
300 } 320 }
301 321
322 if (msg[i + 1].len > sizeof(st->buf)) {
323 deb_info("i2c xfer buffer to small for %d\n",
324 msg[i].len);
325 return -EIO;
326 }
327 memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len);
328
302 msg[i+1].len = len; 329 msg[i+1].len = len;
303 330
304 i++; 331 i++;
@@ -677,7 +704,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
677 struct dvb_usb_device *d = purb->context; 704 struct dvb_usb_device *d = purb->context;
678 struct dib0700_rc_response *poll_reply; 705 struct dib0700_rc_response *poll_reply;
679 enum rc_type protocol; 706 enum rc_type protocol;
680 u32 uninitialized_var(keycode); 707 u32 keycode;
681 u8 toggle; 708 u8 toggle;
682 709
683 deb_info("%s()\n", __func__); 710 deb_info("%s()\n", __func__);
@@ -718,7 +745,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
718 poll_reply->nec.data == 0x00 && 745 poll_reply->nec.data == 0x00 &&
719 poll_reply->nec.not_data == 0xff) { 746 poll_reply->nec.not_data == 0xff) {
720 poll_reply->data_state = 2; 747 poll_reply->data_state = 2;
721 break; 748 rc_repeat(d->rc_dev);
749 goto resubmit;
722 } 750 }
723 751
724 if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) { 752 if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 0857b56e652c..ef1b8ee75c57 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -508,8 +508,6 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap)
508 508
509#define DEFAULT_RC_INTERVAL 50 509#define DEFAULT_RC_INTERVAL 50
510 510
511static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
512
513/* 511/*
514 * This function is used only when firmware is < 1.20 version. Newer 512 * This function is used only when firmware is < 1.20 version. Newer
515 * firmwares use bulk mode, with functions implemented at dib0700_core, 513 * firmwares use bulk mode, with functions implemented at dib0700_core,
@@ -517,7 +515,6 @@ static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
517 */ 515 */
518static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) 516static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
519{ 517{
520 u8 key[4];
521 enum rc_type protocol; 518 enum rc_type protocol;
522 u32 scancode; 519 u32 scancode;
523 u8 toggle; 520 u8 toggle;
@@ -532,39 +529,43 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
532 return 0; 529 return 0;
533 } 530 }
534 531
535 i = dib0700_ctrl_rd(d, rc_request, 2, key, 4); 532 st->buf[0] = REQUEST_POLL_RC;
533 st->buf[1] = 0;
534
535 i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4);
536 if (i <= 0) { 536 if (i <= 0) {
537 err("RC Query Failed"); 537 err("RC Query Failed");
538 return -1; 538 return -EIO;
539 } 539 }
540 540
541 /* losing half of KEY_0 events from Philipps rc5 remotes.. */ 541 /* losing half of KEY_0 events from Philipps rc5 remotes.. */
542 if (key[0] == 0 && key[1] == 0 && key[2] == 0 && key[3] == 0) 542 if (st->buf[0] == 0 && st->buf[1] == 0
543 && st->buf[2] == 0 && st->buf[3] == 0)
543 return 0; 544 return 0;
544 545
545 /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)key[3-2],(int)key[3-3],(int)key[3-1],(int)key[3]); */ 546 /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */
546 547
547 dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */ 548 dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */
548 549
549 switch (d->props.rc.core.protocol) { 550 switch (d->props.rc.core.protocol) {
550 case RC_BIT_NEC: 551 case RC_BIT_NEC:
551 /* NEC protocol sends repeat code as 0 0 0 FF */ 552 /* NEC protocol sends repeat code as 0 0 0 FF */
552 if ((key[3-2] == 0x00) && (key[3-3] == 0x00) && 553 if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) &&
553 (key[3] == 0xff)) { 554 (st->buf[3] == 0xff)) {
554 rc_repeat(d->rc_dev); 555 rc_repeat(d->rc_dev);
555 return 0; 556 return 0;
556 } 557 }
557 558
558 protocol = RC_TYPE_NEC; 559 protocol = RC_TYPE_NEC;
559 scancode = RC_SCANCODE_NEC(key[3-2], key[3-3]); 560 scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]);
560 toggle = 0; 561 toggle = 0;
561 break; 562 break;
562 563
563 default: 564 default:
564 /* RC-5 protocol changes toggle bit on new keypress */ 565 /* RC-5 protocol changes toggle bit on new keypress */
565 protocol = RC_TYPE_RC5; 566 protocol = RC_TYPE_RC5;
566 scancode = RC_SCANCODE_RC5(key[3-2], key[3-3]); 567 scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]);
567 toggle = key[3-1]; 568 toggle = st->buf[3 - 1];
568 break; 569 break;
569 } 570 }
570 571
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 18ed3bfbb5e2..de3ee2547479 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -62,72 +62,117 @@ EXPORT_SYMBOL(dibusb_pid_filter_ctrl);
62 62
63int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff) 63int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff)
64{ 64{
65 u8 b[3]; 65 u8 *b;
66 int ret; 66 int ret;
67
68 b = kmalloc(3, GFP_KERNEL);
69 if (!b)
70 return -ENOMEM;
71
67 b[0] = DIBUSB_REQ_SET_IOCTL; 72 b[0] = DIBUSB_REQ_SET_IOCTL;
68 b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; 73 b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
69 b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP; 74 b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP;
70 ret = dvb_usb_generic_write(d,b,3); 75
76 ret = dvb_usb_generic_write(d, b, 3);
77
78 kfree(b);
79
71 msleep(10); 80 msleep(10);
81
72 return ret; 82 return ret;
73} 83}
74EXPORT_SYMBOL(dibusb_power_ctrl); 84EXPORT_SYMBOL(dibusb_power_ctrl);
75 85
76int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) 86int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
77{ 87{
78 u8 b[3] = { 0 };
79 int ret; 88 int ret;
89 u8 *b;
90
91 b = kmalloc(3, GFP_KERNEL);
92 if (!b)
93 return -ENOMEM;
80 94
81 if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0) 95 if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0)
82 return ret; 96 goto ret;
83 97
84 if (onoff) { 98 if (onoff) {
85 b[0] = DIBUSB_REQ_SET_STREAMING_MODE; 99 b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
86 b[1] = 0x00; 100 b[1] = 0x00;
87 if ((ret = dvb_usb_generic_write(adap->dev,b,2)) < 0) 101 ret = dvb_usb_generic_write(adap->dev, b, 2);
88 return ret; 102 if (ret < 0)
103 goto ret;
89 } 104 }
90 105
91 b[0] = DIBUSB_REQ_SET_IOCTL; 106 b[0] = DIBUSB_REQ_SET_IOCTL;
92 b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM; 107 b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
93 return dvb_usb_generic_write(adap->dev,b,3); 108 ret = dvb_usb_generic_write(adap->dev, b, 3);
109
110ret:
111 kfree(b);
112 return ret;
94} 113}
95EXPORT_SYMBOL(dibusb2_0_streaming_ctrl); 114EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
96 115
97int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff) 116int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff)
98{ 117{
99 if (onoff) { 118 u8 *b;
100 u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE, DIBUSB_IOCTL_POWER_WAKEUP }; 119 int ret;
101 return dvb_usb_generic_write(d,b,3); 120
102 } else 121 if (!onoff)
103 return 0; 122 return 0;
123
124 b = kmalloc(3, GFP_KERNEL);
125 if (!b)
126 return -ENOMEM;
127
128 b[0] = DIBUSB_REQ_SET_IOCTL;
129 b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
130 b[2] = DIBUSB_IOCTL_POWER_WAKEUP;
131
132 ret = dvb_usb_generic_write(d, b, 3);
133
134 kfree(b);
135
136 return ret;
104} 137}
105EXPORT_SYMBOL(dibusb2_0_power_ctrl); 138EXPORT_SYMBOL(dibusb2_0_power_ctrl);
106 139
107static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, 140static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
108 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) 141 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
109{ 142{
110 u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */ 143 u8 *sndbuf;
144 int ret, wo, len;
145
111 /* write only ? */ 146 /* write only ? */
112 int wo = (rbuf == NULL || rlen == 0), 147 wo = (rbuf == NULL || rlen == 0);
113 len = 2 + wlen + (wo ? 0 : 2); 148
149 len = 2 + wlen + (wo ? 0 : 2);
150
151 sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL);
152 if (!sndbuf)
153 return -ENOMEM;
114 154
115 if (4 + wlen > sizeof(sndbuf)) { 155 if (4 + wlen > MAX_XFER_SIZE) {
116 warn("i2c wr: len=%d is too big!\n", wlen); 156 warn("i2c wr: len=%d is too big!\n", wlen);
117 return -EOPNOTSUPP; 157 ret = -EOPNOTSUPP;
158 goto ret;
118 } 159 }
119 160
120 sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; 161 sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
121 sndbuf[1] = (addr << 1) | (wo ? 0 : 1); 162 sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
122 163
123 memcpy(&sndbuf[2],wbuf,wlen); 164 memcpy(&sndbuf[2], wbuf, wlen);
124 165
125 if (!wo) { 166 if (!wo) {
126 sndbuf[wlen+2] = (rlen >> 8) & 0xff; 167 sndbuf[wlen + 2] = (rlen >> 8) & 0xff;
127 sndbuf[wlen+3] = rlen & 0xff; 168 sndbuf[wlen + 3] = rlen & 0xff;
128 } 169 }
129 170
130 return dvb_usb_generic_rw(d,sndbuf,len,rbuf,rlen,0); 171 ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0);
172
173ret:
174 kfree(sndbuf);
175 return ret;
131} 176}
132 177
133/* 178/*
@@ -319,11 +364,27 @@ EXPORT_SYMBOL(rc_map_dibusb_table);
319 364
320int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 365int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
321{ 366{
322 u8 key[5],cmd = DIBUSB_REQ_POLL_REMOTE; 367 u8 *buf;
323 dvb_usb_generic_rw(d,&cmd,1,key,5,0); 368 int ret;
324 dvb_usb_nec_rc_key_to_event(d,key,event,state); 369
325 if (key[0] != 0) 370 buf = kmalloc(5, GFP_KERNEL);
326 deb_info("key: %*ph\n", 5, key); 371 if (!buf)
327 return 0; 372 return -ENOMEM;
373
374 buf[0] = DIBUSB_REQ_POLL_REMOTE;
375
376 ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0);
377 if (ret < 0)
378 goto ret;
379
380 dvb_usb_nec_rc_key_to_event(d, buf, event, state);
381
382 if (buf[0] != 0)
383 deb_info("key: %*ph\n", 5, buf);
384
385 kfree(buf);
386
387ret:
388 return ret;
328} 389}
329EXPORT_SYMBOL(dibusb_rc_query); 390EXPORT_SYMBOL(dibusb_rc_query);
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h
index 3f82163d8ab8..697be2a17ade 100644
--- a/drivers/media/usb/dvb-usb/dibusb.h
+++ b/drivers/media/usb/dvb-usb/dibusb.h
@@ -96,6 +96,9 @@
96#define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01 96#define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01
97#define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02 97#define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
98 98
99/* Max transfer size done by I2C transfer functions */
100#define MAX_XFER_SIZE 64
101
99struct dibusb_state { 102struct dibusb_state {
100 struct dib_fe_xfer_ops ops; 103 struct dib_fe_xfer_ops ops;
101 int mt2060_present; 104 int mt2060_present;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 63134335c994..4284f6984dc1 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -28,22 +28,26 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
28static int digitv_ctrl_msg(struct dvb_usb_device *d, 28static int digitv_ctrl_msg(struct dvb_usb_device *d,
29 u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen) 29 u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
30{ 30{
31 int wo = (rbuf == NULL || rlen == 0); /* write-only */ 31 struct digitv_state *st = d->priv;
32 u8 sndbuf[7],rcvbuf[7]; 32 int ret, wo;
33 memset(sndbuf,0,7); memset(rcvbuf,0,7);
34 33
35 sndbuf[0] = cmd; 34 wo = (rbuf == NULL || rlen == 0); /* write-only */
36 sndbuf[1] = vv; 35
37 sndbuf[2] = wo ? wlen : rlen; 36 memset(st->sndbuf, 0, 7);
37 memset(st->rcvbuf, 0, 7);
38
39 st->sndbuf[0] = cmd;
40 st->sndbuf[1] = vv;
41 st->sndbuf[2] = wo ? wlen : rlen;
38 42
39 if (wo) { 43 if (wo) {
40 memcpy(&sndbuf[3],wbuf,wlen); 44 memcpy(&st->sndbuf[3], wbuf, wlen);
41 dvb_usb_generic_write(d,sndbuf,7); 45 ret = dvb_usb_generic_write(d, st->sndbuf, 7);
42 } else { 46 } else {
43 dvb_usb_generic_rw(d,sndbuf,7,rcvbuf,7,10); 47 ret = dvb_usb_generic_rw(d, st->sndbuf, 7, st->rcvbuf, 7, 10);
44 memcpy(rbuf,&rcvbuf[3],rlen); 48 memcpy(rbuf, &st->rcvbuf[3], rlen);
45 } 49 }
46 return 0; 50 return ret;
47} 51}
48 52
49/* I2C */ 53/* I2C */
diff --git a/drivers/media/usb/dvb-usb/digitv.h b/drivers/media/usb/dvb-usb/digitv.h
index 908c09f4966b..581e09c25491 100644
--- a/drivers/media/usb/dvb-usb/digitv.h
+++ b/drivers/media/usb/dvb-usb/digitv.h
@@ -5,7 +5,10 @@
5#include "dvb-usb.h" 5#include "dvb-usb.h"
6 6
7struct digitv_state { 7struct digitv_state {
8 int is_nxt6000; 8 int is_nxt6000;
9
10 unsigned char sndbuf[7];
11 unsigned char rcvbuf[7];
9}; 12};
10 13
11/* protocol (from usblogging and the SDK: 14/* protocol (from usblogging and the SDK:
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index c09332bd99cb..f5c042baa254 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -18,17 +18,28 @@ struct dtt200u_fe_state {
18 18
19 struct dtv_frontend_properties fep; 19 struct dtv_frontend_properties fep;
20 struct dvb_frontend frontend; 20 struct dvb_frontend frontend;
21
22 unsigned char data[80];
23 struct mutex data_mutex;
21}; 24};
22 25
23static int dtt200u_fe_read_status(struct dvb_frontend *fe, 26static int dtt200u_fe_read_status(struct dvb_frontend *fe,
24 enum fe_status *stat) 27 enum fe_status *stat)
25{ 28{
26 struct dtt200u_fe_state *state = fe->demodulator_priv; 29 struct dtt200u_fe_state *state = fe->demodulator_priv;
27 u8 st = GET_TUNE_STATUS, b[3]; 30 int ret;
31
32 mutex_lock(&state->data_mutex);
33 state->data[0] = GET_TUNE_STATUS;
28 34
29 dvb_usb_generic_rw(state->d,&st,1,b,3,0); 35 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
36 if (ret < 0) {
37 *stat = 0;
38 mutex_unlock(&state->data_mutex);
39 return ret;
40 }
30 41
31 switch (b[0]) { 42 switch (state->data[0]) {
32 case 0x01: 43 case 0x01:
33 *stat = FE_HAS_SIGNAL | FE_HAS_CARRIER | 44 *stat = FE_HAS_SIGNAL | FE_HAS_CARRIER |
34 FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; 45 FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
@@ -41,51 +52,86 @@ static int dtt200u_fe_read_status(struct dvb_frontend *fe,
41 *stat = 0; 52 *stat = 0;
42 break; 53 break;
43 } 54 }
55 mutex_unlock(&state->data_mutex);
44 return 0; 56 return 0;
45} 57}
46 58
47static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber) 59static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber)
48{ 60{
49 struct dtt200u_fe_state *state = fe->demodulator_priv; 61 struct dtt200u_fe_state *state = fe->demodulator_priv;
50 u8 bw = GET_VIT_ERR_CNT,b[3]; 62 int ret;
51 dvb_usb_generic_rw(state->d,&bw,1,b,3,0); 63
52 *ber = (b[0] << 16) | (b[1] << 8) | b[2]; 64 mutex_lock(&state->data_mutex);
53 return 0; 65 state->data[0] = GET_VIT_ERR_CNT;
66
67 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
68 if (ret >= 0)
69 *ber = (state->data[0] << 16) | (state->data[1] << 8) | state->data[2];
70
71 mutex_unlock(&state->data_mutex);
72 return ret;
54} 73}
55 74
56static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc) 75static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc)
57{ 76{
58 struct dtt200u_fe_state *state = fe->demodulator_priv; 77 struct dtt200u_fe_state *state = fe->demodulator_priv;
59 u8 bw = GET_RS_UNCOR_BLK_CNT,b[2]; 78 int ret;
60 79
61 dvb_usb_generic_rw(state->d,&bw,1,b,2,0); 80 mutex_lock(&state->data_mutex);
62 *unc = (b[0] << 8) | b[1]; 81 state->data[0] = GET_RS_UNCOR_BLK_CNT;
63 return 0; 82
83 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 2, 0);
84 if (ret >= 0)
85 *unc = (state->data[0] << 8) | state->data[1];
86
87 mutex_unlock(&state->data_mutex);
88 return ret;
64} 89}
65 90
66static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength) 91static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength)
67{ 92{
68 struct dtt200u_fe_state *state = fe->demodulator_priv; 93 struct dtt200u_fe_state *state = fe->demodulator_priv;
69 u8 bw = GET_AGC, b; 94 int ret;
70 dvb_usb_generic_rw(state->d,&bw,1,&b,1,0); 95
71 *strength = (b << 8) | b; 96 mutex_lock(&state->data_mutex);
72 return 0; 97 state->data[0] = GET_AGC;
98
99 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
100 if (ret >= 0)
101 *strength = (state->data[0] << 8) | state->data[0];
102
103 mutex_unlock(&state->data_mutex);
104 return ret;
73} 105}
74 106
75static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr) 107static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr)
76{ 108{
77 struct dtt200u_fe_state *state = fe->demodulator_priv; 109 struct dtt200u_fe_state *state = fe->demodulator_priv;
78 u8 bw = GET_SNR,br; 110 int ret;
79 dvb_usb_generic_rw(state->d,&bw,1,&br,1,0); 111
80 *snr = ~((br << 8) | br); 112 mutex_lock(&state->data_mutex);
81 return 0; 113 state->data[0] = GET_SNR;
114
115 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
116 if (ret >= 0)
117 *snr = ~((state->data[0] << 8) | state->data[0]);
118
119 mutex_unlock(&state->data_mutex);
120 return ret;
82} 121}
83 122
84static int dtt200u_fe_init(struct dvb_frontend* fe) 123static int dtt200u_fe_init(struct dvb_frontend* fe)
85{ 124{
86 struct dtt200u_fe_state *state = fe->demodulator_priv; 125 struct dtt200u_fe_state *state = fe->demodulator_priv;
87 u8 b = SET_INIT; 126 int ret;
88 return dvb_usb_generic_write(state->d,&b,1); 127
128 mutex_lock(&state->data_mutex);
129 state->data[0] = SET_INIT;
130
131 ret = dvb_usb_generic_write(state->d, state->data, 1);
132 mutex_unlock(&state->data_mutex);
133
134 return ret;
89} 135}
90 136
91static int dtt200u_fe_sleep(struct dvb_frontend* fe) 137static int dtt200u_fe_sleep(struct dvb_frontend* fe)
@@ -105,39 +151,40 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe)
105{ 151{
106 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; 152 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
107 struct dtt200u_fe_state *state = fe->demodulator_priv; 153 struct dtt200u_fe_state *state = fe->demodulator_priv;
108 int i; 154 int ret;
109 enum fe_status st;
110 u16 freq = fep->frequency / 250000; 155 u16 freq = fep->frequency / 250000;
111 u8 bwbuf[2] = { SET_BANDWIDTH, 0 },freqbuf[3] = { SET_RF_FREQ, 0, 0 };
112 156
157 mutex_lock(&state->data_mutex);
158 state->data[0] = SET_BANDWIDTH;
113 switch (fep->bandwidth_hz) { 159 switch (fep->bandwidth_hz) {
114 case 8000000: 160 case 8000000:
115 bwbuf[1] = 8; 161 state->data[1] = 8;
116 break; 162 break;
117 case 7000000: 163 case 7000000:
118 bwbuf[1] = 7; 164 state->data[1] = 7;
119 break; 165 break;
120 case 6000000: 166 case 6000000:
121 bwbuf[1] = 6; 167 state->data[1] = 6;
122 break; 168 break;
123 default: 169 default:
124 return -EINVAL; 170 ret = -EINVAL;
171 goto ret;
125 } 172 }
126 173
127 dvb_usb_generic_write(state->d,bwbuf,2); 174 ret = dvb_usb_generic_write(state->d, state->data, 2);
175 if (ret < 0)
176 goto ret;
128 177
129 freqbuf[1] = freq & 0xff; 178 state->data[0] = SET_RF_FREQ;
130 freqbuf[2] = (freq >> 8) & 0xff; 179 state->data[1] = freq & 0xff;
131 dvb_usb_generic_write(state->d,freqbuf,3); 180 state->data[2] = (freq >> 8) & 0xff;
181 ret = dvb_usb_generic_write(state->d, state->data, 3);
182 if (ret < 0)
183 goto ret;
132 184
133 for (i = 0; i < 30; i++) { 185ret:
134 msleep(20); 186 mutex_unlock(&state->data_mutex);
135 dtt200u_fe_read_status(fe, &st); 187 return ret;
136 if (st & FE_TIMEDOUT)
137 continue;
138 }
139
140 return 0;
141} 188}
142 189
143static int dtt200u_fe_get_frontend(struct dvb_frontend* fe, 190static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
@@ -169,6 +216,7 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
169 deb_info("attaching frontend dtt200u\n"); 216 deb_info("attaching frontend dtt200u\n");
170 217
171 state->d = d; 218 state->d = d;
219 mutex_init(&state->data_mutex);
172 220
173 memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops)); 221 memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops));
174 state->frontend.demodulator_priv = state; 222 state->frontend.demodulator_priv = state;
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index d2a01b50af0d..fcbff7fb0c4e 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -20,75 +20,115 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2 (or-able))." DVB_USB
20 20
21DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 21DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
22 22
23struct dtt200u_state {
24 unsigned char data[80];
25};
26
23static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) 27static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff)
24{ 28{
25 u8 b = SET_INIT; 29 struct dtt200u_state *st = d->priv;
30 int ret = 0;
31
32 mutex_lock(&d->data_mutex);
33
34 st->data[0] = SET_INIT;
26 35
27 if (onoff) 36 if (onoff)
28 dvb_usb_generic_write(d,&b,2); 37 ret = dvb_usb_generic_write(d, st->data, 2);
29 38
30 return 0; 39 mutex_unlock(&d->data_mutex);
40 return ret;
31} 41}
32 42
33static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) 43static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
34{ 44{
35 u8 b_streaming[2] = { SET_STREAMING, onoff }; 45 struct dvb_usb_device *d = adap->dev;
36 u8 b_rst_pid = RESET_PID_FILTER; 46 struct dtt200u_state *st = d->priv;
47 int ret;
37 48
38 dvb_usb_generic_write(adap->dev, b_streaming, 2); 49 mutex_lock(&d->data_mutex);
50 st->data[0] = SET_STREAMING;
51 st->data[1] = onoff;
39 52
40 if (onoff == 0) 53 ret = dvb_usb_generic_write(adap->dev, st->data, 2);
41 dvb_usb_generic_write(adap->dev, &b_rst_pid, 1); 54 if (ret < 0)
42 return 0; 55 goto ret;
56
57 if (onoff)
58 goto ret;
59
60 st->data[0] = RESET_PID_FILTER;
61 ret = dvb_usb_generic_write(adap->dev, st->data, 1);
62
63ret:
64 mutex_unlock(&d->data_mutex);
65
66 return ret;
43} 67}
44 68
45static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) 69static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
46{ 70{
47 u8 b_pid[4]; 71 struct dvb_usb_device *d = adap->dev;
72 struct dtt200u_state *st = d->priv;
73 int ret;
74
48 pid = onoff ? pid : 0; 75 pid = onoff ? pid : 0;
49 76
50 b_pid[0] = SET_PID_FILTER; 77 mutex_lock(&d->data_mutex);
51 b_pid[1] = index; 78 st->data[0] = SET_PID_FILTER;
52 b_pid[2] = pid & 0xff; 79 st->data[1] = index;
53 b_pid[3] = (pid >> 8) & 0x1f; 80 st->data[2] = pid & 0xff;
81 st->data[3] = (pid >> 8) & 0x1f;
82
83 ret = dvb_usb_generic_write(adap->dev, st->data, 4);
84 mutex_unlock(&d->data_mutex);
54 85
55 return dvb_usb_generic_write(adap->dev, b_pid, 4); 86 return ret;
56} 87}
57 88
58static int dtt200u_rc_query(struct dvb_usb_device *d) 89static int dtt200u_rc_query(struct dvb_usb_device *d)
59{ 90{
60 u8 key[5],cmd = GET_RC_CODE; 91 struct dtt200u_state *st = d->priv;
61 u32 scancode; 92 u32 scancode;
93 int ret;
94
95 mutex_lock(&d->data_mutex);
96 st->data[0] = GET_RC_CODE;
62 97
63 dvb_usb_generic_rw(d,&cmd,1,key,5,0); 98 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
64 if (key[0] == 1) { 99 if (ret < 0)
100 goto ret;
101
102 if (st->data[0] == 1) {
65 enum rc_type proto = RC_TYPE_NEC; 103 enum rc_type proto = RC_TYPE_NEC;
66 104
67 scancode = key[1]; 105 scancode = st->data[1];
68 if ((u8) ~key[1] != key[2]) { 106 if ((u8) ~st->data[1] != st->data[2]) {
69 /* Extended NEC */ 107 /* Extended NEC */
70 scancode = scancode << 8; 108 scancode = scancode << 8;
71 scancode |= key[2]; 109 scancode |= st->data[2];
72 proto = RC_TYPE_NECX; 110 proto = RC_TYPE_NECX;
73 } 111 }
74 scancode = scancode << 8; 112 scancode = scancode << 8;
75 scancode |= key[3]; 113 scancode |= st->data[3];
76 114
77 /* Check command checksum is ok */ 115 /* Check command checksum is ok */
78 if ((u8) ~key[3] == key[4]) 116 if ((u8) ~st->data[3] == st->data[4])
79 rc_keydown(d->rc_dev, proto, scancode, 0); 117 rc_keydown(d->rc_dev, proto, scancode, 0);
80 else 118 else
81 rc_keyup(d->rc_dev); 119 rc_keyup(d->rc_dev);
82 } else if (key[0] == 2) { 120 } else if (st->data[0] == 2) {
83 rc_repeat(d->rc_dev); 121 rc_repeat(d->rc_dev);
84 } else { 122 } else {
85 rc_keyup(d->rc_dev); 123 rc_keyup(d->rc_dev);
86 } 124 }
87 125
88 if (key[0] != 0) 126 if (st->data[0] != 0)
89 deb_info("key: %*ph\n", 5, key); 127 deb_info("st->data: %*ph\n", 5, st->data);
90 128
91 return 0; 129ret:
130 mutex_unlock(&d->data_mutex);
131 return ret;
92} 132}
93 133
94static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap) 134static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap)
@@ -140,6 +180,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
140 .usb_ctrl = CYPRESS_FX2, 180 .usb_ctrl = CYPRESS_FX2,
141 .firmware = "dvb-usb-dtt200u-01.fw", 181 .firmware = "dvb-usb-dtt200u-01.fw",
142 182
183 .size_of_priv = sizeof(struct dtt200u_state),
184
143 .num_adapters = 1, 185 .num_adapters = 1,
144 .adapter = { 186 .adapter = {
145 { 187 {
@@ -190,6 +232,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
190 .usb_ctrl = CYPRESS_FX2, 232 .usb_ctrl = CYPRESS_FX2,
191 .firmware = "dvb-usb-wt220u-02.fw", 233 .firmware = "dvb-usb-wt220u-02.fw",
192 234
235 .size_of_priv = sizeof(struct dtt200u_state),
236
193 .num_adapters = 1, 237 .num_adapters = 1,
194 .adapter = { 238 .adapter = {
195 { 239 {
@@ -240,6 +284,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
240 .usb_ctrl = CYPRESS_FX2, 284 .usb_ctrl = CYPRESS_FX2,
241 .firmware = "dvb-usb-wt220u-fc03.fw", 285 .firmware = "dvb-usb-wt220u-fc03.fw",
242 286
287 .size_of_priv = sizeof(struct dtt200u_state),
288
243 .num_adapters = 1, 289 .num_adapters = 1,
244 .adapter = { 290 .adapter = {
245 { 291 {
@@ -290,6 +336,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
290 .usb_ctrl = CYPRESS_FX2, 336 .usb_ctrl = CYPRESS_FX2,
291 .firmware = "dvb-usb-wt220u-zl0353-01.fw", 337 .firmware = "dvb-usb-wt220u-zl0353-01.fw",
292 338
339 .size_of_priv = sizeof(struct dtt200u_state),
340
293 .num_adapters = 1, 341 .num_adapters = 1,
294 .adapter = { 342 .adapter = {
295 { 343 {
@@ -340,6 +388,8 @@ static struct dvb_usb_device_properties wt220u_miglia_properties = {
340 .usb_ctrl = CYPRESS_FX2, 388 .usb_ctrl = CYPRESS_FX2,
341 .firmware = "dvb-usb-wt220u-miglia-01.fw", 389 .firmware = "dvb-usb-wt220u-miglia-01.fw",
342 390
391 .size_of_priv = sizeof(struct dtt200u_state),
392
343 .num_adapters = 1, 393 .num_adapters = 1,
344 .generic_bulk_ctrl_endpoint = 0x01, 394 .generic_bulk_ctrl_endpoint = 0x01,
345 395
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index 3d11df41cac0..c60fb54f445f 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -31,9 +31,14 @@ module_param_named(debug, dvb_usb_dtv5100_debug, int, 0644);
31MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS); 31MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
32DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 32DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
33 33
34struct dtv5100_state {
35 unsigned char data[80];
36};
37
34static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, 38static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
35 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) 39 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
36{ 40{
41 struct dtv5100_state *st = d->priv;
37 u8 request; 42 u8 request;
38 u8 type; 43 u8 type;
39 u16 value; 44 u16 value;
@@ -60,9 +65,10 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
60 } 65 }
61 index = (addr << 8) + wbuf[0]; 66 index = (addr << 8) + wbuf[0];
62 67
68 memcpy(st->data, rbuf, rlen);
63 msleep(1); /* avoid I2C errors */ 69 msleep(1); /* avoid I2C errors */
64 return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request, 70 return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
65 type, value, index, rbuf, rlen, 71 type, value, index, st->data, rlen,
66 DTV5100_USB_TIMEOUT); 72 DTV5100_USB_TIMEOUT);
67} 73}
68 74
@@ -176,7 +182,7 @@ static struct dvb_usb_device_properties dtv5100_properties = {
176 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 182 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
177 .usb_ctrl = DEVICE_SPECIFIC, 183 .usb_ctrl = DEVICE_SPECIFIC,
178 184
179 .size_of_priv = 0, 185 .size_of_priv = sizeof(struct dtv5100_state),
180 186
181 .num_adapters = 1, 187 .num_adapters = 1,
182 .adapter = {{ 188 .adapter = {{
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 3896ba9a4179..84308569e7dc 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -142,6 +142,7 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
142{ 142{
143 int ret = 0; 143 int ret = 0;
144 144
145 mutex_init(&d->data_mutex);
145 mutex_init(&d->usb_mutex); 146 mutex_init(&d->usb_mutex);
146 mutex_init(&d->i2c_mutex); 147 mutex_init(&d->i2c_mutex);
147 148
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 639c4678c65b..107255b08b2b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -404,8 +404,12 @@ struct dvb_usb_adapter {
404 * Powered is in/decremented for each call to modify the state. 404 * Powered is in/decremented for each call to modify the state.
405 * @udev: pointer to the device's struct usb_device. 405 * @udev: pointer to the device's struct usb_device.
406 * 406 *
407 * @usb_mutex: semaphore of USB control messages (reading needs two messages) 407 * @data_mutex: mutex to protect the data structure used to store URB data
408 * @i2c_mutex: semaphore for i2c-transfers 408 * @usb_mutex: mutex of USB control messages (reading needs two messages).
409 * Please notice that this mutex is used internally at the generic
410 * URB control functions. So, drivers using dvb_usb_generic_rw() and
411 * derivated functions should not lock it internally.
412 * @i2c_mutex: mutex for i2c-transfers
409 * 413 *
410 * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB 414 * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB
411 * 415 *
@@ -433,6 +437,7 @@ struct dvb_usb_device {
433 int powered; 437 int powered;
434 438
435 /* locking */ 439 /* locking */
440 struct mutex data_mutex;
436 struct mutex usb_mutex; 441 struct mutex usb_mutex;
437 442
438 /* i2c */ 443 /* i2c */
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 5fb0c650926e..2c720cb2fb00 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -852,7 +852,7 @@ static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
852 if (i && !state->initialized) { 852 if (i && !state->initialized) {
853 state->initialized = 1; 853 state->initialized = 1;
854 /* reset board */ 854 /* reset board */
855 dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); 855 return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
856 } 856 }
857 857
858 return 0; 858 return 0;
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 5d0384dd45b5..993bb7a72985 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -15,6 +15,7 @@
15 * see Documentation/dvb/README.dvb-usb for more information 15 * see Documentation/dvb/README.dvb-usb for more information
16 */ 16 */
17#include "gp8psk.h" 17#include "gp8psk.h"
18#include "gp8psk-fe.h"
18 19
19/* debug */ 20/* debug */
20static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw"; 21static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw";
@@ -24,37 +25,19 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DV
24 25
25DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 26DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
26 27
27static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) 28struct gp8psk_state {
28{ 29 unsigned char data[80];
29 return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6)); 30};
30}
31
32static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
33{
34 return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1));
35}
36
37static void gp8psk_info(struct dvb_usb_device *d)
38{
39 u8 fpga_vers, fw_vers[6];
40
41 if (!gp8psk_get_fw_version(d, fw_vers))
42 info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
43 fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
44 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
45 else
46 info("failed to get FW version");
47
48 if (!gp8psk_get_fpga_version(d, &fpga_vers))
49 info("FPGA Version = %i", fpga_vers);
50 else
51 info("failed to get FPGA version");
52}
53 31
54int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) 32static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
33 u16 index, u8 *b, int blen)
55{ 34{
35 struct gp8psk_state *st = d->priv;
56 int ret = 0,try = 0; 36 int ret = 0,try = 0;
57 37
38 if (blen > sizeof(st->data))
39 return -EIO;
40
58 if ((ret = mutex_lock_interruptible(&d->usb_mutex))) 41 if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
59 return ret; 42 return ret;
60 43
@@ -63,7 +46,7 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
63 usb_rcvctrlpipe(d->udev,0), 46 usb_rcvctrlpipe(d->udev,0),
64 req, 47 req,
65 USB_TYPE_VENDOR | USB_DIR_IN, 48 USB_TYPE_VENDOR | USB_DIR_IN,
66 value,index,b,blen, 49 value, index, st->data, blen,
67 2000); 50 2000);
68 deb_info("reading number %d (ret: %d)\n",try,ret); 51 deb_info("reading number %d (ret: %d)\n",try,ret);
69 try++; 52 try++;
@@ -72,8 +55,10 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
72 if (ret < 0 || ret != blen) { 55 if (ret < 0 || ret != blen) {
73 warn("usb in %d operation failed.", req); 56 warn("usb in %d operation failed.", req);
74 ret = -EIO; 57 ret = -EIO;
75 } else 58 } else {
76 ret = 0; 59 ret = 0;
60 memcpy(b, st->data, blen);
61 }
77 62
78 deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index); 63 deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
79 debug_dump(b,blen,deb_xfer); 64 debug_dump(b,blen,deb_xfer);
@@ -83,22 +68,27 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
83 return ret; 68 return ret;
84} 69}
85 70
86int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, 71static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
87 u16 index, u8 *b, int blen) 72 u16 index, u8 *b, int blen)
88{ 73{
74 struct gp8psk_state *st = d->priv;
89 int ret; 75 int ret;
90 76
91 deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index); 77 deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
92 debug_dump(b,blen,deb_xfer); 78 debug_dump(b,blen,deb_xfer);
93 79
80 if (blen > sizeof(st->data))
81 return -EIO;
82
94 if ((ret = mutex_lock_interruptible(&d->usb_mutex))) 83 if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
95 return ret; 84 return ret;
96 85
86 memcpy(st->data, b, blen);
97 if (usb_control_msg(d->udev, 87 if (usb_control_msg(d->udev,
98 usb_sndctrlpipe(d->udev,0), 88 usb_sndctrlpipe(d->udev,0),
99 req, 89 req,
100 USB_TYPE_VENDOR | USB_DIR_OUT, 90 USB_TYPE_VENDOR | USB_DIR_OUT,
101 value,index,b,blen, 91 value, index, st->data, blen,
102 2000) != blen) { 92 2000) != blen) {
103 warn("usb out operation failed."); 93 warn("usb out operation failed.");
104 ret = -EIO; 94 ret = -EIO;
@@ -109,6 +99,34 @@ int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
109 return ret; 99 return ret;
110} 100}
111 101
102
103static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
104{
105 return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6);
106}
107
108static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
109{
110 return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1);
111}
112
113static void gp8psk_info(struct dvb_usb_device *d)
114{
115 u8 fpga_vers, fw_vers[6];
116
117 if (!gp8psk_get_fw_version(d, fw_vers))
118 info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
119 fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
120 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
121 else
122 info("failed to get FW version");
123
124 if (!gp8psk_get_fpga_version(d, &fpga_vers))
125 info("FPGA Version = %i", fpga_vers);
126 else
127 info("failed to get FPGA version");
128}
129
112static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) 130static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
113{ 131{
114 int ret; 132 int ret;
@@ -143,6 +161,11 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
143 err("failed to load bcm4500 firmware."); 161 err("failed to load bcm4500 firmware.");
144 goto out_free; 162 goto out_free;
145 } 163 }
164 if (buflen > 64) {
165 err("firmare chunk size bigger than 64 bytes.");
166 goto out_free;
167 }
168
146 memcpy(buf, ptr, buflen); 169 memcpy(buf, ptr, buflen);
147 if (dvb_usb_generic_write(d, buf, buflen)) { 170 if (dvb_usb_generic_write(d, buf, buflen)) {
148 err("failed to load bcm4500 firmware."); 171 err("failed to load bcm4500 firmware.");
@@ -206,10 +229,13 @@ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
206 return 0; 229 return 0;
207} 230}
208 231
209int gp8psk_bcm4500_reload(struct dvb_usb_device *d) 232static int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
210{ 233{
211 u8 buf; 234 u8 buf;
212 int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); 235 int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
236
237 deb_xfer("reloading firmware\n");
238
213 /* Turn off 8psk power */ 239 /* Turn off 8psk power */
214 if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) 240 if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1))
215 return -EINVAL; 241 return -EINVAL;
@@ -228,9 +254,47 @@ static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
228 return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0); 254 return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0);
229} 255}
230 256
257/* Callbacks for gp8psk-fe.c */
258
259static int gp8psk_fe_in(void *priv, u8 req, u16 value,
260 u16 index, u8 *b, int blen)
261{
262 struct dvb_usb_device *d = priv;
263
264 return gp8psk_usb_in_op(d, req, value, index, b, blen);
265}
266
267static int gp8psk_fe_out(void *priv, u8 req, u16 value,
268 u16 index, u8 *b, int blen)
269{
270 struct dvb_usb_device *d = priv;
271
272 return gp8psk_usb_out_op(d, req, value, index, b, blen);
273}
274
275static int gp8psk_fe_reload(void *priv)
276{
277 struct dvb_usb_device *d = priv;
278
279 return gp8psk_bcm4500_reload(d);
280}
281
282const struct gp8psk_fe_ops gp8psk_fe_ops = {
283 .in = gp8psk_fe_in,
284 .out = gp8psk_fe_out,
285 .reload = gp8psk_fe_reload,
286};
287
231static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap) 288static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap)
232{ 289{
233 adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev); 290 struct dvb_usb_device *d = adap->dev;
291 int id = le16_to_cpu(d->udev->descriptor.idProduct);
292 int is_rev1;
293
294 is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM) ? true : false;
295
296 adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach,
297 &gp8psk_fe_ops, d, is_rev1);
234 return 0; 298 return 0;
235} 299}
236 300
@@ -265,6 +329,8 @@ static struct dvb_usb_device_properties gp8psk_properties = {
265 .usb_ctrl = CYPRESS_FX2, 329 .usb_ctrl = CYPRESS_FX2,
266 .firmware = "dvb-usb-gp8psk-01.fw", 330 .firmware = "dvb-usb-gp8psk-01.fw",
267 331
332 .size_of_priv = sizeof(struct gp8psk_state),
333
268 .num_adapters = 1, 334 .num_adapters = 1,
269 .adapter = { 335 .adapter = {
270 { 336 {
diff --git a/drivers/media/usb/dvb-usb/gp8psk.h b/drivers/media/usb/dvb-usb/gp8psk.h
index ed32b9da4843..d8975b866dee 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.h
+++ b/drivers/media/usb/dvb-usb/gp8psk.h
@@ -24,58 +24,6 @@ extern int dvb_usb_gp8psk_debug;
24#define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args) 24#define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args)
25#define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args) 25#define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args)
26#define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args) 26#define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args)
27#define deb_fe(args...) dprintk(dvb_usb_gp8psk_debug,0x08,args)
28
29/* Twinhan Vendor requests */
30#define TH_COMMAND_IN 0xC0
31#define TH_COMMAND_OUT 0xC1
32
33/* gp8psk commands */
34
35#define GET_8PSK_CONFIG 0x80 /* in */
36#define SET_8PSK_CONFIG 0x81
37#define I2C_WRITE 0x83
38#define I2C_READ 0x84
39#define ARM_TRANSFER 0x85
40#define TUNE_8PSK 0x86
41#define GET_SIGNAL_STRENGTH 0x87 /* in */
42#define LOAD_BCM4500 0x88
43#define BOOT_8PSK 0x89 /* in */
44#define START_INTERSIL 0x8A /* in */
45#define SET_LNB_VOLTAGE 0x8B
46#define SET_22KHZ_TONE 0x8C
47#define SEND_DISEQC_COMMAND 0x8D
48#define SET_DVB_MODE 0x8E
49#define SET_DN_SWITCH 0x8F
50#define GET_SIGNAL_LOCK 0x90 /* in */
51#define GET_FW_VERS 0x92
52#define GET_SERIAL_NUMBER 0x93 /* in */
53#define USE_EXTRA_VOLT 0x94
54#define GET_FPGA_VERS 0x95
55#define CW3K_INIT 0x9d
56
57/* PSK_configuration bits */
58#define bm8pskStarted 0x01
59#define bm8pskFW_Loaded 0x02
60#define bmIntersilOn 0x04
61#define bmDVBmode 0x08
62#define bm22kHz 0x10
63#define bmSEL18V 0x20
64#define bmDCtuned 0x40
65#define bmArmed 0x80
66
67/* Satellite modulation modes */
68#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
69#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
70#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
71#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
72
73#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
74#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
75#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
76#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
77#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
78#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
79 27
80#define GET_USB_SPEED 0x07 28#define GET_USB_SPEED 0x07
81 29
@@ -86,15 +34,4 @@ extern int dvb_usb_gp8psk_debug;
86#define PRODUCT_STRING_READ 0x0D 34#define PRODUCT_STRING_READ 0x0D
87#define FW_BCD_VERSION_READ 0x14 35#define FW_BCD_VERSION_READ 0x14
88 36
89/* firmware revision id's */
90#define GP8PSK_FW_REV1 0x020604
91#define GP8PSK_FW_REV2 0x020704
92#define GP8PSK_FW_VERS(_fw_vers) ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
93
94extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d);
95extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen);
96extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
97 u16 index, u8 *b, int blen);
98extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d);
99
100#endif 37#endif
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index fc7569e2728d..1babd3341910 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -74,22 +74,31 @@ static struct rc_map_table rc_map_haupp_table[] = {
74 */ 74 */
75static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 75static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
76{ 76{
77 u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom; 77 u8 *buf, data, toggle, custom;
78 u16 raw; 78 u16 raw;
79 int i; 79 int i, ret;
80 struct dibusb_device_state *st = d->priv; 80 struct dibusb_device_state *st = d->priv;
81 81
82 dvb_usb_generic_rw(d,cmd,2,key,5,0); 82 buf = kmalloc(5, GFP_KERNEL);
83 if (!buf)
84 return -ENOMEM;
85
86 buf[0] = DIBUSB_REQ_POLL_REMOTE;
87 buf[1] = 0x35;
88 ret = dvb_usb_generic_rw(d, buf, 2, buf, 5, 0);
89 if (ret < 0)
90 goto ret;
83 91
84 *state = REMOTE_NO_KEY_PRESSED; 92 *state = REMOTE_NO_KEY_PRESSED;
85 switch (key[0]) { 93 switch (buf[0]) {
86 case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED: 94 case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED:
87 raw = ((key[1] << 8) | key[2]) >> 3; 95 raw = ((buf[1] << 8) | buf[2]) >> 3;
88 toggle = !!(raw & 0x800); 96 toggle = !!(raw & 0x800);
89 data = raw & 0x3f; 97 data = raw & 0x3f;
90 custom = (raw >> 6) & 0x1f; 98 custom = (raw >> 6) & 0x1f;
91 99
92 deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle); 100 deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",
101 buf[1], buf[2], buf[3], custom, data, toggle);
93 102
94 for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) { 103 for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) {
95 if (rc5_data(&rc_map_haupp_table[i]) == data && 104 if (rc5_data(&rc_map_haupp_table[i]) == data &&
@@ -117,7 +126,9 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
117 break; 126 break;
118 } 127 }
119 128
120 return 0; 129ret:
130 kfree(buf);
131 return ret;
121} 132}
122 133
123static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) 134static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index c05de1b088a4..07fa08be9e99 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,48 +97,53 @@ struct pctv452e_state {
97 u8 c; /* transaction counter, wraps around... */ 97 u8 c; /* transaction counter, wraps around... */
98 u8 initialized; /* set to 1 if 0x15 has been sent */ 98 u8 initialized; /* set to 1 if 0x15 has been sent */
99 u16 last_rc_key; 99 u16 last_rc_key;
100
101 unsigned char data[80];
100}; 102};
101 103
102static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, 104static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
103 unsigned int write_len, unsigned int read_len) 105 unsigned int write_len, unsigned int read_len)
104{ 106{
105 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 107 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
106 u8 buf[64];
107 u8 id; 108 u8 id;
108 unsigned int rlen; 109 unsigned int rlen;
109 int ret; 110 int ret;
110 111
111 BUG_ON(NULL == data && 0 != (write_len | read_len)); 112 if (!data || (write_len > 64 - 4) || (read_len > 64 - 4)) {
112 BUG_ON(write_len > 64 - 4); 113 err("%s: transfer data invalid", __func__);
113 BUG_ON(read_len > 64 - 4); 114 return -EIO;
115 }
114 116
117 mutex_lock(&state->ca_mutex);
115 id = state->c++; 118 id = state->c++;
116 119
117 buf[0] = SYNC_BYTE_OUT; 120 state->data[0] = SYNC_BYTE_OUT;
118 buf[1] = id; 121 state->data[1] = id;
119 buf[2] = cmd; 122 state->data[2] = cmd;
120 buf[3] = write_len; 123 state->data[3] = write_len;
121 124
122 memcpy(buf + 4, data, write_len); 125 memcpy(state->data + 4, data, write_len);
123 126
124 rlen = (read_len > 0) ? 64 : 0; 127 rlen = (read_len > 0) ? 64 : 0;
125 ret = dvb_usb_generic_rw(d, buf, 4 + write_len, 128 ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
126 buf, rlen, /* delay_ms */ 0); 129 state->data, rlen, /* delay_ms */ 0);
127 if (0 != ret) 130 if (0 != ret)
128 goto failed; 131 goto failed;
129 132
130 ret = -EIO; 133 ret = -EIO;
131 if (SYNC_BYTE_IN != buf[0] || id != buf[1]) 134 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
132 goto failed; 135 goto failed;
133 136
134 memcpy(data, buf + 4, read_len); 137 memcpy(data, state->data + 4, read_len);
135 138
139 mutex_unlock(&state->ca_mutex);
136 return 0; 140 return 0;
137 141
138failed: 142failed:
139 err("CI error %d; %02X %02X %02X -> %*ph.", 143 err("CI error %d; %02X %02X %02X -> %*ph.",
140 ret, SYNC_BYTE_OUT, id, cmd, 3, buf); 144 ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
141 145
146 mutex_unlock(&state->ca_mutex);
142 return ret; 147 return ret;
143} 148}
144 149
@@ -405,52 +410,53 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
405 u8 *rcv_buf, u8 rcv_len) 410 u8 *rcv_buf, u8 rcv_len)
406{ 411{
407 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 412 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
408 u8 buf[64];
409 u8 id; 413 u8 id;
410 int ret; 414 int ret;
411 415
416 mutex_lock(&state->ca_mutex);
412 id = state->c++; 417 id = state->c++;
413 418
414 ret = -EINVAL; 419 ret = -EINVAL;
415 if (snd_len > 64 - 7 || rcv_len > 64 - 7) 420 if (snd_len > 64 - 7 || rcv_len > 64 - 7)
416 goto failed; 421 goto failed;
417 422
418 buf[0] = SYNC_BYTE_OUT; 423 state->data[0] = SYNC_BYTE_OUT;
419 buf[1] = id; 424 state->data[1] = id;
420 buf[2] = PCTV_CMD_I2C; 425 state->data[2] = PCTV_CMD_I2C;
421 buf[3] = snd_len + 3; 426 state->data[3] = snd_len + 3;
422 buf[4] = addr << 1; 427 state->data[4] = addr << 1;
423 buf[5] = snd_len; 428 state->data[5] = snd_len;
424 buf[6] = rcv_len; 429 state->data[6] = rcv_len;
425 430
426 memcpy(buf + 7, snd_buf, snd_len); 431 memcpy(state->data + 7, snd_buf, snd_len);
427 432
428 ret = dvb_usb_generic_rw(d, buf, 7 + snd_len, 433 ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
429 buf, /* rcv_len */ 64, 434 state->data, /* rcv_len */ 64,
430 /* delay_ms */ 0); 435 /* delay_ms */ 0);
431 if (ret < 0) 436 if (ret < 0)
432 goto failed; 437 goto failed;
433 438
434 /* TT USB protocol error. */ 439 /* TT USB protocol error. */
435 ret = -EIO; 440 ret = -EIO;
436 if (SYNC_BYTE_IN != buf[0] || id != buf[1]) 441 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
437 goto failed; 442 goto failed;
438 443
439 /* I2C device didn't respond as expected. */ 444 /* I2C device didn't respond as expected. */
440 ret = -EREMOTEIO; 445 ret = -EREMOTEIO;
441 if (buf[5] < snd_len || buf[6] < rcv_len) 446 if (state->data[5] < snd_len || state->data[6] < rcv_len)
442 goto failed; 447 goto failed;
443 448
444 memcpy(rcv_buf, buf + 7, rcv_len); 449 memcpy(rcv_buf, state->data + 7, rcv_len);
450 mutex_unlock(&state->ca_mutex);
445 451
446 return rcv_len; 452 return rcv_len;
447 453
448failed: 454failed:
449 err("I2C error %d; %02X %02X %02X %02X %02X -> " 455 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
450 "%02X %02X %02X %02X %02X.",
451 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, 456 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
452 buf[0], buf[1], buf[4], buf[5], buf[6]); 457 7, state->data);
453 458
459 mutex_unlock(&state->ca_mutex);
454 return ret; 460 return ret;
455} 461}
456 462
@@ -499,8 +505,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
499static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) 505static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
500{ 506{
501 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 507 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
502 u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 }; 508 u8 *rx;
503 u8 rx[PCTV_ANSWER_LEN];
504 int ret; 509 int ret;
505 510
506 info("%s: %d\n", __func__, i); 511 info("%s: %d\n", __func__, i);
@@ -511,6 +516,11 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
511 if (state->initialized) 516 if (state->initialized)
512 return 0; 517 return 0;
513 518
519 rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
520 if (!rx)
521 return -ENOMEM;
522
523 mutex_lock(&state->ca_mutex);
514 /* hmm where shoud this should go? */ 524 /* hmm where shoud this should go? */
515 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); 525 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
516 if (ret != 0) 526 if (ret != 0)
@@ -518,65 +528,75 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
518 __func__, ret); 528 __func__, ret);
519 529
520 /* this is a one-time initialization, dont know where to put */ 530 /* this is a one-time initialization, dont know where to put */
521 b0[1] = state->c++; 531 state->data[0] = 0xaa;
532 state->data[1] = state->c++;
533 state->data[2] = PCTV_CMD_RESET;
534 state->data[3] = 1;
535 state->data[4] = 0;
522 /* reset board */ 536 /* reset board */
523 ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); 537 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
524 if (ret) 538 if (ret)
525 return ret; 539 goto ret;
526 540
527 b0[1] = state->c++; 541 state->data[1] = state->c++;
528 b0[4] = 1; 542 state->data[4] = 1;
529 /* reset board (again?) */ 543 /* reset board (again?) */
530 ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); 544 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
531 if (ret) 545 if (ret)
532 return ret; 546 goto ret;
533 547
534 state->initialized = 1; 548 state->initialized = 1;
535 549
536 return 0; 550ret:
551 mutex_unlock(&state->ca_mutex);
552 kfree(rx);
553 return ret;
537} 554}
538 555
539static int pctv452e_rc_query(struct dvb_usb_device *d) 556static int pctv452e_rc_query(struct dvb_usb_device *d)
540{ 557{
541 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 558 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
542 u8 b[CMD_BUFFER_SIZE];
543 u8 rx[PCTV_ANSWER_LEN];
544 int ret, i; 559 int ret, i;
545 u8 id = state->c++; 560 u8 id;
561
562 mutex_lock(&state->ca_mutex);
563 id = state->c++;
546 564
547 /* prepare command header */ 565 /* prepare command header */
548 b[0] = SYNC_BYTE_OUT; 566 state->data[0] = SYNC_BYTE_OUT;
549 b[1] = id; 567 state->data[1] = id;
550 b[2] = PCTV_CMD_IR; 568 state->data[2] = PCTV_CMD_IR;
551 b[3] = 0; 569 state->data[3] = 0;
552 570
553 /* send ir request */ 571 /* send ir request */
554 ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0); 572 ret = dvb_usb_generic_rw(d, state->data, 4,
573 state->data, PCTV_ANSWER_LEN, 0);
555 if (ret != 0) 574 if (ret != 0)
556 return ret; 575 goto ret;
557 576
558 if (debug > 3) { 577 if (debug > 3) {
559 info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx); 578 info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
560 for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++) 579 for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
561 info(" %02x", rx[i+3]); 580 info(" %02x", state->data[i + 3]);
562 581
563 info("\n"); 582 info("\n");
564 } 583 }
565 584
566 if ((rx[3] == 9) && (rx[12] & 0x01)) { 585 if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
567 /* got a "press" event */ 586 /* got a "press" event */
568 state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]); 587 state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
569 if (debug > 2) 588 if (debug > 2)
570 info("%s: cmd=0x%02x sys=0x%02x\n", 589 info("%s: cmd=0x%02x sys=0x%02x\n",
571 __func__, rx[6], rx[7]); 590 __func__, state->data[6], state->data[7]);
572 591
573 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); 592 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
574 } else if (state->last_rc_key) { 593 } else if (state->last_rc_key) {
575 rc_keyup(d->rc_dev); 594 rc_keyup(d->rc_dev);
576 state->last_rc_key = 0; 595 state->last_rc_key = 0;
577 } 596 }
578 597ret:
579 return 0; 598 mutex_unlock(&state->ca_mutex);
599 return ret;
580} 600}
581 601
582static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) 602static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index d9f3262bf071..4706628a3ed5 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -89,9 +89,13 @@ struct technisat_usb2_state {
89static int technisat_usb2_i2c_access(struct usb_device *udev, 89static int technisat_usb2_i2c_access(struct usb_device *udev,
90 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) 90 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
91{ 91{
92 u8 b[64]; 92 u8 *b;
93 int ret, actual_length; 93 int ret, actual_length;
94 94
95 b = kmalloc(64, GFP_KERNEL);
96 if (!b)
97 return -ENOMEM;
98
95 deb_i2c("i2c-access: %02x, tx: ", device_addr); 99 deb_i2c("i2c-access: %02x, tx: ", device_addr);
96 debug_dump(tx, txlen, deb_i2c); 100 debug_dump(tx, txlen, deb_i2c);
97 deb_i2c(" "); 101 deb_i2c(" ");
@@ -123,7 +127,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
123 127
124 if (ret < 0) { 128 if (ret < 0) {
125 err("i2c-error: out failed %02x = %d", device_addr, ret); 129 err("i2c-error: out failed %02x = %d", device_addr, ret);
126 return -ENODEV; 130 goto err;
127 } 131 }
128 132
129 ret = usb_bulk_msg(udev, 133 ret = usb_bulk_msg(udev,
@@ -131,7 +135,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
131 b, 64, &actual_length, 1000); 135 b, 64, &actual_length, 1000);
132 if (ret < 0) { 136 if (ret < 0) {
133 err("i2c-error: in failed %02x = %d", device_addr, ret); 137 err("i2c-error: in failed %02x = %d", device_addr, ret);
134 return -ENODEV; 138 goto err;
135 } 139 }
136 140
137 if (b[0] != I2C_STATUS_OK) { 141 if (b[0] != I2C_STATUS_OK) {
@@ -140,7 +144,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
140 if (!(b[0] == I2C_STATUS_NAK && 144 if (!(b[0] == I2C_STATUS_NAK &&
141 device_addr == 0x60 145 device_addr == 0x60
142 /* && device_is_technisat_usb2 */)) 146 /* && device_is_technisat_usb2 */))
143 return -ENODEV; 147 goto err;
144 } 148 }
145 149
146 deb_i2c("status: %d, ", b[0]); 150 deb_i2c("status: %d, ", b[0]);
@@ -154,7 +158,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
154 158
155 deb_i2c("\n"); 159 deb_i2c("\n");
156 160
157 return 0; 161err:
162 kfree(b);
163 return ret;
158} 164}
159 165
160static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, 166static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index c3a0e87066eb..f7bb78c1873c 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1901,19 +1901,30 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
1901 s32 TransferBufferLength, int bOut) 1901 s32 TransferBufferLength, int bOut)
1902{ 1902{
1903 int r; 1903 int r;
1904 unsigned char *buf;
1905
1906 buf = kmalloc(TransferBufferLength, GFP_KERNEL);
1907 if (!buf)
1908 return -ENOMEM;
1909
1904 if (!bOut) { 1910 if (!bOut) {
1905 r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 1911 r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1906 Request, 1912 Request,
1907 USB_TYPE_VENDOR | USB_RECIP_DEVICE | 1913 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
1908 USB_DIR_IN, 1914 USB_DIR_IN,
1909 Value, Index, TransferBuffer, 1915 Value, Index, buf,
1910 TransferBufferLength, HZ * 5); 1916 TransferBufferLength, HZ * 5);
1917
1918 if (r >= 0)
1919 memcpy(TransferBuffer, buf, TransferBufferLength);
1911 } else { 1920 } else {
1921 memcpy(buf, TransferBuffer, TransferBufferLength);
1912 r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 1922 r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1913 Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1923 Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1914 Value, Index, TransferBuffer, 1924 Value, Index, buf,
1915 TransferBufferLength, HZ * 5); 1925 TransferBufferLength, HZ * 5);
1916 } 1926 }
1927 kfree(buf);
1917 return r; 1928 return r;
1918} 1929}
1919 1930
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index db200c9d796d..22a9aae16291 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -147,20 +147,26 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
147int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value) 147int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
148{ 148{
149 struct usb_device *udev = dev->udev; 149 struct usb_device *udev = dev->udev;
150 unsigned char *buf;
150 int ret; 151 int ret;
151 152
153 buf = kmalloc(sizeof(u8), GFP_KERNEL);
154 if (!buf)
155 return -ENOMEM;
156
152 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 157 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
153 0x00, 158 0x00,
154 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 159 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
155 0x00, 160 0x00,
156 index, 161 index,
157 (u8 *) value, 162 buf,
158 sizeof(u8), 163 sizeof(u8),
159 500); 164 500);
160 if (ret < 0) 165 if (ret >= 0)
161 return ret; 166 memcpy(value, buf, sizeof(u8));
162 else 167
163 return 0; 168 kfree(buf);
169 return ret;
164} 170}
165 171
166static int stk_start_stream(struct stk_camera *dev) 172static int stk_start_stream(struct stk_camera *dev)
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index e9e6ea3ab73c..75b9d4ac8b1e 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
178 178
179 ret = 0; 179 ret = 0;
180 bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); 180 bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
181 if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { 181 if (bytes_recv < if_version_length) {
182 dev_err(bus->dev, "Could not read IF version\n"); 182 dev_err(bus->dev, "Could not read IF version\n");
183 ret = -EIO; 183 ret = -EIO;
184 goto err; 184 goto err;
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5a8dc5a76e0d..3678220964fe 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2347,7 +2347,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2347 struct mmc_test_req *rq = mmc_test_req_alloc(); 2347 struct mmc_test_req *rq = mmc_test_req_alloc();
2348 struct mmc_host *host = test->card->host; 2348 struct mmc_host *host = test->card->host;
2349 struct mmc_test_area *t = &test->area; 2349 struct mmc_test_area *t = &test->area;
2350 struct mmc_async_req areq; 2350 struct mmc_test_async_req test_areq = { .test = test };
2351 struct mmc_request *mrq; 2351 struct mmc_request *mrq;
2352 unsigned long timeout; 2352 unsigned long timeout;
2353 bool expired = false; 2353 bool expired = false;
@@ -2363,8 +2363,8 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2363 mrq->sbc = &rq->sbc; 2363 mrq->sbc = &rq->sbc;
2364 mrq->cap_cmd_during_tfr = true; 2364 mrq->cap_cmd_during_tfr = true;
2365 2365
2366 areq.mrq = mrq; 2366 test_areq.areq.mrq = mrq;
2367 areq.err_check = mmc_test_check_result_async; 2367 test_areq.areq.err_check = mmc_test_check_result_async;
2368 2368
2369 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 2369 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2370 512, write); 2370 512, write);
@@ -2378,7 +2378,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2378 2378
2379 /* Start ongoing data request */ 2379 /* Start ongoing data request */
2380 if (use_areq) { 2380 if (use_areq) {
2381 mmc_start_req(host, &areq, &ret); 2381 mmc_start_req(host, &test_areq.areq, &ret);
2382 if (ret) 2382 if (ret)
2383 goto out_free; 2383 goto out_free;
2384 } else { 2384 } else {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 39fc5b2b96c5..df19777068a6 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -26,6 +26,8 @@
26#include "mmc_ops.h" 26#include "mmc_ops.h"
27#include "sd_ops.h" 27#include "sd_ops.h"
28 28
29#define DEFAULT_CMD6_TIMEOUT_MS 500
30
29static const unsigned int tran_exp[] = { 31static const unsigned int tran_exp[] = {
30 10000, 100000, 1000000, 10000000, 32 10000, 100000, 1000000, 10000000,
31 0, 0, 0, 0 33 0, 0, 0, 0
@@ -571,6 +573,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
571 card->erased_byte = 0x0; 573 card->erased_byte = 0x0;
572 574
573 /* eMMC v4.5 or later */ 575 /* eMMC v4.5 or later */
576 card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
574 if (card->ext_csd.rev >= 6) { 577 if (card->ext_csd.rev >= 6) {
575 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; 578 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
576 579
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index c0bb0c793e84..dbbc4303bdd0 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
46 host->pdata = pdev->dev.platform_data; 46 host->pdata = pdev->dev.platform_data;
47 47
48 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 48 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
49 /* Get registers' physical base address */
50 host->phy_regs = regs->start;
51 host->regs = devm_ioremap_resource(&pdev->dev, regs); 49 host->regs = devm_ioremap_resource(&pdev->dev, regs);
52 if (IS_ERR(host->regs)) 50 if (IS_ERR(host->regs))
53 return PTR_ERR(host->regs); 51 return PTR_ERR(host->regs);
54 52
53 /* Get registers' physical base address */
54 host->phy_regs = regs->start;
55
55 platform_set_drvdata(pdev, host); 56 platform_set_drvdata(pdev, host);
56 return dw_mci_probe(host); 57 return dw_mci_probe(host);
57} 58}
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4fcbc4012ed0..50a674be6655 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2940,7 +2940,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2940 return ERR_PTR(-ENOMEM); 2940 return ERR_PTR(-ENOMEM);
2941 2941
2942 /* find reset controller when exist */ 2942 /* find reset controller when exist */
2943 pdata->rstc = devm_reset_control_get_optional(dev, NULL); 2943 pdata->rstc = devm_reset_control_get_optional(dev, "reset");
2944 if (IS_ERR(pdata->rstc)) { 2944 if (IS_ERR(pdata->rstc)) {
2945 if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) 2945 if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
2946 return ERR_PTR(-EPROBE_DEFER); 2946 return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index d839147e591d..44ecebd1ea8c 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
661 661
662 platform_set_drvdata(pdev, mmc); 662 platform_set_drvdata(pdev, mmc);
663 663
664 spin_lock_init(&host->lock);
665
664 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, 666 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
665 dev_name(&pdev->dev), host); 667 dev_name(&pdev->dev), host);
666 if (ret) 668 if (ret)
667 goto out_free_dma; 669 goto out_free_dma;
668 670
669 spin_lock_init(&host->lock);
670
671 ret = mmc_add_host(mmc); 671 ret = mmc_add_host(mmc);
672 if (ret) 672 if (ret)
673 goto out_free_dma; 673 goto out_free_dma;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 8ef44a2a2fd9..90ed2e12d345 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -647,6 +647,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
647 if (msm_host->pwr_irq < 0) { 647 if (msm_host->pwr_irq < 0) {
648 dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", 648 dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
649 msm_host->pwr_irq); 649 msm_host->pwr_irq);
650 ret = msm_host->pwr_irq;
650 goto clk_disable; 651 goto clk_disable;
651 } 652 }
652 653
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 71654b90227f..42ef3ebb1d8c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2086,6 +2086,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2086 2086
2087 if (!host->tuning_done) { 2087 if (!host->tuning_done) {
2088 pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n"); 2088 pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
2089
2090 sdhci_do_reset(host, SDHCI_RESET_CMD);
2091 sdhci_do_reset(host, SDHCI_RESET_DATA);
2092
2089 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2093 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2090 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2094 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2091 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2095 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
@@ -2286,10 +2290,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
2286 2290
2287 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 2291 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2288 mrq = host->mrqs_done[i]; 2292 mrq = host->mrqs_done[i];
2289 if (mrq) { 2293 if (mrq)
2290 host->mrqs_done[i] = NULL;
2291 break; 2294 break;
2292 }
2293 } 2295 }
2294 2296
2295 if (!mrq) { 2297 if (!mrq) {
@@ -2320,6 +2322,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
2320 * upon error conditions. 2322 * upon error conditions.
2321 */ 2323 */
2322 if (sdhci_needs_reset(host, mrq)) { 2324 if (sdhci_needs_reset(host, mrq)) {
2325 /*
2326 * Do not finish until command and data lines are available for
2327 * reset. Note there can only be one other mrq, so it cannot
2328 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2329 * would both be null.
2330 */
2331 if (host->cmd || host->data_cmd) {
2332 spin_unlock_irqrestore(&host->lock, flags);
2333 return true;
2334 }
2335
2323 /* Some controllers need this kick or reset won't work here */ 2336 /* Some controllers need this kick or reset won't work here */
2324 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2337 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2325 /* This is to force an update */ 2338 /* This is to force an update */
@@ -2327,10 +2340,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
2327 2340
2328 /* Spec says we should do both at the same time, but Ricoh 2341 /* Spec says we should do both at the same time, but Ricoh
2329 controllers do not like that. */ 2342 controllers do not like that. */
2330 if (!host->cmd) 2343 sdhci_do_reset(host, SDHCI_RESET_CMD);
2331 sdhci_do_reset(host, SDHCI_RESET_CMD); 2344 sdhci_do_reset(host, SDHCI_RESET_DATA);
2332 if (!host->data_cmd)
2333 sdhci_do_reset(host, SDHCI_RESET_DATA);
2334 2345
2335 host->pending_reset = false; 2346 host->pending_reset = false;
2336 } 2347 }
@@ -2338,6 +2349,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
2338 if (!sdhci_has_requests(host)) 2349 if (!sdhci_has_requests(host))
2339 sdhci_led_deactivate(host); 2350 sdhci_led_deactivate(host);
2340 2351
2352 host->mrqs_done[i] = NULL;
2353
2341 mmiowb(); 2354 mmiowb();
2342 spin_unlock_irqrestore(&host->lock, flags); 2355 spin_unlock_irqrestore(&host->lock, flags);
2343 2356
@@ -2512,9 +2525,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2512 if (!host->data) { 2525 if (!host->data) {
2513 struct mmc_command *data_cmd = host->data_cmd; 2526 struct mmc_command *data_cmd = host->data_cmd;
2514 2527
2515 if (data_cmd)
2516 host->data_cmd = NULL;
2517
2518 /* 2528 /*
2519 * The "data complete" interrupt is also used to 2529 * The "data complete" interrupt is also used to
2520 * indicate that a busy state has ended. See comment 2530 * indicate that a busy state has ended. See comment
@@ -2522,11 +2532,13 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2522 */ 2532 */
2523 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 2533 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2524 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 2534 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2535 host->data_cmd = NULL;
2525 data_cmd->error = -ETIMEDOUT; 2536 data_cmd->error = -ETIMEDOUT;
2526 sdhci_finish_mrq(host, data_cmd->mrq); 2537 sdhci_finish_mrq(host, data_cmd->mrq);
2527 return; 2538 return;
2528 } 2539 }
2529 if (intmask & SDHCI_INT_DATA_END) { 2540 if (intmask & SDHCI_INT_DATA_END) {
2541 host->data_cmd = NULL;
2530 /* 2542 /*
2531 * Some cards handle busy-end interrupt 2543 * Some cards handle busy-end interrupt
2532 * before the command completed, so make 2544 * before the command completed, so make
@@ -2912,6 +2924,10 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
2912 spin_unlock_irqrestore(&host->lock, flags); 2924 spin_unlock_irqrestore(&host->lock, flags);
2913 } 2925 }
2914 2926
2927 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
2928 mmc->ops->hs400_enhanced_strobe)
2929 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
2930
2915 spin_lock_irqsave(&host->lock, flags); 2931 spin_lock_irqsave(&host->lock, flags);
2916 2932
2917 host->runtime_suspended = false; 2933 host->runtime_suspended = false;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 0f68a99fc4ad..141bd70a49c2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -161,7 +161,7 @@ int gpmi_init(struct gpmi_nand_data *this)
161 161
162 ret = gpmi_enable_clk(this); 162 ret = gpmi_enable_clk(this);
163 if (ret) 163 if (ret)
164 goto err_out; 164 return ret;
165 ret = gpmi_reset_block(r->gpmi_regs, false); 165 ret = gpmi_reset_block(r->gpmi_regs, false);
166 if (ret) 166 if (ret)
167 goto err_out; 167 goto err_out;
@@ -197,6 +197,7 @@ int gpmi_init(struct gpmi_nand_data *this)
197 gpmi_disable_clk(this); 197 gpmi_disable_clk(this);
198 return 0; 198 return 0;
199err_out: 199err_out:
200 gpmi_disable_clk(this);
200 return ret; 201 return ret;
201} 202}
202 203
@@ -270,7 +271,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
270 271
271 ret = gpmi_enable_clk(this); 272 ret = gpmi_enable_clk(this);
272 if (ret) 273 if (ret)
273 goto err_out; 274 return ret;
274 275
275 /* 276 /*
276 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this 277 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
@@ -308,6 +309,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
308 gpmi_disable_clk(this); 309 gpmi_disable_clk(this);
309 return 0; 310 return 0;
310err_out: 311err_out:
312 gpmi_disable_clk(this);
311 return ret; 313 return ret;
312} 314}
313 315
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index d54f666417e1..dbf256217b3e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -86,6 +86,8 @@ struct mtk_ecc {
86 struct completion done; 86 struct completion done;
87 struct mutex lock; 87 struct mutex lock;
88 u32 sectors; 88 u32 sectors;
89
90 u8 eccdata[112];
89}; 91};
90 92
91static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, 93static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
@@ -366,9 +368,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
366 u8 *data, u32 bytes) 368 u8 *data, u32 bytes)
367{ 369{
368 dma_addr_t addr; 370 dma_addr_t addr;
369 u8 *p; 371 u32 len;
370 u32 len, i, val; 372 int ret;
371 int ret = 0;
372 373
373 addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); 374 addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
374 ret = dma_mapping_error(ecc->dev, addr); 375 ret = dma_mapping_error(ecc->dev, addr);
@@ -393,14 +394,12 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
393 394
394 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ 395 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
395 len = (config->strength * ECC_PARITY_BITS + 7) >> 3; 396 len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
396 p = data + bytes;
397 397
398 /* write the parity bytes generated by the ECC back to the OOB region */ 398 /* write the parity bytes generated by the ECC back to temp buffer */
399 for (i = 0; i < len; i++) { 399 __ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0), round_up(len, 4));
400 if ((i % 4) == 0) 400
401 val = readl(ecc->regs + ECC_ENCPAR(i / 4)); 401 /* copy into possibly unaligned OOB region with actual length */
402 p[i] = (val >> ((i % 4) * 8)) & 0xff; 402 memcpy(data + bytes, ecc->eccdata, len);
403 }
404timeout: 403timeout:
405 404
406 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); 405 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index e5718e5ecf92..3bde96a3f7bf 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1095,10 +1095,11 @@ static void nand_release_data_interface(struct nand_chip *chip)
1095/** 1095/**
1096 * nand_reset - Reset and initialize a NAND device 1096 * nand_reset - Reset and initialize a NAND device
1097 * @chip: The NAND chip 1097 * @chip: The NAND chip
1098 * @chipnr: Internal die id
1098 * 1099 *
1099 * Returns 0 for success or negative error code otherwise 1100 * Returns 0 for success or negative error code otherwise
1100 */ 1101 */
1101int nand_reset(struct nand_chip *chip) 1102int nand_reset(struct nand_chip *chip, int chipnr)
1102{ 1103{
1103 struct mtd_info *mtd = nand_to_mtd(chip); 1104 struct mtd_info *mtd = nand_to_mtd(chip);
1104 int ret; 1105 int ret;
@@ -1107,9 +1108,17 @@ int nand_reset(struct nand_chip *chip)
1107 if (ret) 1108 if (ret)
1108 return ret; 1109 return ret;
1109 1110
1111 /*
1112 * The CS line has to be released before we can apply the new NAND
1113 * interface settings, hence this weird ->select_chip() dance.
1114 */
1115 chip->select_chip(mtd, chipnr);
1110 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 1116 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1117 chip->select_chip(mtd, -1);
1111 1118
1119 chip->select_chip(mtd, chipnr);
1112 ret = nand_setup_data_interface(chip); 1120 ret = nand_setup_data_interface(chip);
1121 chip->select_chip(mtd, -1);
1113 if (ret) 1122 if (ret)
1114 return ret; 1123 return ret;
1115 1124
@@ -1185,8 +1194,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1185 /* Shift to get chip number */ 1194 /* Shift to get chip number */
1186 chipnr = ofs >> chip->chip_shift; 1195 chipnr = ofs >> chip->chip_shift;
1187 1196
1188 chip->select_chip(mtd, chipnr);
1189
1190 /* 1197 /*
1191 * Reset the chip. 1198 * Reset the chip.
1192 * If we want to check the WP through READ STATUS and check the bit 7 1199 * If we want to check the WP through READ STATUS and check the bit 7
@@ -1194,7 +1201,9 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1194 * some operation can also clear the bit 7 of status register 1201 * some operation can also clear the bit 7 of status register
1195 * eg. erase/program a locked block 1202 * eg. erase/program a locked block
1196 */ 1203 */
1197 nand_reset(chip); 1204 nand_reset(chip, chipnr);
1205
1206 chip->select_chip(mtd, chipnr);
1198 1207
1199 /* Check, if it is write protected */ 1208 /* Check, if it is write protected */
1200 if (nand_check_wp(mtd)) { 1209 if (nand_check_wp(mtd)) {
@@ -1244,8 +1253,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1244 /* Shift to get chip number */ 1253 /* Shift to get chip number */
1245 chipnr = ofs >> chip->chip_shift; 1254 chipnr = ofs >> chip->chip_shift;
1246 1255
1247 chip->select_chip(mtd, chipnr);
1248
1249 /* 1256 /*
1250 * Reset the chip. 1257 * Reset the chip.
1251 * If we want to check the WP through READ STATUS and check the bit 7 1258 * If we want to check the WP through READ STATUS and check the bit 7
@@ -1253,7 +1260,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1253 * some operation can also clear the bit 7 of status register 1260 * some operation can also clear the bit 7 of status register
1254 * eg. erase/program a locked block 1261 * eg. erase/program a locked block
1255 */ 1262 */
1256 nand_reset(chip); 1263 nand_reset(chip, chipnr);
1264
1265 chip->select_chip(mtd, chipnr);
1257 1266
1258 /* Check, if it is write protected */ 1267 /* Check, if it is write protected */
1259 if (nand_check_wp(mtd)) { 1268 if (nand_check_wp(mtd)) {
@@ -2940,10 +2949,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2940 } 2949 }
2941 2950
2942 chipnr = (int)(to >> chip->chip_shift); 2951 chipnr = (int)(to >> chip->chip_shift);
2943 chip->select_chip(mtd, chipnr);
2944
2945 /* Shift to get page */
2946 page = (int)(to >> chip->page_shift);
2947 2952
2948 /* 2953 /*
2949 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one 2954 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
@@ -2951,7 +2956,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2951 * if we don't do this. I have no clue why, but I seem to have 'fixed' 2956 * if we don't do this. I have no clue why, but I seem to have 'fixed'
2952 * it in the doc2000 driver in August 1999. dwmw2. 2957 * it in the doc2000 driver in August 1999. dwmw2.
2953 */ 2958 */
2954 nand_reset(chip); 2959 nand_reset(chip, chipnr);
2960
2961 chip->select_chip(mtd, chipnr);
2962
2963 /* Shift to get page */
2964 page = (int)(to >> chip->page_shift);
2955 2965
2956 /* Check, if it is write protected */ 2966 /* Check, if it is write protected */
2957 if (nand_check_wp(mtd)) { 2967 if (nand_check_wp(mtd)) {
@@ -3984,14 +3994,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3984 int i, maf_idx; 3994 int i, maf_idx;
3985 u8 id_data[8]; 3995 u8 id_data[8];
3986 3996
3987 /* Select the device */
3988 chip->select_chip(mtd, 0);
3989
3990 /* 3997 /*
3991 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 3998 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3992 * after power-up. 3999 * after power-up.
3993 */ 4000 */
3994 nand_reset(chip); 4001 nand_reset(chip, 0);
4002
4003 /* Select the device */
4004 chip->select_chip(mtd, 0);
3995 4005
3996 /* Send the command for reading device ID */ 4006 /* Send the command for reading device ID */
3997 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 4007 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
@@ -4329,17 +4339,31 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4329 return PTR_ERR(type); 4339 return PTR_ERR(type);
4330 } 4340 }
4331 4341
4342 /* Initialize the ->data_interface field. */
4332 ret = nand_init_data_interface(chip); 4343 ret = nand_init_data_interface(chip);
4333 if (ret) 4344 if (ret)
4334 return ret; 4345 return ret;
4335 4346
4347 /*
4348 * Setup the data interface correctly on the chip and controller side.
4349 * This explicit call to nand_setup_data_interface() is only required
4350 * for the first die, because nand_reset() has been called before
4351 * ->data_interface and ->default_onfi_timing_mode were set.
4352 * For the other dies, nand_reset() will automatically switch to the
4353 * best mode for us.
4354 */
4355 ret = nand_setup_data_interface(chip);
4356 if (ret)
4357 return ret;
4358
4336 chip->select_chip(mtd, -1); 4359 chip->select_chip(mtd, -1);
4337 4360
4338 /* Check for a chip array */ 4361 /* Check for a chip array */
4339 for (i = 1; i < maxchips; i++) { 4362 for (i = 1; i < maxchips; i++) {
4340 chip->select_chip(mtd, i);
4341 /* See comment in nand_get_flash_type for reset */ 4363 /* See comment in nand_get_flash_type for reset */
4342 nand_reset(chip); 4364 nand_reset(chip, i);
4365
4366 chip->select_chip(mtd, i);
4343 /* Send the command for reading device ID */ 4367 /* Send the command for reading device ID */
4344 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 4368 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4345 /* Read manufacturer and device IDs */ 4369 /* Read manufacturer and device IDs */
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 3eb7430dffbf..f8ff25c8ee2e 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -142,6 +142,9 @@ struct plx_pci_card {
142#define CTI_PCI_VENDOR_ID 0x12c4 142#define CTI_PCI_VENDOR_ID 0x12c4
143#define CTI_PCI_DEVICE_ID_CRG001 0x0900 143#define CTI_PCI_DEVICE_ID_CRG001 0x0900
144 144
145#define MOXA_PCI_VENDOR_ID 0x1393
146#define MOXA_PCI_DEVICE_ID 0x0100
147
145static void plx_pci_reset_common(struct pci_dev *pdev); 148static void plx_pci_reset_common(struct pci_dev *pdev);
146static void plx9056_pci_reset_common(struct pci_dev *pdev); 149static void plx9056_pci_reset_common(struct pci_dev *pdev);
147static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); 150static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
@@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = {
258 /* based on PLX9030 */ 261 /* based on PLX9030 */
259}; 262};
260 263
264static struct plx_pci_card_info plx_pci_card_info_moxa = {
265 "MOXA", 2,
266 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
267 {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} },
268 &plx_pci_reset_common
269 /* based on PLX9052 */
270};
271
261static const struct pci_device_id plx_pci_tbl[] = { 272static const struct pci_device_id plx_pci_tbl[] = {
262 { 273 {
263 /* Adlink PCI-7841/cPCI-7841 */ 274 /* Adlink PCI-7841/cPCI-7841 */
@@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = {
357 0, 0, 368 0, 0,
358 (kernel_ulong_t)&plx_pci_card_info_elcus 369 (kernel_ulong_t)&plx_pci_card_info_elcus
359 }, 370 },
371 {
372 /* moxa */
373 MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID,
374 PCI_ANY_ID, PCI_ANY_ID,
375 0, 0,
376 (kernel_ulong_t)&plx_pci_card_info_moxa
377 },
360 { 0,} 378 { 0,}
361}; 379};
362MODULE_DEVICE_TABLE(pci, plx_pci_tbl); 380MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c481f104a8fe..5390ae89136c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
204 return num_msgs; 204 return num_msgs;
205} 205}
206 206
207static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
208{
209 u32 data = 0x7777;
210
211 xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
212 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
213 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
214 xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
215 xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
216}
217
218void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, 207void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
219 struct xgene_enet_pdata *pdata, 208 struct xgene_enet_pdata *pdata,
220 enum xgene_enet_err_code status) 209 enum xgene_enet_err_code status)
@@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = {
929 .clear = xgene_enet_clear_ring, 918 .clear = xgene_enet_clear_ring,
930 .wr_cmd = xgene_enet_wr_cmd, 919 .wr_cmd = xgene_enet_wr_cmd,
931 .len = xgene_enet_ring_len, 920 .len = xgene_enet_ring_len,
932 .coalesce = xgene_enet_setup_coalescing,
933}; 921};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 8456337a237d..06e598c8bc16 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -55,8 +55,10 @@ enum xgene_enet_rm {
55#define PREFETCH_BUF_EN BIT(21) 55#define PREFETCH_BUF_EN BIT(21)
56#define CSR_RING_ID_BUF 0x000c 56#define CSR_RING_ID_BUF 0x000c
57#define CSR_PBM_COAL 0x0014 57#define CSR_PBM_COAL 0x0014
58#define CSR_PBM_CTICK0 0x0018
58#define CSR_PBM_CTICK1 0x001c 59#define CSR_PBM_CTICK1 0x001c
59#define CSR_PBM_CTICK2 0x0020 60#define CSR_PBM_CTICK2 0x0020
61#define CSR_PBM_CTICK3 0x0024
60#define CSR_THRESHOLD0_SET1 0x0030 62#define CSR_THRESHOLD0_SET1 0x0030
61#define CSR_THRESHOLD1_SET1 0x0034 63#define CSR_THRESHOLD1_SET1 0x0034
62#define CSR_RING_NE_INT_MODE 0x017c 64#define CSR_RING_NE_INT_MODE 0x017c
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3fc7b0db952b..1352b5245fcc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
1188 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); 1188 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1189 } 1189 }
1190 1190
1191 pdata->ring_ops->coalesce(pdata->tx_ring[0]); 1191 if (pdata->ring_ops->coalesce)
1192 pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1192 pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; 1193 pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1193 1194
1194 return 0; 1195 return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
index 2b76732add5d..af51dd5844ce 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
30 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); 30 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
31 ring_cfg[3] |= SET_BIT(X2_DEQINTEN); 31 ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
32 } 32 }
33 ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1); 33 ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);
34 34
35 addr >>= 8; 35 addr >>= 8;
36 ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); 36 ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
@@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
192 192
193static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) 193static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
194{ 194{
195 u32 data = 0x7777; 195 u32 data = 0x77777777;
196 196
197 xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); 197 xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
198 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
198 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); 199 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
199 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); 200 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
200 xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); 201 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
201 xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); 202 xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
203 xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
202} 204}
203 205
204struct xgene_ring_ops xgene_ring2_ops = { 206struct xgene_ring_ops xgene_ring2_ops = {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a29787fbb572..0e066dc6b8cc 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
307 u32 ctl; 307 u32 ctl;
308 308
309 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); 309 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
310
311 /* preserve ONLY bits 16-17 from current hardware value */
312 ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
313
310 if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { 314 if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
311 ctl &= ~BGMAC_DMA_RX_BL_MASK; 315 ctl &= ~BGMAC_DMA_RX_BL_MASK;
312 ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; 316 ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
317 ctl &= ~BGMAC_DMA_RX_PT_MASK; 321 ctl &= ~BGMAC_DMA_RX_PT_MASK;
318 ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; 322 ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
319 } 323 }
320 ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
321 ctl |= BGMAC_DMA_RX_ENABLE; 324 ctl |= BGMAC_DMA_RX_ENABLE;
322 ctl |= BGMAC_DMA_RX_PARITY_DISABLE; 325 ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
323 ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; 326 ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
@@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac)
1046 1049
1047 mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> 1050 mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
1048 BGMAC_DS_MM_SHIFT; 1051 BGMAC_DS_MM_SHIFT;
1049 if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0) 1052 if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
1050 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); 1053 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
1051 if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) 1054 if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
1052 bgmac_cco_ctl_maskset(bgmac, 1, ~0, 1055 bgmac_cco_ctl_maskset(bgmac, 1, ~0,
1053 BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); 1056 BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
1054 1057
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index eab49ff4eecd..7baf30082ab3 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -49,6 +49,7 @@
49#include <linux/firmware.h> 49#include <linux/firmware.h>
50#include <linux/log2.h> 50#include <linux/log2.h>
51#include <linux/aer.h> 51#include <linux/aer.h>
52#include <linux/crash_dump.h>
52 53
53#if IS_ENABLED(CONFIG_CNIC) 54#if IS_ENABLED(CONFIG_CNIC)
54#define BCM_CNIC 1 55#define BCM_CNIC 1
@@ -4764,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp)
4764 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); 4765 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4765} 4766}
4766 4767
4767static int 4768static void
4768bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) 4769bnx2_wait_dma_complete(struct bnx2 *bp)
4769{ 4770{
4770 u32 val; 4771 u32 val;
4771 int i, rc = 0; 4772 int i;
4772 u8 old_port;
4773 4773
4774 /* Wait for the current PCI transaction to complete before 4774 /*
4775 * issuing a reset. */ 4775 * Wait for the current PCI transaction to complete before
4776 * issuing a reset.
4777 */
4776 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || 4778 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4777 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { 4779 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4778 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, 4780 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
@@ -4796,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4796 } 4798 }
4797 } 4799 }
4798 4800
4801 return;
4802}
4803
4804
4805static int
4806bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4807{
4808 u32 val;
4809 int i, rc = 0;
4810 u8 old_port;
4811
4812 /* Wait for the current PCI transaction to complete before
4813 * issuing a reset. */
4814 bnx2_wait_dma_complete(bp);
4815
4799 /* Wait for the firmware to tell us it is ok to issue a reset. */ 4816 /* Wait for the firmware to tell us it is ok to issue a reset. */
4800 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); 4817 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4801 4818
@@ -6361,6 +6378,10 @@ bnx2_open(struct net_device *dev)
6361 struct bnx2 *bp = netdev_priv(dev); 6378 struct bnx2 *bp = netdev_priv(dev);
6362 int rc; 6379 int rc;
6363 6380
6381 rc = bnx2_request_firmware(bp);
6382 if (rc < 0)
6383 goto out;
6384
6364 netif_carrier_off(dev); 6385 netif_carrier_off(dev);
6365 6386
6366 bnx2_disable_int(bp); 6387 bnx2_disable_int(bp);
@@ -6429,6 +6450,7 @@ open_err:
6429 bnx2_free_irq(bp); 6450 bnx2_free_irq(bp);
6430 bnx2_free_mem(bp); 6451 bnx2_free_mem(bp);
6431 bnx2_del_napi(bp); 6452 bnx2_del_napi(bp);
6453 bnx2_release_firmware(bp);
6432 goto out; 6454 goto out;
6433} 6455}
6434 6456
@@ -8571,12 +8593,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8571 8593
8572 pci_set_drvdata(pdev, dev); 8594 pci_set_drvdata(pdev, dev);
8573 8595
8574 rc = bnx2_request_firmware(bp); 8596 /*
8575 if (rc < 0) 8597 * In-flight DMA from 1st kernel could continue going in kdump kernel.
8576 goto error; 8598 * New io-page table has been created before bnx2 does reset at open stage.
8577 8599 * We have to wait for the in-flight DMA to complete to avoid it look up
8600 * into the newly created io-page table.
8601 */
8602 if (is_kdump_kernel())
8603 bnx2_wait_dma_complete(bp);
8578 8604
8579 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
8580 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); 8605 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8581 8606
8582 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 8607 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8611,7 +8636,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8611 return 0; 8636 return 0;
8612 8637
8613error: 8638error:
8614 bnx2_release_firmware(bp);
8615 pci_iounmap(pdev, bp->regview); 8639 pci_iounmap(pdev, bp->regview);
8616 pci_release_regions(pdev); 8640 pci_release_regions(pdev);
8617 pci_disable_device(pdev); 8641 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d313b02485a1..27a2dd917643 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6305,6 +6305,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6305 struct tc_to_netdev *ntc) 6305 struct tc_to_netdev *ntc)
6306{ 6306{
6307 struct bnxt *bp = netdev_priv(dev); 6307 struct bnxt *bp = netdev_priv(dev);
6308 bool sh = false;
6308 u8 tc; 6309 u8 tc;
6309 6310
6310 if (ntc->type != TC_SETUP_MQPRIO) 6311 if (ntc->type != TC_SETUP_MQPRIO)
@@ -6321,12 +6322,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6321 if (netdev_get_num_tc(dev) == tc) 6322 if (netdev_get_num_tc(dev) == tc)
6322 return 0; 6323 return 0;
6323 6324
6325 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6326 sh = true;
6327
6324 if (tc) { 6328 if (tc) {
6325 int max_rx_rings, max_tx_rings, rc; 6329 int max_rx_rings, max_tx_rings, rc;
6326 bool sh = false;
6327
6328 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6329 sh = true;
6330 6330
6331 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); 6331 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6332 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) 6332 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
@@ -6344,7 +6344,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6344 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 6344 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6345 netdev_reset_tc(dev); 6345 netdev_reset_tc(dev);
6346 } 6346 }
6347 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); 6347 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6348 bp->tx_nr_rings + bp->rx_nr_rings;
6348 bp->num_stat_ctxs = bp->cp_nr_rings; 6349 bp->num_stat_ctxs = bp->cp_nr_rings;
6349 6350
6350 if (netif_running(bp->dev)) 6351 if (netif_running(bp->dev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ec6cd18842c3..60e2af8678bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
774 774
775 if (vf->flags & BNXT_VF_LINK_UP) { 775 if (vf->flags & BNXT_VF_LINK_UP) {
776 /* if physical link is down, force link up on VF */ 776 /* if physical link is down, force link up on VF */
777 if (phy_qcfg_resp.link == 777 if (phy_qcfg_resp.link !=
778 PORT_PHY_QCFG_RESP_LINK_NO_LINK) { 778 PORT_PHY_QCFG_RESP_LINK_LINK) {
779 phy_qcfg_resp.link = 779 phy_qcfg_resp.link =
780 PORT_PHY_QCFG_RESP_LINK_LINK; 780 PORT_PHY_QCFG_RESP_LINK_LINK;
781 phy_qcfg_resp.link_speed = cpu_to_le16( 781 phy_qcfg_resp.link_speed = cpu_to_le16(
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b200a783443e..112030828c4b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
177 return 0; 177 return 0;
178 178
179 hw_cons = *(tcb->hw_consumer_index); 179 hw_cons = *(tcb->hw_consumer_index);
180 rmb();
180 cons = tcb->consumer_index; 181 cons = tcb->consumer_index;
181 q_depth = tcb->q_depth; 182 q_depth = tcb->q_depth;
182 183
@@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3094 BNA_QE_INDX_INC(prod, q_depth); 3095 BNA_QE_INDX_INC(prod, q_depth);
3095 tcb->producer_index = prod; 3096 tcb->producer_index = prod;
3096 3097
3097 smp_mb(); 3098 wmb();
3098 3099
3099 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 3100 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3100 return NETDEV_TX_OK; 3101 return NETDEV_TX_OK;
@@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3102 skb_tx_timestamp(skb); 3103 skb_tx_timestamp(skb);
3103 3104
3104 bna_txq_prod_indx_doorbell(tcb); 3105 bna_txq_prod_indx_doorbell(tcb);
3105 smp_mb();
3106 3106
3107 return NETDEV_TX_OK; 3107 return NETDEV_TX_OK;
3108} 3108}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 50812a1d67bd..df1573c4a659 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -178,9 +178,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
178 CH_PCI_ID_TABLE_FENTRY(0x6005), 178 CH_PCI_ID_TABLE_FENTRY(0x6005),
179 CH_PCI_ID_TABLE_FENTRY(0x6006), 179 CH_PCI_ID_TABLE_FENTRY(0x6006),
180 CH_PCI_ID_TABLE_FENTRY(0x6007), 180 CH_PCI_ID_TABLE_FENTRY(0x6007),
181 CH_PCI_ID_TABLE_FENTRY(0x6008),
181 CH_PCI_ID_TABLE_FENTRY(0x6009), 182 CH_PCI_ID_TABLE_FENTRY(0x6009),
182 CH_PCI_ID_TABLE_FENTRY(0x600d), 183 CH_PCI_ID_TABLE_FENTRY(0x600d),
183 CH_PCI_ID_TABLE_FENTRY(0x6010),
184 CH_PCI_ID_TABLE_FENTRY(0x6011), 184 CH_PCI_ID_TABLE_FENTRY(0x6011),
185 CH_PCI_ID_TABLE_FENTRY(0x6014), 185 CH_PCI_ID_TABLE_FENTRY(0x6014),
186 CH_PCI_ID_TABLE_FENTRY(0x6015), 186 CH_PCI_ID_TABLE_FENTRY(0x6015),
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index c54c6fac0d1d..b6ed818f78ff 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
332 return ERR_PTR(-ENODEV); 332 return ERR_PTR(-ENODEV);
333 333
334 handle = dev->ops->get_handle(dev, port_id); 334 handle = dev->ops->get_handle(dev, port_id);
335 if (IS_ERR(handle)) 335 if (IS_ERR(handle)) {
336 put_device(&dev->cls_dev);
336 return handle; 337 return handle;
338 }
337 339
338 handle->dev = dev; 340 handle->dev = dev;
339 handle->owner_dev = owner_dev; 341 handle->owner_dev = owner_dev;
@@ -356,6 +358,8 @@ out_when_init_queue:
356 for (j = i - 1; j >= 0; j--) 358 for (j = i - 1; j >= 0; j--)
357 hnae_fini_queue(handle->qs[j]); 359 hnae_fini_queue(handle->qs[j]);
358 360
361 put_device(&dev->cls_dev);
362
359 return ERR_PTR(-ENOMEM); 363 return ERR_PTR(-ENOMEM);
360} 364}
361EXPORT_SYMBOL(hnae_get_handle); 365EXPORT_SYMBOL(hnae_get_handle);
@@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h)
377 dev->ops->put_handle(h); 381 dev->ops->put_handle(h);
378 382
379 module_put(dev->owner); 383 module_put(dev->owner);
384
385 put_device(&dev->cls_dev);
380} 386}
381EXPORT_SYMBOL(hnae_put_handle); 387EXPORT_SYMBOL(hnae_put_handle);
382 388
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e9719ba450d7..fa66fa6f8bee 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2438,6 +2438,8 @@ static int ehea_open(struct net_device *dev)
2438 2438
2439 netif_info(port, ifup, dev, "enabling port\n"); 2439 netif_info(port, ifup, dev, "enabling port\n");
2440 2440
2441 netif_carrier_off(dev);
2442
2441 ret = ehea_up(dev); 2443 ret = ehea_up(dev);
2442 if (!ret) { 2444 if (!ret) {
2443 port_napi_enable(port); 2445 port_napi_enable(port);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d54405b46109..1e486d1312e9 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1493,9 +1493,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1493 adapter->max_rx_add_entries_per_subcrq > entries_page ? 1493 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1494 entries_page : adapter->max_rx_add_entries_per_subcrq; 1494 entries_page : adapter->max_rx_add_entries_per_subcrq;
1495 1495
1496 /* Choosing the maximum number of queues supported by firmware*/ 1496 adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
1497 adapter->req_tx_queues = adapter->max_tx_queues; 1497 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1498 adapter->req_rx_queues = adapter->max_rx_queues;
1499 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 1498 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1500 1499
1501 adapter->req_mtu = adapter->max_mtu; 1500 adapter->req_mtu = adapter->max_mtu;
@@ -3698,7 +3697,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3698 struct net_device *netdev; 3697 struct net_device *netdev;
3699 unsigned char *mac_addr_p; 3698 unsigned char *mac_addr_p;
3700 struct dentry *ent; 3699 struct dentry *ent;
3701 char buf[16]; /* debugfs name buf */ 3700 char buf[17]; /* debugfs name buf */
3702 int rc; 3701 int rc;
3703 3702
3704 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", 3703 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3837,6 +3836,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
3837 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) 3836 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3838 debugfs_remove_recursive(adapter->debugfs_dir); 3837 debugfs_remove_recursive(adapter->debugfs_dir);
3839 3838
3839 dma_unmap_single(&dev->dev, adapter->stats_token,
3840 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3841
3840 if (adapter->ras_comps) 3842 if (adapter->ras_comps)
3841 dma_free_coherent(&dev->dev, 3843 dma_free_coherent(&dev->dev,
3842 adapter->ras_comp_num * 3844 adapter->ras_comp_num *
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 6749b831998c..81b08d71c0f8 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1379,6 +1379,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1379 temp = (val & 0x003fff00) >> 8; 1379 temp = (val & 0x003fff00) >> 8;
1380 1380
1381 temp *= 64000000; 1381 temp *= 64000000;
1382 temp += mp->t_clk / 2;
1382 do_div(temp, mp->t_clk); 1383 do_div(temp, mp->t_clk);
1383 1384
1384 return (unsigned int)temp; 1385 return (unsigned int)temp;
@@ -1415,6 +1416,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1415 1416
1416 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1417 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1417 temp *= 64000000; 1418 temp *= 64000000;
1419 temp += mp->t_clk / 2;
1418 do_div(temp, mp->t_clk); 1420 do_div(temp, mp->t_clk);
1419 1421
1420 return (unsigned int)temp; 1422 return (unsigned int)temp;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 9bf7320107b0..9a807e93c9fd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2254,7 +2254,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2254 2254
2255 if (!shutdown) 2255 if (!shutdown)
2256 free_netdev(dev); 2256 free_netdev(dev);
2257 dev->ethtool_ops = NULL;
2258} 2257}
2259 2258
2260static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2259static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 313b765e2d4f..52b4fd53b1a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1445 c->netdev = priv->netdev; 1445 c->netdev = priv->netdev;
1446 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); 1446 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1447 c->num_tc = priv->params.num_tc; 1447 c->num_tc = priv->params.num_tc;
1448 c->xdp = !!priv->xdp_prog;
1448 1449
1449 if (priv->params.rx_am_enabled) 1450 if (priv->params.rx_am_enabled)
1450 rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); 1451 rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
@@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1468 if (err) 1469 if (err)
1469 goto err_close_tx_cqs; 1470 goto err_close_tx_cqs;
1470 1471
1472 /* XDP SQ CQ params are same as normal TXQ sq CQ params */
1473 err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
1474 priv->params.tx_cq_moderation) : 0;
1475 if (err)
1476 goto err_close_rx_cq;
1477
1471 napi_enable(&c->napi); 1478 napi_enable(&c->napi);
1472 1479
1473 err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); 1480 err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
@@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1488 } 1495 }
1489 } 1496 }
1490 1497
1491 if (priv->xdp_prog) { 1498 err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
1492 /* XDP SQ CQ params are same as normal TXQ sq CQ params */ 1499 if (err)
1493 err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, 1500 goto err_close_sqs;
1494 priv->params.tx_cq_moderation);
1495 if (err)
1496 goto err_close_sqs;
1497
1498 err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
1499 if (err) {
1500 mlx5e_close_cq(&c->xdp_sq.cq);
1501 goto err_close_sqs;
1502 }
1503 }
1504 1501
1505 c->xdp = !!priv->xdp_prog;
1506 err = mlx5e_open_rq(c, &cparam->rq, &c->rq); 1502 err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
1507 if (err) 1503 if (err)
1508 goto err_close_xdp_sq; 1504 goto err_close_xdp_sq;
@@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1512 1508
1513 return 0; 1509 return 0;
1514err_close_xdp_sq: 1510err_close_xdp_sq:
1515 mlx5e_close_sq(&c->xdp_sq); 1511 if (c->xdp)
1512 mlx5e_close_sq(&c->xdp_sq);
1516 1513
1517err_close_sqs: 1514err_close_sqs:
1518 mlx5e_close_sqs(c); 1515 mlx5e_close_sqs(c);
@@ -1522,6 +1519,10 @@ err_close_icosq:
1522 1519
1523err_disable_napi: 1520err_disable_napi:
1524 napi_disable(&c->napi); 1521 napi_disable(&c->napi);
1522 if (c->xdp)
1523 mlx5e_close_cq(&c->xdp_sq.cq);
1524
1525err_close_rx_cq:
1525 mlx5e_close_cq(&c->rq.cq); 1526 mlx5e_close_cq(&c->rq.cq);
1526 1527
1527err_close_tx_cqs: 1528err_close_tx_cqs:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 47dfd5b14dfb..a84825d59f33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -310,7 +310,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
310 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; 310 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
311#endif 311#endif
312 312
313 netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; 313 netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
314 netdev->hw_features |= NETIF_F_HW_TC; 314 netdev->hw_features |= NETIF_F_HW_TC;
315 315
316 eth_hw_addr_random(netdev); 316 eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9d133fc6c65e..4b991124bc57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -398,12 +398,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
398 skb_flow_dissector_target(f->dissector, 398 skb_flow_dissector_target(f->dissector,
399 FLOW_DISSECTOR_KEY_VLAN, 399 FLOW_DISSECTOR_KEY_VLAN,
400 f->mask); 400 f->mask);
401 if (mask->vlan_id) { 401 if (mask->vlan_id || mask->vlan_priority) {
402 MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); 402 MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
403 MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); 403 MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
404 404
405 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); 405 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
406 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); 406 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
407
408 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
409 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
407 } 410 }
408 } 411 }
409 412
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 50fe8e8861bb..731f28625cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -58,7 +58,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
58 if (esw->mode != SRIOV_OFFLOADS) 58 if (esw->mode != SRIOV_OFFLOADS)
59 return ERR_PTR(-EOPNOTSUPP); 59 return ERR_PTR(-EOPNOTSUPP);
60 60
61 flow_act.action = attr->action; 61 /* per flow vlan pop/push is emulated, don't set that into the firmware */
62 flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
62 63
63 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 64 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
64 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 65 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9adc766c7a3f..68ec4ea25d50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1836,7 +1836,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering)
1836{ 1836{
1837 1837
1838 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); 1838 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
1839 if (IS_ERR_OR_NULL(steering->root_ns)) 1839 if (!steering->root_ns)
1840 goto cleanup; 1840 goto cleanup;
1841 1841
1842 if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) 1842 if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a3a0887d80e4..c07493307a83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1226,6 +1226,9 @@ static int init_one(struct pci_dev *pdev,
1226 1226
1227 pci_set_drvdata(pdev, dev); 1227 pci_set_drvdata(pdev, dev);
1228 1228
1229 dev->pdev = pdev;
1230 dev->event = mlx5_core_event;
1231
1229 if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { 1232 if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
1230 mlx5_core_warn(dev, 1233 mlx5_core_warn(dev,
1231 "selected profile out of range, selecting default (%d)\n", 1234 "selected profile out of range, selecting default (%d)\n",
@@ -1233,8 +1236,6 @@ static int init_one(struct pci_dev *pdev,
1233 prof_sel = MLX5_DEFAULT_PROF; 1236 prof_sel = MLX5_DEFAULT_PROF;
1234 } 1237 }
1235 dev->profile = &profile[prof_sel]; 1238 dev->profile = &profile[prof_sel];
1236 dev->pdev = pdev;
1237 dev->event = mlx5_core_event;
1238 1239
1239 INIT_LIST_HEAD(&priv->ctx_list); 1240 INIT_LIST_HEAD(&priv->ctx_list);
1240 spin_lock_init(&priv->ctx_lock); 1241 spin_lock_init(&priv->ctx_lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a5433e425484..4a1f9d5f7c03 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
231 231
232 span_entry->used = true; 232 span_entry->used = true;
233 span_entry->id = index; 233 span_entry->id = index;
234 span_entry->ref_count = 0; 234 span_entry->ref_count = 1;
235 span_entry->local_port = local_port; 235 span_entry->local_port = local_port;
236 return span_entry; 236 return span_entry;
237} 237}
@@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry
270 270
271 span_entry = mlxsw_sp_span_entry_find(port); 271 span_entry = mlxsw_sp_span_entry_find(port);
272 if (span_entry) { 272 if (span_entry) {
273 /* Already exists, just take a reference */
273 span_entry->ref_count++; 274 span_entry->ref_count++;
274 return span_entry; 275 return span_entry;
275 } 276 }
@@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry
280static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, 281static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
281 struct mlxsw_sp_span_entry *span_entry) 282 struct mlxsw_sp_span_entry *span_entry)
282{ 283{
284 WARN_ON(!span_entry->ref_count);
283 if (--span_entry->ref_count == 0) 285 if (--span_entry->ref_count == 0)
284 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); 286 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
285 return 0; 287 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 04a2bc7043bc..cc1af19d699a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -115,7 +115,7 @@ struct mlxsw_sp_rif {
115struct mlxsw_sp_mid { 115struct mlxsw_sp_mid {
116 struct list_head list; 116 struct list_head list;
117 unsigned char addr[ETH_ALEN]; 117 unsigned char addr[ETH_ALEN];
118 u16 vid; 118 u16 fid;
119 u16 mid; 119 u16 mid;
120 unsigned int ref_count; 120 unsigned int ref_count;
121}; 121};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index df31f3861c4f..164bd309e92b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -589,21 +589,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
589 return 0; 589 return 0;
590} 590}
591 591
592static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
593
592static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) 594static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
593{ 595{
596 mlxsw_sp_router_fib_flush(mlxsw_sp);
594 kfree(mlxsw_sp->router.vrs); 597 kfree(mlxsw_sp->router.vrs);
595} 598}
596 599
597struct mlxsw_sp_neigh_key { 600struct mlxsw_sp_neigh_key {
598 unsigned char addr[sizeof(struct in6_addr)]; 601 struct neighbour *n;
599 struct net_device *dev;
600}; 602};
601 603
602struct mlxsw_sp_neigh_entry { 604struct mlxsw_sp_neigh_entry {
603 struct rhash_head ht_node; 605 struct rhash_head ht_node;
604 struct mlxsw_sp_neigh_key key; 606 struct mlxsw_sp_neigh_key key;
605 u16 rif; 607 u16 rif;
606 struct neighbour *n;
607 bool offloaded; 608 bool offloaded;
608 struct delayed_work dw; 609 struct delayed_work dw;
609 struct mlxsw_sp_port *mlxsw_sp_port; 610 struct mlxsw_sp_port *mlxsw_sp_port;
@@ -641,19 +642,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
641static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); 642static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
642 643
643static struct mlxsw_sp_neigh_entry * 644static struct mlxsw_sp_neigh_entry *
644mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, 645mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
645 struct net_device *dev, u16 rif,
646 struct neighbour *n)
647{ 646{
648 struct mlxsw_sp_neigh_entry *neigh_entry; 647 struct mlxsw_sp_neigh_entry *neigh_entry;
649 648
650 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); 649 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
651 if (!neigh_entry) 650 if (!neigh_entry)
652 return NULL; 651 return NULL;
653 memcpy(neigh_entry->key.addr, addr, addr_len); 652 neigh_entry->key.n = n;
654 neigh_entry->key.dev = dev;
655 neigh_entry->rif = rif; 653 neigh_entry->rif = rif;
656 neigh_entry->n = n;
657 INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); 654 INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
658 INIT_LIST_HEAD(&neigh_entry->nexthop_list); 655 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
659 return neigh_entry; 656 return neigh_entry;
@@ -666,13 +663,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
666} 663}
667 664
668static struct mlxsw_sp_neigh_entry * 665static struct mlxsw_sp_neigh_entry *
669mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, 666mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
670 size_t addr_len, struct net_device *dev)
671{ 667{
672 struct mlxsw_sp_neigh_key key = {{ 0 } }; 668 struct mlxsw_sp_neigh_key key;
673 669
674 memcpy(key.addr, addr, addr_len); 670 key.n = n;
675 key.dev = dev;
676 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, 671 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
677 &key, mlxsw_sp_neigh_ht_params); 672 &key, mlxsw_sp_neigh_ht_params);
678} 673}
@@ -684,26 +679,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 679 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
685 struct mlxsw_sp_neigh_entry *neigh_entry; 680 struct mlxsw_sp_neigh_entry *neigh_entry;
686 struct mlxsw_sp_rif *r; 681 struct mlxsw_sp_rif *r;
687 u32 dip;
688 int err; 682 int err;
689 683
690 if (n->tbl != &arp_tbl) 684 if (n->tbl != &arp_tbl)
691 return 0; 685 return 0;
692 686
693 dip = ntohl(*((__be32 *) n->primary_key)); 687 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
694 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), 688 if (neigh_entry)
695 n->dev);
696 if (neigh_entry) {
697 WARN_ON(neigh_entry->n != n);
698 return 0; 689 return 0;
699 }
700 690
701 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); 691 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
702 if (WARN_ON(!r)) 692 if (WARN_ON(!r))
703 return -EINVAL; 693 return -EINVAL;
704 694
705 neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, 695 neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
706 r->rif, n);
707 if (!neigh_entry) 696 if (!neigh_entry)
708 return -ENOMEM; 697 return -ENOMEM;
709 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); 698 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
@@ -722,14 +711,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
722 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 711 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
723 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 712 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
724 struct mlxsw_sp_neigh_entry *neigh_entry; 713 struct mlxsw_sp_neigh_entry *neigh_entry;
725 u32 dip;
726 714
727 if (n->tbl != &arp_tbl) 715 if (n->tbl != &arp_tbl)
728 return; 716 return;
729 717
730 dip = ntohl(*((__be32 *) n->primary_key)); 718 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
731 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
732 n->dev);
733 if (!neigh_entry) 719 if (!neigh_entry)
734 return; 720 return;
735 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); 721 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
@@ -812,6 +798,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
812 } 798 }
813} 799}
814 800
801static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
802{
803 u8 num_rec, last_rec_index, num_entries;
804
805 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
806 last_rec_index = num_rec - 1;
807
808 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
809 return false;
810 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
811 MLXSW_REG_RAUHTD_TYPE_IPV6)
812 return true;
813
814 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
815 last_rec_index);
816 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
817 return true;
818 return false;
819}
820
815static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) 821static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
816{ 822{
817 char *rauhtd_pl; 823 char *rauhtd_pl;
@@ -838,7 +844,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
838 for (i = 0; i < num_rec; i++) 844 for (i = 0; i < num_rec; i++)
839 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, 845 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
840 i); 846 i);
841 } while (num_rec); 847 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
842 rtnl_unlock(); 848 rtnl_unlock();
843 849
844 kfree(rauhtd_pl); 850 kfree(rauhtd_pl);
@@ -857,7 +863,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
857 * is active regardless of the traffic. 863 * is active regardless of the traffic.
858 */ 864 */
859 if (!list_empty(&neigh_entry->nexthop_list)) 865 if (!list_empty(&neigh_entry->nexthop_list))
860 neigh_event_send(neigh_entry->n, NULL); 866 neigh_event_send(neigh_entry->key.n, NULL);
861 } 867 }
862 rtnl_unlock(); 868 rtnl_unlock();
863} 869}
@@ -903,9 +909,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
903 rtnl_lock(); 909 rtnl_lock();
904 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, 910 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
905 nexthop_neighs_list_node) { 911 nexthop_neighs_list_node) {
906 if (!(neigh_entry->n->nud_state & NUD_VALID) && 912 if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
907 !list_empty(&neigh_entry->nexthop_list)) 913 !list_empty(&neigh_entry->nexthop_list))
908 neigh_event_send(neigh_entry->n, NULL); 914 neigh_event_send(neigh_entry->key.n, NULL);
909 } 915 }
910 rtnl_unlock(); 916 rtnl_unlock();
911 917
@@ -922,7 +928,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
922{ 928{
923 struct mlxsw_sp_neigh_entry *neigh_entry = 929 struct mlxsw_sp_neigh_entry *neigh_entry =
924 container_of(work, struct mlxsw_sp_neigh_entry, dw.work); 930 container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
925 struct neighbour *n = neigh_entry->n; 931 struct neighbour *n = neigh_entry->key.n;
926 struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; 932 struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
927 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 933 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
928 char rauht_pl[MLXSW_REG_RAUHT_LEN]; 934 char rauht_pl[MLXSW_REG_RAUHT_LEN];
@@ -1025,11 +1031,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
1025 1031
1026 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1032 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1027 dip = ntohl(*((__be32 *) n->primary_key)); 1033 dip = ntohl(*((__be32 *) n->primary_key));
1028 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, 1034 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1029 &dip, 1035 if (WARN_ON(!neigh_entry)) {
1030 sizeof(__be32),
1031 dev);
1032 if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
1033 mlxsw_sp_port_dev_put(mlxsw_sp_port); 1036 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1034 return NOTIFY_DONE; 1037 return NOTIFY_DONE;
1035 } 1038 }
@@ -1338,33 +1341,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1338 struct fib_nh *fib_nh) 1341 struct fib_nh *fib_nh)
1339{ 1342{
1340 struct mlxsw_sp_neigh_entry *neigh_entry; 1343 struct mlxsw_sp_neigh_entry *neigh_entry;
1341 u32 gwip = ntohl(fib_nh->nh_gw);
1342 struct net_device *dev = fib_nh->nh_dev; 1344 struct net_device *dev = fib_nh->nh_dev;
1343 struct neighbour *n; 1345 struct neighbour *n;
1344 u8 nud_state; 1346 u8 nud_state;
1345 1347
1346 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, 1348 /* Take a reference of neigh here ensuring that neigh would
1347 sizeof(gwip), dev); 1349 * not be detructed before the nexthop entry is finished.
1348 if (!neigh_entry) { 1350 * The reference is taken either in neigh_lookup() or
1349 __be32 gwipn = htonl(gwip); 1351 * in neith_create() in case n is not found.
1350 1352 */
1351 n = neigh_create(&arp_tbl, &gwipn, dev); 1353 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
1354 if (!n) {
1355 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
1352 if (IS_ERR(n)) 1356 if (IS_ERR(n))
1353 return PTR_ERR(n); 1357 return PTR_ERR(n);
1354 neigh_event_send(n, NULL); 1358 neigh_event_send(n, NULL);
1355 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, 1359 }
1356 sizeof(gwip), dev); 1360 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1357 if (!neigh_entry) { 1361 if (!neigh_entry) {
1358 neigh_release(n); 1362 neigh_release(n);
1359 return -EINVAL; 1363 return -EINVAL;
1360 }
1361 } else {
1362 /* Take a reference of neigh here ensuring that neigh would
1363 * not be detructed before the nexthop entry is finished.
1364 * The second branch takes the reference in neith_create()
1365 */
1366 n = neigh_entry->n;
1367 neigh_clone(n);
1368 } 1364 }
1369 1365
1370 /* If that is the first nexthop connected to that neigh, add to 1366 /* If that is the first nexthop connected to that neigh, add to
@@ -1398,7 +1394,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1398 if (list_empty(&nh->neigh_entry->nexthop_list)) 1394 if (list_empty(&nh->neigh_entry->nexthop_list))
1399 list_del(&nh->neigh_entry->nexthop_neighs_list_node); 1395 list_del(&nh->neigh_entry->nexthop_neighs_list_node);
1400 1396
1401 neigh_release(neigh_entry->n); 1397 neigh_release(neigh_entry->key.n);
1402} 1398}
1403 1399
1404static struct mlxsw_sp_nexthop_group * 1400static struct mlxsw_sp_nexthop_group *
@@ -1458,11 +1454,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
1458 1454
1459 for (i = 0; i < fi->fib_nhs; i++) { 1455 for (i = 0; i < fi->fib_nhs; i++) {
1460 struct fib_nh *fib_nh = &fi->fib_nh[i]; 1456 struct fib_nh *fib_nh = &fi->fib_nh[i];
1461 u32 gwip = ntohl(fib_nh->nh_gw); 1457 struct neighbour *n = nh->neigh_entry->key.n;
1462 1458
1463 if (memcmp(nh->neigh_entry->key.addr, 1459 if (memcmp(n->primary_key, &fib_nh->nh_gw,
1464 &gwip, sizeof(u32)) == 0 && 1460 sizeof(fib_nh->nh_gw)) == 0 &&
1465 nh->neigh_entry->key.dev == fib_nh->nh_dev) 1461 n->dev == fib_nh->nh_dev)
1466 return true; 1462 return true;
1467 } 1463 }
1468 return false; 1464 return false;
@@ -1869,19 +1865,19 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
1869 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 1865 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1870} 1866}
1871 1867
1872static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) 1868static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
1873{ 1869{
1874 struct mlxsw_sp_fib_entry *fib_entry; 1870 struct mlxsw_sp_fib_entry *fib_entry;
1875 struct mlxsw_sp_fib_entry *tmp; 1871 struct mlxsw_sp_fib_entry *tmp;
1876 struct mlxsw_sp_vr *vr; 1872 struct mlxsw_sp_vr *vr;
1877 int i; 1873 int i;
1878 int err;
1879 1874
1880 if (mlxsw_sp->router.aborted) 1875 if (mlxsw_sp->router.aborted)
1881 return; 1876 return;
1882 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n"); 1877 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
1883 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { 1878 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
1884 vr = &mlxsw_sp->router.vrs[i]; 1879 vr = &mlxsw_sp->router.vrs[i];
1880
1885 if (!vr->used) 1881 if (!vr->used)
1886 continue; 1882 continue;
1887 1883
@@ -1897,6 +1893,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
1897 break; 1893 break;
1898 } 1894 }
1899 } 1895 }
1896}
1897
1898static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
1899{
1900 int err;
1901
1902 mlxsw_sp_router_fib_flush(mlxsw_sp);
1900 mlxsw_sp->router.aborted = true; 1903 mlxsw_sp->router.aborted = true;
1901 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); 1904 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
1902 if (err) 1905 if (err)
@@ -1952,6 +1955,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
1952 struct fib_entry_notifier_info *fen_info = ptr; 1955 struct fib_entry_notifier_info *fen_info = ptr;
1953 int err; 1956 int err;
1954 1957
1958 if (!net_eq(fen_info->info.net, &init_net))
1959 return NOTIFY_DONE;
1960
1955 switch (event) { 1961 switch (event) {
1956 case FIB_EVENT_ENTRY_ADD: 1962 case FIB_EVENT_ENTRY_ADD:
1957 err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); 1963 err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index b19552a72778..b87ba7d36bc4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
929 929
930static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, 930static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
931 const unsigned char *addr, 931 const unsigned char *addr,
932 u16 vid) 932 u16 fid)
933{ 933{
934 struct mlxsw_sp_mid *mid; 934 struct mlxsw_sp_mid *mid;
935 935
936 list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { 936 list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
937 if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) 937 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
938 return mid; 938 return mid;
939 } 939 }
940 return NULL; 940 return NULL;
@@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
942 942
943static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, 943static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
944 const unsigned char *addr, 944 const unsigned char *addr,
945 u16 vid) 945 u16 fid)
946{ 946{
947 struct mlxsw_sp_mid *mid; 947 struct mlxsw_sp_mid *mid;
948 u16 mid_idx; 948 u16 mid_idx;
@@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
958 958
959 set_bit(mid_idx, mlxsw_sp->br_mids.mapped); 959 set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
960 ether_addr_copy(mid->addr, addr); 960 ether_addr_copy(mid->addr, addr);
961 mid->vid = vid; 961 mid->fid = fid;
962 mid->mid = mid_idx; 962 mid->mid = mid_idx;
963 mid->ref_count = 0; 963 mid->ref_count = 0;
964 list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); 964 list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
@@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
991 if (switchdev_trans_ph_prepare(trans)) 991 if (switchdev_trans_ph_prepare(trans))
992 return 0; 992 return 0;
993 993
994 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); 994 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
995 if (!mid) { 995 if (!mid) {
996 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); 996 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
997 if (!mid) { 997 if (!mid) {
998 netdev_err(dev, "Unable to allocate MC group\n"); 998 netdev_err(dev, "Unable to allocate MC group\n");
999 return -ENOMEM; 999 return -ENOMEM;
@@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1137 u16 mid_idx; 1137 u16 mid_idx;
1138 int err = 0; 1138 int err = 0;
1139 1139
1140 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); 1140 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
1141 if (!mid) { 1141 if (!mid) {
1142 netdev_err(dev, "Unable to remove port from MC DB\n"); 1142 netdev_err(dev, "Unable to remove port from MC DB\n");
1143 return -EINVAL; 1143 return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index f5a4ebb3963f..785ab03683eb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -727,9 +727,6 @@ struct core_tx_bd_flags {
727#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 727#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
728#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 728#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
729#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 729#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
730#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
731#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
732
733}; 730};
734 731
735struct core_tx_bd { 732struct core_tx_bd {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 63e1a1b0ef8e..f95385cbbd40 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1119,6 +1119,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1119 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << 1119 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1120 CORE_TX_BD_FLAGS_START_BD_SHIFT; 1120 CORE_TX_BD_FLAGS_START_BD_SHIFT;
1121 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); 1121 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1122 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
1122 DMA_REGPAIR_LE(start_bd->addr, first_frag); 1123 DMA_REGPAIR_LE(start_bd->addr, first_frag);
1123 start_bd->nbytes = cpu_to_le16(first_frag_len); 1124 start_bd->nbytes = cpu_to_le16(first_frag_len);
1124 1125
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index b71d73a41b10..aeb98d8c5626 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -845,20 +845,19 @@ static void qed_update_pf_params(struct qed_dev *cdev,
845{ 845{
846 int i; 846 int i;
847 847
848 if (IS_ENABLED(CONFIG_QED_RDMA)) {
849 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
850 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
851 /* divide by 3 the MRs to avoid MF ILT overflow */
852 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
853 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
854 }
855
848 for (i = 0; i < cdev->num_hwfns; i++) { 856 for (i = 0; i < cdev->num_hwfns; i++) {
849 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 857 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
850 858
851 p_hwfn->pf_params = *params; 859 p_hwfn->pf_params = *params;
852 } 860 }
853
854 if (!IS_ENABLED(CONFIG_QED_RDMA))
855 return;
856
857 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
858 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
859 /* divide by 3 the MRs to avoid MF ILT overflow */
860 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
861 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
862} 861}
863 862
864static int qed_slowpath_start(struct qed_dev *cdev, 863static int qed_slowpath_start(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index fe7e7b8650dd..8a3debef39ee 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -177,16 +177,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
177 for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { 177 for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
178 int tc; 178 int tc;
179 179
180 for (j = 0; j < QEDE_NUM_RQSTATS; j++) 180 if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
181 sprintf(buf + (k + j) * ETH_GSTRING_LEN, 181 for (j = 0; j < QEDE_NUM_RQSTATS; j++)
182 "%d: %s", i, qede_rqstats_arr[j].string);
183 k += QEDE_NUM_RQSTATS;
184 for (tc = 0; tc < edev->num_tc; tc++) {
185 for (j = 0; j < QEDE_NUM_TQSTATS; j++)
186 sprintf(buf + (k + j) * ETH_GSTRING_LEN, 182 sprintf(buf + (k + j) * ETH_GSTRING_LEN,
187 "%d.%d: %s", i, tc, 183 "%d: %s", i,
188 qede_tqstats_arr[j].string); 184 qede_rqstats_arr[j].string);
189 k += QEDE_NUM_TQSTATS; 185 k += QEDE_NUM_RQSTATS;
186 }
187
188 if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
189 for (tc = 0; tc < edev->num_tc; tc++) {
190 for (j = 0; j < QEDE_NUM_TQSTATS; j++)
191 sprintf(buf + (k + j) *
192 ETH_GSTRING_LEN,
193 "%d.%d: %s", i, tc,
194 qede_tqstats_arr[j].string);
195 k += QEDE_NUM_TQSTATS;
196 }
190 } 197 }
191 } 198 }
192 199
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index df02c454eccc..b84a2c4ef083 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2915,7 +2915,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
2915 } 2915 }
2916 2916
2917 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, 2917 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
2918 rxq->rx_buf_size, DMA_FROM_DEVICE); 2918 PAGE_SIZE, DMA_FROM_DEVICE);
2919 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { 2919 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
2920 DP_NOTICE(edev, 2920 DP_NOTICE(edev,
2921 "Failed to map TPA replacement buffer\n"); 2921 "Failed to map TPA replacement buffer\n");
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 6fb3bee904d3..0b4deb31e742 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt)
575 575
576 mac |= TXEN | RXEN; /* enable RX/TX */ 576 mac |= TXEN | RXEN; /* enable RX/TX */
577 577
578 /* We don't have ethtool support yet, so force flow-control mode 578 /* Configure MAC flow control to match the PHY's settings. */
579 * to 'full' always. 579 if (phydev->pause)
580 */ 580 mac |= RXFC;
581 mac |= TXFC | RXFC; 581 if (phydev->pause != phydev->asym_pause)
582 mac |= TXFC;
582 583
583 /* setup link speed */ 584 /* setup link speed */
584 mac &= ~SPEED_MASK; 585 mac &= ~SPEED_MASK;
@@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt)
1003 writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); 1004 writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
1004 writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); 1005 writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
1005 1006
1007 /* Enable pause frames. Without this feature, the EMAC has been shown
1008 * to receive (and drop) frames with FCS errors at gigabit connections.
1009 */
1010 adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1011 adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1012
1006 adpt->phydev->irq = PHY_IGNORE_INTERRUPT; 1013 adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
1007 phy_start(adpt->phydev); 1014 phy_start(adpt->phydev);
1008 1015
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 75c1b530e39e..72fe343c7a36 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = {
421 /* CDR Settings */ 421 /* CDR Settings */
422 {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, 422 {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
423 UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, 423 UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
424 {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)}, 424 {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
425 {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, 425 {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
426 426
427 /* TX/RX Settings */ 427 /* TX/RX Settings */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b626da6e80a5..649bc508fa4b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel)
485 *channel = *old_channel; 485 *channel = *old_channel;
486 486
487 channel->napi_dev = NULL; 487 channel->napi_dev = NULL;
488 INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
489 channel->napi_str.napi_id = 0;
490 channel->napi_str.state = 0;
488 memset(&channel->eventq, 0, sizeof(channel->eventq)); 491 memset(&channel->eventq, 0, sizeof(channel->eventq));
489 492
490 for (j = 0; j < EFX_TXQ_TYPES; j++) { 493 for (j = 0; j < EFX_TXQ_TYPES; j++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 10909c9c0033..03dbf8e89c4c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -882,6 +882,13 @@ static int stmmac_init_phy(struct net_device *dev)
882 return -ENODEV; 882 return -ENODEV;
883 } 883 }
884 884
885 /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
886 * subsequent PHY polling, make sure we force a link transition if
887 * we have a UP/DOWN/UP transition
888 */
889 if (phydev->is_pseudo_fixed_link)
890 phydev->irq = PHY_POLL;
891
885 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" 892 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
886 " Link = %d\n", dev->name, phydev->phy_id, phydev->link); 893 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
887 894
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 054a8dd23dae..ba1e45ff6aae 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
176 } 176 }
177 177
178 dev = bus_find_device(&platform_bus_type, NULL, node, match); 178 dev = bus_find_device(&platform_bus_type, NULL, node, match);
179 of_node_put(node);
179 priv = dev_get_drvdata(dev); 180 priv = dev_get_drvdata(dev);
180 181
181 priv->cpsw_phy_sel(priv, phy_mode, slave); 182 priv->cpsw_phy_sel(priv, phy_mode, slave);
183
184 put_device(dev);
182} 185}
183EXPORT_SYMBOL_GPL(cpsw_phy_sel); 186EXPORT_SYMBOL_GPL(cpsw_phy_sel);
184 187
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2fd94a5bc1f3..84fbe5714f8b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev)
1410 int i = 0; 1410 int i = 0;
1411 struct emac_priv *priv = netdev_priv(ndev); 1411 struct emac_priv *priv = netdev_priv(ndev);
1412 struct phy_device *phydev = NULL; 1412 struct phy_device *phydev = NULL;
1413 struct device *phy = NULL;
1413 1414
1414 ret = pm_runtime_get_sync(&priv->pdev->dev); 1415 ret = pm_runtime_get_sync(&priv->pdev->dev);
1415 if (ret < 0) { 1416 if (ret < 0) {
@@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev)
1488 1489
1489 /* use the first phy on the bus if pdata did not give us a phy id */ 1490 /* use the first phy on the bus if pdata did not give us a phy id */
1490 if (!phydev && !priv->phy_id) { 1491 if (!phydev && !priv->phy_id) {
1491 struct device *phy;
1492
1493 phy = bus_find_device(&mdio_bus_type, NULL, NULL, 1492 phy = bus_find_device(&mdio_bus_type, NULL, NULL,
1494 match_first_device); 1493 match_first_device);
1495 if (phy) 1494 if (phy) {
1496 priv->phy_id = dev_name(phy); 1495 priv->phy_id = dev_name(phy);
1496 if (!priv->phy_id || !*priv->phy_id)
1497 put_device(phy);
1498 }
1497 } 1499 }
1498 1500
1499 if (!phydev && priv->phy_id && *priv->phy_id) { 1501 if (!phydev && priv->phy_id && *priv->phy_id) {
1500 phydev = phy_connect(ndev, priv->phy_id, 1502 phydev = phy_connect(ndev, priv->phy_id,
1501 &emac_adjust_link, 1503 &emac_adjust_link,
1502 PHY_INTERFACE_MODE_MII); 1504 PHY_INTERFACE_MODE_MII);
1503 1505 put_device(phy); /* reference taken by bus_find_device */
1504 if (IS_ERR(phydev)) { 1506 if (IS_ERR(phydev)) {
1505 dev_err(emac_dev, "could not connect to phy %s\n", 1507 dev_err(emac_dev, "could not connect to phy %s\n",
1506 priv->phy_id); 1508 priv->phy_id);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index b3abd02dc949..eed18f88bdff 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
1694 pr_debug("%s: bssid matched\n", __func__); 1694 pr_debug("%s: bssid matched\n", __func__);
1695 break; 1695 break;
1696 } else { 1696 } else {
1697 pr_debug("%s: bssid unmached\n", __func__); 1697 pr_debug("%s: bssid unmatched\n", __func__);
1698 continue; 1698 continue;
1699 } 1699 }
1700 } 1700 }
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 46cc33b9e926..07d862d90869 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
708 if (!qmgr_stat_below_low_watermark(rxq) && 708 if (!qmgr_stat_below_low_watermark(rxq) &&
709 napi_reschedule(napi)) { /* not empty again */ 709 napi_reschedule(napi)) { /* not empty again */
710#if DEBUG_RX 710#if DEBUG_RX
711 printk(KERN_DEBUG "%s: eth_poll" 711 printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
712 " napi_reschedule successed\n",
713 dev->name); 712 dev->name);
714#endif 713#endif
715 qmgr_disable_irq(rxq); 714 qmgr_disable_irq(rxq);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d0361f3197c9..13b7e0b9bd9b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1280,6 +1280,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1280 struct net_device *lowerdev; 1280 struct net_device *lowerdev;
1281 int err; 1281 int err;
1282 int macmode; 1282 int macmode;
1283 bool create = false;
1283 1284
1284 if (!tb[IFLA_LINK]) 1285 if (!tb[IFLA_LINK])
1285 return -EINVAL; 1286 return -EINVAL;
@@ -1310,12 +1311,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1310 err = macvlan_port_create(lowerdev); 1311 err = macvlan_port_create(lowerdev);
1311 if (err < 0) 1312 if (err < 0)
1312 return err; 1313 return err;
1314 create = true;
1313 } 1315 }
1314 port = macvlan_port_get_rtnl(lowerdev); 1316 port = macvlan_port_get_rtnl(lowerdev);
1315 1317
1316 /* Only 1 macvlan device can be created in passthru mode */ 1318 /* Only 1 macvlan device can be created in passthru mode */
1317 if (port->passthru) 1319 if (port->passthru) {
1318 return -EINVAL; 1320 /* The macvlan port must be not created this time,
1321 * still goto destroy_macvlan_port for readability.
1322 */
1323 err = -EINVAL;
1324 goto destroy_macvlan_port;
1325 }
1319 1326
1320 vlan->lowerdev = lowerdev; 1327 vlan->lowerdev = lowerdev;
1321 vlan->dev = dev; 1328 vlan->dev = dev;
@@ -1331,24 +1338,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1331 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 1338 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
1332 1339
1333 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 1340 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
1334 if (port->count) 1341 if (port->count) {
1335 return -EINVAL; 1342 err = -EINVAL;
1343 goto destroy_macvlan_port;
1344 }
1336 port->passthru = true; 1345 port->passthru = true;
1337 eth_hw_addr_inherit(dev, lowerdev); 1346 eth_hw_addr_inherit(dev, lowerdev);
1338 } 1347 }
1339 1348
1340 if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { 1349 if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
1341 if (vlan->mode != MACVLAN_MODE_SOURCE) 1350 if (vlan->mode != MACVLAN_MODE_SOURCE) {
1342 return -EINVAL; 1351 err = -EINVAL;
1352 goto destroy_macvlan_port;
1353 }
1343 macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); 1354 macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
1344 err = macvlan_changelink_sources(vlan, macmode, data); 1355 err = macvlan_changelink_sources(vlan, macmode, data);
1345 if (err) 1356 if (err)
1346 return err; 1357 goto destroy_macvlan_port;
1347 } 1358 }
1348 1359
1349 err = register_netdevice(dev); 1360 err = register_netdevice(dev);
1350 if (err < 0) 1361 if (err < 0)
1351 return err; 1362 goto destroy_macvlan_port;
1352 1363
1353 dev->priv_flags |= IFF_MACVLAN; 1364 dev->priv_flags |= IFF_MACVLAN;
1354 err = netdev_upper_dev_link(lowerdev, dev); 1365 err = netdev_upper_dev_link(lowerdev, dev);
@@ -1363,7 +1374,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1363 1374
1364unregister_netdev: 1375unregister_netdev:
1365 unregister_netdevice(dev); 1376 unregister_netdevice(dev);
1366 1377destroy_macvlan_port:
1378 if (create)
1379 macvlan_port_destroy(port->dev);
1367 return err; 1380 return err;
1368} 1381}
1369EXPORT_SYMBOL_GPL(macvlan_common_newlink); 1382EXPORT_SYMBOL_GPL(macvlan_common_newlink);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 49a1c988d29c..9e8f048891bd 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -724,6 +724,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
724 phydev = to_phy_device(d); 724 phydev = to_phy_device(d);
725 725
726 rc = phy_connect_direct(dev, phydev, handler, interface); 726 rc = phy_connect_direct(dev, phydev, handler, interface);
727 put_device(d);
727 if (rc) 728 if (rc)
728 return ERR_PTR(rc); 729 return ERR_PTR(rc);
729 730
@@ -956,6 +957,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
956 phydev = to_phy_device(d); 957 phydev = to_phy_device(d);
957 958
958 rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); 959 rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
960 put_device(d);
959 if (rc) 961 if (rc)
960 return ERR_PTR(rc); 962 return ERR_PTR(rc);
961 963
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 36c70d6f7363..a3a7db0702d8 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1654,6 +1654,19 @@ static const struct driver_info ax88178a_info = {
1654 .tx_fixup = ax88179_tx_fixup, 1654 .tx_fixup = ax88179_tx_fixup,
1655}; 1655};
1656 1656
1657static const struct driver_info cypress_GX3_info = {
1658 .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller",
1659 .bind = ax88179_bind,
1660 .unbind = ax88179_unbind,
1661 .status = ax88179_status,
1662 .link_reset = ax88179_link_reset,
1663 .reset = ax88179_reset,
1664 .stop = ax88179_stop,
1665 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1666 .rx_fixup = ax88179_rx_fixup,
1667 .tx_fixup = ax88179_tx_fixup,
1668};
1669
1657static const struct driver_info dlink_dub1312_info = { 1670static const struct driver_info dlink_dub1312_info = {
1658 .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", 1671 .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
1659 .bind = ax88179_bind, 1672 .bind = ax88179_bind,
@@ -1716,6 +1729,10 @@ static const struct usb_device_id products[] = {
1716 USB_DEVICE(0x0b95, 0x178a), 1729 USB_DEVICE(0x0b95, 0x178a),
1717 .driver_info = (unsigned long)&ax88178a_info, 1730 .driver_info = (unsigned long)&ax88178a_info,
1718}, { 1731}, {
1732 /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
1733 USB_DEVICE(0x04b4, 0x3610),
1734 .driver_info = (unsigned long)&cypress_GX3_info,
1735}, {
1719 /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ 1736 /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
1720 USB_DEVICE(0x2001, 0x4a00), 1737 USB_DEVICE(0x2001, 0x4a00),
1721 .driver_info = (unsigned long)&dlink_dub1312_info, 1738 .driver_info = (unsigned long)&dlink_dub1312_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 4213c28eeb43..7dc61228c55b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
1730 u8 checksum = CHECKSUM_NONE; 1730 u8 checksum = CHECKSUM_NONE;
1731 u32 opts2, opts3; 1731 u32 opts2, opts3;
1732 1732
1733 if (tp->version == RTL_VER_01) 1733 if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
1734 goto return_result; 1734 goto return_result;
1735 1735
1736 opts2 = le32_to_cpu(rx_desc->opts2); 1736 opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
1745 checksum = CHECKSUM_NONE; 1745 checksum = CHECKSUM_NONE;
1746 else 1746 else
1747 checksum = CHECKSUM_UNNECESSARY; 1747 checksum = CHECKSUM_UNNECESSARY;
1748 } else if (RD_IPV6_CS) { 1748 } else if (opts2 & RD_IPV6_CS) {
1749 if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) 1749 if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
1750 checksum = CHECKSUM_UNNECESSARY; 1750 checksum = CHECKSUM_UNNECESSARY;
1751 else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) 1751 else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
@@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev)
3266 goto out; 3266 goto out;
3267 3267
3268 res = usb_autopm_get_interface(tp->intf); 3268 res = usb_autopm_get_interface(tp->intf);
3269 if (res < 0) { 3269 if (res < 0)
3270 free_all_mem(tp); 3270 goto out_free;
3271 goto out;
3272 }
3273 3271
3274 mutex_lock(&tp->control); 3272 mutex_lock(&tp->control);
3275 3273
@@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev)
3285 netif_device_detach(tp->netdev); 3283 netif_device_detach(tp->netdev);
3286 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", 3284 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
3287 res); 3285 res);
3288 free_all_mem(tp); 3286 goto out_unlock;
3289 } else {
3290 napi_enable(&tp->napi);
3291 } 3287 }
3288 napi_enable(&tp->napi);
3292 3289
3293 mutex_unlock(&tp->control); 3290 mutex_unlock(&tp->control);
3294 3291
@@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev)
3297 tp->pm_notifier.notifier_call = rtl_notifier; 3294 tp->pm_notifier.notifier_call = rtl_notifier;
3298 register_pm_notifier(&tp->pm_notifier); 3295 register_pm_notifier(&tp->pm_notifier);
3299#endif 3296#endif
3297 return 0;
3300 3298
3299out_unlock:
3300 mutex_unlock(&tp->control);
3301 usb_autopm_put_interface(tp->intf);
3302out_free:
3303 free_all_mem(tp);
3301out: 3304out:
3302 return res; 3305 return res;
3303} 3306}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2cafd12a1075..ca5239aea4d9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2037,23 +2037,33 @@ static struct virtio_device_id id_table[] = {
2037 { 0 }, 2037 { 0 },
2038}; 2038};
2039 2039
2040#define VIRTNET_FEATURES \
2041 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
2042 VIRTIO_NET_F_MAC, \
2043 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
2044 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
2045 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
2046 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
2047 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
2048 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
2049 VIRTIO_NET_F_CTRL_MAC_ADDR, \
2050 VIRTIO_NET_F_MTU
2051
2040static unsigned int features[] = { 2052static unsigned int features[] = {
2041 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 2053 VIRTNET_FEATURES,
2042 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 2054};
2043 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 2055
2044 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 2056static unsigned int features_legacy[] = {
2045 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 2057 VIRTNET_FEATURES,
2046 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 2058 VIRTIO_NET_F_GSO,
2047 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
2048 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
2049 VIRTIO_NET_F_CTRL_MAC_ADDR,
2050 VIRTIO_F_ANY_LAYOUT, 2059 VIRTIO_F_ANY_LAYOUT,
2051 VIRTIO_NET_F_MTU,
2052}; 2060};
2053 2061
2054static struct virtio_driver virtio_net_driver = { 2062static struct virtio_driver virtio_net_driver = {
2055 .feature_table = features, 2063 .feature_table = features,
2056 .feature_table_size = ARRAY_SIZE(features), 2064 .feature_table_size = ARRAY_SIZE(features),
2065 .feature_table_legacy = features_legacy,
2066 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
2057 .driver.name = KBUILD_MODNAME, 2067 .driver.name = KBUILD_MODNAME,
2058 .driver.owner = THIS_MODULE, 2068 .driver.owner = THIS_MODULE,
2059 .id_table = id_table, 2069 .id_table = id_table,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index cb5cc7c03160..5264c1a49d86 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -944,7 +944,9 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
944{ 944{
945 struct vxlan_dev *vxlan; 945 struct vxlan_dev *vxlan;
946 struct vxlan_sock *sock4; 946 struct vxlan_sock *sock4;
947 struct vxlan_sock *sock6 = NULL; 947#if IS_ENABLED(CONFIG_IPV6)
948 struct vxlan_sock *sock6;
949#endif
948 unsigned short family = dev->default_dst.remote_ip.sa.sa_family; 950 unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
949 951
950 sock4 = rtnl_dereference(dev->vn4_sock); 952 sock4 = rtnl_dereference(dev->vn4_sock);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 2295336355df..cf267f9da753 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4518,7 +4518,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4518 /* store current 11d setting */ 4518 /* store current 11d setting */
4519 if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, 4519 if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
4520 &ifp->vif->is_11d)) { 4520 &ifp->vif->is_11d)) {
4521 supports_11d = false; 4521 is_11d = supports_11d = false;
4522 } else { 4522 } else {
4523 country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, 4523 country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
4524 settings->beacon.tail_len, 4524 settings->beacon.tail_len,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4fdc3dad3e85..b88e2048ae0b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
1087 ret = iwl_mvm_switch_to_d3(mvm); 1087 ret = iwl_mvm_switch_to_d3(mvm);
1088 if (ret) 1088 if (ret)
1089 return ret; 1089 return ret;
1090 } else {
1091 /* In theory, we wouldn't have to stop a running sched
1092 * scan in order to start another one (for
1093 * net-detect). But in practice this doesn't seem to
1094 * work properly, so stop any running sched_scan now.
1095 */
1096 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1097 if (ret)
1098 return ret;
1090 } 1099 }
1091 1100
1092 /* rfkill release can be either for wowlan or netdetect */ 1101 /* rfkill release can be either for wowlan or netdetect */
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1254 out: 1263 out:
1255 if (ret < 0) { 1264 if (ret < 0) {
1256 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 1265 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1257 ieee80211_restart_hw(mvm->hw); 1266 if (mvm->restart_fw > 0) {
1267 mvm->restart_fw--;
1268 ieee80211_restart_hw(mvm->hw);
1269 }
1258 iwl_mvm_free_nd(mvm); 1270 iwl_mvm_free_nd(mvm);
1259 } 1271 }
1260 out_noreset: 1272 out_noreset:
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
2088 iwl_mvm_update_changed_regdom(mvm); 2100 iwl_mvm_update_changed_regdom(mvm);
2089 2101
2090 if (mvm->net_detect) { 2102 if (mvm->net_detect) {
2103 /* If this is a non-unified image, we restart the FW,
2104 * so no need to stop the netdetect scan. If that
2105 * fails, continue and try to get the wake-up reasons,
2106 * but trigger a HW restart by keeping a failure code
2107 * in ret.
2108 */
2109 if (unified_image)
2110 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
2111 false);
2112
2091 iwl_mvm_query_netdetect_reasons(mvm, vif); 2113 iwl_mvm_query_netdetect_reasons(mvm, vif);
2092 /* has unlocked the mutex, so skip that */ 2114 /* has unlocked the mutex, so skip that */
2093 goto out; 2115 goto out;
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
2271static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) 2293static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2272{ 2294{
2273 struct iwl_mvm *mvm = inode->i_private; 2295 struct iwl_mvm *mvm = inode->i_private;
2274 int remaining_time = 10; 2296 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2297 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2275 2298
2276 mvm->d3_test_active = false; 2299 mvm->d3_test_active = false;
2277 2300
@@ -2282,17 +2305,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2282 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2305 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2283 2306
2284 iwl_abort_notification_waits(&mvm->notif_wait); 2307 iwl_abort_notification_waits(&mvm->notif_wait);
2285 ieee80211_restart_hw(mvm->hw); 2308 if (!unified_image) {
2309 int remaining_time = 10;
2286 2310
2287 /* wait for restart and disconnect all interfaces */ 2311 ieee80211_restart_hw(mvm->hw);
2288 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2312
2289 remaining_time > 0) { 2313 /* wait for restart and disconnect all interfaces */
2290 remaining_time--; 2314 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2291 msleep(1000); 2315 remaining_time > 0) {
2292 } 2316 remaining_time--;
2317 msleep(1000);
2318 }
2293 2319
2294 if (remaining_time == 0) 2320 if (remaining_time == 0)
2295 IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); 2321 IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
2322 }
2296 2323
2297 ieee80211_iterate_active_interfaces_atomic( 2324 ieee80211_iterate_active_interfaces_atomic(
2298 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 2325 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 07da4efe8458..7b7d2a146e30 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
1529 .data = { &cmd, }, 1529 .data = { &cmd, },
1530 .len = { sizeof(cmd) }, 1530 .len = { sizeof(cmd) },
1531 }; 1531 };
1532 size_t delta, len; 1532 size_t delta;
1533 ssize_t ret; 1533 ssize_t ret, len;
1534 1534
1535 hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, 1535 hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
1536 DEBUG_GROUP, 0); 1536 DEBUG_GROUP, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 318efd814037..1db1dc13e988 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4121 struct iwl_mvm_internal_rxq_notif *notif, 4121 struct iwl_mvm_internal_rxq_notif *notif,
4122 u32 size) 4122 u32 size)
4123{ 4123{
4124 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
4125 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; 4124 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4126 int ret; 4125 int ret;
4127 4126
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4143 } 4142 }
4144 4143
4145 if (notif->sync) 4144 if (notif->sync)
4146 ret = wait_event_timeout(notif_waitq, 4145 ret = wait_event_timeout(mvm->rx_sync_waitq,
4147 atomic_read(&mvm->queue_sync_counter) == 0, 4146 atomic_read(&mvm->queue_sync_counter) == 0,
4148 HZ); 4147 HZ);
4149 WARN_ON_ONCE(!ret); 4148 WARN_ON_ONCE(!ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d17cbf603f7c..c60703e0c246 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -937,6 +937,7 @@ struct iwl_mvm {
937 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ 937 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
938 spinlock_t d0i3_tx_lock; 938 spinlock_t d0i3_tx_lock;
939 wait_queue_head_t d0i3_exit_waitq; 939 wait_queue_head_t d0i3_exit_waitq;
940 wait_queue_head_t rx_sync_waitq;
940 941
941 /* BT-Coex */ 942 /* BT-Coex */
942 struct iwl_bt_coex_profile_notif last_bt_notif; 943 struct iwl_bt_coex_profile_notif last_bt_notif;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 05fe6dd1a2c8..4d35deb628bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
619 spin_lock_init(&mvm->refs_lock); 619 spin_lock_init(&mvm->refs_lock);
620 skb_queue_head_init(&mvm->d0i3_tx); 620 skb_queue_head_init(&mvm->d0i3_tx);
621 init_waitqueue_head(&mvm->d0i3_exit_waitq); 621 init_waitqueue_head(&mvm->d0i3_exit_waitq);
622 init_waitqueue_head(&mvm->rx_sync_waitq);
622 623
623 atomic_set(&mvm->queue_sync_counter, 0); 624 atomic_set(&mvm->queue_sync_counter, 0);
624 625
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a57c6ef5bc14..6c802cee900c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
547 "Received expired RX queue sync message\n"); 547 "Received expired RX queue sync message\n");
548 return; 548 return;
549 } 549 }
550 atomic_dec(&mvm->queue_sync_counter); 550 if (!atomic_dec_return(&mvm->queue_sync_counter))
551 wake_up(&mvm->rx_sync_waitq);
551 } 552 }
552 553
553 switch (internal_notif->type) { 554 switch (internal_notif->type) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f279fdd6eb44..fa9743205491 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
1199 1199
1200static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) 1200static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1201{ 1201{
1202 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1203 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1204
1202 /* This looks a bit arbitrary, but the idea is that if we run 1205 /* This looks a bit arbitrary, but the idea is that if we run
1203 * out of possible simultaneous scans and the userspace is 1206 * out of possible simultaneous scans and the userspace is
1204 * trying to run a scan type that is already running, we 1207 * trying to run a scan type that is already running, we
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1225 return -EBUSY; 1228 return -EBUSY;
1226 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 1229 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
1227 case IWL_MVM_SCAN_NETDETECT: 1230 case IWL_MVM_SCAN_NETDETECT:
1228 /* No need to stop anything for net-detect since the 1231 /* For non-unified images, there's no need to stop
1229 * firmware is restarted anyway. This way, any sched 1232 * anything for net-detect since the firmware is
1230 * scans that were running will be restarted when we 1233 * restarted anyway. This way, any sched scans that
1231 * resume. 1234 * were running will be restarted when we resume.
1232 */ 1235 */
1233 return 0; 1236 if (!unified_image)
1237 return 0;
1238
1239 /* If this is a unified image and we ran out of scans,
1240 * we need to stop something. Prefer stopping regular
1241 * scans, because the results are useless at this
1242 * point, and we should be able to keep running
1243 * another scheduled scan while suspended.
1244 */
1245 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1246 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
1247 true);
1248 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1249 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
1250 true);
1251
1252 /* fall through, something is wrong if no scan was
1253 * running but we ran out of scans.
1254 */
1234 default: 1255 default:
1235 WARN_ON(1); 1256 WARN_ON(1);
1236 break; 1257 break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 001be406a3d3..2f8134b2a504 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
541MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 541MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
542 542
543#ifdef CONFIG_ACPI 543#ifdef CONFIG_ACPI
544#define SPL_METHOD "SPLC" 544#define ACPI_SPLC_METHOD "SPLC"
545#define SPL_DOMAINTYPE_MODULE BIT(0) 545#define ACPI_SPLC_DOMAIN_WIFI (0x07)
546#define SPL_DOMAINTYPE_WIFI BIT(1)
547#define SPL_DOMAINTYPE_WIGIG BIT(2)
548#define SPL_DOMAINTYPE_RFEM BIT(3)
549 546
550static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) 547static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
551{ 548{
552 union acpi_object *limits, *domain_type, *power_limit; 549 union acpi_object *data_pkg, *dflt_pwr_limit;
553 550 int i;
554 if (splx->type != ACPI_TYPE_PACKAGE || 551
555 splx->package.count != 2 || 552 /* We need at least two elements, one for the revision and one
556 splx->package.elements[0].type != ACPI_TYPE_INTEGER || 553 * for the data itself. Also check that the revision is
557 splx->package.elements[0].integer.value != 0) { 554 * supported (currently only revision 0).
558 IWL_ERR(trans, "Unsupported splx structure\n"); 555 */
556 if (splc->type != ACPI_TYPE_PACKAGE ||
557 splc->package.count < 2 ||
558 splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
559 splc->package.elements[0].integer.value != 0) {
560 IWL_DEBUG_INFO(trans,
561 "Unsupported structure returned by the SPLC method. Ignoring.\n");
559 return 0; 562 return 0;
560 } 563 }
561 564
562 limits = &splx->package.elements[1]; 565 /* loop through all the packages to find the one for WiFi */
563 if (limits->type != ACPI_TYPE_PACKAGE || 566 for (i = 1; i < splc->package.count; i++) {
564 limits->package.count < 2 || 567 union acpi_object *domain;
565 limits->package.elements[0].type != ACPI_TYPE_INTEGER || 568
566 limits->package.elements[1].type != ACPI_TYPE_INTEGER) { 569 data_pkg = &splc->package.elements[i];
567 IWL_ERR(trans, "Invalid limits element\n"); 570
568 return 0; 571 /* Skip anything that is not a package with the right
572 * amount of elements (i.e. at least 2 integers).
573 */
574 if (data_pkg->type != ACPI_TYPE_PACKAGE ||
575 data_pkg->package.count < 2 ||
576 data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
577 data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
578 continue;
579
580 domain = &data_pkg->package.elements[0];
581 if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
582 break;
583
584 data_pkg = NULL;
569 } 585 }
570 586
571 domain_type = &limits->package.elements[0]; 587 if (!data_pkg) {
572 power_limit = &limits->package.elements[1]; 588 IWL_DEBUG_INFO(trans,
573 if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { 589 "No element for the WiFi domain returned by the SPLC method.\n");
574 IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
575 return 0; 590 return 0;
576 } 591 }
577 592
578 return power_limit->integer.value; 593 dflt_pwr_limit = &data_pkg->package.elements[1];
594 return dflt_pwr_limit->integer.value;
579} 595}
580 596
581static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) 597static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
582{ 598{
583 acpi_handle pxsx_handle; 599 acpi_handle pxsx_handle;
584 acpi_handle handle; 600 acpi_handle handle;
585 struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; 601 struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
586 acpi_status status; 602 acpi_status status;
587 603
588 pxsx_handle = ACPI_HANDLE(&pdev->dev); 604 pxsx_handle = ACPI_HANDLE(&pdev->dev);
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
593 } 609 }
594 610
595 /* Get the method's handle */ 611 /* Get the method's handle */
596 status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); 612 status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
613 &handle);
597 if (ACPI_FAILURE(status)) { 614 if (ACPI_FAILURE(status)) {
598 IWL_DEBUG_INFO(trans, "SPL method not found\n"); 615 IWL_DEBUG_INFO(trans, "SPLC method not found\n");
599 return; 616 return;
600 } 617 }
601 618
602 /* Call SPLC with no arguments */ 619 /* Call SPLC with no arguments */
603 status = acpi_evaluate_object(handle, NULL, NULL, &splx); 620 status = acpi_evaluate_object(handle, NULL, NULL, &splc);
604 if (ACPI_FAILURE(status)) { 621 if (ACPI_FAILURE(status)) {
605 IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); 622 IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
606 return; 623 return;
607 } 624 }
608 625
609 trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); 626 trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
610 IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", 627 IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
611 trans->dflt_pwr_limit); 628 trans->dflt_pwr_limit);
612 kfree(splx.pointer); 629 kfree(splc.pointer);
613} 630}
614 631
615#else /* CONFIG_ACPI */ 632#else /* CONFIG_ACPI */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index e9a278b60dfd..5f840f16f40b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -592,6 +592,7 @@ error:
592static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 592static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
593 int slots_num, u32 txq_id) 593 int slots_num, u32 txq_id)
594{ 594{
595 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
595 int ret; 596 int ret;
596 597
597 txq->need_update = false; 598 txq->need_update = false;
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
606 return ret; 607 return ret;
607 608
608 spin_lock_init(&txq->lock); 609 spin_lock_init(&txq->lock);
610
611 if (txq_id == trans_pcie->cmd_queue) {
612 static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
613
614 lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
615 }
616
609 __skb_queue_head_init(&txq->overflow_q); 617 __skb_queue_head_init(&txq->overflow_q);
610 618
611 /* 619 /*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7d616b003e89..e085c8c31cfe 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
304 queue->rx_skbs[id] = skb; 304 queue->rx_skbs[id] = skb;
305 305
306 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); 306 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
307 BUG_ON((signed short)ref < 0); 307 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
308 queue->grant_rx_ref[id] = ref; 308 queue->grant_rx_ref[id] = ref;
309 309
310 page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 310 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
428 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 428 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
429 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 429 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
430 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 430 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
431 BUG_ON((signed short)ref < 0); 431 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
432 432
433 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 433 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
434 gfn, GNTMAP_readonly); 434 gfn, GNTMAP_readonly);
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 83deda4bb4d6..6f9563a96488 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
133 return -ENOMEM; 133 return -ENOMEM;
134 134
135 bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); 135 bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length);
136 if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { 136 if (bytes_recv < 0 || bytes_recv < if_version_length) {
137 pr_err("Could not read IF version\n"); 137 pr_err("Could not read IF version\n");
138 r = -EIO; 138 r = -EIO;
139 goto err; 139 goto err;
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 0d5c29ae51de..7310a261c858 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -112,17 +112,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
112 112
113module_param_named(xeon_b2b_usd_bar4_addr64, 113module_param_named(xeon_b2b_usd_bar4_addr64,
114 xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); 114 xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
115MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 115MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
116 "XEON B2B USD BAR 4 64-bit address"); 116 "XEON B2B USD BAR 4 64-bit address");
117 117
118module_param_named(xeon_b2b_usd_bar4_addr32, 118module_param_named(xeon_b2b_usd_bar4_addr32,
119 xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); 119 xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
120MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 120MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
121 "XEON B2B USD split-BAR 4 32-bit address"); 121 "XEON B2B USD split-BAR 4 32-bit address");
122 122
123module_param_named(xeon_b2b_usd_bar5_addr32, 123module_param_named(xeon_b2b_usd_bar5_addr32,
124 xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); 124 xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
125MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 125MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
126 "XEON B2B USD split-BAR 5 32-bit address"); 126 "XEON B2B USD split-BAR 5 32-bit address");
127 127
128module_param_named(xeon_b2b_dsd_bar2_addr64, 128module_param_named(xeon_b2b_dsd_bar2_addr64,
@@ -132,17 +132,17 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
132 132
133module_param_named(xeon_b2b_dsd_bar4_addr64, 133module_param_named(xeon_b2b_dsd_bar4_addr64,
134 xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); 134 xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
135MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 135MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
136 "XEON B2B DSD BAR 4 64-bit address"); 136 "XEON B2B DSD BAR 4 64-bit address");
137 137
138module_param_named(xeon_b2b_dsd_bar4_addr32, 138module_param_named(xeon_b2b_dsd_bar4_addr32,
139 xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); 139 xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
140MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 140MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
141 "XEON B2B DSD split-BAR 4 32-bit address"); 141 "XEON B2B DSD split-BAR 4 32-bit address");
142 142
143module_param_named(xeon_b2b_dsd_bar5_addr32, 143module_param_named(xeon_b2b_dsd_bar5_addr32,
144 xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); 144 xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
145MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 145MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
146 "XEON B2B DSD split-BAR 5 32-bit address"); 146 "XEON B2B DSD split-BAR 5 32-bit address");
147 147
148#ifndef ioread64 148#ifndef ioread64
@@ -1755,6 +1755,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
1755 XEON_B2B_MIN_SIZE); 1755 XEON_B2B_MIN_SIZE);
1756 if (!ndev->peer_mmio) 1756 if (!ndev->peer_mmio)
1757 return -EIO; 1757 return -EIO;
1758
1759 ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
1758 } 1760 }
1759 1761
1760 return 0; 1762 return 0;
@@ -2019,6 +2021,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
2019 goto err_mmio; 2021 goto err_mmio;
2020 } 2022 }
2021 ndev->peer_mmio = ndev->self_mmio; 2023 ndev->peer_mmio = ndev->self_mmio;
2024 ndev->peer_addr = pci_resource_start(pdev, 0);
2022 2025
2023 return 0; 2026 return 0;
2024 2027
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 8601c10acf74..4eb8adb34508 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -257,7 +257,7 @@ enum {
257#define NTB_QP_DEF_NUM_ENTRIES 100 257#define NTB_QP_DEF_NUM_ENTRIES 100
258#define NTB_LINK_DOWN_TIMEOUT 10 258#define NTB_LINK_DOWN_TIMEOUT 10
259#define DMA_RETRIES 20 259#define DMA_RETRIES 20
260#define DMA_OUT_RESOURCE_TO 50 260#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
261 261
262static void ntb_transport_rxc_db(unsigned long data); 262static void ntb_transport_rxc_db(unsigned long data);
263static const struct ntb_ctx_ops ntb_transport_ops; 263static const struct ntb_ctx_ops ntb_transport_ops;
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 6a50f20bf1cd..e75d4fdc0866 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -72,7 +72,7 @@
72#define MAX_THREADS 32 72#define MAX_THREADS 32
73#define MAX_TEST_SIZE SZ_1M 73#define MAX_TEST_SIZE SZ_1M
74#define MAX_SRCS 32 74#define MAX_SRCS 32
75#define DMA_OUT_RESOURCE_TO 50 75#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
76#define DMA_RETRIES 20 76#define DMA_RETRIES 20
77#define SZ_4G (1ULL << 32) 77#define SZ_4G (1ULL << 32)
78#define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ 78#define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */
@@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
589 return -ENOMEM; 589 return -ENOMEM;
590 590
591 if (mutex_is_locked(&perf->run_mutex)) { 591 if (mutex_is_locked(&perf->run_mutex)) {
592 out_off = snprintf(buf, 64, "running\n"); 592 out_off = scnprintf(buf, 64, "running\n");
593 goto read_from_buf; 593 goto read_from_buf;
594 } 594 }
595 595
@@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
600 break; 600 break;
601 601
602 if (pctx->status) { 602 if (pctx->status) {
603 out_off += snprintf(buf + out_off, 1024 - out_off, 603 out_off += scnprintf(buf + out_off, 1024 - out_off,
604 "%d: error %d\n", i, 604 "%d: error %d\n", i,
605 pctx->status); 605 pctx->status);
606 continue; 606 continue;
607 } 607 }
608 608
609 rate = div64_u64(pctx->copied, pctx->diff_us); 609 rate = div64_u64(pctx->copied, pctx->diff_us);
610 out_off += snprintf(buf + out_off, 1024 - out_off, 610 out_off += scnprintf(buf + out_off, 1024 - out_off,
611 "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", 611 "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
612 i, pctx->copied, pctx->diff_us, rate); 612 i, pctx->copied, pctx->diff_us, rate);
613 } 613 }
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 7d311799fca1..435861189d97 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
88 88
89static unsigned long db_init = 0x7; 89static unsigned long db_init = 0x7;
90module_param(db_init, ulong, 0644); 90module_param(db_init, ulong, 0644);
91MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer"); 91MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
92 92
93struct pp_ctx { 93struct pp_ctx {
94 struct ntb_dev *ntb; 94 struct ntb_dev *ntb;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index f5e3011e31fc..5daf2f4be0cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -612,7 +612,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
612 612
613 ret = nvm_register(dev); 613 ret = nvm_register(dev);
614 614
615 ns->lba_shift = ilog2(dev->sec_size) - 9; 615 ns->lba_shift = ilog2(dev->sec_size);
616 616
617 if (sysfs_create_group(&dev->dev.kobj, attrs)) 617 if (sysfs_create_group(&dev->dev.kobj, attrs))
618 pr_warn("%s: failed to create sysfs group for identification\n", 618 pr_warn("%s: failed to create sysfs group for identification\n",
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d687e6de24a0..a0bccb54a9bd 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2077,8 +2077,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
2077 name = of_get_property(of_aliases, "stdout", NULL); 2077 name = of_get_property(of_aliases, "stdout", NULL);
2078 if (name) 2078 if (name)
2079 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); 2079 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
2080 if (of_stdout)
2081 console_set_by_of();
2082 } 2080 }
2083 2081
2084 if (!of_aliases) 2082 if (!of_aliases)
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 035f50c03281..bed19994c1e9 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -637,8 +637,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
637 } 637 }
638 } 638 }
639 639
640 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
641
642 if (pp->ops->host_init) 640 if (pp->ops->host_init)
643 pp->ops->host_init(pp); 641 pp->ops->host_init(pp);
644 642
@@ -809,6 +807,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
809{ 807{
810 u32 val; 808 u32 val;
811 809
810 /* get iATU unroll support */
811 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
812 dev_dbg(pp->dev, "iATU unroll: %s\n",
813 pp->iatu_unroll_enabled ? "enabled" : "disabled");
814
812 /* set the number of lanes */ 815 /* set the number of lanes */
813 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); 816 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
814 val &= ~PORT_LINK_MODE_MASK; 817 val &= ~PORT_LINK_MODE_MASK;
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index ef0a84c7a588..35936409b2d4 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -533,11 +533,11 @@ static int qcom_pcie_probe(struct platform_device *pdev)
533 if (IS_ERR(pcie->phy)) 533 if (IS_ERR(pcie->phy))
534 return PTR_ERR(pcie->phy); 534 return PTR_ERR(pcie->phy);
535 535
536 pp->dev = dev;
536 ret = pcie->ops->get_resources(pcie); 537 ret = pcie->ops->get_resources(pcie);
537 if (ret) 538 if (ret)
538 return ret; 539 return ret;
539 540
540 pp->dev = dev;
541 pp->root_bus_nr = -1; 541 pp->root_bus_nr = -1;
542 pp->ops = &qcom_pcie_dw_ops; 542 pp->ops = &qcom_pcie_dw_ops;
543 543
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index e0b22dab9b7a..e04f69beb42d 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -190,6 +190,9 @@ struct rockchip_pcie {
190 struct reset_control *mgmt_rst; 190 struct reset_control *mgmt_rst;
191 struct reset_control *mgmt_sticky_rst; 191 struct reset_control *mgmt_sticky_rst;
192 struct reset_control *pipe_rst; 192 struct reset_control *pipe_rst;
193 struct reset_control *pm_rst;
194 struct reset_control *aclk_rst;
195 struct reset_control *pclk_rst;
193 struct clk *aclk_pcie; 196 struct clk *aclk_pcie;
194 struct clk *aclk_perf_pcie; 197 struct clk *aclk_perf_pcie;
195 struct clk *hclk_pcie; 198 struct clk *hclk_pcie;
@@ -408,6 +411,44 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
408 411
409 gpiod_set_value(rockchip->ep_gpio, 0); 412 gpiod_set_value(rockchip->ep_gpio, 0);
410 413
414 err = reset_control_assert(rockchip->aclk_rst);
415 if (err) {
416 dev_err(dev, "assert aclk_rst err %d\n", err);
417 return err;
418 }
419
420 err = reset_control_assert(rockchip->pclk_rst);
421 if (err) {
422 dev_err(dev, "assert pclk_rst err %d\n", err);
423 return err;
424 }
425
426 err = reset_control_assert(rockchip->pm_rst);
427 if (err) {
428 dev_err(dev, "assert pm_rst err %d\n", err);
429 return err;
430 }
431
432 udelay(10);
433
434 err = reset_control_deassert(rockchip->pm_rst);
435 if (err) {
436 dev_err(dev, "deassert pm_rst err %d\n", err);
437 return err;
438 }
439
440 err = reset_control_deassert(rockchip->aclk_rst);
441 if (err) {
442 dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
443 return err;
444 }
445
446 err = reset_control_deassert(rockchip->pclk_rst);
447 if (err) {
448 dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
449 return err;
450 }
451
411 err = phy_init(rockchip->phy); 452 err = phy_init(rockchip->phy);
412 if (err < 0) { 453 if (err < 0) {
413 dev_err(dev, "fail to init phy, err %d\n", err); 454 dev_err(dev, "fail to init phy, err %d\n", err);
@@ -781,6 +822,27 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
781 return PTR_ERR(rockchip->pipe_rst); 822 return PTR_ERR(rockchip->pipe_rst);
782 } 823 }
783 824
825 rockchip->pm_rst = devm_reset_control_get(dev, "pm");
826 if (IS_ERR(rockchip->pm_rst)) {
827 if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
828 dev_err(dev, "missing pm reset property in node\n");
829 return PTR_ERR(rockchip->pm_rst);
830 }
831
832 rockchip->pclk_rst = devm_reset_control_get(dev, "pclk");
833 if (IS_ERR(rockchip->pclk_rst)) {
834 if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
835 dev_err(dev, "missing pclk reset property in node\n");
836 return PTR_ERR(rockchip->pclk_rst);
837 }
838
839 rockchip->aclk_rst = devm_reset_control_get(dev, "aclk");
840 if (IS_ERR(rockchip->aclk_rst)) {
841 if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
842 dev_err(dev, "missing aclk reset property in node\n");
843 return PTR_ERR(rockchip->aclk_rst);
844 }
845
784 rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); 846 rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
785 if (IS_ERR(rockchip->ep_gpio)) { 847 if (IS_ERR(rockchip->ep_gpio)) {
786 dev_err(dev, "missing ep-gpios property in node\n"); 848 dev_err(dev, "missing ep-gpios property in node\n");
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 55f453de562e..c7f3408e3148 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
29 return intel_mid_pci_set_power_state(pdev, state); 29 return intel_mid_pci_set_power_state(pdev, state);
30} 30}
31 31
32static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
33{
34 return intel_mid_pci_get_power_state(pdev);
35}
36
32static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) 37static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
33{ 38{
34 return PCI_D3hot; 39 return PCI_D3hot;
@@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
52static struct pci_platform_pm_ops mid_pci_platform_pm = { 57static struct pci_platform_pm_ops mid_pci_platform_pm = {
53 .is_manageable = mid_pci_power_manageable, 58 .is_manageable = mid_pci_power_manageable,
54 .set_state = mid_pci_set_power_state, 59 .set_state = mid_pci_set_power_state,
60 .get_state = mid_pci_get_power_state,
55 .choose_state = mid_pci_choose_state, 61 .choose_state = mid_pci_choose_state,
56 .sleep_wake = mid_pci_sleep_wake, 62 .sleep_wake = mid_pci_sleep_wake,
57 .run_wake = mid_pci_run_wake, 63 .run_wake = mid_pci_run_wake,
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 66c4d8f42233..9526e341988b 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -121,6 +121,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
121 return -EINVAL; 121 return -EINVAL;
122 } 122 }
123 123
124 /*
125 * If we have a shadow copy in RAM, the PCI device doesn't respond
126 * to the shadow range, so we don't need to claim it, and upstream
127 * bridges don't need to route the range to the device.
128 */
129 if (res->flags & IORESOURCE_ROM_SHADOW)
130 return 0;
131
124 root = pci_find_parent_resource(dev, res); 132 root = pci_find_parent_resource(dev, res);
125 if (!root) { 133 if (!root) {
126 dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n", 134 dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 153f3122283d..b6b316de055c 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -107,7 +107,7 @@ int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt,
107 107
108 ret = regulator_enable(r->reg); 108 ret = regulator_enable(r->reg);
109 } else { 109 } else {
110 regulator_disable(r->reg); 110 ret = regulator_disable(r->reg);
111 } 111 }
112 if (ret == 0) 112 if (ret == 0)
113 r->on = on; 113 r->on = on;
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
index 32ae78c8ca17..c85fb0b59729 100644
--- a/drivers/phy/phy-da8xx-usb.c
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -198,7 +198,8 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
198 } else { 198 } else {
199 int ret; 199 int ret;
200 200
201 ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); 201 ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy",
202 "ohci-da8xx");
202 if (ret) 203 if (ret)
203 dev_warn(dev, "Failed to create usb11 phy lookup\n"); 204 dev_warn(dev, "Failed to create usb11 phy lookup\n");
204 ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy", 205 ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy",
@@ -216,7 +217,7 @@ static int da8xx_usb_phy_remove(struct platform_device *pdev)
216 217
217 if (!pdev->dev.of_node) { 218 if (!pdev->dev.of_node) {
218 phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx"); 219 phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx");
219 phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); 220 phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx");
220 } 221 }
221 222
222 return 0; 223 return 0;
diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/phy-rockchip-pcie.c
index a2b4c6b58aea..6904633cad68 100644
--- a/drivers/phy/phy-rockchip-pcie.c
+++ b/drivers/phy/phy-rockchip-pcie.c
@@ -249,21 +249,10 @@ err_refclk:
249static int rockchip_pcie_phy_exit(struct phy *phy) 249static int rockchip_pcie_phy_exit(struct phy *phy)
250{ 250{
251 struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); 251 struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
252 int err = 0;
253 252
254 clk_disable_unprepare(rk_phy->clk_pciephy_ref); 253 clk_disable_unprepare(rk_phy->clk_pciephy_ref);
255 254
256 err = reset_control_deassert(rk_phy->phy_rst); 255 return 0;
257 if (err) {
258 dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
259 goto err_reset;
260 }
261
262 return err;
263
264err_reset:
265 clk_prepare_enable(rk_phy->clk_pciephy_ref);
266 return err;
267} 256}
268 257
269static const struct phy_ops ops = { 258static const struct phy_ops ops = {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index b9342a2af7b3..fec34f5213c4 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -264,7 +264,7 @@ static int sun4i_usb_phy_init(struct phy *_phy)
264 return ret; 264 return ret;
265 } 265 }
266 266
267 if (data->cfg->enable_pmu_unk1) { 267 if (phy->pmu && data->cfg->enable_pmu_unk1) {
268 val = readl(phy->pmu + REG_PMU_UNK1); 268 val = readl(phy->pmu + REG_PMU_UNK1);
269 writel(val & ~2, phy->pmu + REG_PMU_UNK1); 269 writel(val & ~2, phy->pmu + REG_PMU_UNK1);
270 } 270 }
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index c8c72e8259d3..87b46390b695 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -26,7 +26,7 @@
26 26
27#define ASPEED_G5_NR_PINS 228 27#define ASPEED_G5_NR_PINS 228
28 28
29#define COND1 SIG_DESC_BIT(SCU90, 6, 0) 29#define COND1 { SCU90, BIT(6), 0, 0 }
30#define COND2 { SCU94, GENMASK(1, 0), 0, 0 } 30#define COND2 { SCU94, GENMASK(1, 0), 0, 0 }
31 31
32#define B14 0 32#define B14 0
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 7f7700716398..5d1e505c3c63 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = {
844 844
845static int __init iproc_gpio_init(void) 845static int __init iproc_gpio_init(void)
846{ 846{
847 return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe); 847 return platform_driver_register(&iproc_gpio_driver);
848} 848}
849arch_initcall_sync(iproc_gpio_init); 849arch_initcall_sync(iproc_gpio_init);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 35783db1c10b..c8deb8be1da7 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = {
741 741
742static int __init nsp_gpio_init(void) 742static int __init nsp_gpio_init(void)
743{ 743{
744 return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe); 744 return platform_driver_register(&nsp_gpio_driver);
745} 745}
746arch_initcall_sync(nsp_gpio_init); 746arch_initcall_sync(nsp_gpio_init);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 47613201269a..79c4e14a5a75 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -687,6 +687,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
687 if (!info->functions) 687 if (!info->functions)
688 return -ENOMEM; 688 return -ENOMEM;
689 689
690 info->group_index = 0;
690 if (flat_funcs) { 691 if (flat_funcs) {
691 info->ngroups = of_get_child_count(np); 692 info->ngroups = of_get_child_count(np);
692 } else { 693 } else {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 30389f4ccab4..c43b1e9a06af 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1652,12 +1652,15 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
1652} 1652}
1653 1653
1654#ifdef CONFIG_PM_SLEEP 1654#ifdef CONFIG_PM_SLEEP
1655static int chv_pinctrl_suspend(struct device *dev) 1655static int chv_pinctrl_suspend_noirq(struct device *dev)
1656{ 1656{
1657 struct platform_device *pdev = to_platform_device(dev); 1657 struct platform_device *pdev = to_platform_device(dev);
1658 struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); 1658 struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
1659 unsigned long flags;
1659 int i; 1660 int i;
1660 1661
1662 raw_spin_lock_irqsave(&chv_lock, flags);
1663
1661 pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK); 1664 pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK);
1662 1665
1663 for (i = 0; i < pctrl->community->npins; i++) { 1666 for (i = 0; i < pctrl->community->npins; i++) {
@@ -1678,15 +1681,20 @@ static int chv_pinctrl_suspend(struct device *dev)
1678 ctx->padctrl1 = readl(reg); 1681 ctx->padctrl1 = readl(reg);
1679 } 1682 }
1680 1683
1684 raw_spin_unlock_irqrestore(&chv_lock, flags);
1685
1681 return 0; 1686 return 0;
1682} 1687}
1683 1688
1684static int chv_pinctrl_resume(struct device *dev) 1689static int chv_pinctrl_resume_noirq(struct device *dev)
1685{ 1690{
1686 struct platform_device *pdev = to_platform_device(dev); 1691 struct platform_device *pdev = to_platform_device(dev);
1687 struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); 1692 struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
1693 unsigned long flags;
1688 int i; 1694 int i;
1689 1695
1696 raw_spin_lock_irqsave(&chv_lock, flags);
1697
1690 /* 1698 /*
1691 * Mask all interrupts before restoring per-pin configuration 1699 * Mask all interrupts before restoring per-pin configuration
1692 * registers because we don't know in which state BIOS left them 1700 * registers because we don't know in which state BIOS left them
@@ -1731,12 +1739,15 @@ static int chv_pinctrl_resume(struct device *dev)
1731 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); 1739 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
1732 chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); 1740 chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK);
1733 1741
1742 raw_spin_unlock_irqrestore(&chv_lock, flags);
1743
1734 return 0; 1744 return 0;
1735} 1745}
1736#endif 1746#endif
1737 1747
1738static const struct dev_pm_ops chv_pinctrl_pm_ops = { 1748static const struct dev_pm_ops chv_pinctrl_pm_ops = {
1739 SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume) 1749 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq,
1750 chv_pinctrl_resume_noirq)
1740}; 1751};
1741 1752
1742static const struct acpi_device_id chv_pinctrl_acpi_match[] = { 1753static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 99da4cf91031..b7bb37167969 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
1512 if (info->irqmux_base || gpio_irq > 0) { 1512 if (info->irqmux_base || gpio_irq > 0) {
1513 err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip, 1513 err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
1514 0, handle_simple_irq, 1514 0, handle_simple_irq,
1515 IRQ_TYPE_LEVEL_LOW); 1515 IRQ_TYPE_NONE);
1516 if (err) { 1516 if (err) {
1517 gpiochip_remove(&bank->gpio_chip); 1517 gpiochip_remove(&bank->gpio_chip);
1518 dev_info(dev, "could not add irqchip\n"); 1518 dev_info(dev, "could not add irqchip\n");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 200667f08c37..efc43711ff5c 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1092,9 +1092,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
1092 return -EINVAL; 1092 return -EINVAL;
1093 } 1093 }
1094 1094
1095 ret = stm32_pctrl_dt_setup_irq(pdev, pctl); 1095 if (of_find_property(np, "interrupt-parent", NULL)) {
1096 if (ret) 1096 ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
1097 return ret; 1097 if (ret)
1098 return ret;
1099 }
1098 1100
1099 for_each_child_of_node(np, child) 1101 for_each_child_of_node(np, child)
1100 if (of_property_read_bool(child, "gpio-controller")) 1102 if (of_property_read_bool(child, "gpio-controller"))
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a2323941e677..a7614fc542b5 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -934,6 +934,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
934 }, 934 },
935 }, 935 },
936 { 936 {
937 .ident = "Lenovo Yoga 900",
938 .matches = {
939 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
940 DMI_MATCH(DMI_BOARD_NAME, "VIUU4"),
941 },
942 },
943 {
937 .ident = "Lenovo YOGA 910-13IKB", 944 .ident = "Lenovo YOGA 910-13IKB",
938 .matches = { 945 .matches = {
939 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 946 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index ed5874217ee7..12dbb5063376 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -264,7 +264,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
264 return AE_OK; 264 return AE_OK;
265 265
266 if (acpi_match_device_ids(dev, ids) == 0) 266 if (acpi_match_device_ids(dev, ids) == 0)
267 if (acpi_create_platform_device(dev)) 267 if (acpi_create_platform_device(dev, NULL))
268 dev_info(&dev->dev, 268 dev_info(&dev->dev,
269 "intel-hid: created platform device\n"); 269 "intel-hid: created platform device\n");
270 270
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 146d02f8c9bc..78080763df51 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -164,7 +164,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
164 return AE_OK; 164 return AE_OK;
165 165
166 if (acpi_match_device_ids(dev, ids) == 0) 166 if (acpi_match_device_ids(dev, ids) == 0)
167 if (acpi_create_platform_device(dev)) 167 if (acpi_create_platform_device(dev, NULL))
168 dev_info(&dev->dev, 168 dev_info(&dev->dev,
169 "intel-vbtn: created platform device\n"); 169 "intel-vbtn: created platform device\n");
170 170
diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
index feac4576b837..2df07ee8f3c3 100644
--- a/drivers/platform/x86/toshiba-wmi.c
+++ b/drivers/platform/x86/toshiba-wmi.c
@@ -24,14 +24,15 @@
24#include <linux/acpi.h> 24#include <linux/acpi.h>
25#include <linux/input.h> 25#include <linux/input.h>
26#include <linux/input/sparse-keymap.h> 26#include <linux/input/sparse-keymap.h>
27#include <linux/dmi.h>
27 28
28MODULE_AUTHOR("Azael Avalos"); 29MODULE_AUTHOR("Azael Avalos");
29MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver"); 30MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver");
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
32#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" 33#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
33 34
34MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID); 35MODULE_ALIAS("wmi:"WMI_EVENT_GUID);
35 36
36static struct input_dev *toshiba_wmi_input_dev; 37static struct input_dev *toshiba_wmi_input_dev;
37 38
@@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context)
63 kfree(response.pointer); 64 kfree(response.pointer);
64} 65}
65 66
67static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = {
68 {
69 .ident = "Toshiba laptop",
70 .matches = {
71 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
72 },
73 },
74 {}
75};
76
66static int __init toshiba_wmi_input_setup(void) 77static int __init toshiba_wmi_input_setup(void)
67{ 78{
68 acpi_status status; 79 acpi_status status;
@@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void)
81 if (err) 92 if (err)
82 goto err_free_dev; 93 goto err_free_dev;
83 94
84 status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID, 95 status = wmi_install_notify_handler(WMI_EVENT_GUID,
85 toshiba_wmi_notify, NULL); 96 toshiba_wmi_notify, NULL);
86 if (ACPI_FAILURE(status)) { 97 if (ACPI_FAILURE(status)) {
87 err = -EIO; 98 err = -EIO;
@@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void)
95 return 0; 106 return 0;
96 107
97 err_remove_notifier: 108 err_remove_notifier:
98 wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); 109 wmi_remove_notify_handler(WMI_EVENT_GUID);
99 err_free_keymap: 110 err_free_keymap:
100 sparse_keymap_free(toshiba_wmi_input_dev); 111 sparse_keymap_free(toshiba_wmi_input_dev);
101 err_free_dev: 112 err_free_dev:
@@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void)
105 116
106static void toshiba_wmi_input_destroy(void) 117static void toshiba_wmi_input_destroy(void)
107{ 118{
108 wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); 119 wmi_remove_notify_handler(WMI_EVENT_GUID);
109 sparse_keymap_free(toshiba_wmi_input_dev); 120 sparse_keymap_free(toshiba_wmi_input_dev);
110 input_unregister_device(toshiba_wmi_input_dev); 121 input_unregister_device(toshiba_wmi_input_dev);
111} 122}
@@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void)
114{ 125{
115 int ret; 126 int ret;
116 127
117 if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) 128 if (!wmi_has_guid(WMI_EVENT_GUID) ||
129 !dmi_check_system(toshiba_wmi_dmi_table))
118 return -ENODEV; 130 return -ENODEV;
119 131
120 ret = toshiba_wmi_input_setup(); 132 ret = toshiba_wmi_input_setup();
@@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void)
130 142
131static void __exit toshiba_wmi_exit(void) 143static void __exit toshiba_wmi_exit(void)
132{ 144{
133 if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) 145 if (wmi_has_guid(WMI_EVENT_GUID))
134 toshiba_wmi_input_destroy(); 146 toshiba_wmi_input_destroy();
135} 147}
136 148
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 67426c0477d3..5c1519b229e0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2754,7 +2754,7 @@ static int _regulator_set_voltage_time(struct regulator_dev *rdev,
2754 ramp_delay = rdev->desc->ramp_delay; 2754 ramp_delay = rdev->desc->ramp_delay;
2755 2755
2756 if (ramp_delay == 0) { 2756 if (ramp_delay == 0) {
2757 rdev_warn(rdev, "ramp_delay not set\n"); 2757 rdev_dbg(rdev, "ramp_delay not set\n");
2758 return 0; 2758 return 0;
2759 } 2759 }
2760 2760
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 18a93d3e3f93..d36534965635 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -327,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = {
327 { .compatible = "alphascale,asm9260-rtc", }, 327 { .compatible = "alphascale,asm9260-rtc", },
328 {} 328 {}
329}; 329};
330MODULE_DEVICE_TABLE(of, asm9260_dt_ids);
330 331
331static struct platform_driver asm9260_rtc_driver = { 332static struct platform_driver asm9260_rtc_driver = {
332 .probe = asm9260_rtc_probe, 333 .probe = asm9260_rtc_probe,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index dd3d59806ffa..7030d7cd3861 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -776,7 +776,7 @@ static void cmos_do_shutdown(int rtc_irq)
776 spin_unlock_irq(&rtc_lock); 776 spin_unlock_irq(&rtc_lock);
777} 777}
778 778
779static void __exit cmos_do_remove(struct device *dev) 779static void cmos_do_remove(struct device *dev)
780{ 780{
781 struct cmos_rtc *cmos = dev_get_drvdata(dev); 781 struct cmos_rtc *cmos = dev_get_drvdata(dev);
782 struct resource *ports; 782 struct resource *ports;
@@ -996,8 +996,9 @@ static u32 rtc_handler(void *context)
996 struct cmos_rtc *cmos = dev_get_drvdata(dev); 996 struct cmos_rtc *cmos = dev_get_drvdata(dev);
997 unsigned char rtc_control = 0; 997 unsigned char rtc_control = 0;
998 unsigned char rtc_intr; 998 unsigned char rtc_intr;
999 unsigned long flags;
999 1000
1000 spin_lock_irq(&rtc_lock); 1001 spin_lock_irqsave(&rtc_lock, flags);
1001 if (cmos_rtc.suspend_ctrl) 1002 if (cmos_rtc.suspend_ctrl)
1002 rtc_control = CMOS_READ(RTC_CONTROL); 1003 rtc_control = CMOS_READ(RTC_CONTROL);
1003 if (rtc_control & RTC_AIE) { 1004 if (rtc_control & RTC_AIE) {
@@ -1006,7 +1007,7 @@ static u32 rtc_handler(void *context)
1006 rtc_intr = CMOS_READ(RTC_INTR_FLAGS); 1007 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
1007 rtc_update_irq(cmos->rtc, 1, rtc_intr); 1008 rtc_update_irq(cmos->rtc, 1, rtc_intr);
1008 } 1009 }
1009 spin_unlock_irq(&rtc_lock); 1010 spin_unlock_irqrestore(&rtc_lock, flags);
1010 1011
1011 pm_wakeup_event(dev, 0); 1012 pm_wakeup_event(dev, 0);
1012 acpi_clear_event(ACPI_EVENT_RTC); 1013 acpi_clear_event(ACPI_EVENT_RTC);
@@ -1129,7 +1130,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
1129 pnp_irq(pnp, 0)); 1130 pnp_irq(pnp, 0));
1130} 1131}
1131 1132
1132static void __exit cmos_pnp_remove(struct pnp_dev *pnp) 1133static void cmos_pnp_remove(struct pnp_dev *pnp)
1133{ 1134{
1134 cmos_do_remove(&pnp->dev); 1135 cmos_do_remove(&pnp->dev);
1135} 1136}
@@ -1161,7 +1162,7 @@ static struct pnp_driver cmos_pnp_driver = {
1161 .name = (char *) driver_name, 1162 .name = (char *) driver_name,
1162 .id_table = rtc_ids, 1163 .id_table = rtc_ids,
1163 .probe = cmos_pnp_probe, 1164 .probe = cmos_pnp_probe,
1164 .remove = __exit_p(cmos_pnp_remove), 1165 .remove = cmos_pnp_remove,
1165 .shutdown = cmos_pnp_shutdown, 1166 .shutdown = cmos_pnp_shutdown,
1166 1167
1167 /* flag ensures resume() gets called, and stops syslog spam */ 1168 /* flag ensures resume() gets called, and stops syslog spam */
@@ -1238,7 +1239,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
1238 return cmos_do_probe(&pdev->dev, resource, irq); 1239 return cmos_do_probe(&pdev->dev, resource, irq);
1239} 1240}
1240 1241
1241static int __exit cmos_platform_remove(struct platform_device *pdev) 1242static int cmos_platform_remove(struct platform_device *pdev)
1242{ 1243{
1243 cmos_do_remove(&pdev->dev); 1244 cmos_do_remove(&pdev->dev);
1244 return 0; 1245 return 0;
@@ -1263,7 +1264,7 @@ static void cmos_platform_shutdown(struct platform_device *pdev)
1263MODULE_ALIAS("platform:rtc_cmos"); 1264MODULE_ALIAS("platform:rtc_cmos");
1264 1265
1265static struct platform_driver cmos_platform_driver = { 1266static struct platform_driver cmos_platform_driver = {
1266 .remove = __exit_p(cmos_platform_remove), 1267 .remove = cmos_platform_remove,
1267 .shutdown = cmos_platform_shutdown, 1268 .shutdown = cmos_platform_shutdown,
1268 .driver = { 1269 .driver = {
1269 .name = driver_name, 1270 .name = driver_name,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index b04ea9b5ae67..51e52446eacb 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -113,6 +113,7 @@
113/* OMAP_RTC_OSC_REG bit fields: */ 113/* OMAP_RTC_OSC_REG bit fields: */
114#define OMAP_RTC_OSC_32KCLK_EN BIT(6) 114#define OMAP_RTC_OSC_32KCLK_EN BIT(6)
115#define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) 115#define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
116#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
116 117
117/* OMAP_RTC_IRQWAKEEN bit fields: */ 118/* OMAP_RTC_IRQWAKEEN bit fields: */
118#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) 119#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
@@ -146,6 +147,7 @@ struct omap_rtc {
146 u8 interrupts_reg; 147 u8 interrupts_reg;
147 bool is_pmic_controller; 148 bool is_pmic_controller;
148 bool has_ext_clk; 149 bool has_ext_clk;
150 bool is_suspending;
149 const struct omap_rtc_device_type *type; 151 const struct omap_rtc_device_type *type;
150 struct pinctrl_dev *pctldev; 152 struct pinctrl_dev *pctldev;
151}; 153};
@@ -786,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
786 */ 788 */
787 if (rtc->has_ext_clk) { 789 if (rtc->has_ext_clk) {
788 reg = rtc_read(rtc, OMAP_RTC_OSC_REG); 790 reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
789 rtc_write(rtc, OMAP_RTC_OSC_REG, 791 reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
790 reg | OMAP_RTC_OSC_SEL_32KCLK_SRC); 792 reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
793 rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
791 } 794 }
792 795
793 rtc->type->lock(rtc); 796 rtc->type->lock(rtc);
@@ -898,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev)
898 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); 901 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
899 rtc->type->lock(rtc); 902 rtc->type->lock(rtc);
900 903
901 /* Disable the clock/module */ 904 rtc->is_suspending = true;
902 pm_runtime_put_sync(dev);
903 905
904 return 0; 906 return 0;
905} 907}
@@ -908,9 +910,6 @@ static int omap_rtc_resume(struct device *dev)
908{ 910{
909 struct omap_rtc *rtc = dev_get_drvdata(dev); 911 struct omap_rtc *rtc = dev_get_drvdata(dev);
910 912
911 /* Enable the clock/module so that we can access the registers */
912 pm_runtime_get_sync(dev);
913
914 rtc->type->unlock(rtc); 913 rtc->type->unlock(rtc);
915 if (device_may_wakeup(dev)) 914 if (device_may_wakeup(dev))
916 disable_irq_wake(rtc->irq_alarm); 915 disable_irq_wake(rtc->irq_alarm);
@@ -918,11 +917,34 @@ static int omap_rtc_resume(struct device *dev)
918 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); 917 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg);
919 rtc->type->lock(rtc); 918 rtc->type->lock(rtc);
920 919
920 rtc->is_suspending = false;
921
921 return 0; 922 return 0;
922} 923}
923#endif 924#endif
924 925
925static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume); 926#ifdef CONFIG_PM
927static int omap_rtc_runtime_suspend(struct device *dev)
928{
929 struct omap_rtc *rtc = dev_get_drvdata(dev);
930
931 if (rtc->is_suspending && !rtc->has_ext_clk)
932 return -EBUSY;
933
934 return 0;
935}
936
937static int omap_rtc_runtime_resume(struct device *dev)
938{
939 return 0;
940}
941#endif
942
943static const struct dev_pm_ops omap_rtc_pm_ops = {
944 SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume)
945 SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend,
946 omap_rtc_runtime_resume, NULL)
947};
926 948
927static void omap_rtc_shutdown(struct platform_device *pdev) 949static void omap_rtc_shutdown(struct platform_device *pdev)
928{ 950{
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 3d53d636b17b..f0cfb0451757 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
2636 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; 2636 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
2637 struct CommandControlBlock *ccb; 2637 struct CommandControlBlock *ccb;
2638 int target = cmd->device->id; 2638 int target = cmd->device->id;
2639 int lun = cmd->device->lun;
2640 uint8_t scsicmd = cmd->cmnd[0];
2641 cmd->scsi_done = done; 2639 cmd->scsi_done = done;
2642 cmd->host_scribble = NULL; 2640 cmd->host_scribble = NULL;
2643 cmd->result = 0; 2641 cmd->result = 0;
2644 if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
2645 if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2646 cmd->result = (DID_NO_CONNECT << 16);
2647 }
2648 cmd->scsi_done(cmd);
2649 return 0;
2650 }
2651 if (target == 16) { 2642 if (target == 16) {
2652 /* virtual device for iop message transfer */ 2643 /* virtual device for iop message transfer */
2653 arcmsr_handle_virtual_command(acb, cmd); 2644 arcmsr_handle_virtual_command(acb, cmd);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d1421139e6ea..2ffe029ff2b6 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2081,9 +2081,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
2081 /* never reached the xmit task callout */ 2081 /* never reached the xmit task callout */
2082 if (tdata->skb) 2082 if (tdata->skb)
2083 __kfree_skb(tdata->skb); 2083 __kfree_skb(tdata->skb);
2084 memset(tdata, 0, sizeof(*tdata));
2085 2084
2086 task_release_itt(task, task->hdr_itt); 2085 task_release_itt(task, task->hdr_itt);
2086 memset(tdata, 0, sizeof(*tdata));
2087
2087 iscsi_tcp_cleanup_task(task); 2088 iscsi_tcp_cleanup_task(task);
2088} 2089}
2089EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); 2090EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 241829e59668..7bb20684e9fa 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -793,6 +793,7 @@ static void alua_rtpg_work(struct work_struct *work)
793 WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); 793 WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
794 WARN_ON(pg->flags & ALUA_PG_RUN_STPG); 794 WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
795 spin_unlock_irqrestore(&pg->lock, flags); 795 spin_unlock_irqrestore(&pg->lock, flags);
796 kref_put(&pg->kref, release_port_group);
796 return; 797 return;
797 } 798 }
798 if (pg->flags & ALUA_SYNC_STPG) 799 if (pg->flags & ALUA_SYNC_STPG)
@@ -890,6 +891,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
890 /* Do not queue if the worker is already running */ 891 /* Do not queue if the worker is already running */
891 if (!(pg->flags & ALUA_PG_RUNNING)) { 892 if (!(pg->flags & ALUA_PG_RUNNING)) {
892 kref_get(&pg->kref); 893 kref_get(&pg->kref);
894 sdev = NULL;
893 start_queue = 1; 895 start_queue = 1;
894 } 896 }
895 } 897 }
@@ -901,7 +903,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
901 if (start_queue && 903 if (start_queue &&
902 !queue_delayed_work(alua_wq, &pg->rtpg_work, 904 !queue_delayed_work(alua_wq, &pg->rtpg_work,
903 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { 905 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
904 scsi_device_put(sdev); 906 if (sdev)
907 scsi_device_put(sdev);
905 kref_put(&pg->kref, release_port_group); 908 kref_put(&pg->kref, release_port_group);
906 } 909 }
907} 910}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ca86c885dfaa..3aaea713bf37 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2233,7 +2233,7 @@ struct megasas_instance_template {
2233}; 2233};
2234 2234
2235#define MEGASAS_IS_LOGICAL(scp) \ 2235#define MEGASAS_IS_LOGICAL(scp) \
2236 (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 2236 ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
2237 2237
2238#define MEGASAS_DEV_INDEX(scp) \ 2238#define MEGASAS_DEV_INDEX(scp) \
2239 (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ 2239 (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9ff57dee72d7..d8b1fbd4c8aa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,16 +1700,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1700 goto out_done; 1700 goto out_done;
1701 } 1701 }
1702 1702
1703 switch (scmd->cmnd[0]) { 1703 /*
1704 case SYNCHRONIZE_CACHE: 1704 * FW takes care of flush cache on its own for Virtual Disk.
1705 /* 1705 * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
1706 * FW takes care of flush cache on its own 1706 */
1707 * No need to send it down 1707 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
1708 */
1709 scmd->result = DID_OK << 16; 1708 scmd->result = DID_OK << 16;
1710 goto out_done; 1709 goto out_done;
1711 default:
1712 break;
1713 } 1710 }
1714 1711
1715 return instance->instancet->build_and_issue_cmd(instance, scmd); 1712 return instance->instancet->build_and_issue_cmd(instance, scmd);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 209a969a979d..8aa769a2d919 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget)
1273 sas_target_priv_data->handle = raid_device->handle; 1273 sas_target_priv_data->handle = raid_device->handle;
1274 sas_target_priv_data->sas_address = raid_device->wwid; 1274 sas_target_priv_data->sas_address = raid_device->wwid;
1275 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; 1275 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1276 sas_target_priv_data->raid_device = raid_device;
1277 if (ioc->is_warpdrive) 1276 if (ioc->is_warpdrive)
1278 raid_device->starget = starget; 1277 sas_target_priv_data->raid_device = raid_device;
1278 raid_device->starget = starget;
1279 } 1279 }
1280 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1280 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1281 return 0; 1281 return 0;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ace65db1d2a2..567fa080e261 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -707,6 +707,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
707 srb_t *sp; 707 srb_t *sp;
708 int rval; 708 int rval;
709 709
710 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
711 cmd->result = DID_NO_CONNECT << 16;
712 goto qc24_fail_command;
713 }
714
710 if (ha->flags.eeh_busy) { 715 if (ha->flags.eeh_busy) {
711 if (ha->flags.pci_channel_io_perm_failure) { 716 if (ha->flags.pci_channel_io_perm_failure) {
712 ql_dbg(ql_dbg_aer, vha, 0x9010, 717 ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -1451,6 +1456,15 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1451 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1456 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1452 sp = req->outstanding_cmds[cnt]; 1457 sp = req->outstanding_cmds[cnt];
1453 if (sp) { 1458 if (sp) {
1459 /* Get a reference to the sp and drop the lock.
1460 * The reference ensures this sp->done() call
1461 * - and not the call in qla2xxx_eh_abort() -
1462 * ends the SCSI command (with result 'res').
1463 */
1464 sp_get(sp);
1465 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1466 qla2xxx_eh_abort(GET_CMD_SP(sp));
1467 spin_lock_irqsave(&ha->hardware_lock, flags);
1454 req->outstanding_cmds[cnt] = NULL; 1468 req->outstanding_cmds[cnt] = NULL;
1455 sp->done(vha, sp, res); 1469 sp->done(vha, sp, res);
1456 } 1470 }
@@ -2341,6 +2355,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
2341{ 2355{
2342 scsi_qla_host_t *vha = shost_priv(shost); 2356 scsi_qla_host_t *vha = shost_priv(shost);
2343 2357
2358 if (test_bit(UNLOADING, &vha->dpc_flags))
2359 return 1;
2344 if (!vha->host) 2360 if (!vha->host)
2345 return 1; 2361 return 1;
2346 if (time > vha->hw->loop_reset_delay * HZ) 2362 if (time > vha->hw->loop_reset_delay * HZ)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c905709707f0..cf04a364fd8b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void)
5134 bus_unregister(&pseudo_lld_bus); 5134 bus_unregister(&pseudo_lld_bus);
5135 root_device_unregister(pseudo_primary); 5135 root_device_unregister(pseudo_primary);
5136 5136
5137 vfree(map_storep);
5137 vfree(dif_storep); 5138 vfree(dif_storep);
5138 vfree(fake_storep); 5139 vfree(fake_storep);
5139 kfree(sdebug_q_arr); 5140 kfree(sdebug_q_arr);
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 4a0d3cdc607c..15ca09cd16f3 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -793,6 +793,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
793 unsigned long flags; 793 unsigned long flags;
794 int result = SUCCESS; 794 int result = SUCCESS;
795 DECLARE_COMPLETION_ONSTACK(abort_cmp); 795 DECLARE_COMPLETION_ONSTACK(abort_cmp);
796 int done;
796 797
797 scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", 798 scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
798 adapter->host->host_no, cmd); 799 adapter->host->host_no, cmd);
@@ -824,10 +825,10 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
824 pvscsi_abort_cmd(adapter, ctx); 825 pvscsi_abort_cmd(adapter, ctx);
825 spin_unlock_irqrestore(&adapter->hw_lock, flags); 826 spin_unlock_irqrestore(&adapter->hw_lock, flags);
826 /* Wait for 2 secs for the completion. */ 827 /* Wait for 2 secs for the completion. */
827 wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); 828 done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
828 spin_lock_irqsave(&adapter->hw_lock, flags); 829 spin_lock_irqsave(&adapter->hw_lock, flags);
829 830
830 if (!completion_done(&abort_cmp)) { 831 if (!done) {
831 /* 832 /*
832 * Failed to abort the command, unmark the fact that it 833 * Failed to abort the command, unmark the fact that it
833 * was requested to be aborted. 834 * was requested to be aborted.
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index c097d2ccbde3..d41292ef85f2 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -26,7 +26,7 @@
26 26
27#include <linux/types.h> 27#include <linux/types.h>
28 28
29#define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k" 29#define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k"
30 30
31#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 31#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
32 32
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 35c0dd945668..a67b0ff6a362 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -70,6 +70,7 @@
70#define SPI_SR 0x2c 70#define SPI_SR 0x2c
71#define SPI_SR_EOQF 0x10000000 71#define SPI_SR_EOQF 0x10000000
72#define SPI_SR_TCFQF 0x80000000 72#define SPI_SR_TCFQF 0x80000000
73#define SPI_SR_CLEAR 0xdaad0000
73 74
74#define SPI_RSER 0x30 75#define SPI_RSER 0x30
75#define SPI_RSER_EOQFE 0x10000000 76#define SPI_RSER_EOQFE 0x10000000
@@ -646,6 +647,11 @@ static const struct regmap_config dspi_regmap_config = {
646 .max_register = 0x88, 647 .max_register = 0x88,
647}; 648};
648 649
650static void dspi_init(struct fsl_dspi *dspi)
651{
652 regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
653}
654
649static int dspi_probe(struct platform_device *pdev) 655static int dspi_probe(struct platform_device *pdev)
650{ 656{
651 struct device_node *np = pdev->dev.of_node; 657 struct device_node *np = pdev->dev.of_node;
@@ -709,6 +715,7 @@ static int dspi_probe(struct platform_device *pdev)
709 return PTR_ERR(dspi->regmap); 715 return PTR_ERR(dspi->regmap);
710 } 716 }
711 717
718 dspi_init(dspi);
712 dspi->irq = platform_get_irq(pdev, 0); 719 dspi->irq = platform_get_irq(pdev, 0);
713 if (dspi->irq < 0) { 720 if (dspi->irq < 0) {
714 dev_err(&pdev->dev, "can't get platform irq\n"); 721 dev_err(&pdev->dev, "can't get platform irq\n");
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7451585a080e..2c175b9495f7 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -458,7 +458,7 @@ static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
458 458
459 mspi->len -= rx_nr_bytes; 459 mspi->len -= rx_nr_bytes;
460 460
461 if (mspi->rx) 461 if (rx_nr_bytes && mspi->rx)
462 mspi->get_rx(rx_data, mspi); 462 mspi->get_rx(rx_data, mspi);
463 } 463 }
464 464
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5787b723b593..838783c3fed0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1618,9 +1618,11 @@ static void of_register_spi_devices(struct spi_master *master)
1618 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1618 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1619 continue; 1619 continue;
1620 spi = of_register_spi_device(master, nc); 1620 spi = of_register_spi_device(master, nc);
1621 if (IS_ERR(spi)) 1621 if (IS_ERR(spi)) {
1622 dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1622 dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1623 nc->full_name); 1623 nc->full_name);
1624 of_node_clear_flag(nc, OF_POPULATED);
1625 }
1624 } 1626 }
1625} 1627}
1626#else 1628#else
@@ -3131,6 +3133,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3131 if (IS_ERR(spi)) { 3133 if (IS_ERR(spi)) {
3132 pr_err("%s: failed to create for '%s'\n", 3134 pr_err("%s: failed to create for '%s'\n",
3133 __func__, rd->dn->full_name); 3135 __func__, rd->dn->full_name);
3136 of_node_clear_flag(rd->dn, OF_POPULATED);
3134 return notifier_from_errno(PTR_ERR(spi)); 3137 return notifier_from_errno(PTR_ERR(spi));
3135 } 3138 }
3136 break; 3139 break;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 7043eb0543f6..5ab49a798164 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -207,7 +207,8 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
207 * clock period is specified by user with prescaling 207 * clock period is specified by user with prescaling
208 * already taken into account. 208 * already taken into account.
209 */ 209 */
210 return counter->clock_period_ps; 210 *period_ps = counter->clock_period_ps;
211 return 0;
211 } 212 }
212 213
213 switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) { 214 switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 34307ac3f255..d33d6fe078ad 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -186,6 +186,7 @@ int arche_platform_change_state(enum arche_platform_state state,
186exit: 186exit:
187 spin_unlock_irqrestore(&arche_pdata->wake_lock, flags); 187 spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
188 mutex_unlock(&arche_pdata->platform_state_mutex); 188 mutex_unlock(&arche_pdata->platform_state_mutex);
189 put_device(&pdev->dev);
189 of_node_put(np); 190 of_node_put(np);
190 return ret; 191 return ret;
191} 192}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 5eecf1cb1028..3892a7470410 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -655,6 +655,7 @@ static void ad5933_work(struct work_struct *work)
655 __be16 buf[2]; 655 __be16 buf[2];
656 int val[2]; 656 int val[2];
657 unsigned char status; 657 unsigned char status;
658 int ret;
658 659
659 mutex_lock(&indio_dev->mlock); 660 mutex_lock(&indio_dev->mlock);
660 if (st->state == AD5933_CTRL_INIT_START_FREQ) { 661 if (st->state == AD5933_CTRL_INIT_START_FREQ) {
@@ -662,19 +663,22 @@ static void ad5933_work(struct work_struct *work)
662 ad5933_cmd(st, AD5933_CTRL_START_SWEEP); 663 ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
663 st->state = AD5933_CTRL_START_SWEEP; 664 st->state = AD5933_CTRL_START_SWEEP;
664 schedule_delayed_work(&st->work, st->poll_time_jiffies); 665 schedule_delayed_work(&st->work, st->poll_time_jiffies);
665 mutex_unlock(&indio_dev->mlock); 666 goto out;
666 return;
667 } 667 }
668 668
669 ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); 669 ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
670 if (ret)
671 goto out;
670 672
671 if (status & AD5933_STAT_DATA_VALID) { 673 if (status & AD5933_STAT_DATA_VALID) {
672 int scan_count = bitmap_weight(indio_dev->active_scan_mask, 674 int scan_count = bitmap_weight(indio_dev->active_scan_mask,
673 indio_dev->masklength); 675 indio_dev->masklength);
674 ad5933_i2c_read(st->client, 676 ret = ad5933_i2c_read(st->client,
675 test_bit(1, indio_dev->active_scan_mask) ? 677 test_bit(1, indio_dev->active_scan_mask) ?
676 AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, 678 AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
677 scan_count * 2, (u8 *)buf); 679 scan_count * 2, (u8 *)buf);
680 if (ret)
681 goto out;
678 682
679 if (scan_count == 2) { 683 if (scan_count == 2) {
680 val[0] = be16_to_cpu(buf[0]); 684 val[0] = be16_to_cpu(buf[0]);
@@ -686,8 +690,7 @@ static void ad5933_work(struct work_struct *work)
686 } else { 690 } else {
687 /* no data available - try again later */ 691 /* no data available - try again later */
688 schedule_delayed_work(&st->work, st->poll_time_jiffies); 692 schedule_delayed_work(&st->work, st->poll_time_jiffies);
689 mutex_unlock(&indio_dev->mlock); 693 goto out;
690 return;
691 } 694 }
692 695
693 if (status & AD5933_STAT_SWEEP_DONE) { 696 if (status & AD5933_STAT_SWEEP_DONE) {
@@ -700,7 +703,7 @@ static void ad5933_work(struct work_struct *work)
700 ad5933_cmd(st, AD5933_CTRL_INC_FREQ); 703 ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
701 schedule_delayed_work(&st->work, st->poll_time_jiffies); 704 schedule_delayed_work(&st->work, st->poll_time_jiffies);
702 } 705 }
703 706out:
704 mutex_unlock(&indio_dev->mlock); 707 mutex_unlock(&indio_dev->mlock);
705} 708}
706 709
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index ea15cc638097..4d9bd02ede47 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -482,6 +482,8 @@ static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
482 flags); 482 flags);
483 memset(&bdev->rds_info, 0, sizeof(bdev->rds_info)); 483 memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
484 } 484 }
485 if (err)
486 return err;
485 487
486 return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, 488 return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
487 bdev->cache_fm_rds_system); 489 bdev->cache_fm_rds_system);
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index a324322ee0ad..499952c8ef39 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev)
106{ 106{
107 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); 107 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
108 struct serio *ser_dev; 108 struct serio *ser_dev;
109 char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
110 109
111 ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); 110 ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
112 if (!ser_dev) 111 if (!ser_dev)
113 return -ENOMEM; 112 return -ENOMEM;
114 113
115 ser_dev->id.type = SERIO_PS_PSTHRU; 114 ser_dev->id.type = SERIO_8042;
116 ser_dev->write = ps2_sendcommand; 115 ser_dev->write = ps2_sendcommand;
117 ser_dev->start = ps2_startstreaming; 116 ser_dev->start = ps2_startstreaming;
118 ser_dev->stop = ps2_stopstreaming; 117 ser_dev->stop = ps2_stopstreaming;
@@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev)
127 126
128 serio_register_port(ser_dev); 127 serio_register_port(ser_dev);
129 128
130 /* mouse reset */
131 nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
132
133 return 0; 129 return 0;
134} 130}
135 131
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 955247979aaa..4ed6d8d7712a 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -601,13 +601,13 @@
601 601
602#define PANEL_PLANE_TL 0x08001C 602#define PANEL_PLANE_TL 0x08001C
603#define PANEL_PLANE_TL_TOP_SHIFT 16 603#define PANEL_PLANE_TL_TOP_SHIFT 16
604#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16) 604#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16)
605#define PANEL_PLANE_TL_LEFT_MASK 0xeff 605#define PANEL_PLANE_TL_LEFT_MASK 0x7ff
606 606
607#define PANEL_PLANE_BR 0x080020 607#define PANEL_PLANE_BR 0x080020
608#define PANEL_PLANE_BR_BOTTOM_SHIFT 16 608#define PANEL_PLANE_BR_BOTTOM_SHIFT 16
609#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16) 609#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
610#define PANEL_PLANE_BR_RIGHT_MASK 0xeff 610#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff
611 611
612#define PANEL_HORIZONTAL_TOTAL 0x080024 612#define PANEL_HORIZONTAL_TOTAL 0x080024
613#define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16 613#define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 78f0f85bebdc..fada988512a1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -932,8 +932,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
932 DECLARE_WAITQUEUE(wait, current); 932 DECLARE_WAITQUEUE(wait, current);
933 struct async_icount old, new; 933 struct async_icount old, new;
934 934
935 if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
936 return -EINVAL;
937 do { 935 do {
938 spin_lock_irq(&acm->read_lock); 936 spin_lock_irq(&acm->read_lock);
939 old = acm->oldcount; 937 old = acm->oldcount;
@@ -1161,6 +1159,8 @@ static int acm_probe(struct usb_interface *intf,
1161 if (quirks == IGNORE_DEVICE) 1159 if (quirks == IGNORE_DEVICE)
1162 return -ENODEV; 1160 return -ENODEV;
1163 1161
1162 memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header));
1163
1164 num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR; 1164 num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;
1165 1165
1166 /* handle quirks deadly to normal probing*/ 1166 /* handle quirks deadly to normal probing*/
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7287a763cd0c..fea446900cad 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -769,15 +769,14 @@ static int dwc3_core_init(struct dwc3 *dwc)
769 return 0; 769 return 0;
770 770
771err4: 771err4:
772 phy_power_off(dwc->usb2_generic_phy); 772 phy_power_off(dwc->usb3_generic_phy);
773 773
774err3: 774err3:
775 phy_power_off(dwc->usb3_generic_phy); 775 phy_power_off(dwc->usb2_generic_phy);
776 776
777err2: 777err2:
778 usb_phy_set_suspend(dwc->usb2_phy, 1); 778 usb_phy_set_suspend(dwc->usb2_phy, 1);
779 usb_phy_set_suspend(dwc->usb3_phy, 1); 779 usb_phy_set_suspend(dwc->usb3_phy, 1);
780 dwc3_core_exit(dwc);
781 780
782err1: 781err1:
783 usb_phy_shutdown(dwc->usb2_phy); 782 usb_phy_shutdown(dwc->usb2_phy);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 89a2f712fdfe..aaaf256f71dd 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -31,6 +31,7 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/regmap.h> 32#include <linux/regmap.h>
33#include <linux/reset.h> 33#include <linux/reset.h>
34#include <linux/pinctrl/consumer.h>
34#include <linux/usb/of.h> 35#include <linux/usb/of.h>
35 36
36#include "core.h" 37#include "core.h"
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 686067dd8d2c..84a1709e0784 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -579,14 +579,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
579 579
580 req->length = length; 580 req->length = length;
581 581
582 /* throttle high/super speed IRQ rate back slightly */
583 if (gadget_is_dualspeed(dev->gadget))
584 req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
585 dev->gadget->speed == USB_SPEED_SUPER)) &&
586 !list_empty(&dev->tx_reqs))
587 ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
588 : 0;
589
590 retval = usb_ep_queue(in, req, GFP_ATOMIC); 582 retval = usb_ep_queue(in, req, GFP_ATOMIC);
591 switch (retval) { 583 switch (retval) {
592 default: 584 default:
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index d793f548dfe2..a9a1e4c40480 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -995,6 +995,14 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
995 } 995 }
996 val = readl(base + ext_cap_offset); 996 val = readl(base + ext_cap_offset);
997 997
998 /* Auto handoff never worked for these devices. Force it and continue */
999 if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
1000 (pdev->vendor == PCI_VENDOR_ID_RENESAS
1001 && pdev->device == 0x0014)) {
1002 val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
1003 writel(val, base + ext_cap_offset);
1004 }
1005
998 /* If the BIOS owns the HC, signal that the OS wants it, and wait */ 1006 /* If the BIOS owns the HC, signal that the OS wants it, and wait */
999 if (val & XHCI_HC_BIOS_OWNED) { 1007 if (val & XHCI_HC_BIOS_OWNED) {
1000 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); 1008 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 210b7e43a6fd..2440f88e07a3 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -479,7 +479,8 @@ static int da8xx_probe(struct platform_device *pdev)
479 479
480 glue->phy = devm_phy_get(&pdev->dev, "usb-phy"); 480 glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
481 if (IS_ERR(glue->phy)) { 481 if (IS_ERR(glue->phy)) {
482 dev_err(&pdev->dev, "failed to get phy\n"); 482 if (PTR_ERR(glue->phy) != -EPROBE_DEFER)
483 dev_err(&pdev->dev, "failed to get phy\n");
483 return PTR_ERR(glue->phy); 484 return PTR_ERR(glue->phy);
484 } 485 }
485 486
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 27dadc0d9114..e01116e4c067 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2114,11 +2114,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2114 musb->io.ep_offset = musb_flat_ep_offset; 2114 musb->io.ep_offset = musb_flat_ep_offset;
2115 musb->io.ep_select = musb_flat_ep_select; 2115 musb->io.ep_select = musb_flat_ep_select;
2116 } 2116 }
2117 /* And override them with platform specific ops if specified. */
2118 if (musb->ops->ep_offset)
2119 musb->io.ep_offset = musb->ops->ep_offset;
2120 if (musb->ops->ep_select)
2121 musb->io.ep_select = musb->ops->ep_select;
2122 2117
2123 /* At least tusb6010 has its own offsets */ 2118 /* At least tusb6010 has its own offsets */
2124 if (musb->ops->ep_offset) 2119 if (musb->ops->ep_offset)
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
index d059ad4d0dbd..97ee1b46db69 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/uwb/lc-rc.c
@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
56 struct uwb_rc *rc = NULL; 56 struct uwb_rc *rc = NULL;
57 57
58 dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); 58 dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
59 if (dev) 59 if (dev) {
60 rc = dev_get_drvdata(dev); 60 rc = dev_get_drvdata(dev);
61 put_device(dev);
62 }
63
61 return rc; 64 return rc;
62} 65}
63 66
@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
467 if (dev) { 470 if (dev) {
468 rc = dev_get_drvdata(dev); 471 rc = dev_get_drvdata(dev);
469 __uwb_rc_get(rc); 472 __uwb_rc_get(rc);
473 put_device(dev);
470 } 474 }
475
471 return rc; 476 return rc;
472} 477}
473EXPORT_SYMBOL_GPL(__uwb_rc_try_get); 478EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
520 525
521 dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev, 526 dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
522 find_rc_grandpa); 527 find_rc_grandpa);
523 if (dev) 528 if (dev) {
524 rc = dev_get_drvdata(dev); 529 rc = dev_get_drvdata(dev);
530 put_device(dev);
531 }
532
525 return rc; 533 return rc;
526} 534}
527EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); 535EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
553 struct uwb_rc *rc = NULL; 561 struct uwb_rc *rc = NULL;
554 562
555 dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev); 563 dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
556 if (dev) 564 if (dev) {
557 rc = dev_get_drvdata(dev); 565 rc = dev_get_drvdata(dev);
566 put_device(dev);
567 }
558 568
559 return rc; 569 return rc;
560} 570}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
index c1304b8d4985..678e93741ae1 100644
--- a/drivers/uwb/pal.c
+++ b/drivers/uwb/pal.c
@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
97 97
98 dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc); 98 dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
99 99
100 put_device(dev);
101
100 return (dev != NULL); 102 return (dev != NULL);
101} 103}
102 104
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index d624a527777f..031bc08d000d 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -829,8 +829,9 @@ static long vfio_pci_ioctl(void *device_data,
829 829
830 } else if (cmd == VFIO_DEVICE_SET_IRQS) { 830 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
831 struct vfio_irq_set hdr; 831 struct vfio_irq_set hdr;
832 size_t size;
832 u8 *data = NULL; 833 u8 *data = NULL;
833 int ret = 0; 834 int max, ret = 0;
834 835
835 minsz = offsetofend(struct vfio_irq_set, count); 836 minsz = offsetofend(struct vfio_irq_set, count);
836 837
@@ -838,23 +839,31 @@ static long vfio_pci_ioctl(void *device_data,
838 return -EFAULT; 839 return -EFAULT;
839 840
840 if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || 841 if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
842 hdr.count >= (U32_MAX - hdr.start) ||
841 hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | 843 hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
842 VFIO_IRQ_SET_ACTION_TYPE_MASK)) 844 VFIO_IRQ_SET_ACTION_TYPE_MASK))
843 return -EINVAL; 845 return -EINVAL;
844 846
845 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { 847 max = vfio_pci_get_irq_count(vdev, hdr.index);
846 size_t size; 848 if (hdr.start >= max || hdr.start + hdr.count > max)
847 int max = vfio_pci_get_irq_count(vdev, hdr.index); 849 return -EINVAL;
848 850
849 if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) 851 switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
850 size = sizeof(uint8_t); 852 case VFIO_IRQ_SET_DATA_NONE:
851 else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) 853 size = 0;
852 size = sizeof(int32_t); 854 break;
853 else 855 case VFIO_IRQ_SET_DATA_BOOL:
854 return -EINVAL; 856 size = sizeof(uint8_t);
857 break;
858 case VFIO_IRQ_SET_DATA_EVENTFD:
859 size = sizeof(int32_t);
860 break;
861 default:
862 return -EINVAL;
863 }
855 864
856 if (hdr.argsz - minsz < hdr.count * size || 865 if (size) {
857 hdr.start >= max || hdr.start + hdr.count > max) 866 if (hdr.argsz - minsz < hdr.count * size)
858 return -EINVAL; 867 return -EINVAL;
859 868
860 data = memdup_user((void __user *)(arg + minsz), 869 data = memdup_user((void __user *)(arg + minsz),
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index c2e60893cd09..1c46045b0e7f 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -256,7 +256,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
256 if (!is_irq_none(vdev)) 256 if (!is_irq_none(vdev))
257 return -EINVAL; 257 return -EINVAL;
258 258
259 vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); 259 vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
260 if (!vdev->ctx) 260 if (!vdev->ctx)
261 return -ENOMEM; 261 return -ENOMEM;
262 262
diff --git a/drivers/virtio/config.c b/drivers/virtio/config.c
deleted file mode 100644
index f70bcd2ff98f..000000000000
--- a/drivers/virtio/config.c
+++ /dev/null
@@ -1,12 +0,0 @@
1/* Configuration space parsing helpers for virtio.
2 *
3 * The configuration is [type][len][... len bytes ...] fields.
4 *
5 * Copyright 2007 Rusty Russell, IBM Corporation.
6 * GPL v2 or later.
7 */
8#include <linux/err.h>
9#include <linux/virtio.h>
10#include <linux/virtio_config.h>
11#include <linux/bug.h>
12
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e7003db12c4..181793f07852 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -577,6 +577,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
577 577
578 virtio_device_ready(vdev); 578 virtio_device_ready(vdev);
579 579
580 if (towards_target(vb))
581 virtballoon_changed(vdev);
580 return 0; 582 return 0;
581 583
582out_del_vqs: 584out_del_vqs:
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 8c4e61783441..6d9e5173d5fa 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
212 return -ENODEV; 212 return -ENODEV;
213 } 213 }
214 214
215 rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); 215 rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
216 if (rc) 216 if (rc) {
217 rc = dma_set_mask_and_coherent(&pci_dev->dev, 217 rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
218 DMA_BIT_MASK(32)); 218 } else {
219 /*
220 * The virtio ring base address is expressed as a 32-bit PFN,
221 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
222 */
223 dma_set_coherent_mask(&pci_dev->dev,
224 DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
225 }
226
219 if (rc) 227 if (rc)
220 dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); 228 dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
221 229
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ed9c9eeedfe5..489bfc61cf30 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
167 * making all of the arch DMA ops work on the vring device itself 167 * making all of the arch DMA ops work on the vring device itself
168 * is a mess. For now, we use the parent device for DMA ops. 168 * is a mess. For now, we use the parent device for DMA ops.
169 */ 169 */
170static struct device *vring_dma_dev(const struct vring_virtqueue *vq) 170static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
171{ 171{
172 return vq->vq.vdev->dev.parent; 172 return vq->vq.vdev->dev.parent;
173} 173}
@@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
732 732
733 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { 733 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
734 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; 734 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
735 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); 735 if (!vq->event)
736 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
736 } 737 }
737 738
738} 739}
@@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
764 * entry. Always do both to keep code simple. */ 765 * entry. Always do both to keep code simple. */
765 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { 766 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
766 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; 767 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
767 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); 768 if (!vq->event)
769 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
768 } 770 }
769 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); 771 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
770 END_USE(vq); 772 END_USE(vq);
@@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
832 * more to do. */ 834 * more to do. */
833 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to 835 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
834 * either clear the flags bit or point the event index at the next 836 * either clear the flags bit or point the event index at the next
835 * entry. Always do both to keep code simple. */ 837 * entry. Always update the event index to keep code simple. */
836 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { 838 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
837 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; 839 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
838 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); 840 if (!vq->event)
841 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
839 } 842 }
840 /* TODO: tune this threshold */ 843 /* TODO: tune this threshold */
841 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; 844 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
953 /* No callback? Tell other side not to bother us. */ 956 /* No callback? Tell other side not to bother us. */
954 if (!callback) { 957 if (!callback) {
955 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; 958 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
956 vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); 959 if (!vq->event)
960 vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
957 } 961 }
958 962
959 /* Put everything in free lists. */ 963 /* Put everything in free lists. */
diff --git a/fs/aio.c b/fs/aio.c
index 1157e13a36d6..428484f2f841 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1078,6 +1078,17 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
1078 unsigned tail, pos, head; 1078 unsigned tail, pos, head;
1079 unsigned long flags; 1079 unsigned long flags;
1080 1080
1081 if (kiocb->ki_flags & IOCB_WRITE) {
1082 struct file *file = kiocb->ki_filp;
1083
1084 /*
1085 * Tell lockdep we inherited freeze protection from submission
1086 * thread.
1087 */
1088 __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
1089 file_end_write(file);
1090 }
1091
1081 /* 1092 /*
1082 * Special case handling for sync iocbs: 1093 * Special case handling for sync iocbs:
1083 * - events go directly into the iocb for fast handling 1094 * - events go directly into the iocb for fast handling
@@ -1392,122 +1403,106 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1392 return -EINVAL; 1403 return -EINVAL;
1393} 1404}
1394 1405
1395typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *); 1406static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
1396 1407 bool vectored, bool compat, struct iov_iter *iter)
1397static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len,
1398 struct iovec **iovec,
1399 bool compat,
1400 struct iov_iter *iter)
1401{ 1408{
1409 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1410 size_t len = iocb->aio_nbytes;
1411
1412 if (!vectored) {
1413 ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
1414 *iovec = NULL;
1415 return ret;
1416 }
1402#ifdef CONFIG_COMPAT 1417#ifdef CONFIG_COMPAT
1403 if (compat) 1418 if (compat)
1404 return compat_import_iovec(rw, 1419 return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
1405 (struct compat_iovec __user *)buf, 1420 iter);
1406 len, UIO_FASTIOV, iovec, iter);
1407#endif 1421#endif
1408 return import_iovec(rw, (struct iovec __user *)buf, 1422 return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
1409 len, UIO_FASTIOV, iovec, iter);
1410} 1423}
1411 1424
1412/* 1425static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret)
1413 * aio_run_iocb: 1426{
1414 * Performs the initial checks and io submission. 1427 switch (ret) {
1415 */ 1428 case -EIOCBQUEUED:
1416static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, 1429 return ret;
1417 char __user *buf, size_t len, bool compat) 1430 case -ERESTARTSYS:
1431 case -ERESTARTNOINTR:
1432 case -ERESTARTNOHAND:
1433 case -ERESTART_RESTARTBLOCK:
1434 /*
1435 * There's no easy way to restart the syscall since other AIO's
1436 * may be already running. Just fail this IO with EINTR.
1437 */
1438 ret = -EINTR;
1439 /*FALLTHRU*/
1440 default:
1441 aio_complete(req, ret, 0);
1442 return 0;
1443 }
1444}
1445
1446static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
1447 bool compat)
1418{ 1448{
1419 struct file *file = req->ki_filp; 1449 struct file *file = req->ki_filp;
1420 ssize_t ret;
1421 int rw;
1422 fmode_t mode;
1423 rw_iter_op *iter_op;
1424 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1450 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1425 struct iov_iter iter; 1451 struct iov_iter iter;
1452 ssize_t ret;
1426 1453
1427 switch (opcode) { 1454 if (unlikely(!(file->f_mode & FMODE_READ)))
1428 case IOCB_CMD_PREAD: 1455 return -EBADF;
1429 case IOCB_CMD_PREADV: 1456 if (unlikely(!file->f_op->read_iter))
1430 mode = FMODE_READ; 1457 return -EINVAL;
1431 rw = READ;
1432 iter_op = file->f_op->read_iter;
1433 goto rw_common;
1434
1435 case IOCB_CMD_PWRITE:
1436 case IOCB_CMD_PWRITEV:
1437 mode = FMODE_WRITE;
1438 rw = WRITE;
1439 iter_op = file->f_op->write_iter;
1440 goto rw_common;
1441rw_common:
1442 if (unlikely(!(file->f_mode & mode)))
1443 return -EBADF;
1444
1445 if (!iter_op)
1446 return -EINVAL;
1447
1448 if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV)
1449 ret = aio_setup_vectored_rw(rw, buf, len,
1450 &iovec, compat, &iter);
1451 else {
1452 ret = import_single_range(rw, buf, len, iovec, &iter);
1453 iovec = NULL;
1454 }
1455 if (!ret)
1456 ret = rw_verify_area(rw, file, &req->ki_pos,
1457 iov_iter_count(&iter));
1458 if (ret < 0) {
1459 kfree(iovec);
1460 return ret;
1461 }
1462
1463 if (rw == WRITE)
1464 file_start_write(file);
1465
1466 ret = iter_op(req, &iter);
1467
1468 if (rw == WRITE)
1469 file_end_write(file);
1470 kfree(iovec);
1471 break;
1472
1473 case IOCB_CMD_FDSYNC:
1474 if (!file->f_op->aio_fsync)
1475 return -EINVAL;
1476
1477 ret = file->f_op->aio_fsync(req, 1);
1478 break;
1479 1458
1480 case IOCB_CMD_FSYNC: 1459 ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
1481 if (!file->f_op->aio_fsync) 1460 if (ret)
1482 return -EINVAL; 1461 return ret;
1462 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1463 if (!ret)
1464 ret = aio_ret(req, file->f_op->read_iter(req, &iter));
1465 kfree(iovec);
1466 return ret;
1467}
1483 1468
1484 ret = file->f_op->aio_fsync(req, 0); 1469static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
1485 break; 1470 bool compat)
1471{
1472 struct file *file = req->ki_filp;
1473 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1474 struct iov_iter iter;
1475 ssize_t ret;
1486 1476
1487 default: 1477 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1488 pr_debug("EINVAL: no operation provided\n"); 1478 return -EBADF;
1479 if (unlikely(!file->f_op->write_iter))
1489 return -EINVAL; 1480 return -EINVAL;
1490 }
1491 1481
1492 if (ret != -EIOCBQUEUED) { 1482 ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
1483 if (ret)
1484 return ret;
1485 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1486 if (!ret) {
1487 req->ki_flags |= IOCB_WRITE;
1488 file_start_write(file);
1489 ret = aio_ret(req, file->f_op->write_iter(req, &iter));
1493 /* 1490 /*
1494 * There's no easy way to restart the syscall since other AIO's 1491 * We release freeze protection in aio_complete(). Fool lockdep
1495 * may be already running. Just fail this IO with EINTR. 1492 * by telling it the lock got released so that it doesn't
1493 * complain about held lock when we return to userspace.
1496 */ 1494 */
1497 if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || 1495 __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
1498 ret == -ERESTARTNOHAND ||
1499 ret == -ERESTART_RESTARTBLOCK))
1500 ret = -EINTR;
1501 aio_complete(req, ret, 0);
1502 } 1496 }
1503 1497 kfree(iovec);
1504 return 0; 1498 return ret;
1505} 1499}
1506 1500
1507static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1501static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1508 struct iocb *iocb, bool compat) 1502 struct iocb *iocb, bool compat)
1509{ 1503{
1510 struct aio_kiocb *req; 1504 struct aio_kiocb *req;
1505 struct file *file;
1511 ssize_t ret; 1506 ssize_t ret;
1512 1507
1513 /* enforce forwards compatibility on users */ 1508 /* enforce forwards compatibility on users */
@@ -1530,7 +1525,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1530 if (unlikely(!req)) 1525 if (unlikely(!req))
1531 return -EAGAIN; 1526 return -EAGAIN;
1532 1527
1533 req->common.ki_filp = fget(iocb->aio_fildes); 1528 req->common.ki_filp = file = fget(iocb->aio_fildes);
1534 if (unlikely(!req->common.ki_filp)) { 1529 if (unlikely(!req->common.ki_filp)) {
1535 ret = -EBADF; 1530 ret = -EBADF;
1536 goto out_put_req; 1531 goto out_put_req;
@@ -1565,13 +1560,29 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1565 req->ki_user_iocb = user_iocb; 1560 req->ki_user_iocb = user_iocb;
1566 req->ki_user_data = iocb->aio_data; 1561 req->ki_user_data = iocb->aio_data;
1567 1562
1568 ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode, 1563 get_file(file);
1569 (char __user *)(unsigned long)iocb->aio_buf, 1564 switch (iocb->aio_lio_opcode) {
1570 iocb->aio_nbytes, 1565 case IOCB_CMD_PREAD:
1571 compat); 1566 ret = aio_read(&req->common, iocb, false, compat);
1572 if (ret) 1567 break;
1573 goto out_put_req; 1568 case IOCB_CMD_PWRITE:
1569 ret = aio_write(&req->common, iocb, false, compat);
1570 break;
1571 case IOCB_CMD_PREADV:
1572 ret = aio_read(&req->common, iocb, true, compat);
1573 break;
1574 case IOCB_CMD_PWRITEV:
1575 ret = aio_write(&req->common, iocb, true, compat);
1576 break;
1577 default:
1578 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
1579 ret = -EINVAL;
1580 break;
1581 }
1582 fput(file);
1574 1583
1584 if (ret && ret != -EIOCBQUEUED)
1585 goto out_put_req;
1575 return 0; 1586 return 0;
1576out_put_req: 1587out_put_req:
1577 put_reqs_available(ctx, 1); 1588 put_reqs_available(ctx, 1);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 210c94ac8818..4607af38c72e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2647,7 +2647,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2647 2647
2648 btrfs_free_delayed_extent_op(extent_op); 2648 btrfs_free_delayed_extent_op(extent_op);
2649 if (ret) { 2649 if (ret) {
2650 spin_lock(&delayed_refs->lock);
2650 locked_ref->processing = 0; 2651 locked_ref->processing = 0;
2652 delayed_refs->num_heads_ready++;
2653 spin_unlock(&delayed_refs->lock);
2651 btrfs_delayed_ref_unlock(locked_ref); 2654 btrfs_delayed_ref_unlock(locked_ref);
2652 btrfs_put_delayed_ref(ref); 2655 btrfs_put_delayed_ref(ref);
2653 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", 2656 btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 66a755150056..8ed05d95584a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5569,7 +5569,7 @@ void le_bitmap_set(u8 *map, unsigned int start, int len)
5569 *p |= mask_to_set; 5569 *p |= mask_to_set;
5570 len -= bits_to_set; 5570 len -= bits_to_set;
5571 bits_to_set = BITS_PER_BYTE; 5571 bits_to_set = BITS_PER_BYTE;
5572 mask_to_set = ~(u8)0; 5572 mask_to_set = ~0;
5573 p++; 5573 p++;
5574 } 5574 }
5575 if (len) { 5575 if (len) {
@@ -5589,7 +5589,7 @@ void le_bitmap_clear(u8 *map, unsigned int start, int len)
5589 *p &= ~mask_to_clear; 5589 *p &= ~mask_to_clear;
5590 len -= bits_to_clear; 5590 len -= bits_to_clear;
5591 bits_to_clear = BITS_PER_BYTE; 5591 bits_to_clear = BITS_PER_BYTE;
5592 mask_to_clear = ~(u8)0; 5592 mask_to_clear = ~0;
5593 p++; 5593 p++;
5594 } 5594 }
5595 if (len) { 5595 if (len) {
@@ -5679,7 +5679,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5679 kaddr[offset] |= mask_to_set; 5679 kaddr[offset] |= mask_to_set;
5680 len -= bits_to_set; 5680 len -= bits_to_set;
5681 bits_to_set = BITS_PER_BYTE; 5681 bits_to_set = BITS_PER_BYTE;
5682 mask_to_set = ~(u8)0; 5682 mask_to_set = ~0;
5683 if (++offset >= PAGE_SIZE && len > 0) { 5683 if (++offset >= PAGE_SIZE && len > 0) {
5684 offset = 0; 5684 offset = 0;
5685 page = eb->pages[++i]; 5685 page = eb->pages[++i];
@@ -5721,7 +5721,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5721 kaddr[offset] &= ~mask_to_clear; 5721 kaddr[offset] &= ~mask_to_clear;
5722 len -= bits_to_clear; 5722 len -= bits_to_clear;
5723 bits_to_clear = BITS_PER_BYTE; 5723 bits_to_clear = BITS_PER_BYTE;
5724 mask_to_clear = ~(u8)0; 5724 mask_to_clear = ~0;
5725 if (++offset >= PAGE_SIZE && len > 0) { 5725 if (++offset >= PAGE_SIZE && len > 0) {
5726 offset = 0; 5726 offset = 0;
5727 page = eb->pages[++i]; 5727 page = eb->pages[++i];
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2b790bda7998..8e3a5a266917 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4605,8 +4605,8 @@ delete:
4605 BUG_ON(ret); 4605 BUG_ON(ret);
4606 if (btrfs_should_throttle_delayed_refs(trans, root)) 4606 if (btrfs_should_throttle_delayed_refs(trans, root))
4607 btrfs_async_run_delayed_refs(root, 4607 btrfs_async_run_delayed_refs(root,
4608 trans->transid, 4608 trans->delayed_ref_updates * 2,
4609 trans->delayed_ref_updates * 2, 0); 4609 trans->transid, 0);
4610 if (be_nice) { 4610 if (be_nice) {
4611 if (truncate_space_check(trans, root, 4611 if (truncate_space_check(trans, root,
4612 extent_num_bytes)) { 4612 extent_num_bytes)) {
@@ -8931,9 +8931,14 @@ again:
8931 * So even we call qgroup_free_data(), it won't decrease reserved 8931 * So even we call qgroup_free_data(), it won't decrease reserved
8932 * space. 8932 * space.
8933 * 2) Not written to disk 8933 * 2) Not written to disk
8934 * This means the reserved space should be freed here. 8934 * This means the reserved space should be freed here. However,
8935 * if a truncate invalidates the page (by clearing PageDirty)
8936 * and the page is accounted for while allocating extent
8937 * in btrfs_check_data_free_space() we let delayed_ref to
8938 * free the entire extent.
8935 */ 8939 */
8936 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE); 8940 if (PageDirty(page))
8941 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
8937 if (!inode_evicting) { 8942 if (!inode_evicting) {
8938 clear_extent_bit(tree, page_start, page_end, 8943 clear_extent_bit(tree, page_start, page_end,
8939 EXTENT_LOCKED | EXTENT_DIRTY | 8944 EXTENT_LOCKED | EXTENT_DIRTY |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 18e1aa0f85f5..7acbd2cf6192 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3814,6 +3814,11 @@ process_slot:
3814 } 3814 }
3815 btrfs_release_path(path); 3815 btrfs_release_path(path);
3816 key.offset = next_key_min_offset; 3816 key.offset = next_key_min_offset;
3817
3818 if (fatal_signal_pending(current)) {
3819 ret = -EINTR;
3820 goto out;
3821 }
3817 } 3822 }
3818 ret = 0; 3823 ret = 0;
3819 3824
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0ec8ffa37ab0..c4af0cdb783d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2728,7 +2728,14 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2728 2728
2729 bytenr = btrfs_node_blockptr(upper->eb, slot); 2729 bytenr = btrfs_node_blockptr(upper->eb, slot);
2730 if (lowest) { 2730 if (lowest) {
2731 BUG_ON(bytenr != node->bytenr); 2731 if (bytenr != node->bytenr) {
2732 btrfs_err(root->fs_info,
2733 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2734 bytenr, node->bytenr, slot,
2735 upper->eb->start);
2736 err = -EIO;
2737 goto next;
2738 }
2732 } else { 2739 } else {
2733 if (node->eb->start == bytenr) 2740 if (node->eb->start == bytenr)
2734 goto next; 2741 goto next;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 18630e800208..f995e3528a33 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1770,7 +1770,6 @@ const struct file_operations ceph_file_fops = {
1770 .fsync = ceph_fsync, 1770 .fsync = ceph_fsync,
1771 .lock = ceph_lock, 1771 .lock = ceph_lock,
1772 .flock = ceph_flock, 1772 .flock = ceph_flock,
1773 .splice_read = generic_file_splice_read,
1774 .splice_write = iter_file_splice_write, 1773 .splice_write = iter_file_splice_write,
1775 .unlocked_ioctl = ceph_ioctl, 1774 .unlocked_ioctl = ceph_ioctl,
1776 .compat_ioctl = ceph_ioctl, 1775 .compat_ioctl = ceph_ioctl,
diff --git a/fs/coredump.c b/fs/coredump.c
index 281b768000e6..eb9c92c9b20f 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -1,6 +1,7 @@
1#include <linux/slab.h> 1#include <linux/slab.h>
2#include <linux/file.h> 2#include <linux/file.h>
3#include <linux/fdtable.h> 3#include <linux/fdtable.h>
4#include <linux/freezer.h>
4#include <linux/mm.h> 5#include <linux/mm.h>
5#include <linux/stat.h> 6#include <linux/stat.h>
6#include <linux/fcntl.h> 7#include <linux/fcntl.h>
@@ -423,7 +424,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
423 if (core_waiters > 0) { 424 if (core_waiters > 0) {
424 struct core_thread *ptr; 425 struct core_thread *ptr;
425 426
427 freezer_do_not_count();
426 wait_for_completion(&core_state->startup); 428 wait_for_completion(&core_state->startup);
429 freezer_count();
427 /* 430 /*
428 * Wait for all the threads to become inactive, so that 431 * Wait for all the threads to become inactive, so that
429 * all the thread context (extended register state, like 432 * all the thread context (extended register state, like
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 7555ba889d1f..ebecfb8fba06 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -314,7 +314,8 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
314 /* Match the full socket address */ 314 /* Match the full socket address */
315 if (!rpc_cmp_addr_port(sap, clap)) 315 if (!rpc_cmp_addr_port(sap, clap))
316 /* Match all xprt_switch full socket addresses */ 316 /* Match all xprt_switch full socket addresses */
317 if (!rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient, 317 if (IS_ERR(clp->cl_rpcclient) ||
318 !rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient,
318 sap)) 319 sap))
319 continue; 320 continue;
320 321
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index c8162c660c44..5551e8ef67fd 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -98,7 +98,7 @@ rename_retry:
98 return end; 98 return end;
99 } 99 }
100 namelen = strlen(base); 100 namelen = strlen(base);
101 if (flags & NFS_PATH_CANONICAL) { 101 if (*end == '/') {
102 /* Strip off excess slashes in base string */ 102 /* Strip off excess slashes in base string */
103 while (namelen > 0 && base[namelen - 1] == '/') 103 while (namelen > 0 && base[namelen - 1] == '/')
104 namelen--; 104 namelen--;
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index b62973045a3e..a61350f75c74 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -178,12 +178,14 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
178 __must_hold(&tbl->slot_tbl_lock) 178 __must_hold(&tbl->slot_tbl_lock)
179{ 179{
180 struct nfs4_slot *slot; 180 struct nfs4_slot *slot;
181 int ret;
181 182
182 slot = nfs4_lookup_slot(tbl, slotid); 183 slot = nfs4_lookup_slot(tbl, slotid);
183 if (IS_ERR(slot)) 184 ret = PTR_ERR_OR_ZERO(slot);
184 return PTR_ERR(slot); 185 if (!ret)
185 *seq_nr = slot->seq_nr; 186 *seq_nr = slot->seq_nr;
186 return 0; 187
188 return ret;
187} 189}
188 190
189/* 191/*
@@ -196,7 +198,7 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
196static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl, 198static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
197 u32 slotid, u32 seq_nr) 199 u32 slotid, u32 seq_nr)
198{ 200{
199 u32 cur_seq; 201 u32 cur_seq = 0;
200 bool ret = false; 202 bool ret = false;
201 203
202 spin_lock(&tbl->slot_tbl_lock); 204 spin_lock(&tbl->slot_tbl_lock);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 56b2d96f9103..259ef85f435a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -146,6 +146,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
146 u32 id; 146 u32 id;
147 int i; 147 int i;
148 148
149 if (fsinfo->nlayouttypes == 0)
150 goto out_no_driver;
149 if (!(server->nfs_client->cl_exchange_flags & 151 if (!(server->nfs_client->cl_exchange_flags &
150 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) { 152 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
151 printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n", 153 printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index b10d557f9c9e..ee36efd5aece 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -84,6 +84,8 @@ struct nfsd_net {
84 struct list_head client_lru; 84 struct list_head client_lru;
85 struct list_head close_lru; 85 struct list_head close_lru;
86 struct list_head del_recall_lru; 86 struct list_head del_recall_lru;
87
88 /* protected by blocked_locks_lock */
87 struct list_head blocked_locks_lru; 89 struct list_head blocked_locks_lru;
88 90
89 struct delayed_work laundromat_work; 91 struct delayed_work laundromat_work;
@@ -91,6 +93,9 @@ struct nfsd_net {
91 /* client_lock protects the client lru list and session hash table */ 93 /* client_lock protects the client lru list and session hash table */
92 spinlock_t client_lock; 94 spinlock_t client_lock;
93 95
96 /* protects blocked_locks_lru */
97 spinlock_t blocked_locks_lock;
98
94 struct file *rec_file; 99 struct file *rec_file;
95 bool in_grace; 100 bool in_grace;
96 const struct nfsd4_client_tracking_ops *client_tracking_ops; 101 const struct nfsd4_client_tracking_ops *client_tracking_ops;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9752beb78659..4b4beaaa4eaa 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -217,7 +217,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
217{ 217{
218 struct nfsd4_blocked_lock *cur, *found = NULL; 218 struct nfsd4_blocked_lock *cur, *found = NULL;
219 219
220 spin_lock(&nn->client_lock); 220 spin_lock(&nn->blocked_locks_lock);
221 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { 221 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
222 if (fh_match(fh, &cur->nbl_fh)) { 222 if (fh_match(fh, &cur->nbl_fh)) {
223 list_del_init(&cur->nbl_list); 223 list_del_init(&cur->nbl_list);
@@ -226,7 +226,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
226 break; 226 break;
227 } 227 }
228 } 228 }
229 spin_unlock(&nn->client_lock); 229 spin_unlock(&nn->blocked_locks_lock);
230 if (found) 230 if (found)
231 posix_unblock_lock(&found->nbl_lock); 231 posix_unblock_lock(&found->nbl_lock);
232 return found; 232 return found;
@@ -1227,9 +1227,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1227 1227
1228static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) 1228static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1229{ 1229{
1230 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); 1230 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1231
1232 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
1233 1231
1234 list_del_init(&stp->st_locks); 1232 list_del_init(&stp->st_locks);
1235 nfs4_unhash_stid(&stp->st_stid); 1233 nfs4_unhash_stid(&stp->st_stid);
@@ -1238,12 +1236,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1238 1236
1239static void release_lock_stateid(struct nfs4_ol_stateid *stp) 1237static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1240{ 1238{
1241 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); 1239 struct nfs4_client *clp = stp->st_stid.sc_client;
1242 bool unhashed; 1240 bool unhashed;
1243 1241
1244 spin_lock(&oo->oo_owner.so_client->cl_lock); 1242 spin_lock(&clp->cl_lock);
1245 unhashed = unhash_lock_stateid(stp); 1243 unhashed = unhash_lock_stateid(stp);
1246 spin_unlock(&oo->oo_owner.so_client->cl_lock); 1244 spin_unlock(&clp->cl_lock);
1247 if (unhashed) 1245 if (unhashed)
1248 nfs4_put_stid(&stp->st_stid); 1246 nfs4_put_stid(&stp->st_stid);
1249} 1247}
@@ -4665,7 +4663,7 @@ nfs4_laundromat(struct nfsd_net *nn)
4665 * indefinitely once the lock does become free. 4663 * indefinitely once the lock does become free.
4666 */ 4664 */
4667 BUG_ON(!list_empty(&reaplist)); 4665 BUG_ON(!list_empty(&reaplist));
4668 spin_lock(&nn->client_lock); 4666 spin_lock(&nn->blocked_locks_lock);
4669 while (!list_empty(&nn->blocked_locks_lru)) { 4667 while (!list_empty(&nn->blocked_locks_lru)) {
4670 nbl = list_first_entry(&nn->blocked_locks_lru, 4668 nbl = list_first_entry(&nn->blocked_locks_lru,
4671 struct nfsd4_blocked_lock, nbl_lru); 4669 struct nfsd4_blocked_lock, nbl_lru);
@@ -4678,7 +4676,7 @@ nfs4_laundromat(struct nfsd_net *nn)
4678 list_move(&nbl->nbl_lru, &reaplist); 4676 list_move(&nbl->nbl_lru, &reaplist);
4679 list_del_init(&nbl->nbl_list); 4677 list_del_init(&nbl->nbl_list);
4680 } 4678 }
4681 spin_unlock(&nn->client_lock); 4679 spin_unlock(&nn->blocked_locks_lock);
4682 4680
4683 while (!list_empty(&reaplist)) { 4681 while (!list_empty(&reaplist)) {
4684 nbl = list_first_entry(&nn->blocked_locks_lru, 4682 nbl = list_first_entry(&nn->blocked_locks_lru,
@@ -5439,13 +5437,13 @@ nfsd4_lm_notify(struct file_lock *fl)
5439 bool queue = false; 5437 bool queue = false;
5440 5438
5441 /* An empty list means that something else is going to be using it */ 5439 /* An empty list means that something else is going to be using it */
5442 spin_lock(&nn->client_lock); 5440 spin_lock(&nn->blocked_locks_lock);
5443 if (!list_empty(&nbl->nbl_list)) { 5441 if (!list_empty(&nbl->nbl_list)) {
5444 list_del_init(&nbl->nbl_list); 5442 list_del_init(&nbl->nbl_list);
5445 list_del_init(&nbl->nbl_lru); 5443 list_del_init(&nbl->nbl_lru);
5446 queue = true; 5444 queue = true;
5447 } 5445 }
5448 spin_unlock(&nn->client_lock); 5446 spin_unlock(&nn->blocked_locks_lock);
5449 5447
5450 if (queue) 5448 if (queue)
5451 nfsd4_run_cb(&nbl->nbl_cb); 5449 nfsd4_run_cb(&nbl->nbl_cb);
@@ -5868,10 +5866,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5868 5866
5869 if (fl_flags & FL_SLEEP) { 5867 if (fl_flags & FL_SLEEP) {
5870 nbl->nbl_time = jiffies; 5868 nbl->nbl_time = jiffies;
5871 spin_lock(&nn->client_lock); 5869 spin_lock(&nn->blocked_locks_lock);
5872 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 5870 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
5873 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 5871 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
5874 spin_unlock(&nn->client_lock); 5872 spin_unlock(&nn->blocked_locks_lock);
5875 } 5873 }
5876 5874
5877 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock); 5875 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
@@ -5900,10 +5898,10 @@ out:
5900 if (nbl) { 5898 if (nbl) {
5901 /* dequeue it if we queued it before */ 5899 /* dequeue it if we queued it before */
5902 if (fl_flags & FL_SLEEP) { 5900 if (fl_flags & FL_SLEEP) {
5903 spin_lock(&nn->client_lock); 5901 spin_lock(&nn->blocked_locks_lock);
5904 list_del_init(&nbl->nbl_list); 5902 list_del_init(&nbl->nbl_list);
5905 list_del_init(&nbl->nbl_lru); 5903 list_del_init(&nbl->nbl_lru);
5906 spin_unlock(&nn->client_lock); 5904 spin_unlock(&nn->blocked_locks_lock);
5907 } 5905 }
5908 free_blocked_lock(nbl); 5906 free_blocked_lock(nbl);
5909 } 5907 }
@@ -6943,9 +6941,11 @@ static int nfs4_state_create_net(struct net *net)
6943 INIT_LIST_HEAD(&nn->client_lru); 6941 INIT_LIST_HEAD(&nn->client_lru);
6944 INIT_LIST_HEAD(&nn->close_lru); 6942 INIT_LIST_HEAD(&nn->close_lru);
6945 INIT_LIST_HEAD(&nn->del_recall_lru); 6943 INIT_LIST_HEAD(&nn->del_recall_lru);
6946 INIT_LIST_HEAD(&nn->blocked_locks_lru);
6947 spin_lock_init(&nn->client_lock); 6944 spin_lock_init(&nn->client_lock);
6948 6945
6946 spin_lock_init(&nn->blocked_locks_lock);
6947 INIT_LIST_HEAD(&nn->blocked_locks_lru);
6948
6949 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 6949 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
6950 get_net(net); 6950 get_net(net);
6951 6951
@@ -7063,14 +7063,14 @@ nfs4_state_shutdown_net(struct net *net)
7063 } 7063 }
7064 7064
7065 BUG_ON(!list_empty(&reaplist)); 7065 BUG_ON(!list_empty(&reaplist));
7066 spin_lock(&nn->client_lock); 7066 spin_lock(&nn->blocked_locks_lock);
7067 while (!list_empty(&nn->blocked_locks_lru)) { 7067 while (!list_empty(&nn->blocked_locks_lru)) {
7068 nbl = list_first_entry(&nn->blocked_locks_lru, 7068 nbl = list_first_entry(&nn->blocked_locks_lru,
7069 struct nfsd4_blocked_lock, nbl_lru); 7069 struct nfsd4_blocked_lock, nbl_lru);
7070 list_move(&nbl->nbl_lru, &reaplist); 7070 list_move(&nbl->nbl_lru, &reaplist);
7071 list_del_init(&nbl->nbl_list); 7071 list_del_init(&nbl->nbl_list);
7072 } 7072 }
7073 spin_unlock(&nn->client_lock); 7073 spin_unlock(&nn->blocked_locks_lock);
7074 7074
7075 while (!list_empty(&reaplist)) { 7075 while (!list_empty(&reaplist)) {
7076 nbl = list_first_entry(&nn->blocked_locks_lru, 7076 nbl = list_first_entry(&nn->blocked_locks_lru,
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index a18613579001..0ee19ecc982d 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1544,8 +1544,6 @@ const struct file_operations ntfs_dir_ops = {
1544 .iterate = ntfs_readdir, /* Read directory contents. */ 1544 .iterate = ntfs_readdir, /* Read directory contents. */
1545#ifdef NTFS_RW 1545#ifdef NTFS_RW
1546 .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ 1546 .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */
1547 /*.aio_fsync = ,*/ /* Sync all outstanding async
1548 i/o operations on a kiocb. */
1549#endif /* NTFS_RW */ 1547#endif /* NTFS_RW */
1550 /*.ioctl = ,*/ /* Perform function on the 1548 /*.ioctl = ,*/ /* Perform function on the
1551 mounted filesystem. */ 1549 mounted filesystem. */
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e7054e2ac922..3ecb9f337b7d 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3699,7 +3699,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
3699static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb, 3699static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
3700 struct ocfs2_dx_root_block *dx_root) 3700 struct ocfs2_dx_root_block *dx_root)
3701{ 3701{
3702 int credits = ocfs2_clusters_to_blocks(osb->sb, 2); 3702 int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
3703 3703
3704 credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list); 3704 credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
3705 credits += ocfs2_quota_trans_credits(osb->sb); 3705 credits += ocfs2_quota_trans_credits(osb->sb);
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index eb09aa026723..d484068ca716 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -141,6 +141,9 @@ static struct client_debug_mask client_debug_mask;
141 */ 141 */
142static DEFINE_MUTEX(orangefs_debug_lock); 142static DEFINE_MUTEX(orangefs_debug_lock);
143 143
144/* Used to protect data in ORANGEFS_KMOD_DEBUG_HELP_FILE */
145static DEFINE_MUTEX(orangefs_help_file_lock);
146
144/* 147/*
145 * initialize kmod debug operations, create orangefs debugfs dir and 148 * initialize kmod debug operations, create orangefs debugfs dir and
146 * ORANGEFS_KMOD_DEBUG_HELP_FILE. 149 * ORANGEFS_KMOD_DEBUG_HELP_FILE.
@@ -289,6 +292,8 @@ static void *help_start(struct seq_file *m, loff_t *pos)
289 292
290 gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n"); 293 gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n");
291 294
295 mutex_lock(&orangefs_help_file_lock);
296
292 if (*pos == 0) 297 if (*pos == 0)
293 payload = m->private; 298 payload = m->private;
294 299
@@ -305,6 +310,7 @@ static void *help_next(struct seq_file *m, void *v, loff_t *pos)
305static void help_stop(struct seq_file *m, void *p) 310static void help_stop(struct seq_file *m, void *p)
306{ 311{
307 gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n"); 312 gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n");
313 mutex_unlock(&orangefs_help_file_lock);
308} 314}
309 315
310static int help_show(struct seq_file *m, void *v) 316static int help_show(struct seq_file *m, void *v)
@@ -610,32 +616,54 @@ out:
610 * /sys/kernel/debug/orangefs/debug-help can be catted to 616 * /sys/kernel/debug/orangefs/debug-help can be catted to
611 * see all the available kernel and client debug keywords. 617 * see all the available kernel and client debug keywords.
612 * 618 *
613 * When the kernel boots, we have no idea what keywords the 619 * When orangefs.ko initializes, we have no idea what keywords the
614 * client supports, nor their associated masks. 620 * client supports, nor their associated masks.
615 * 621 *
616 * We pass through this function once at boot and stamp a 622 * We pass through this function once at module-load and stamp a
617 * boilerplate "we don't know" message for the client in the 623 * boilerplate "we don't know" message for the client in the
618 * debug-help file. We pass through here again when the client 624 * debug-help file. We pass through here again when the client
619 * starts and then we can fill out the debug-help file fully. 625 * starts and then we can fill out the debug-help file fully.
620 * 626 *
621 * The client might be restarted any number of times between 627 * The client might be restarted any number of times between
622 * reboots, we only build the debug-help file the first time. 628 * module reloads, we only build the debug-help file the first time.
623 */ 629 */
624int orangefs_prepare_debugfs_help_string(int at_boot) 630int orangefs_prepare_debugfs_help_string(int at_boot)
625{ 631{
626 int rc = -EINVAL;
627 int i;
628 int byte_count = 0;
629 char *client_title = "Client Debug Keywords:\n"; 632 char *client_title = "Client Debug Keywords:\n";
630 char *kernel_title = "Kernel Debug Keywords:\n"; 633 char *kernel_title = "Kernel Debug Keywords:\n";
634 size_t string_size = DEBUG_HELP_STRING_SIZE;
635 size_t result_size;
636 size_t i;
637 char *new;
638 int rc = -EINVAL;
631 639
632 gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); 640 gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
633 641
634 if (at_boot) { 642 if (at_boot)
635 byte_count += strlen(HELP_STRING_UNINITIALIZED);
636 client_title = HELP_STRING_UNINITIALIZED; 643 client_title = HELP_STRING_UNINITIALIZED;
637 } else { 644
638 /* 645 /* build a new debug_help_string. */
646 new = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
647 if (!new) {
648 rc = -ENOMEM;
649 goto out;
650 }
651
652 /*
653 * strlcat(dst, src, size) will append at most
654 * "size - strlen(dst) - 1" bytes of src onto dst,
655 * null terminating the result, and return the total
656 * length of the string it tried to create.
657 *
658 * We'll just plow through here building our new debug
659 * help string and let strlcat take care of assuring that
660 * dst doesn't overflow.
661 */
662 strlcat(new, client_title, string_size);
663
664 if (!at_boot) {
665
666 /*
639 * fill the client keyword/mask array and remember 667 * fill the client keyword/mask array and remember
640 * how many elements there were. 668 * how many elements there were.
641 */ 669 */
@@ -644,64 +672,40 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
644 if (cdm_element_count <= 0) 672 if (cdm_element_count <= 0)
645 goto out; 673 goto out;
646 674
647 /* Count the bytes destined for debug_help_string. */
648 byte_count += strlen(client_title);
649
650 for (i = 0; i < cdm_element_count; i++) { 675 for (i = 0; i < cdm_element_count; i++) {
651 byte_count += strlen(cdm_array[i].keyword + 2); 676 strlcat(new, "\t", string_size);
652 if (byte_count >= DEBUG_HELP_STRING_SIZE) { 677 strlcat(new, cdm_array[i].keyword, string_size);
653 pr_info("%s: overflow 1!\n", __func__); 678 strlcat(new, "\n", string_size);
654 goto out;
655 }
656 } 679 }
657
658 gossip_debug(GOSSIP_UTILS_DEBUG,
659 "%s: cdm_element_count:%d:\n",
660 __func__,
661 cdm_element_count);
662 } 680 }
663 681
664 byte_count += strlen(kernel_title); 682 strlcat(new, "\n", string_size);
683 strlcat(new, kernel_title, string_size);
684
665 for (i = 0; i < num_kmod_keyword_mask_map; i++) { 685 for (i = 0; i < num_kmod_keyword_mask_map; i++) {
666 byte_count += 686 strlcat(new, "\t", string_size);
667 strlen(s_kmod_keyword_mask_map[i].keyword + 2); 687 strlcat(new, s_kmod_keyword_mask_map[i].keyword, string_size);
668 if (byte_count >= DEBUG_HELP_STRING_SIZE) { 688 result_size = strlcat(new, "\n", string_size);
669 pr_info("%s: overflow 2!\n", __func__);
670 goto out;
671 }
672 } 689 }
673 690
674 /* build debug_help_string. */ 691 /* See if we tried to put too many bytes into "new"... */
675 debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL); 692 if (result_size >= string_size) {
676 if (!debug_help_string) { 693 kfree(new);
677 rc = -ENOMEM;
678 goto out; 694 goto out;
679 } 695 }
680 696
681 strcat(debug_help_string, client_title); 697 if (at_boot) {
682 698 debug_help_string = new;
683 if (!at_boot) { 699 } else {
684 for (i = 0; i < cdm_element_count; i++) { 700 mutex_lock(&orangefs_help_file_lock);
685 strcat(debug_help_string, "\t"); 701 memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE);
686 strcat(debug_help_string, cdm_array[i].keyword); 702 strlcat(debug_help_string, new, string_size);
687 strcat(debug_help_string, "\n"); 703 mutex_unlock(&orangefs_help_file_lock);
688 }
689 }
690
691 strcat(debug_help_string, "\n");
692 strcat(debug_help_string, kernel_title);
693
694 for (i = 0; i < num_kmod_keyword_mask_map; i++) {
695 strcat(debug_help_string, "\t");
696 strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword);
697 strcat(debug_help_string, "\n");
698 } 704 }
699 705
700 rc = 0; 706 rc = 0;
701 707
702out: 708out: return rc;
703
704 return rc;
705 709
706} 710}
707 711
@@ -959,8 +963,12 @@ int orangefs_debugfs_new_client_string(void __user *arg)
959 ret = copy_from_user(&client_debug_array_string, 963 ret = copy_from_user(&client_debug_array_string,
960 (void __user *)arg, 964 (void __user *)arg,
961 ORANGEFS_MAX_DEBUG_STRING_LEN); 965 ORANGEFS_MAX_DEBUG_STRING_LEN);
962 if (ret != 0) 966
967 if (ret != 0) {
968 pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
969 __func__);
963 return -EIO; 970 return -EIO;
971 }
964 972
965 /* 973 /*
966 * The real client-core makes an effort to ensure 974 * The real client-core makes an effort to ensure
@@ -975,45 +983,18 @@ int orangefs_debugfs_new_client_string(void __user *arg)
975 client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] = 983 client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
976 '\0'; 984 '\0';
977 985
978 if (ret != 0) {
979 pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
980 __func__);
981 return -EIO;
982 }
983
984 pr_info("%s: client debug array string has been received.\n", 986 pr_info("%s: client debug array string has been received.\n",
985 __func__); 987 __func__);
986 988
987 if (!help_string_initialized) { 989 if (!help_string_initialized) {
988 990
989 /* Free the "we don't know yet" default string... */ 991 /* Build a proper debug help string. */
990 kfree(debug_help_string);
991
992 /* build a proper debug help string */
993 if (orangefs_prepare_debugfs_help_string(0)) { 992 if (orangefs_prepare_debugfs_help_string(0)) {
994 gossip_err("%s: no debug help string \n", 993 gossip_err("%s: no debug help string \n",
995 __func__); 994 __func__);
996 return -EIO; 995 return -EIO;
997 } 996 }
998 997
999 /* Replace the boilerplate boot-time debug-help file. */
1000 debugfs_remove(help_file_dentry);
1001
1002 help_file_dentry =
1003 debugfs_create_file(
1004 ORANGEFS_KMOD_DEBUG_HELP_FILE,
1005 0444,
1006 debug_dir,
1007 debug_help_string,
1008 &debug_help_fops);
1009
1010 if (!help_file_dentry) {
1011 gossip_err("%s: debugfs_create_file failed for"
1012 " :%s:!\n",
1013 __func__,
1014 ORANGEFS_KMOD_DEBUG_HELP_FILE);
1015 return -EIO;
1016 }
1017 } 998 }
1018 999
1019 debug_mask_to_string(&client_debug_mask, 1); 1000 debug_mask_to_string(&client_debug_mask, 1);
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index 2e5b03065f34..4113eb0495bf 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -124,7 +124,7 @@ static int __init orangefs_init(void)
124 * unknown at boot time. 124 * unknown at boot time.
125 * 125 *
126 * orangefs_prepare_debugfs_help_string will be used again 126 * orangefs_prepare_debugfs_help_string will be used again
127 * later to rebuild the debug-help file after the client starts 127 * later to rebuild the debug-help-string after the client starts
128 * and passes along the needed info. The argument signifies 128 * and passes along the needed info. The argument signifies
129 * which time orangefs_prepare_debugfs_help_string is being 129 * which time orangefs_prepare_debugfs_help_string is being
130 * called. 130 * called.
@@ -152,7 +152,9 @@ static int __init orangefs_init(void)
152 152
153 ret = register_filesystem(&orangefs_fs_type); 153 ret = register_filesystem(&orangefs_fs_type);
154 if (ret == 0) { 154 if (ret == 0) {
155 pr_info("orangefs: module version %s loaded\n", ORANGEFS_VERSION); 155 pr_info("%s: module version %s loaded\n",
156 __func__,
157 ORANGEFS_VERSION);
156 ret = 0; 158 ret = 0;
157 goto out; 159 goto out;
158 } 160 }
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index aeb60f791418..36795eed40b0 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -178,6 +178,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
178 len -= bytes; 178 len -= bytes;
179 } 179 }
180 180
181 if (!error)
182 error = vfs_fsync(new_file, 0);
181 fput(new_file); 183 fput(new_file);
182out_fput: 184out_fput:
183 fput(old_file); 185 fput(old_file);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c58f01babf30..7fb53d055537 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -270,9 +270,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)
270 if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode)) 270 if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
271 return NULL; 271 return NULL;
272 272
273 if (!realinode->i_op->get_acl)
274 return NULL;
275
276 old_cred = ovl_override_creds(inode->i_sb); 273 old_cred = ovl_override_creds(inode->i_sb);
277 acl = get_acl(realinode, type); 274 acl = get_acl(realinode, type);
278 revert_creds(old_cred); 275 revert_creds(old_cred);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index bcf3965be819..edd46a0e951d 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1037,6 +1037,21 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
1037 1037
1038 posix_acl_release(acl); 1038 posix_acl_release(acl);
1039 1039
1040 /*
1041 * Check if sgid bit needs to be cleared (actual setacl operation will
1042 * be done with mounter's capabilities and so that won't do it for us).
1043 */
1044 if (unlikely(inode->i_mode & S_ISGID) &&
1045 handler->flags == ACL_TYPE_ACCESS &&
1046 !in_group_p(inode->i_gid) &&
1047 !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
1048 struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
1049
1050 err = ovl_setattr(dentry, &iattr);
1051 if (err)
1052 return err;
1053 }
1054
1040 err = ovl_xattr_set(dentry, handler->name, value, size, flags); 1055 err = ovl_xattr_set(dentry, handler->name, value, size, flags);
1041 if (!err) 1056 if (!err)
1042 ovl_copyattr(ovl_inode_real(inode, NULL), inode); 1057 ovl_copyattr(ovl_inode_real(inode, NULL), inode);
diff --git a/fs/splice.c b/fs/splice.c
index 153d4f3bd441..dcaf185a5731 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -299,13 +299,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
299{ 299{
300 struct iov_iter to; 300 struct iov_iter to;
301 struct kiocb kiocb; 301 struct kiocb kiocb;
302 loff_t isize;
303 int idx, ret; 302 int idx, ret;
304 303
305 isize = i_size_read(in->f_mapping->host);
306 if (unlikely(*ppos >= isize))
307 return 0;
308
309 iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len); 304 iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len);
310 idx = to.idx; 305 idx = to.idx;
311 init_sync_kiocb(&kiocb, in); 306 init_sync_kiocb(&kiocb, in);
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 613c5cf19436..5c2929f94bd3 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -199,9 +199,9 @@ xfs_defer_intake_work(
199 struct xfs_defer_pending *dfp; 199 struct xfs_defer_pending *dfp;
200 200
201 list_for_each_entry(dfp, &dop->dop_intake, dfp_list) { 201 list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
202 trace_xfs_defer_intake_work(tp->t_mountp, dfp);
203 dfp->dfp_intent = dfp->dfp_type->create_intent(tp, 202 dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
204 dfp->dfp_count); 203 dfp->dfp_count);
204 trace_xfs_defer_intake_work(tp->t_mountp, dfp);
205 list_sort(tp->t_mountp, &dfp->dfp_work, 205 list_sort(tp->t_mountp, &dfp->dfp_work,
206 dfp->dfp_type->diff_items); 206 dfp->dfp_type->diff_items);
207 list_for_each(li, &dfp->dfp_work) 207 list_for_each(li, &dfp->dfp_work)
@@ -221,21 +221,14 @@ xfs_defer_trans_abort(
221 struct xfs_defer_pending *dfp; 221 struct xfs_defer_pending *dfp;
222 222
223 trace_xfs_defer_trans_abort(tp->t_mountp, dop); 223 trace_xfs_defer_trans_abort(tp->t_mountp, dop);
224 /*
225 * If the transaction was committed, drop the intent reference
226 * since we're bailing out of here. The other reference is
227 * dropped when the intent hits the AIL. If the transaction
228 * was not committed, the intent is freed by the intent item
229 * unlock handler on abort.
230 */
231 if (!dop->dop_committed)
232 return;
233 224
234 /* Abort intent items. */ 225 /* Abort intent items that don't have a done item. */
235 list_for_each_entry(dfp, &dop->dop_pending, dfp_list) { 226 list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
236 trace_xfs_defer_pending_abort(tp->t_mountp, dfp); 227 trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
237 if (!dfp->dfp_done) 228 if (dfp->dfp_intent && !dfp->dfp_done) {
238 dfp->dfp_type->abort_intent(dfp->dfp_intent); 229 dfp->dfp_type->abort_intent(dfp->dfp_intent);
230 dfp->dfp_intent = NULL;
231 }
239 } 232 }
240 233
241 /* Shut down FS. */ 234 /* Shut down FS. */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 40e887068da2..0504ef8f3aa3 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -118,9 +118,9 @@ do { \
118#define this_cpu_generic_read(pcp) \ 118#define this_cpu_generic_read(pcp) \
119({ \ 119({ \
120 typeof(pcp) __ret; \ 120 typeof(pcp) __ret; \
121 preempt_disable(); \ 121 preempt_disable_notrace(); \
122 __ret = raw_cpu_generic_read(pcp); \ 122 __ret = raw_cpu_generic_read(pcp); \
123 preempt_enable(); \ 123 preempt_enable_notrace(); \
124 __ret; \ 124 __ret; \
125}) 125})
126 126
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index af0254c09424..4df64a1fc09e 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,6 +14,8 @@
14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* 14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
15 * and/or .init.* sections. 15 * and/or .init.* sections.
16 * [__start_rodata, __end_rodata]: contains .rodata.* sections 16 * [__start_rodata, __end_rodata]: contains .rodata.* sections
17 * [__start_data_ro_after_init, __end_data_ro_after_init]:
18 * contains data.ro_after_init section
17 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* 19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
18 * may be out of this range on some architectures. 20 * may be out of this range on some architectures.
19 * [_sinittext, _einittext]: contains .init.text.* sections 21 * [_sinittext, _einittext]: contains .init.text.* sections
@@ -31,6 +33,7 @@ extern char _data[], _sdata[], _edata[];
31extern char __bss_start[], __bss_stop[]; 33extern char __bss_start[], __bss_stop[];
32extern char __init_begin[], __init_end[]; 34extern char __init_begin[], __init_end[];
33extern char _sinittext[], _einittext[]; 35extern char _sinittext[], _einittext[];
36extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
34extern char _end[]; 37extern char _end[];
35extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; 38extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
36extern char __kprobes_text_start[], __kprobes_text_end[]; 39extern char __kprobes_text_start[], __kprobes_text_end[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 30747960bc54..31e1d639abed 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -259,7 +259,10 @@
259 * own by defining an empty RO_AFTER_INIT_DATA. 259 * own by defining an empty RO_AFTER_INIT_DATA.
260 */ 260 */
261#ifndef RO_AFTER_INIT_DATA 261#ifndef RO_AFTER_INIT_DATA
262#define RO_AFTER_INIT_DATA *(.data..ro_after_init) 262#define RO_AFTER_INIT_DATA \
263 __start_data_ro_after_init = .; \
264 *(.data..ro_after_init) \
265 __end_data_ro_after_init = .;
263#endif 266#endif
264 267
265/* 268/*
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 43cf193e54d6..8b4dc62470ff 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -47,8 +47,14 @@ struct drm_crtc;
47 * @src_h: height of visible portion of plane (in 16.16) 47 * @src_h: height of visible portion of plane (in 16.16)
48 * @rotation: rotation of the plane 48 * @rotation: rotation of the plane
49 * @zpos: priority of the given plane on crtc (optional) 49 * @zpos: priority of the given plane on crtc (optional)
50 * Note that multiple active planes on the same crtc can have an identical
51 * zpos value. The rule to solving the conflict is to compare the plane
52 * object IDs; the plane with a higher ID must be stacked on top of a
53 * plane with a lower ID.
50 * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1 54 * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
51 * where N is the number of active planes for given crtc 55 * where N is the number of active planes for given crtc. Note that
56 * the driver must call drm_atomic_normalize_zpos() to update this before
57 * it can be trusted.
52 * @src: clipped source coordinates of the plane (in 16.16) 58 * @src: clipped source coordinates of the plane (in 16.16)
53 * @dst: clipped destination coordinates of the plane 59 * @dst: clipped destination coordinates of the plane
54 * @visible: visibility of the plane 60 * @visible: visibility of the plane
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 689a8b9b9c8f..61a3d90f32b3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -555,7 +555,8 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
555int acpi_device_modalias(struct device *, char *, int); 555int acpi_device_modalias(struct device *, char *, int);
556void acpi_walk_dep_device_list(acpi_handle handle); 556void acpi_walk_dep_device_list(acpi_handle handle);
557 557
558struct platform_device *acpi_create_platform_device(struct acpi_device *); 558struct platform_device *acpi_create_platform_device(struct acpi_device *,
559 struct property_entry *);
559#define ACPI_PTR(_ptr) (_ptr) 560#define ACPI_PTR(_ptr) (_ptr)
560 561
561static inline void acpi_device_set_enumerated(struct acpi_device *adev) 562static inline void acpi_device_set_enumerated(struct acpi_device *adev)
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 96337b15a60d..a8e66344bacc 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -258,6 +258,8 @@ struct ceph_watch_item {
258 struct ceph_entity_addr addr; 258 struct ceph_entity_addr addr;
259}; 259};
260 260
261#define CEPH_LINGER_ID_START 0xffff000000000000ULL
262
261struct ceph_osd_client { 263struct ceph_osd_client {
262 struct ceph_client *client; 264 struct ceph_client *client;
263 265
diff --git a/include/linux/console.h b/include/linux/console.h
index 3672809234a7..d530c4627e54 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void)
173#endif 173#endif
174extern bool console_suspend_enabled; 174extern bool console_suspend_enabled;
175 175
176#ifdef CONFIG_OF
177extern void console_set_by_of(void);
178#else
179static inline void console_set_by_of(void) {}
180#endif
181
182/* Suspend and resume console messages over PM events */ 176/* Suspend and resume console messages over PM events */
183extern void suspend_console(void); 177extern void suspend_console(void);
184extern void resume_console(void); 178extern void resume_console(void);
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index c46d2aa16d81..1d18af034554 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type)
106 106
107static inline void frontswap_init(unsigned type, unsigned long *map) 107static inline void frontswap_init(unsigned type, unsigned long *map)
108{ 108{
109 if (frontswap_enabled()) 109#ifdef CONFIG_FRONTSWAP
110 __frontswap_init(type, map); 110 __frontswap_init(type, map);
111#endif
111} 112}
112 113
113#endif /* _LINUX_FRONTSWAP_H */ 114#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 16d2b6e874d6..dc0478c07b2a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -321,6 +321,7 @@ struct writeback_control;
321#define IOCB_HIPRI (1 << 3) 321#define IOCB_HIPRI (1 << 3)
322#define IOCB_DSYNC (1 << 4) 322#define IOCB_DSYNC (1 << 4)
323#define IOCB_SYNC (1 << 5) 323#define IOCB_SYNC (1 << 5)
324#define IOCB_WRITE (1 << 6)
324 325
325struct kiocb { 326struct kiocb {
326 struct file *ki_filp; 327 struct file *ki_filp;
@@ -1709,7 +1710,6 @@ struct file_operations {
1709 int (*flush) (struct file *, fl_owner_t id); 1710 int (*flush) (struct file *, fl_owner_t id);
1710 int (*release) (struct inode *, struct file *); 1711 int (*release) (struct inode *, struct file *);
1711 int (*fsync) (struct file *, loff_t, loff_t, int datasync); 1712 int (*fsync) (struct file *, loff_t, loff_t, int datasync);
1712 int (*aio_fsync) (struct kiocb *, int datasync);
1713 int (*fasync) (int, struct file *, int); 1713 int (*fasync) (int, struct file *, int);
1714 int (*lock) (struct file *, int, struct file_lock *); 1714 int (*lock) (struct file *, int, struct file_lock *);
1715 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); 1715 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 93756585521f..3f95233b2733 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -153,7 +153,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
153{ 153{
154#if defined(CONFIG_NET_L3_MASTER_DEV) 154#if defined(CONFIG_NET_L3_MASTER_DEV)
155 if (!net->ipv4.sysctl_tcp_l3mdev_accept && 155 if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
156 ipv6_l3mdev_skb(IP6CB(skb)->flags)) 156 skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
157 return true; 157 return true;
158#endif 158#endif
159 return false; 159 return false;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5d3d5024fc8..d8905a229f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1184 int page); 1184 int page);
1185 1185
1186/* Reset and initialize a NAND device */ 1186/* Reset and initialize a NAND device */
1187int nand_reset(struct nand_chip *chip); 1187int nand_reset(struct nand_chip *chip, int chipnr);
1188 1188
1189/* Free resources held by the NAND device */ 1189/* Free resources held by the NAND device */
1190void nand_cleanup(struct nand_chip *chip); 1190void nand_cleanup(struct nand_chip *chip);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d64135a0ab71..86bacf6a64f0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3252,6 +3252,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3252bool is_skb_forwardable(const struct net_device *dev, 3252bool is_skb_forwardable(const struct net_device *dev,
3253 const struct sk_buff *skb); 3253 const struct sk_buff *skb);
3254 3254
3255static __always_inline int ____dev_forward_skb(struct net_device *dev,
3256 struct sk_buff *skb)
3257{
3258 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3259 unlikely(!is_skb_forwardable(dev, skb))) {
3260 atomic_long_inc(&dev->rx_dropped);
3261 kfree_skb(skb);
3262 return NET_RX_DROP;
3263 }
3264
3265 skb_scrub_packet(skb, true);
3266 skb->priority = 0;
3267 return 0;
3268}
3269
3255void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 3270void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3256 3271
3257extern int netdev_budget; 3272extern int netdev_budget;
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index ee1bed7dbfc6..78bb0d7f6b11 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -253,6 +253,13 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
253 return -ENOSYS; 253 return -ENOSYS;
254} 254}
255 255
256static inline int phy_reset(struct phy *phy)
257{
258 if (!phy)
259 return 0;
260 return -ENOSYS;
261}
262
256static inline int phy_get_bus_width(struct phy *phy) 263static inline int phy_get_bus_width(struct phy *phy)
257{ 264{
258 return -ENOSYS; 265 return -ENOSYS;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 9adc7b21903d..f6673132431d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
15 15
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/rbtree.h> 17#include <linux/rbtree.h>
18#include <linux/delay.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/bug.h> 20#include <linux/bug.h>
20#include <linux/lockdep.h> 21#include <linux/lockdep.h>
@@ -116,22 +117,22 @@ struct reg_sequence {
116#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ 117#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
117({ \ 118({ \
118 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ 119 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
119 int ret; \ 120 int pollret; \
120 might_sleep_if(sleep_us); \ 121 might_sleep_if(sleep_us); \
121 for (;;) { \ 122 for (;;) { \
122 ret = regmap_read((map), (addr), &(val)); \ 123 pollret = regmap_read((map), (addr), &(val)); \
123 if (ret) \ 124 if (pollret) \
124 break; \ 125 break; \
125 if (cond) \ 126 if (cond) \
126 break; \ 127 break; \
127 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ 128 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
128 ret = regmap_read((map), (addr), &(val)); \ 129 pollret = regmap_read((map), (addr), &(val)); \
129 break; \ 130 break; \
130 } \ 131 } \
131 if (sleep_us) \ 132 if (sleep_us) \
132 usleep_range((sleep_us >> 2) + 1, sleep_us); \ 133 usleep_range((sleep_us >> 2) + 1, sleep_us); \
133 } \ 134 } \
134 ret ?: ((cond) ? 0 : -ETIMEDOUT); \ 135 pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
135}) 136})
136 137
137#ifdef CONFIG_REGMAP 138#ifdef CONFIG_REGMAP
diff --git a/include/net/ip.h b/include/net/ip.h
index f48c67cab222..ab6761a7c883 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -47,8 +47,7 @@ struct inet_skb_parm {
47#define IPSKB_REROUTED BIT(4) 47#define IPSKB_REROUTED BIT(4)
48#define IPSKB_DOREDIRECT BIT(5) 48#define IPSKB_DOREDIRECT BIT(5)
49#define IPSKB_FRAG_PMTU BIT(6) 49#define IPSKB_FRAG_PMTU BIT(6)
50#define IPSKB_FRAG_SEGS BIT(7) 50#define IPSKB_L3SLAVE BIT(7)
51#define IPSKB_L3SLAVE BIT(8)
52 51
53 u16 frag_max_size; 52 u16 frag_max_size;
54}; 53};
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 20ed9699fcd4..1b1cf33cbfb0 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
146{ 146{
147 int pkt_len, err; 147 int pkt_len, err;
148 148
149 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
149 pkt_len = skb->len - skb_inner_network_offset(skb); 150 pkt_len = skb->len - skb_inner_network_offset(skb);
150 err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); 151 err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
151 if (unlikely(net_xmit_eval(err))) 152 if (unlikely(net_xmit_eval(err)))
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 498814626e28..1723a67c0b0a 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
30 if (net->ct.labels_used == 0) 30 if (net->ct.labels_used == 0)
31 return NULL; 31 return NULL;
32 32
33 return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, 33 return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
34 sizeof(struct nf_conn_labels), GFP_ATOMIC);
35#else 34#else
36 return NULL; 35 return NULL;
37#endif 36#endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3295fb85bff6..311f02739b51 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -161,7 +161,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
161 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; 161 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
162} 162}
163 163
164unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); 164int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
165unsigned int nft_parse_register(const struct nlattr *attr); 165unsigned int nft_parse_register(const struct nlattr *attr);
166int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); 166int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
167 167
@@ -558,7 +558,8 @@ void *nft_set_elem_init(const struct nft_set *set,
558 const struct nft_set_ext_tmpl *tmpl, 558 const struct nft_set_ext_tmpl *tmpl,
559 const u32 *key, const u32 *data, 559 const u32 *key, const u32 *data,
560 u64 timeout, gfp_t gfp); 560 u64 timeout, gfp_t gfp);
561void nft_set_elem_destroy(const struct nft_set *set, void *elem); 561void nft_set_elem_destroy(const struct nft_set *set, void *elem,
562 bool destroy_expr);
562 563
563/** 564/**
564 * struct nft_set_gc_batch_head - nf_tables set garbage collection batch 565 * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
@@ -709,7 +710,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
709{ 710{
710 int err; 711 int err;
711 712
712 __module_get(src->ops->type->owner);
713 if (src->ops->clone) { 713 if (src->ops->clone) {
714 dst->ops = src->ops; 714 dst->ops = src->ops;
715 err = src->ops->clone(dst, src); 715 err = src->ops->clone(dst, src);
@@ -718,6 +718,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
718 } else { 718 } else {
719 memcpy(dst, src, src->ops->size); 719 memcpy(dst, src, src->ops->size);
720 } 720 }
721
722 __module_get(src->ops->type->owner);
721 return 0; 723 return 0;
722} 724}
723 725
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 87a7f42e7639..31acc3f4f132 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *);
152struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, 152struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
153 struct sctphdr *, struct sctp_association **, 153 struct sctphdr *, struct sctp_association **,
154 struct sctp_transport **); 154 struct sctp_transport **);
155void sctp_err_finish(struct sock *, struct sctp_association *); 155void sctp_err_finish(struct sock *, struct sctp_transport *);
156void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, 156void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
157 struct sctp_transport *t, __u32 pmtu); 157 struct sctp_transport *t, __u32 pmtu);
158void sctp_icmp_redirect(struct sock *, struct sctp_transport *, 158void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
diff --git a/include/net/sock.h b/include/net/sock.h
index 9d905ed0cd25..442cbb118a07 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1612,11 +1612,11 @@ static inline void sock_put(struct sock *sk)
1612void sock_gen_put(struct sock *sk); 1612void sock_gen_put(struct sock *sk);
1613 1613
1614int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, 1614int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1615 unsigned int trim_cap); 1615 unsigned int trim_cap, bool refcounted);
1616static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, 1616static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1617 const int nested) 1617 const int nested)
1618{ 1618{
1619 return __sk_receive_skb(sk, skb, nested, 1); 1619 return __sk_receive_skb(sk, skb, nested, 1, true);
1620} 1620}
1621 1621
1622static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) 1622static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5b82d4d94834..123979fe12bf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -805,7 +805,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
805{ 805{
806#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 806#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
807 if (!net->ipv4.sysctl_tcp_l3mdev_accept && 807 if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
808 ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) 808 skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
809 return true; 809 return true;
810#endif 810#endif
811 return false; 811 return false;
@@ -1220,6 +1220,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
1220 1220
1221bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); 1221bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1222bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); 1222bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1223int tcp_filter(struct sock *sk, struct sk_buff *skb);
1223 1224
1224#undef STATE_TRACE 1225#undef STATE_TRACE
1225 1226
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
index 5cd4d4d2dd1d..9c9c6ad55f14 100644
--- a/include/uapi/linux/atm_zatm.h
+++ b/include/uapi/linux/atm_zatm.h
@@ -14,7 +14,6 @@
14 14
15#include <linux/atmapi.h> 15#include <linux/atmapi.h>
16#include <linux/atmioc.h> 16#include <linux/atmioc.h>
17#include <linux/time.h>
18 17
19#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) 18#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
20 /* get pool statistics */ 19 /* get pool statistics */
diff --git a/include/uapi/linux/bpqether.h b/include/uapi/linux/bpqether.h
index a6c35e1a89ad..05865edaefda 100644
--- a/include/uapi/linux/bpqether.h
+++ b/include/uapi/linux/bpqether.h
@@ -5,9 +5,7 @@
5 * Defines for the BPQETHER pseudo device driver 5 * Defines for the BPQETHER pseudo device driver
6 */ 6 */
7 7
8#ifndef __LINUX_IF_ETHER_H
9#include <linux/if_ether.h> 8#include <linux/if_ether.h>
10#endif
11 9
12#define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ 10#define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */
13#define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) 11#define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1)
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 33d00a4ce656..819d895edfdc 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,12 +18,6 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <sound/asound.h> 19#include <sound/asound.h>
20 20
21#ifndef __KERNEL__
22#error This API is an early revision and not enabled in the current
23#error kernel release, it will be enabled in a future kernel version
24#error with incompatible changes to what is here.
25#endif
26
27/* 21/*
28 * Maximum number of channels topology kcontrol can represent. 22 * Maximum number of channels topology kcontrol can represent.
29 */ 23 */
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 570eeca7bdfa..ad1bc67aff1b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab)
687 687
688 hlist_for_each_entry_safe(l, n, head, hash_node) { 688 hlist_for_each_entry_safe(l, n, head, hash_node) {
689 hlist_del_rcu(&l->hash_node); 689 hlist_del_rcu(&l->hash_node);
690 htab_elem_free(htab, l); 690 if (l->state != HTAB_EXTRA_ELEM_USED)
691 htab_elem_free(htab, l);
691 } 692 }
692 } 693 }
693} 694}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cdc06546401b..233e3ac836a6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -195,7 +195,7 @@ static int map_create(union bpf_attr *attr)
195 195
196 err = bpf_map_charge_memlock(map); 196 err = bpf_map_charge_memlock(map);
197 if (err) 197 if (err)
198 goto free_map; 198 goto free_map_nouncharge;
199 199
200 err = bpf_map_new_fd(map); 200 err = bpf_map_new_fd(map);
201 if (err < 0) 201 if (err < 0)
@@ -205,6 +205,8 @@ static int map_create(union bpf_attr *attr)
205 return err; 205 return err;
206 206
207free_map: 207free_map:
208 bpf_map_uncharge_memlock(map);
209free_map_nouncharge:
208 map->ops->map_free(map); 210 map->ops->map_free(map);
209 return err; 211 return err;
210} 212}
diff --git a/kernel/fork.c b/kernel/fork.c
index 623259fc794d..997ac1d584f7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -315,6 +315,9 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
315 315
316static void release_task_stack(struct task_struct *tsk) 316static void release_task_stack(struct task_struct *tsk)
317{ 317{
318 if (WARN_ON(tsk->state != TASK_DEAD))
319 return; /* Better to leak the stack than to free prematurely */
320
318 account_kernel_stack(tsk, -1); 321 account_kernel_stack(tsk, -1);
319 arch_release_thread_stack(tsk->stack); 322 arch_release_thread_stack(tsk->stack);
320 free_thread_stack(tsk); 323 free_thread_stack(tsk);
@@ -1862,6 +1865,7 @@ bad_fork_cleanup_count:
1862 atomic_dec(&p->cred->user->processes); 1865 atomic_dec(&p->cred->user->processes);
1863 exit_creds(p); 1866 exit_creds(p);
1864bad_fork_free: 1867bad_fork_free:
1868 p->state = TASK_DEAD;
1865 put_task_stack(p); 1869 put_task_stack(p);
1866 free_task(p); 1870 free_task(p);
1867fork_out: 1871fork_out:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9c4d30483264..6b669593e7eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1341 1341
1342 } else if (new->flags & IRQF_TRIGGER_MASK) { 1342 } else if (new->flags & IRQF_TRIGGER_MASK) {
1343 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; 1343 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1344 unsigned int omsk = irq_settings_get_trigger_mask(desc); 1344 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1345 1345
1346 if (nmsk != omsk) 1346 if (nmsk != omsk)
1347 /* hope the handler works with current trigger mode */ 1347 /* hope the handler works with current trigger mode */
1348 pr_warn("irq %d uses trigger mode %u; requested %u\n", 1348 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1349 irq, nmsk, omsk); 1349 irq, omsk, nmsk);
1350 } 1350 }
1351 1351
1352 *old_ptr = new; 1352 *old_ptr = new;
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 084452e34a12..bdff5ed57f10 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -203,8 +203,10 @@ static int __init test_suspend(void)
203 203
204 /* RTCs have initialized by now too ... can we use one? */ 204 /* RTCs have initialized by now too ... can we use one? */
205 dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); 205 dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
206 if (dev) 206 if (dev) {
207 rtc = rtc_class_open(dev_name(dev)); 207 rtc = rtc_class_open(dev_name(dev));
208 put_device(dev);
209 }
208 if (!rtc) { 210 if (!rtc) {
209 printk(warn_no_rtc); 211 printk(warn_no_rtc);
210 return 0; 212 return 0;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index de08fc90baaf..f7a55e9ff2f7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -253,17 +253,6 @@ static int preferred_console = -1;
253int console_set_on_cmdline; 253int console_set_on_cmdline;
254EXPORT_SYMBOL(console_set_on_cmdline); 254EXPORT_SYMBOL(console_set_on_cmdline);
255 255
256#ifdef CONFIG_OF
257static bool of_specified_console;
258
259void console_set_by_of(void)
260{
261 of_specified_console = true;
262}
263#else
264# define of_specified_console false
265#endif
266
267/* Flag: console code may call schedule() */ 256/* Flag: console code may call schedule() */
268static int console_may_schedule; 257static int console_may_schedule;
269 258
@@ -794,8 +783,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
794 return ret; 783 return ret;
795} 784}
796 785
797static void cont_flush(void);
798
799static ssize_t devkmsg_read(struct file *file, char __user *buf, 786static ssize_t devkmsg_read(struct file *file, char __user *buf,
800 size_t count, loff_t *ppos) 787 size_t count, loff_t *ppos)
801{ 788{
@@ -811,7 +798,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
811 if (ret) 798 if (ret)
812 return ret; 799 return ret;
813 raw_spin_lock_irq(&logbuf_lock); 800 raw_spin_lock_irq(&logbuf_lock);
814 cont_flush();
815 while (user->seq == log_next_seq) { 801 while (user->seq == log_next_seq) {
816 if (file->f_flags & O_NONBLOCK) { 802 if (file->f_flags & O_NONBLOCK) {
817 ret = -EAGAIN; 803 ret = -EAGAIN;
@@ -874,7 +860,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
874 return -ESPIPE; 860 return -ESPIPE;
875 861
876 raw_spin_lock_irq(&logbuf_lock); 862 raw_spin_lock_irq(&logbuf_lock);
877 cont_flush();
878 switch (whence) { 863 switch (whence) {
879 case SEEK_SET: 864 case SEEK_SET:
880 /* the first record */ 865 /* the first record */
@@ -913,7 +898,6 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
913 poll_wait(file, &log_wait, wait); 898 poll_wait(file, &log_wait, wait);
914 899
915 raw_spin_lock_irq(&logbuf_lock); 900 raw_spin_lock_irq(&logbuf_lock);
916 cont_flush();
917 if (user->seq < log_next_seq) { 901 if (user->seq < log_next_seq) {
918 /* return error when data has vanished underneath us */ 902 /* return error when data has vanished underneath us */
919 if (user->seq < log_first_seq) 903 if (user->seq < log_first_seq)
@@ -1300,7 +1284,6 @@ static int syslog_print(char __user *buf, int size)
1300 size_t skip; 1284 size_t skip;
1301 1285
1302 raw_spin_lock_irq(&logbuf_lock); 1286 raw_spin_lock_irq(&logbuf_lock);
1303 cont_flush();
1304 if (syslog_seq < log_first_seq) { 1287 if (syslog_seq < log_first_seq) {
1305 /* messages are gone, move to first one */ 1288 /* messages are gone, move to first one */
1306 syslog_seq = log_first_seq; 1289 syslog_seq = log_first_seq;
@@ -1360,7 +1343,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1360 return -ENOMEM; 1343 return -ENOMEM;
1361 1344
1362 raw_spin_lock_irq(&logbuf_lock); 1345 raw_spin_lock_irq(&logbuf_lock);
1363 cont_flush();
1364 if (buf) { 1346 if (buf) {
1365 u64 next_seq; 1347 u64 next_seq;
1366 u64 seq; 1348 u64 seq;
@@ -1522,7 +1504,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
1522 /* Number of chars in the log buffer */ 1504 /* Number of chars in the log buffer */
1523 case SYSLOG_ACTION_SIZE_UNREAD: 1505 case SYSLOG_ACTION_SIZE_UNREAD:
1524 raw_spin_lock_irq(&logbuf_lock); 1506 raw_spin_lock_irq(&logbuf_lock);
1525 cont_flush();
1526 if (syslog_seq < log_first_seq) { 1507 if (syslog_seq < log_first_seq) {
1527 /* messages are gone, move to first one */ 1508 /* messages are gone, move to first one */
1528 syslog_seq = log_first_seq; 1509 syslog_seq = log_first_seq;
@@ -2657,7 +2638,7 @@ void register_console(struct console *newcon)
2657 * didn't select a console we take the first one 2638 * didn't select a console we take the first one
2658 * that registers here. 2639 * that registers here.
2659 */ 2640 */
2660 if (preferred_console < 0 && !of_specified_console) { 2641 if (preferred_console < 0) {
2661 if (newcon->index < 0) 2642 if (newcon->index < 0)
2662 newcon->index = 0; 2643 newcon->index = 0;
2663 if (newcon->setup == NULL || 2644 if (newcon->setup == NULL ||
@@ -3039,7 +3020,6 @@ void kmsg_dump(enum kmsg_dump_reason reason)
3039 dumper->active = true; 3020 dumper->active = true;
3040 3021
3041 raw_spin_lock_irqsave(&logbuf_lock, flags); 3022 raw_spin_lock_irqsave(&logbuf_lock, flags);
3042 cont_flush();
3043 dumper->cur_seq = clear_seq; 3023 dumper->cur_seq = clear_seq;
3044 dumper->cur_idx = clear_idx; 3024 dumper->cur_idx = clear_idx;
3045 dumper->next_seq = log_next_seq; 3025 dumper->next_seq = log_next_seq;
@@ -3130,7 +3110,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
3130 bool ret; 3110 bool ret;
3131 3111
3132 raw_spin_lock_irqsave(&logbuf_lock, flags); 3112 raw_spin_lock_irqsave(&logbuf_lock, flags);
3133 cont_flush();
3134 ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); 3113 ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
3135 raw_spin_unlock_irqrestore(&logbuf_lock, flags); 3114 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
3136 3115
@@ -3173,7 +3152,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
3173 goto out; 3152 goto out;
3174 3153
3175 raw_spin_lock_irqsave(&logbuf_lock, flags); 3154 raw_spin_lock_irqsave(&logbuf_lock, flags);
3176 cont_flush();
3177 if (dumper->cur_seq < log_first_seq) { 3155 if (dumper->cur_seq < log_first_seq) {
3178 /* messages are gone, move to first available one */ 3156 /* messages are gone, move to first available one */
3179 dumper->cur_seq = log_first_seq; 3157 dumper->cur_seq = log_first_seq;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 42d4027f9e26..154fd689fe02 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5192,21 +5192,14 @@ void sched_show_task(struct task_struct *p)
5192 int ppid; 5192 int ppid;
5193 unsigned long state = p->state; 5193 unsigned long state = p->state;
5194 5194
5195 if (!try_get_task_stack(p))
5196 return;
5195 if (state) 5197 if (state)
5196 state = __ffs(state) + 1; 5198 state = __ffs(state) + 1;
5197 printk(KERN_INFO "%-15.15s %c", p->comm, 5199 printk(KERN_INFO "%-15.15s %c", p->comm,
5198 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 5200 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5199#if BITS_PER_LONG == 32
5200 if (state == TASK_RUNNING)
5201 printk(KERN_CONT " running ");
5202 else
5203 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5204#else
5205 if (state == TASK_RUNNING) 5201 if (state == TASK_RUNNING)
5206 printk(KERN_CONT " running task "); 5202 printk(KERN_CONT " running task ");
5207 else
5208 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
5209#endif
5210#ifdef CONFIG_DEBUG_STACK_USAGE 5203#ifdef CONFIG_DEBUG_STACK_USAGE
5211 free = stack_not_used(p); 5204 free = stack_not_used(p);
5212#endif 5205#endif
@@ -5221,6 +5214,7 @@ void sched_show_task(struct task_struct *p)
5221 5214
5222 print_worker_info(KERN_INFO, p); 5215 print_worker_info(KERN_INFO, p);
5223 show_stack(p, NULL); 5216 show_stack(p, NULL);
5217 put_task_stack(p);
5224} 5218}
5225 5219
5226void show_state_filter(unsigned long state_filter) 5220void show_state_filter(unsigned long state_filter)
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 9b7f838511ce..8a5e44236f78 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -49,7 +49,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1
49 [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, 49 [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
50 [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; 50 [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
51 51
52static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = { 52/*
53 * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
54 * Make sure they are always aligned.
55 */
56static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
53 [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, 57 [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
54}; 58};
55 59
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 4d830e299989..f87d138e9672 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -192,6 +192,7 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
192 trace->entries = stack->entries; 192 trace->entries = stack->entries;
193 trace->skip = 0; 193 trace->skip = 0;
194} 194}
195EXPORT_SYMBOL_GPL(depot_fetch_stack);
195 196
196/** 197/**
197 * depot_save_stack - save stack in a stack depot. 198 * depot_save_stack - save stack in a stack depot.
@@ -283,3 +284,4 @@ exit:
283fast_exit: 284fast_exit:
284 return retval; 285 return retval;
285} 286}
287EXPORT_SYMBOL_GPL(depot_save_stack);
diff --git a/mm/cma.c b/mm/cma.c
index 384c2cb51b56..c960459eda7e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -385,6 +385,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
385 bitmap_maxno = cma_bitmap_maxno(cma); 385 bitmap_maxno = cma_bitmap_maxno(cma);
386 bitmap_count = cma_bitmap_pages_to_bits(cma, count); 386 bitmap_count = cma_bitmap_pages_to_bits(cma, count);
387 387
388 if (bitmap_count > bitmap_maxno)
389 return NULL;
390
388 for (;;) { 391 for (;;) {
389 mutex_lock(&cma->lock); 392 mutex_lock(&cma->lock);
390 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, 393 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
diff --git a/mm/filemap.c b/mm/filemap.c
index c7fe2f16503f..50b52fe51937 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1732,6 +1732,9 @@ find_page:
1732 if (inode->i_blkbits == PAGE_SHIFT || 1732 if (inode->i_blkbits == PAGE_SHIFT ||
1733 !mapping->a_ops->is_partially_uptodate) 1733 !mapping->a_ops->is_partially_uptodate)
1734 goto page_not_up_to_date; 1734 goto page_not_up_to_date;
1735 /* pipes can't handle partially uptodate pages */
1736 if (unlikely(iter->type & ITER_PIPE))
1737 goto page_not_up_to_date;
1735 if (!trylock_page(page)) 1738 if (!trylock_page(page))
1736 goto page_not_up_to_date; 1739 goto page_not_up_to_date;
1737 /* Did it get truncated before we got the lock? */ 1740 /* Did it get truncated before we got the lock? */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec49d9ef1eef..418bf01a50ed 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1826,11 +1826,17 @@ static void return_unused_surplus_pages(struct hstate *h,
1826 * is not the case is if a reserve map was changed between calls. It 1826 * is not the case is if a reserve map was changed between calls. It
1827 * is the responsibility of the caller to notice the difference and 1827 * is the responsibility of the caller to notice the difference and
1828 * take appropriate action. 1828 * take appropriate action.
1829 *
1830 * vma_add_reservation is used in error paths where a reservation must
1831 * be restored when a newly allocated huge page must be freed. It is
1832 * to be called after calling vma_needs_reservation to determine if a
1833 * reservation exists.
1829 */ 1834 */
1830enum vma_resv_mode { 1835enum vma_resv_mode {
1831 VMA_NEEDS_RESV, 1836 VMA_NEEDS_RESV,
1832 VMA_COMMIT_RESV, 1837 VMA_COMMIT_RESV,
1833 VMA_END_RESV, 1838 VMA_END_RESV,
1839 VMA_ADD_RESV,
1834}; 1840};
1835static long __vma_reservation_common(struct hstate *h, 1841static long __vma_reservation_common(struct hstate *h,
1836 struct vm_area_struct *vma, unsigned long addr, 1842 struct vm_area_struct *vma, unsigned long addr,
@@ -1856,6 +1862,14 @@ static long __vma_reservation_common(struct hstate *h,
1856 region_abort(resv, idx, idx + 1); 1862 region_abort(resv, idx, idx + 1);
1857 ret = 0; 1863 ret = 0;
1858 break; 1864 break;
1865 case VMA_ADD_RESV:
1866 if (vma->vm_flags & VM_MAYSHARE)
1867 ret = region_add(resv, idx, idx + 1);
1868 else {
1869 region_abort(resv, idx, idx + 1);
1870 ret = region_del(resv, idx, idx + 1);
1871 }
1872 break;
1859 default: 1873 default:
1860 BUG(); 1874 BUG();
1861 } 1875 }
@@ -1903,6 +1917,56 @@ static void vma_end_reservation(struct hstate *h,
1903 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 1917 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1904} 1918}
1905 1919
1920static long vma_add_reservation(struct hstate *h,
1921 struct vm_area_struct *vma, unsigned long addr)
1922{
1923 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1924}
1925
1926/*
1927 * This routine is called to restore a reservation on error paths. In the
1928 * specific error paths, a huge page was allocated (via alloc_huge_page)
1929 * and is about to be freed. If a reservation for the page existed,
1930 * alloc_huge_page would have consumed the reservation and set PagePrivate
1931 * in the newly allocated page. When the page is freed via free_huge_page,
1932 * the global reservation count will be incremented if PagePrivate is set.
1933 * However, free_huge_page can not adjust the reserve map. Adjust the
1934 * reserve map here to be consistent with global reserve count adjustments
1935 * to be made by free_huge_page.
1936 */
1937static void restore_reserve_on_error(struct hstate *h,
1938 struct vm_area_struct *vma, unsigned long address,
1939 struct page *page)
1940{
1941 if (unlikely(PagePrivate(page))) {
1942 long rc = vma_needs_reservation(h, vma, address);
1943
1944 if (unlikely(rc < 0)) {
1945 /*
1946 * Rare out of memory condition in reserve map
1947 * manipulation. Clear PagePrivate so that
1948 * global reserve count will not be incremented
1949 * by free_huge_page. This will make it appear
1950 * as though the reservation for this page was
1951 * consumed. This may prevent the task from
1952 * faulting in the page at a later time. This
1953 * is better than inconsistent global huge page
1954 * accounting of reserve counts.
1955 */
1956 ClearPagePrivate(page);
1957 } else if (rc) {
1958 rc = vma_add_reservation(h, vma, address);
1959 if (unlikely(rc < 0))
1960 /*
1961 * See above comment about rare out of
1962 * memory condition.
1963 */
1964 ClearPagePrivate(page);
1965 } else
1966 vma_end_reservation(h, vma, address);
1967 }
1968}
1969
1906struct page *alloc_huge_page(struct vm_area_struct *vma, 1970struct page *alloc_huge_page(struct vm_area_struct *vma,
1907 unsigned long addr, int avoid_reserve) 1971 unsigned long addr, int avoid_reserve)
1908{ 1972{
@@ -3498,6 +3562,7 @@ retry_avoidcopy:
3498 spin_unlock(ptl); 3562 spin_unlock(ptl);
3499 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 3563 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3500out_release_all: 3564out_release_all:
3565 restore_reserve_on_error(h, vma, address, new_page);
3501 put_page(new_page); 3566 put_page(new_page);
3502out_release_old: 3567out_release_old:
3503 put_page(old_page); 3568 put_page(old_page);
@@ -3680,6 +3745,7 @@ backout:
3680 spin_unlock(ptl); 3745 spin_unlock(ptl);
3681backout_unlocked: 3746backout_unlocked:
3682 unlock_page(page); 3747 unlock_page(page);
3748 restore_reserve_on_error(h, vma, address, page);
3683 put_page(page); 3749 put_page(page);
3684 goto out; 3750 goto out;
3685} 3751}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e5355a5b423f..d1380ed93fdf 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1414,6 +1414,7 @@ static void kmemleak_scan(void)
1414 /* data/bss scanning */ 1414 /* data/bss scanning */
1415 scan_large_block(_sdata, _edata); 1415 scan_large_block(_sdata, _edata);
1416 scan_large_block(__bss_start, __bss_stop); 1416 scan_large_block(__bss_start, __bss_stop);
1417 scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
1417 1418
1418#ifdef CONFIG_SMP 1419#ifdef CONFIG_SMP
1419 /* per-cpu sections scanning */ 1420 /* per-cpu sections scanning */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index de88f33519c0..19e796d36a62 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1112 } 1112 }
1113 1113
1114 if (!PageHuge(p) && PageTransHuge(hpage)) { 1114 if (!PageHuge(p) && PageTransHuge(hpage)) {
1115 lock_page(hpage); 1115 lock_page(p);
1116 if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) { 1116 if (!PageAnon(p) || unlikely(split_huge_page(p))) {
1117 unlock_page(hpage); 1117 unlock_page(p);
1118 if (!PageAnon(hpage)) 1118 if (!PageAnon(p))
1119 pr_err("Memory failure: %#lx: non anonymous thp\n", 1119 pr_err("Memory failure: %#lx: non anonymous thp\n",
1120 pfn); 1120 pfn);
1121 else 1121 else
@@ -1126,9 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1126 put_hwpoison_page(p); 1126 put_hwpoison_page(p);
1127 return -EBUSY; 1127 return -EBUSY;
1128 } 1128 }
1129 unlock_page(hpage); 1129 unlock_page(p);
1130 get_hwpoison_page(p);
1131 put_hwpoison_page(hpage);
1132 VM_BUG_ON_PAGE(!page_count(p), p); 1130 VM_BUG_ON_PAGE(!page_count(p), p);
1133 hpage = compound_head(p); 1131 hpage = compound_head(p);
1134 } 1132 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8fd42aa7c4bd..6de9440e3ae2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -92,7 +92,7 @@ int _node_numa_mem_[MAX_NUMNODES];
92#endif 92#endif
93 93
94#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 94#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
95volatile u64 latent_entropy __latent_entropy; 95volatile unsigned long latent_entropy __latent_entropy;
96EXPORT_SYMBOL(latent_entropy); 96EXPORT_SYMBOL(latent_entropy);
97#endif 97#endif
98 98
@@ -3658,7 +3658,7 @@ retry:
3658 /* Make sure we know about allocations which stall for too long */ 3658 /* Make sure we know about allocations which stall for too long */
3659 if (time_after(jiffies, alloc_start + stall_timeout)) { 3659 if (time_after(jiffies, alloc_start + stall_timeout)) {
3660 warn_alloc(gfp_mask, 3660 warn_alloc(gfp_mask,
3661 "page alloction stalls for %ums, order:%u\n", 3661 "page allocation stalls for %ums, order:%u",
3662 jiffies_to_msecs(jiffies-alloc_start), order); 3662 jiffies_to_msecs(jiffies-alloc_start), order);
3663 stall_timeout += 10 * HZ; 3663 stall_timeout += 10 * HZ;
3664 } 3664 }
diff --git a/mm/shmem.c b/mm/shmem.c
index ad7813d73ea7..166ebf5d2bce 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1483,6 +1483,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1483 copy_highpage(newpage, oldpage); 1483 copy_highpage(newpage, oldpage);
1484 flush_dcache_page(newpage); 1484 flush_dcache_page(newpage);
1485 1485
1486 __SetPageLocked(newpage);
1487 __SetPageSwapBacked(newpage);
1486 SetPageUptodate(newpage); 1488 SetPageUptodate(newpage);
1487 set_page_private(newpage, swap_index); 1489 set_page_private(newpage, swap_index);
1488 SetPageSwapCache(newpage); 1490 SetPageSwapCache(newpage);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 71f0b28a1bec..329b03843863 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
533 533
534 s = create_cache(cache_name, root_cache->object_size, 534 s = create_cache(cache_name, root_cache->object_size,
535 root_cache->size, root_cache->align, 535 root_cache->size, root_cache->align,
536 root_cache->flags, root_cache->ctor, 536 root_cache->flags & CACHE_CREATE_MASK,
537 memcg, root_cache); 537 root_cache->ctor, memcg, root_cache);
538 /* 538 /*
539 * If we could not create a memcg cache, do not complain, because 539 * If we could not create a memcg cache, do not complain, because
540 * that's not critical at all as we can always proceed with the root 540 * that's not critical at all as we can always proceed with the root
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2210de290b54..f30438970cd1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2224,6 +2224,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
2224 swab32s(&swap_header->info.version); 2224 swab32s(&swap_header->info.version);
2225 swab32s(&swap_header->info.last_page); 2225 swab32s(&swap_header->info.last_page);
2226 swab32s(&swap_header->info.nr_badpages); 2226 swab32s(&swap_header->info.nr_badpages);
2227 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2228 return 0;
2227 for (i = 0; i < swap_header->info.nr_badpages; i++) 2229 for (i = 0; i < swap_header->info.nr_badpages; i++)
2228 swab32s(&swap_header->info.badpages[i]); 2230 swab32s(&swap_header->info.badpages[i]);
2229 } 2231 }
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8e999ffdf28b..8af9d25ff988 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1549 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; 1549 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1550 struct sock *sk = sock->sk; 1550 struct sock *sk = sock->sk;
1551 struct bcm_sock *bo = bcm_sk(sk); 1551 struct bcm_sock *bo = bcm_sk(sk);
1552 int ret = 0;
1552 1553
1553 if (len < sizeof(*addr)) 1554 if (len < sizeof(*addr))
1554 return -EINVAL; 1555 return -EINVAL;
1555 1556
1556 if (bo->bound) 1557 lock_sock(sk);
1557 return -EISCONN; 1558
1559 if (bo->bound) {
1560 ret = -EISCONN;
1561 goto fail;
1562 }
1558 1563
1559 /* bind a device to this socket */ 1564 /* bind a device to this socket */
1560 if (addr->can_ifindex) { 1565 if (addr->can_ifindex) {
1561 struct net_device *dev; 1566 struct net_device *dev;
1562 1567
1563 dev = dev_get_by_index(&init_net, addr->can_ifindex); 1568 dev = dev_get_by_index(&init_net, addr->can_ifindex);
1564 if (!dev) 1569 if (!dev) {
1565 return -ENODEV; 1570 ret = -ENODEV;
1566 1571 goto fail;
1572 }
1567 if (dev->type != ARPHRD_CAN) { 1573 if (dev->type != ARPHRD_CAN) {
1568 dev_put(dev); 1574 dev_put(dev);
1569 return -ENODEV; 1575 ret = -ENODEV;
1576 goto fail;
1570 } 1577 }
1571 1578
1572 bo->ifindex = dev->ifindex; 1579 bo->ifindex = dev->ifindex;
@@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1577 bo->ifindex = 0; 1584 bo->ifindex = 0;
1578 } 1585 }
1579 1586
1580 bo->bound = 1;
1581
1582 if (proc_dir) { 1587 if (proc_dir) {
1583 /* unique socket address as filename */ 1588 /* unique socket address as filename */
1584 sprintf(bo->procname, "%lu", sock_i_ino(sk)); 1589 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1585 bo->bcm_proc_read = proc_create_data(bo->procname, 0644, 1590 bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1586 proc_dir, 1591 proc_dir,
1587 &bcm_proc_fops, sk); 1592 &bcm_proc_fops, sk);
1593 if (!bo->bcm_proc_read) {
1594 ret = -ENOMEM;
1595 goto fail;
1596 }
1588 } 1597 }
1589 1598
1590 return 0; 1599 bo->bound = 1;
1600
1601fail:
1602 release_sock(sk);
1603
1604 return ret;
1591} 1605}
1592 1606
1593static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 1607static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
index 7d54e944de5e..dcbe67ff3e2b 100644
--- a/net/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -34,7 +34,8 @@ void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
34 fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count); 34 fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
35 fl->object_size = le32_to_cpu(legacy->fl_object_size); 35 fl->object_size = le32_to_cpu(legacy->fl_object_size);
36 fl->pool_id = le32_to_cpu(legacy->fl_pg_pool); 36 fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
37 if (fl->pool_id == 0) 37 if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
38 fl->stripe_count == 0 && fl->object_size == 0)
38 fl->pool_id = -1; 39 fl->pool_id = -1;
39} 40}
40EXPORT_SYMBOL(ceph_file_layout_from_legacy); 41EXPORT_SYMBOL(ceph_file_layout_from_legacy);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d9bf7a1d0a58..e6ae15bc41b7 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4094,6 +4094,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4094 osd_init(&osdc->homeless_osd); 4094 osd_init(&osdc->homeless_osd);
4095 osdc->homeless_osd.o_osdc = osdc; 4095 osdc->homeless_osd.o_osdc = osdc;
4096 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD; 4096 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4097 osdc->last_linger_id = CEPH_LINGER_ID_START;
4097 osdc->linger_requests = RB_ROOT; 4098 osdc->linger_requests = RB_ROOT;
4098 osdc->map_checks = RB_ROOT; 4099 osdc->map_checks = RB_ROOT;
4099 osdc->linger_map_checks = RB_ROOT; 4100 osdc->linger_map_checks = RB_ROOT;
diff --git a/net/core/dev.c b/net/core/dev.c
index 7385c1a152fd..6deba68ad9e4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1765,19 +1765,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
1765 1765
1766int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1766int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1767{ 1767{
1768 if (skb_orphan_frags(skb, GFP_ATOMIC) || 1768 int ret = ____dev_forward_skb(dev, skb);
1769 unlikely(!is_skb_forwardable(dev, skb))) {
1770 atomic_long_inc(&dev->rx_dropped);
1771 kfree_skb(skb);
1772 return NET_RX_DROP;
1773 }
1774 1769
1775 skb_scrub_packet(skb, true); 1770 if (likely(!ret)) {
1776 skb->priority = 0; 1771 skb->protocol = eth_type_trans(skb, dev);
1777 skb->protocol = eth_type_trans(skb, dev); 1772 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1778 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1773 }
1779 1774
1780 return 0; 1775 return ret;
1781} 1776}
1782EXPORT_SYMBOL_GPL(__dev_forward_skb); 1777EXPORT_SYMBOL_GPL(__dev_forward_skb);
1783 1778
@@ -2599,7 +2594,7 @@ int skb_checksum_help(struct sk_buff *skb)
2599 goto out; 2594 goto out;
2600 } 2595 }
2601 2596
2602 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2597 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
2603out_set_summed: 2598out_set_summed:
2604 skb->ip_summed = CHECKSUM_NONE; 2599 skb->ip_summed = CHECKSUM_NONE;
2605out: 2600out:
diff --git a/net/core/filter.c b/net/core/filter.c
index cd9e2ba66b0e..dece94fef005 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
1628 return dev_forward_skb(dev, skb); 1628 return dev_forward_skb(dev, skb);
1629} 1629}
1630 1630
1631static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
1632 struct sk_buff *skb)
1633{
1634 int ret = ____dev_forward_skb(dev, skb);
1635
1636 if (likely(!ret)) {
1637 skb->dev = dev;
1638 ret = netif_rx(skb);
1639 }
1640
1641 return ret;
1642}
1643
1631static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) 1644static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1632{ 1645{
1633 int ret; 1646 int ret;
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1647 return ret; 1660 return ret;
1648} 1661}
1649 1662
1663static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
1664 u32 flags)
1665{
1666 /* skb->mac_len is not set on normal egress */
1667 unsigned int mlen = skb->network_header - skb->mac_header;
1668
1669 __skb_pull(skb, mlen);
1670
1671 /* At ingress, the mac header has already been pulled once.
1672 * At egress, skb_pospull_rcsum has to be done in case that
1673 * the skb is originated from ingress (i.e. a forwarded skb)
1674 * to ensure that rcsum starts at net header.
1675 */
1676 if (!skb_at_tc_ingress(skb))
1677 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
1678 skb_pop_mac_header(skb);
1679 skb_reset_mac_len(skb);
1680 return flags & BPF_F_INGRESS ?
1681 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
1682}
1683
1684static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
1685 u32 flags)
1686{
1687 bpf_push_mac_rcsum(skb);
1688 return flags & BPF_F_INGRESS ?
1689 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1690}
1691
1692static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
1693 u32 flags)
1694{
1695 switch (dev->type) {
1696 case ARPHRD_TUNNEL:
1697 case ARPHRD_TUNNEL6:
1698 case ARPHRD_SIT:
1699 case ARPHRD_IPGRE:
1700 case ARPHRD_VOID:
1701 case ARPHRD_NONE:
1702 return __bpf_redirect_no_mac(skb, dev, flags);
1703 default:
1704 return __bpf_redirect_common(skb, dev, flags);
1705 }
1706}
1707
1650BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) 1708BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
1651{ 1709{
1652 struct net_device *dev; 1710 struct net_device *dev;
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
1675 return -ENOMEM; 1733 return -ENOMEM;
1676 } 1734 }
1677 1735
1678 bpf_push_mac_rcsum(clone); 1736 return __bpf_redirect(clone, dev, flags);
1679
1680 return flags & BPF_F_INGRESS ?
1681 __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
1682} 1737}
1683 1738
1684static const struct bpf_func_proto bpf_clone_redirect_proto = { 1739static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb)
1722 return -EINVAL; 1777 return -EINVAL;
1723 } 1778 }
1724 1779
1725 bpf_push_mac_rcsum(skb); 1780 return __bpf_redirect(skb, dev, ri->flags);
1726
1727 return ri->flags & BPF_F_INGRESS ?
1728 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1729} 1781}
1730 1782
1731static const struct bpf_func_proto bpf_redirect_proto = { 1783static const struct bpf_func_proto bpf_redirect_proto = {
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 87e01815ec85..b481a4a6d3ec 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
122 struct flow_dissector_key_keyid *key_keyid; 122 struct flow_dissector_key_keyid *key_keyid;
123 bool skip_vlan = false; 123 bool skip_vlan = false;
124 u8 ip_proto = 0; 124 u8 ip_proto = 0;
125 bool ret = false; 125 bool ret;
126 126
127 if (!data) { 127 if (!data) {
128 data = skb->data; 128 data = skb->data;
@@ -549,12 +549,17 @@ ip_proto_again:
549out_good: 549out_good:
550 ret = true; 550 ret = true;
551 551
552out_bad: 552 key_control->thoff = (u16)nhoff;
553out:
553 key_basic->n_proto = proto; 554 key_basic->n_proto = proto;
554 key_basic->ip_proto = ip_proto; 555 key_basic->ip_proto = ip_proto;
555 key_control->thoff = (u16)nhoff;
556 556
557 return ret; 557 return ret;
558
559out_bad:
560 ret = false;
561 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
562 goto out;
558} 563}
559EXPORT_SYMBOL(__skb_flow_dissect); 564EXPORT_SYMBOL(__skb_flow_dissect);
560 565
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fb7348f13501..db313ec7af32 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype)
275 275
276 rtnl_msg_handlers[protocol][msgindex].doit = NULL; 276 rtnl_msg_handlers[protocol][msgindex].doit = NULL;
277 rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; 277 rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
278 rtnl_msg_handlers[protocol][msgindex].calcit = NULL;
278 279
279 return 0; 280 return 0;
280} 281}
diff --git a/net/core/sock.c b/net/core/sock.c
index 0397928dfdc2..14e6145be33b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
453EXPORT_SYMBOL(sock_queue_rcv_skb); 453EXPORT_SYMBOL(sock_queue_rcv_skb);
454 454
455int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, 455int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
456 const int nested, unsigned int trim_cap) 456 const int nested, unsigned int trim_cap, bool refcounted)
457{ 457{
458 int rc = NET_RX_SUCCESS; 458 int rc = NET_RX_SUCCESS;
459 459
@@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
487 487
488 bh_unlock_sock(sk); 488 bh_unlock_sock(sk);
489out: 489out:
490 sock_put(sk); 490 if (refcounted)
491 sock_put(sk);
491 return rc; 492 return rc;
492discard_and_relse: 493discard_and_relse:
493 kfree_skb(skb); 494 kfree_skb(skb);
@@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1543 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 1544 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
1544 1545
1545 newsk->sk_err = 0; 1546 newsk->sk_err = 0;
1547 newsk->sk_err_soft = 0;
1546 newsk->sk_priority = 0; 1548 newsk->sk_priority = 0;
1547 newsk->sk_incoming_cpu = raw_smp_processor_id(); 1549 newsk->sk_incoming_cpu = raw_smp_processor_id();
1548 atomic64_set(&newsk->sk_cookie, 0); 1550 atomic64_set(&newsk->sk_cookie, 0);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index a957acac2337..fda321d814d6 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
235{ 235{
236 const struct iphdr *iph = (struct iphdr *)skb->data; 236 const struct iphdr *iph = (struct iphdr *)skb->data;
237 const u8 offset = iph->ihl << 2; 237 const u8 offset = iph->ihl << 2;
238 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 238 const struct dccp_hdr *dh;
239 struct dccp_sock *dp; 239 struct dccp_sock *dp;
240 struct inet_sock *inet; 240 struct inet_sock *inet;
241 const int type = icmp_hdr(skb)->type; 241 const int type = icmp_hdr(skb)->type;
@@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
245 int err; 245 int err;
246 struct net *net = dev_net(skb->dev); 246 struct net *net = dev_net(skb->dev);
247 247
248 if (skb->len < offset + sizeof(*dh) || 248 /* Only need dccph_dport & dccph_sport which are the first
249 skb->len < offset + __dccp_basic_hdr_len(dh)) { 249 * 4 bytes in dccp header.
250 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); 250 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
251 return; 251 */
252 } 252 BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
253 BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
254 dh = (struct dccp_hdr *)(skb->data + offset);
253 255
254 sk = __inet_lookup_established(net, &dccp_hashinfo, 256 sk = __inet_lookup_established(net, &dccp_hashinfo,
255 iph->daddr, dh->dccph_dport, 257 iph->daddr, dh->dccph_dport,
@@ -862,7 +864,7 @@ lookup:
862 goto discard_and_relse; 864 goto discard_and_relse;
863 nf_reset(skb); 865 nf_reset(skb);
864 866
865 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); 867 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
866 868
867no_dccp_socket: 869no_dccp_socket:
868 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 870 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 32f9f1a189f8..adfc790f7193 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
70 u8 type, u8 code, int offset, __be32 info) 70 u8 type, u8 code, int offset, __be32 info)
71{ 71{
72 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; 72 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
73 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 73 const struct dccp_hdr *dh;
74 struct dccp_sock *dp; 74 struct dccp_sock *dp;
75 struct ipv6_pinfo *np; 75 struct ipv6_pinfo *np;
76 struct sock *sk; 76 struct sock *sk;
@@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
78 __u64 seq; 78 __u64 seq;
79 struct net *net = dev_net(skb->dev); 79 struct net *net = dev_net(skb->dev);
80 80
81 if (skb->len < offset + sizeof(*dh) || 81 /* Only need dccph_dport & dccph_sport which are the first
82 skb->len < offset + __dccp_basic_hdr_len(dh)) { 82 * 4 bytes in dccp header.
83 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), 83 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
84 ICMP6_MIB_INERRORS); 84 */
85 return; 85 BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
86 } 86 BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
87 dh = (struct dccp_hdr *)(skb->data + offset);
87 88
88 sk = __inet6_lookup_established(net, &dccp_hashinfo, 89 sk = __inet6_lookup_established(net, &dccp_hashinfo,
89 &hdr->daddr, dh->dccph_dport, 90 &hdr->daddr, dh->dccph_dport,
@@ -738,7 +739,8 @@ lookup:
738 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 739 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
739 goto discard_and_relse; 740 goto discard_and_relse;
740 741
741 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0; 742 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
743 refcounted) ? -1 : 0;
742 744
743no_dccp_socket: 745no_dccp_socket:
744 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 746 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
956 .getsockopt = ipv6_getsockopt, 958 .getsockopt = ipv6_getsockopt,
957 .addr2sockaddr = inet6_csk_addr2sockaddr, 959 .addr2sockaddr = inet6_csk_addr2sockaddr,
958 .sockaddr_len = sizeof(struct sockaddr_in6), 960 .sockaddr_len = sizeof(struct sockaddr_in6),
961 .bind_conflict = inet6_csk_bind_conflict,
959#ifdef CONFIG_COMPAT 962#ifdef CONFIG_COMPAT
960 .compat_setsockopt = compat_ipv6_setsockopt, 963 .compat_setsockopt = compat_ipv6_setsockopt,
961 .compat_getsockopt = compat_ipv6_getsockopt, 964 .compat_getsockopt = compat_ipv6_getsockopt,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 41e65804ddf5..9fe25bf63296 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout)
1009 __kfree_skb(skb); 1009 __kfree_skb(skb);
1010 } 1010 }
1011 1011
1012 /* If socket has been already reset kill it. */
1013 if (sk->sk_state == DCCP_CLOSED)
1014 goto adjudge_to_death;
1015
1012 if (data_was_unread) { 1016 if (data_was_unread) {
1013 /* Unread data was tossed, send an appropriate Reset Code */ 1017 /* Unread data was tossed, send an appropriate Reset Code */
1014 DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); 1018 DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9648c97e541f..5ddf5cda07f4 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -533,9 +533,9 @@ EXPORT_SYMBOL(inet_dgram_connect);
533 533
534static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) 534static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
535{ 535{
536 DEFINE_WAIT(wait); 536 DEFINE_WAIT_FUNC(wait, woken_wake_function);
537 537
538 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 538 add_wait_queue(sk_sleep(sk), &wait);
539 sk->sk_write_pending += writebias; 539 sk->sk_write_pending += writebias;
540 540
541 /* Basic assumption: if someone sets sk->sk_err, he _must_ 541 /* Basic assumption: if someone sets sk->sk_err, he _must_
@@ -545,13 +545,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
545 */ 545 */
546 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 546 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
547 release_sock(sk); 547 release_sock(sk);
548 timeo = schedule_timeout(timeo); 548 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
549 lock_sock(sk); 549 lock_sock(sk);
550 if (signal_pending(current) || !timeo) 550 if (signal_pending(current) || !timeo)
551 break; 551 break;
552 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
553 } 552 }
554 finish_wait(sk_sleep(sk), &wait); 553 remove_wait_queue(sk_sleep(sk), &wait);
555 sk->sk_write_pending -= writebias; 554 sk->sk_write_pending -= writebias;
556 return timeo; 555 return timeo;
557} 556}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 31cef3602585..4cff74d4133f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2413,22 +2413,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2413 struct key_vector *l, **tp = &iter->tnode; 2413 struct key_vector *l, **tp = &iter->tnode;
2414 t_key key; 2414 t_key key;
2415 2415
2416 /* use cache location of next-to-find key */ 2416 /* use cached location of previously found key */
2417 if (iter->pos > 0 && pos >= iter->pos) { 2417 if (iter->pos > 0 && pos >= iter->pos) {
2418 pos -= iter->pos;
2419 key = iter->key; 2418 key = iter->key;
2420 } else { 2419 } else {
2421 iter->pos = 0; 2420 iter->pos = 1;
2422 key = 0; 2421 key = 0;
2423 } 2422 }
2424 2423
2425 while ((l = leaf_walk_rcu(tp, key)) != NULL) { 2424 pos -= iter->pos;
2425
2426 while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) {
2426 key = l->key + 1; 2427 key = l->key + 1;
2427 iter->pos++; 2428 iter->pos++;
2428
2429 if (--pos <= 0)
2430 break;
2431
2432 l = NULL; 2429 l = NULL;
2433 2430
2434 /* handle unlikely case of a key wrap */ 2431 /* handle unlikely case of a key wrap */
@@ -2437,7 +2434,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2437 } 2434 }
2438 2435
2439 if (l) 2436 if (l)
2440 iter->key = key; /* remember it */ 2437 iter->key = l->key; /* remember it */
2441 else 2438 else
2442 iter->pos = 0; /* forget it */ 2439 iter->pos = 0; /* forget it */
2443 2440
@@ -2465,7 +2462,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2465 return fib_route_get_idx(iter, *pos); 2462 return fib_route_get_idx(iter, *pos);
2466 2463
2467 iter->pos = 0; 2464 iter->pos = 0;
2468 iter->key = 0; 2465 iter->key = KEY_MAX;
2469 2466
2470 return SEQ_START_TOKEN; 2467 return SEQ_START_TOKEN;
2471} 2468}
@@ -2474,7 +2471,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2474{ 2471{
2475 struct fib_route_iter *iter = seq->private; 2472 struct fib_route_iter *iter = seq->private;
2476 struct key_vector *l = NULL; 2473 struct key_vector *l = NULL;
2477 t_key key = iter->key; 2474 t_key key = iter->key + 1;
2478 2475
2479 ++*pos; 2476 ++*pos;
2480 2477
@@ -2483,7 +2480,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2483 l = leaf_walk_rcu(&iter->tnode, key); 2480 l = leaf_walk_rcu(&iter->tnode, key);
2484 2481
2485 if (l) { 2482 if (l) {
2486 iter->key = l->key + 1; 2483 iter->key = l->key;
2487 iter->pos++; 2484 iter->pos++;
2488 } else { 2485 } else {
2489 iter->pos = 0; 2486 iter->pos = 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 53a890b605fc..691146abde2d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -479,7 +479,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
479 fl4->flowi4_proto = IPPROTO_ICMP; 479 fl4->flowi4_proto = IPPROTO_ICMP;
480 fl4->fl4_icmp_type = type; 480 fl4->fl4_icmp_type = type;
481 fl4->fl4_icmp_code = code; 481 fl4->fl4_icmp_code = code;
482 fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); 482 fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
483 483
484 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); 484 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
485 rt = __ip_route_output_key_hash(net, fl4, 485 rt = __ip_route_output_key_hash(net, fl4,
@@ -504,7 +504,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
504 if (err) 504 if (err)
505 goto relookup_failed; 505 goto relookup_failed;
506 506
507 if (inet_addr_type_dev_table(net, skb_in->dev, 507 if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
508 fl4_dec.saddr) == RTN_LOCAL) { 508 fl4_dec.saddr) == RTN_LOCAL) {
509 rt2 = __ip_route_output_key(net, &fl4_dec); 509 rt2 = __ip_route_output_key(net, &fl4_dec);
510 if (IS_ERR(rt2)) 510 if (IS_ERR(rt2))
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 8b4ffd216839..9f0a7b96646f 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb)
117 if (opt->is_strictroute && rt->rt_uses_gateway) 117 if (opt->is_strictroute && rt->rt_uses_gateway)
118 goto sr_failed; 118 goto sr_failed;
119 119
120 IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; 120 IPCB(skb)->flags |= IPSKB_FORWARDED;
121 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); 121 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
122 if (ip_exceeds_mtu(skb, mtu)) { 122 if (ip_exceeds_mtu(skb, mtu)) {
123 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); 123 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 37dfacd340af..eaf720b65d7e 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -239,19 +239,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
239 struct sk_buff *segs; 239 struct sk_buff *segs;
240 int ret = 0; 240 int ret = 0;
241 241
242 /* common case: fragmentation of segments is not allowed, 242 /* common case: seglen is <= mtu
243 * or seglen is <= mtu
244 */ 243 */
245 if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) || 244 if (skb_gso_validate_mtu(skb, mtu))
246 skb_gso_validate_mtu(skb, mtu))
247 return ip_finish_output2(net, sk, skb); 245 return ip_finish_output2(net, sk, skb);
248 246
249 /* Slowpath - GSO segment length is exceeding the dst MTU. 247 /* Slowpath - GSO segment length exceeds the egress MTU.
250 * 248 *
251 * This can happen in two cases: 249 * This can happen in several cases:
252 * 1) TCP GRO packet, DF bit not set 250 * - Forwarding of a TCP GRO skb, when DF flag is not set.
253 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly 251 * - Forwarding of an skb that arrived on a virtualization interface
254 * from host network stack. 252 * (virtio-net/vhost/tap) with TSO/GSO size set by other network
253 * stack.
254 * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
255 * interface with a smaller MTU.
256 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
257 * bridged to a NETIF_F_TSO tunnel stacked over an interface with an
258 * insufficent MTU.
255 */ 259 */
256 features = netif_skb_features(skb); 260 features = netif_skb_features(skb);
257 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); 261 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
@@ -1579,7 +1583,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1579 } 1583 }
1580 1584
1581 oif = arg->bound_dev_if; 1585 oif = arg->bound_dev_if;
1582 oif = oif ? : skb->skb_iif; 1586 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1587 oif = skb->skb_iif;
1583 1588
1584 flowi4_init_output(&fl4, oif, 1589 flowi4_init_output(&fl4, oif,
1585 IP4_REPLY_MARK(net, skb->mark), 1590 IP4_REPLY_MARK(net, skb->mark),
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 777bc1883870..fed3d29f9eb3 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
63 int pkt_len = skb->len - skb_inner_network_offset(skb); 63 int pkt_len = skb->len - skb_inner_network_offset(skb);
64 struct net *net = dev_net(rt->dst.dev); 64 struct net *net = dev_net(rt->dst.dev);
65 struct net_device *dev = skb->dev; 65 struct net_device *dev = skb->dev;
66 int skb_iif = skb->skb_iif;
67 struct iphdr *iph; 66 struct iphdr *iph;
68 int err; 67 int err;
69 68
@@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
73 skb_dst_set(skb, &rt->dst); 72 skb_dst_set(skb, &rt->dst);
74 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 73 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
75 74
76 if (skb_iif && !(df & htons(IP_DF))) {
77 /* Arrived from an ingress interface, got encapsulated, with
78 * fragmentation of encapulating frames allowed.
79 * If skb is gso, the resulting encapsulated network segments
80 * may exceed dst mtu.
81 * Allow IP Fragmentation of segments.
82 */
83 IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
84 }
85
86 /* Push down and install the IP header. */ 75 /* Push down and install the IP header. */
87 skb_push(skb, sizeof(struct iphdr)); 76 skb_push(skb, sizeof(struct iphdr));
88 skb_reset_network_header(skb); 77 skb_reset_network_header(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index f2fd13b07273..665505d86b12 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1754,7 +1754,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1754 vif->dev->stats.tx_bytes += skb->len; 1754 vif->dev->stats.tx_bytes += skb->len;
1755 } 1755 }
1756 1756
1757 IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; 1757 IPCB(skb)->flags |= IPSKB_FORWARDED;
1758 1758
1759 /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally 1759 /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1760 * not only before forwarding, but after forwarding on all output 1760 * not only before forwarding, but after forwarding on all output
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index 7ab544fbc382..0af3d8df70dd 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr,
28 struct in_addr gw = { 28 struct in_addr gw = {
29 .s_addr = (__force __be32)regs->data[priv->sreg_addr], 29 .s_addr = (__force __be32)regs->data[priv->sreg_addr],
30 }; 30 };
31 int oif = regs->data[priv->sreg_dev]; 31 int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
32 32
33 nf_dup_ipv4(nft_net(pkt), pkt->skb, nft_hook(pkt), &gw, oif); 33 nf_dup_ipv4(nft_net(pkt), pkt->skb, nft_hook(pkt), &gw, oif);
34} 34}
@@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
59{ 59{
60 struct nft_dup_ipv4 *priv = nft_expr_priv(expr); 60 struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
61 61
62 if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || 62 if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
63 goto nla_put_failure;
64 if (priv->sreg_dev &&
63 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) 65 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
64 goto nla_put_failure; 66 goto nla_put_failure;
65 67
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2355883e1025..d37fc6f7e679 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -755,7 +755,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
755 goto reject_redirect; 755 goto reject_redirect;
756 } 756 }
757 757
758 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); 758 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
759 if (!n)
760 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
759 if (!IS_ERR(n)) { 761 if (!IS_ERR(n)) {
760 if (!(n->nud_state & NUD_VALID)) { 762 if (!(n->nud_state & NUD_VALID)) {
761 neigh_event_send(n, NULL); 763 neigh_event_send(n, NULL);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f8f924ca662d..b025a69ebd28 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1162,7 +1162,7 @@ restart:
1162 1162
1163 err = -EPIPE; 1163 err = -EPIPE;
1164 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1164 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1165 goto out_err; 1165 goto do_error;
1166 1166
1167 sg = !!(sk->sk_route_caps & NETIF_F_SG); 1167 sg = !!(sk->sk_route_caps & NETIF_F_SG);
1168 1168
@@ -1239,7 +1239,7 @@ new_segment:
1239 1239
1240 if (!skb_can_coalesce(skb, i, pfrag->page, 1240 if (!skb_can_coalesce(skb, i, pfrag->page,
1241 pfrag->offset)) { 1241 pfrag->offset)) {
1242 if (i == sysctl_max_skb_frags || !sg) { 1242 if (i >= sysctl_max_skb_frags || !sg) {
1243 tcp_mark_push(tp, skb); 1243 tcp_mark_push(tp, skb);
1244 goto new_segment; 1244 goto new_segment;
1245 } 1245 }
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 10d728b6804c..ab37c6775630 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -56,6 +56,7 @@ struct dctcp {
56 u32 next_seq; 56 u32 next_seq;
57 u32 ce_state; 57 u32 ce_state;
58 u32 delayed_ack_reserved; 58 u32 delayed_ack_reserved;
59 u32 loss_cwnd;
59}; 60};
60 61
61static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ 62static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
@@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk)
96 ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); 97 ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
97 98
98 ca->delayed_ack_reserved = 0; 99 ca->delayed_ack_reserved = 0;
100 ca->loss_cwnd = 0;
99 ca->ce_state = 0; 101 ca->ce_state = 0;
100 102
101 dctcp_reset(tp, ca); 103 dctcp_reset(tp, ca);
@@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk)
111 113
112static u32 dctcp_ssthresh(struct sock *sk) 114static u32 dctcp_ssthresh(struct sock *sk)
113{ 115{
114 const struct dctcp *ca = inet_csk_ca(sk); 116 struct dctcp *ca = inet_csk_ca(sk);
115 struct tcp_sock *tp = tcp_sk(sk); 117 struct tcp_sock *tp = tcp_sk(sk);
116 118
119 ca->loss_cwnd = tp->snd_cwnd;
117 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); 120 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
118} 121}
119 122
@@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
308 return 0; 311 return 0;
309} 312}
310 313
314static u32 dctcp_cwnd_undo(struct sock *sk)
315{
316 const struct dctcp *ca = inet_csk_ca(sk);
317
318 return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
319}
320
311static struct tcp_congestion_ops dctcp __read_mostly = { 321static struct tcp_congestion_ops dctcp __read_mostly = {
312 .init = dctcp_init, 322 .init = dctcp_init,
313 .in_ack_event = dctcp_update_alpha, 323 .in_ack_event = dctcp_update_alpha,
314 .cwnd_event = dctcp_cwnd_event, 324 .cwnd_event = dctcp_cwnd_event,
315 .ssthresh = dctcp_ssthresh, 325 .ssthresh = dctcp_ssthresh,
316 .cong_avoid = tcp_reno_cong_avoid, 326 .cong_avoid = tcp_reno_cong_avoid,
327 .undo_cwnd = dctcp_cwnd_undo,
317 .set_state = dctcp_state, 328 .set_state = dctcp_state,
318 .get_info = dctcp_get_info, 329 .get_info = dctcp_get_info,
319 .flags = TCP_CONG_NEEDS_ECN, 330 .flags = TCP_CONG_NEEDS_ECN,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6491b7c1f975..5555eb86e549 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1567,6 +1567,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1567} 1567}
1568EXPORT_SYMBOL(tcp_add_backlog); 1568EXPORT_SYMBOL(tcp_add_backlog);
1569 1569
1570int tcp_filter(struct sock *sk, struct sk_buff *skb)
1571{
1572 struct tcphdr *th = (struct tcphdr *)skb->data;
1573 unsigned int eaten = skb->len;
1574 int err;
1575
1576 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1577 if (!err) {
1578 eaten -= skb->len;
1579 TCP_SKB_CB(skb)->end_seq -= eaten;
1580 }
1581 return err;
1582}
1583EXPORT_SYMBOL(tcp_filter);
1584
1570/* 1585/*
1571 * From tcp_input.c 1586 * From tcp_input.c
1572 */ 1587 */
@@ -1679,8 +1694,10 @@ process:
1679 1694
1680 nf_reset(skb); 1695 nf_reset(skb);
1681 1696
1682 if (sk_filter(sk, skb)) 1697 if (tcp_filter(sk, skb))
1683 goto discard_and_relse; 1698 goto discard_and_relse;
1699 th = (const struct tcphdr *)skb->data;
1700 iph = ip_hdr(skb);
1684 1701
1685 skb->dev = NULL; 1702 skb->dev = NULL;
1686 1703
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ab249fee616b..eb948ffd734b 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -449,7 +449,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
449 if (__ipv6_addr_needs_scope_id(addr_type)) 449 if (__ipv6_addr_needs_scope_id(addr_type))
450 iif = skb->dev->ifindex; 450 iif = skb->dev->ifindex;
451 else 451 else
452 iif = l3mdev_master_ifindex(skb->dev); 452 iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
453 453
454 /* 454 /*
455 * Must not send error if the source does not uniquely 455 * Must not send error if the source does not uniquely
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ddc878d2cc6d..b37054b1873d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1367,7 +1367,7 @@ emsgsize:
1367 if (((length > mtu) || 1367 if (((length > mtu) ||
1368 (skb && skb_is_gso(skb))) && 1368 (skb && skb_is_gso(skb))) &&
1369 (sk->sk_protocol == IPPROTO_UDP) && 1369 (sk->sk_protocol == IPPROTO_UDP) &&
1370 (rt->dst.dev->features & NETIF_F_UFO) && 1370 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
1371 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { 1371 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1372 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1372 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1373 hh_len, fragheaderlen, exthdrlen, 1373 hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index a7520528ecd2..b283f293ee4a 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
88 88
89 uh->len = htons(skb->len); 89 uh->len = htons(skb->len);
90 90
91 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
92 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
93 | IPSKB_REROUTED);
94 skb_dst_set(skb, dst); 91 skb_dst_set(skb, dst);
95 92
96 udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); 93 udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index 26074a8bada7..d8b5b60b7d53 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr,
26{ 26{
27 struct nft_dup_ipv6 *priv = nft_expr_priv(expr); 27 struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
28 struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr]; 28 struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
29 int oif = regs->data[priv->sreg_dev]; 29 int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
30 30
31 nf_dup_ipv6(nft_net(pkt), pkt->skb, nft_hook(pkt), gw, oif); 31 nf_dup_ipv6(nft_net(pkt), pkt->skb, nft_hook(pkt), gw, oif);
32} 32}
@@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
57{ 57{
58 struct nft_dup_ipv6 *priv = nft_expr_priv(expr); 58 struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
59 59
60 if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || 60 if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
61 goto nla_put_failure;
62 if (priv->sreg_dev &&
61 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) 63 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
62 goto nla_put_failure; 64 goto nla_put_failure;
63 65
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6aa014eedccd..b317bb135ed4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1364,6 +1364,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1364 if (rt6->rt6i_flags & RTF_LOCAL) 1364 if (rt6->rt6i_flags & RTF_LOCAL)
1365 return; 1365 return;
1366 1366
1367 if (dst_metric_locked(dst, RTAX_MTU))
1368 return;
1369
1367 dst_confirm(dst); 1370 dst_confirm(dst);
1368 mtu = max_t(u32, mtu, IPV6_MIN_MTU); 1371 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
1369 if (mtu >= dst_mtu(dst)) 1372 if (mtu >= dst_mtu(dst))
@@ -2763,6 +2766,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2763 PMTU discouvery. 2766 PMTU discouvery.
2764 */ 2767 */
2765 if (rt->dst.dev == arg->dev && 2768 if (rt->dst.dev == arg->dev &&
2769 dst_metric_raw(&rt->dst, RTAX_MTU) &&
2766 !dst_metric_locked(&rt->dst, RTAX_MTU)) { 2770 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2767 if (rt->rt6i_flags & RTF_CACHE) { 2771 if (rt->rt6i_flags & RTF_CACHE) {
2768 /* For RTF_CACHE with rt6i_pmtu == 0 2772 /* For RTF_CACHE with rt6i_pmtu == 0
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index aece1b15e744..28ec0a2e7b72 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -819,8 +819,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
819 fl6.flowi6_proto = IPPROTO_TCP; 819 fl6.flowi6_proto = IPPROTO_TCP;
820 if (rt6_need_strict(&fl6.daddr) && !oif) 820 if (rt6_need_strict(&fl6.daddr) && !oif)
821 fl6.flowi6_oif = tcp_v6_iif(skb); 821 fl6.flowi6_oif = tcp_v6_iif(skb);
822 else 822 else {
823 fl6.flowi6_oif = oif ? : skb->skb_iif; 823 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
824 oif = skb->skb_iif;
825
826 fl6.flowi6_oif = oif;
827 }
824 828
825 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); 829 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
826 fl6.fl6_dport = t1->dest; 830 fl6.fl6_dport = t1->dest;
@@ -1227,7 +1231,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1227 if (skb->protocol == htons(ETH_P_IP)) 1231 if (skb->protocol == htons(ETH_P_IP))
1228 return tcp_v4_do_rcv(sk, skb); 1232 return tcp_v4_do_rcv(sk, skb);
1229 1233
1230 if (sk_filter(sk, skb)) 1234 if (tcp_filter(sk, skb))
1231 goto discard; 1235 goto discard;
1232 1236
1233 /* 1237 /*
@@ -1455,8 +1459,10 @@ process:
1455 if (tcp_v6_inbound_md5_hash(sk, skb)) 1459 if (tcp_v6_inbound_md5_hash(sk, skb))
1456 goto discard_and_relse; 1460 goto discard_and_relse;
1457 1461
1458 if (sk_filter(sk, skb)) 1462 if (tcp_filter(sk, skb))
1459 goto discard_and_relse; 1463 goto discard_and_relse;
1464 th = (const struct tcphdr *)skb->data;
1465 hdr = ipv6_hdr(skb);
1460 1466
1461 skb->dev = NULL; 1467 skb->dev = NULL;
1462 1468
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 6b85ded4f91d..038c2ba0ae0f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3869,7 +3869,7 @@ static struct genl_family ip_vs_genl_family __ro_after_init = {
3869 .hdrsize = 0, 3869 .hdrsize = 0,
3870 .name = IPVS_GENL_NAME, 3870 .name = IPVS_GENL_NAME,
3871 .version = IPVS_GENL_VERSION, 3871 .version = IPVS_GENL_VERSION,
3872 .maxattr = IPVS_CMD_MAX, 3872 .maxattr = IPVS_CMD_ATTR_MAX,
3873 .netnsok = true, /* Make ipvsadm to work on netns */ 3873 .netnsok = true, /* Make ipvsadm to work on netns */
3874 .module = THIS_MODULE, 3874 .module = THIS_MODULE,
3875 .ops = ip_vs_genl_ops, 3875 .ops = ip_vs_genl_ops,
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 1b07578bedf3..9350530c16c1 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -283,6 +283,7 @@ struct ip_vs_sync_buff {
283 */ 283 */
284static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) 284static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
285{ 285{
286 memset(ho, 0, sizeof(*ho));
286 ho->init_seq = get_unaligned_be32(&no->init_seq); 287 ho->init_seq = get_unaligned_be32(&no->init_seq);
287 ho->delta = get_unaligned_be32(&no->delta); 288 ho->delta = get_unaligned_be32(&no->delta);
288 ho->previous_delta = get_unaligned_be32(&no->previous_delta); 289 ho->previous_delta = get_unaligned_be32(&no->previous_delta);
@@ -917,8 +918,10 @@ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *pa
917 kfree(param->pe_data); 918 kfree(param->pe_data);
918 } 919 }
919 920
920 if (opt) 921 if (opt) {
921 memcpy(&cp->in_seq, opt, sizeof(*opt)); 922 cp->in_seq = opt->in_seq;
923 cp->out_seq = opt->out_seq;
924 }
922 atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); 925 atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
923 cp->state = state; 926 cp->state = state;
924 cp->old_state = cp->state; 927 cp->old_state = cp->state;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e9ffe33dc0ca..6a0bbfa8e702 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -76,6 +76,7 @@ struct conntrack_gc_work {
76 struct delayed_work dwork; 76 struct delayed_work dwork;
77 u32 last_bucket; 77 u32 last_bucket;
78 bool exiting; 78 bool exiting;
79 long next_gc_run;
79}; 80};
80 81
81static __read_mostly struct kmem_cache *nf_conntrack_cachep; 82static __read_mostly struct kmem_cache *nf_conntrack_cachep;
@@ -83,9 +84,11 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
83static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); 84static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
84static __read_mostly bool nf_conntrack_locks_all; 85static __read_mostly bool nf_conntrack_locks_all;
85 86
87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
86#define GC_MAX_BUCKETS_DIV 64u 88#define GC_MAX_BUCKETS_DIV 64u
87#define GC_MAX_BUCKETS 8192u 89/* upper bound of scan intervals */
88#define GC_INTERVAL (5 * HZ) 90#define GC_INTERVAL_MAX (2 * HZ)
91/* maximum conntracks to evict per gc run */
89#define GC_MAX_EVICTS 256u 92#define GC_MAX_EVICTS 256u
90 93
91static struct conntrack_gc_work conntrack_gc_work; 94static struct conntrack_gc_work conntrack_gc_work;
@@ -936,13 +939,13 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
936static void gc_worker(struct work_struct *work) 939static void gc_worker(struct work_struct *work)
937{ 940{
938 unsigned int i, goal, buckets = 0, expired_count = 0; 941 unsigned int i, goal, buckets = 0, expired_count = 0;
939 unsigned long next_run = GC_INTERVAL;
940 unsigned int ratio, scanned = 0;
941 struct conntrack_gc_work *gc_work; 942 struct conntrack_gc_work *gc_work;
943 unsigned int ratio, scanned = 0;
944 unsigned long next_run;
942 945
943 gc_work = container_of(work, struct conntrack_gc_work, dwork.work); 946 gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
944 947
945 goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS); 948 goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
946 i = gc_work->last_bucket; 949 i = gc_work->last_bucket;
947 950
948 do { 951 do {
@@ -982,17 +985,47 @@ static void gc_worker(struct work_struct *work)
982 if (gc_work->exiting) 985 if (gc_work->exiting)
983 return; 986 return;
984 987
988 /*
989 * Eviction will normally happen from the packet path, and not
990 * from this gc worker.
991 *
992 * This worker is only here to reap expired entries when system went
993 * idle after a busy period.
994 *
995 * The heuristics below are supposed to balance conflicting goals:
996 *
997 * 1. Minimize time until we notice a stale entry
998 * 2. Maximize scan intervals to not waste cycles
999 *
1000 * Normally, expired_count will be 0, this increases the next_run time
1001 * to priorize 2) above.
1002 *
1003 * As soon as a timed-out entry is found, move towards 1) and increase
1004 * the scan frequency.
1005 * In case we have lots of evictions next scan is done immediately.
1006 */
985 ratio = scanned ? expired_count * 100 / scanned : 0; 1007 ratio = scanned ? expired_count * 100 / scanned : 0;
986 if (ratio >= 90 || expired_count == GC_MAX_EVICTS) 1008 if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
1009 gc_work->next_gc_run = 0;
987 next_run = 0; 1010 next_run = 0;
1011 } else if (expired_count) {
1012 gc_work->next_gc_run /= 2U;
1013 next_run = msecs_to_jiffies(1);
1014 } else {
1015 if (gc_work->next_gc_run < GC_INTERVAL_MAX)
1016 gc_work->next_gc_run += msecs_to_jiffies(1);
1017
1018 next_run = gc_work->next_gc_run;
1019 }
988 1020
989 gc_work->last_bucket = i; 1021 gc_work->last_bucket = i;
990 schedule_delayed_work(&gc_work->dwork, next_run); 1022 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
991} 1023}
992 1024
993static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) 1025static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
994{ 1026{
995 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); 1027 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
1028 gc_work->next_gc_run = GC_INTERVAL_MAX;
996 gc_work->exiting = false; 1029 gc_work->exiting = false;
997} 1030}
998 1031
@@ -1884,7 +1917,7 @@ int nf_conntrack_init_start(void)
1884 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); 1917 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1885 1918
1886 conntrack_gc_work_init(&conntrack_gc_work); 1919 conntrack_gc_work_init(&conntrack_gc_work);
1887 schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL); 1920 queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
1888 1921
1889 return 0; 1922 return 0;
1890 1923
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 336e21559e01..7341adf7059d 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -138,9 +138,14 @@ __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
138 138
139 for (i = 0; i < nf_ct_helper_hsize; i++) { 139 for (i = 0; i < nf_ct_helper_hsize; i++) {
140 hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { 140 hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
141 if (!strcmp(h->name, name) && 141 if (strcmp(h->name, name))
142 h->tuple.src.l3num == l3num && 142 continue;
143 h->tuple.dst.protonum == protonum) 143
144 if (h->tuple.src.l3num != NFPROTO_UNSPEC &&
145 h->tuple.src.l3num != l3num)
146 continue;
147
148 if (h->tuple.dst.protonum == protonum)
144 return h; 149 return h;
145 } 150 }
146 } 151 }
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 621b81c7bddc..c3fc14e021ec 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1436,9 +1436,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
1436 handler = &sip_handlers[i]; 1436 handler = &sip_handlers[i];
1437 if (handler->request == NULL) 1437 if (handler->request == NULL)
1438 continue; 1438 continue;
1439 if (*datalen < handler->len || 1439 if (*datalen < handler->len + 2 ||
1440 strncasecmp(*dptr, handler->method, handler->len)) 1440 strncasecmp(*dptr, handler->method, handler->len))
1441 continue; 1441 continue;
1442 if ((*dptr)[handler->len] != ' ' ||
1443 !isalpha((*dptr)[handler->len+1]))
1444 continue;
1442 1445
1443 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, 1446 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
1444 &matchoff, &matchlen) <= 0) { 1447 &matchoff, &matchlen) <= 0) {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 24db22257586..026581b04ea8 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2956,12 +2956,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
2956 2956
2957 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); 2957 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
2958 if (err < 0) 2958 if (err < 0)
2959 goto err2; 2959 goto err3;
2960 2960
2961 list_add_tail_rcu(&set->list, &table->sets); 2961 list_add_tail_rcu(&set->list, &table->sets);
2962 table->use++; 2962 table->use++;
2963 return 0; 2963 return 0;
2964 2964
2965err3:
2966 ops->destroy(set);
2965err2: 2967err2:
2966 kfree(set); 2968 kfree(set);
2967err1: 2969err1:
@@ -3452,14 +3454,15 @@ void *nft_set_elem_init(const struct nft_set *set,
3452 return elem; 3454 return elem;
3453} 3455}
3454 3456
3455void nft_set_elem_destroy(const struct nft_set *set, void *elem) 3457void nft_set_elem_destroy(const struct nft_set *set, void *elem,
3458 bool destroy_expr)
3456{ 3459{
3457 struct nft_set_ext *ext = nft_set_elem_ext(set, elem); 3460 struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
3458 3461
3459 nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); 3462 nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE);
3460 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) 3463 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
3461 nft_data_uninit(nft_set_ext_data(ext), set->dtype); 3464 nft_data_uninit(nft_set_ext_data(ext), set->dtype);
3462 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) 3465 if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
3463 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); 3466 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
3464 3467
3465 kfree(elem); 3468 kfree(elem);
@@ -3565,6 +3568,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3565 dreg = nft_type_to_reg(set->dtype); 3568 dreg = nft_type_to_reg(set->dtype);
3566 list_for_each_entry(binding, &set->bindings, list) { 3569 list_for_each_entry(binding, &set->bindings, list) {
3567 struct nft_ctx bind_ctx = { 3570 struct nft_ctx bind_ctx = {
3571 .net = ctx->net,
3568 .afi = ctx->afi, 3572 .afi = ctx->afi,
3569 .table = ctx->table, 3573 .table = ctx->table,
3570 .chain = (struct nft_chain *)binding->chain, 3574 .chain = (struct nft_chain *)binding->chain,
@@ -3812,7 +3816,7 @@ void nft_set_gc_batch_release(struct rcu_head *rcu)
3812 3816
3813 gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); 3817 gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
3814 for (i = 0; i < gcb->head.cnt; i++) 3818 for (i = 0; i < gcb->head.cnt; i++)
3815 nft_set_elem_destroy(gcb->head.set, gcb->elems[i]); 3819 nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true);
3816 kfree(gcb); 3820 kfree(gcb);
3817} 3821}
3818EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); 3822EXPORT_SYMBOL_GPL(nft_set_gc_batch_release);
@@ -4030,7 +4034,7 @@ static void nf_tables_commit_release(struct nft_trans *trans)
4030 break; 4034 break;
4031 case NFT_MSG_DELSETELEM: 4035 case NFT_MSG_DELSETELEM:
4032 nft_set_elem_destroy(nft_trans_elem_set(trans), 4036 nft_set_elem_destroy(nft_trans_elem_set(trans),
4033 nft_trans_elem(trans).priv); 4037 nft_trans_elem(trans).priv, true);
4034 break; 4038 break;
4035 } 4039 }
4036 kfree(trans); 4040 kfree(trans);
@@ -4171,7 +4175,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
4171 break; 4175 break;
4172 case NFT_MSG_NEWSETELEM: 4176 case NFT_MSG_NEWSETELEM:
4173 nft_set_elem_destroy(nft_trans_elem_set(trans), 4177 nft_set_elem_destroy(nft_trans_elem_set(trans),
4174 nft_trans_elem(trans).priv); 4178 nft_trans_elem(trans).priv, true);
4175 break; 4179 break;
4176 } 4180 }
4177 kfree(trans); 4181 kfree(trans);
@@ -4421,7 +4425,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
4421 * Otherwise a 0 is returned and the attribute value is stored in the 4425 * Otherwise a 0 is returned and the attribute value is stored in the
4422 * destination variable. 4426 * destination variable.
4423 */ 4427 */
4424unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) 4428int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
4425{ 4429{
4426 u32 val; 4430 u32 val;
4427 4431
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 4339e3f1c4b1..7de2f46734a4 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -44,18 +44,22 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
44 &regs->data[priv->sreg_key], 44 &regs->data[priv->sreg_key],
45 &regs->data[priv->sreg_data], 45 &regs->data[priv->sreg_data],
46 timeout, GFP_ATOMIC); 46 timeout, GFP_ATOMIC);
47 if (elem == NULL) { 47 if (elem == NULL)
48 if (set->size) 48 goto err1;
49 atomic_dec(&set->nelems);
50 return NULL;
51 }
52 49
53 ext = nft_set_elem_ext(set, elem); 50 ext = nft_set_elem_ext(set, elem);
54 if (priv->expr != NULL && 51 if (priv->expr != NULL &&
55 nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) 52 nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
56 return NULL; 53 goto err2;
57 54
58 return elem; 55 return elem;
56
57err2:
58 nft_set_elem_destroy(set, elem, false);
59err1:
60 if (set->size)
61 atomic_dec(&set->nelems);
62 return NULL;
59} 63}
60 64
61static void nft_dynset_eval(const struct nft_expr *expr, 65static void nft_dynset_eval(const struct nft_expr *expr,
@@ -139,6 +143,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
139 return PTR_ERR(set); 143 return PTR_ERR(set);
140 } 144 }
141 145
146 if (set->ops->update == NULL)
147 return -EOPNOTSUPP;
148
142 if (set->flags & NFT_SET_CONSTANT) 149 if (set->flags & NFT_SET_CONSTANT)
143 return -EBUSY; 150 return -EBUSY;
144 151
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 3794cb2fc788..a3dface3e6e6 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -98,7 +98,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
98 const struct nft_set_ext **ext) 98 const struct nft_set_ext **ext)
99{ 99{
100 struct nft_hash *priv = nft_set_priv(set); 100 struct nft_hash *priv = nft_set_priv(set);
101 struct nft_hash_elem *he; 101 struct nft_hash_elem *he, *prev;
102 struct nft_hash_cmp_arg arg = { 102 struct nft_hash_cmp_arg arg = {
103 .genmask = NFT_GENMASK_ANY, 103 .genmask = NFT_GENMASK_ANY,
104 .set = set, 104 .set = set,
@@ -112,15 +112,24 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
112 he = new(set, expr, regs); 112 he = new(set, expr, regs);
113 if (he == NULL) 113 if (he == NULL)
114 goto err1; 114 goto err1;
115 if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node, 115
116 nft_hash_params)) 116 prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
117 nft_hash_params);
118 if (IS_ERR(prev))
117 goto err2; 119 goto err2;
120
121 /* Another cpu may race to insert the element with the same key */
122 if (prev) {
123 nft_set_elem_destroy(set, he, true);
124 he = prev;
125 }
126
118out: 127out:
119 *ext = &he->ext; 128 *ext = &he->ext;
120 return true; 129 return true;
121 130
122err2: 131err2:
123 nft_set_elem_destroy(set, he); 132 nft_set_elem_destroy(set, he, true);
124err1: 133err1:
125 return false; 134 return false;
126} 135}
@@ -332,7 +341,7 @@ static int nft_hash_init(const struct nft_set *set,
332 341
333static void nft_hash_elem_destroy(void *ptr, void *arg) 342static void nft_hash_elem_destroy(void *ptr, void *arg)
334{ 343{
335 nft_set_elem_destroy((const struct nft_set *)arg, ptr); 344 nft_set_elem_destroy((const struct nft_set *)arg, ptr, true);
336} 345}
337 346
338static void nft_hash_destroy(const struct nft_set *set) 347static void nft_hash_destroy(const struct nft_set *set)
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 38b5bda242f8..36493a7cae88 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -266,7 +266,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
266 while ((node = priv->root.rb_node) != NULL) { 266 while ((node = priv->root.rb_node) != NULL) {
267 rb_erase(node, &priv->root); 267 rb_erase(node, &priv->root);
268 rbe = rb_entry(node, struct nft_rbtree_elem, node); 268 rbe = rb_entry(node, struct nft_rbtree_elem, node);
269 nft_set_elem_destroy(set, rbe); 269 nft_set_elem_destroy(set, rbe, true);
270 } 270 }
271} 271}
272 272
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 69f78e96fdb4..b83e158e116a 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
44 u_int32_t newmark; 44 u_int32_t newmark;
45 45
46 ct = nf_ct_get(skb, &ctinfo); 46 ct = nf_ct_get(skb, &ctinfo);
47 if (ct == NULL) 47 if (ct == NULL || nf_ct_is_untracked(ct))
48 return XT_CONTINUE; 48 return XT_CONTINUE;
49 49
50 switch (info->mode) { 50 switch (info->mode) {
@@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
97 const struct nf_conn *ct; 97 const struct nf_conn *ct;
98 98
99 ct = nf_ct_get(skb, &ctinfo); 99 ct = nf_ct_get(skb, &ctinfo);
100 if (ct == NULL) 100 if (ct == NULL || nf_ct_is_untracked(ct))
101 return false; 101 return false;
102 102
103 return ((ct->mark & info->mask) == info->mark) ^ info->invert; 103 return ((ct->mark & info->mask) == info->mark) ^ info->invert;
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index b2f0e986a6f4..a5546249fb10 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -178,11 +178,8 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
178 } 178 }
179 cb->args[1] = i; 179 cb->args[1] = i;
180 } else { 180 } else {
181 if (req->sdiag_protocol >= MAX_LINKS) { 181 if (req->sdiag_protocol >= MAX_LINKS)
182 read_unlock(&nl_table_lock);
183 rcu_read_unlock();
184 return -ENOENT; 182 return -ENOENT;
185 }
186 183
187 err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); 184 err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
188 } 185 }
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index bbd3bff885a1..fb6e10fdb217 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -383,6 +383,7 @@ int genl_register_family(struct genl_family *family)
383 383
384errout_remove: 384errout_remove:
385 idr_remove(&genl_fam_idr, family->id); 385 idr_remove(&genl_fam_idr, family->id);
386 kfree(family->attrbuf);
386errout_locked: 387errout_locked:
387 genl_unlock_all(); 388 genl_unlock_all();
388 return err; 389 return err;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a2ea1d1cc06a..a01a56ec8b8c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -181,9 +181,10 @@ int sctp_rcv(struct sk_buff *skb)
181 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB 181 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
182 */ 182 */
183 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { 183 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
184 if (asoc) { 184 if (transport) {
185 sctp_association_put(asoc); 185 sctp_transport_put(transport);
186 asoc = NULL; 186 asoc = NULL;
187 transport = NULL;
187 } else { 188 } else {
188 sctp_endpoint_put(ep); 189 sctp_endpoint_put(ep);
189 ep = NULL; 190 ep = NULL;
@@ -269,8 +270,8 @@ int sctp_rcv(struct sk_buff *skb)
269 bh_unlock_sock(sk); 270 bh_unlock_sock(sk);
270 271
271 /* Release the asoc/ep ref we took in the lookup calls. */ 272 /* Release the asoc/ep ref we took in the lookup calls. */
272 if (asoc) 273 if (transport)
273 sctp_association_put(asoc); 274 sctp_transport_put(transport);
274 else 275 else
275 sctp_endpoint_put(ep); 276 sctp_endpoint_put(ep);
276 277
@@ -283,8 +284,8 @@ discard_it:
283 284
284discard_release: 285discard_release:
285 /* Release the asoc/ep ref we took in the lookup calls. */ 286 /* Release the asoc/ep ref we took in the lookup calls. */
286 if (asoc) 287 if (transport)
287 sctp_association_put(asoc); 288 sctp_transport_put(transport);
288 else 289 else
289 sctp_endpoint_put(ep); 290 sctp_endpoint_put(ep);
290 291
@@ -300,6 +301,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
300{ 301{
301 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 302 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
302 struct sctp_inq *inqueue = &chunk->rcvr->inqueue; 303 struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
304 struct sctp_transport *t = chunk->transport;
303 struct sctp_ep_common *rcvr = NULL; 305 struct sctp_ep_common *rcvr = NULL;
304 int backloged = 0; 306 int backloged = 0;
305 307
@@ -351,7 +353,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
351done: 353done:
352 /* Release the refs we took in sctp_add_backlog */ 354 /* Release the refs we took in sctp_add_backlog */
353 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 355 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
354 sctp_association_put(sctp_assoc(rcvr)); 356 sctp_transport_put(t);
355 else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 357 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
356 sctp_endpoint_put(sctp_ep(rcvr)); 358 sctp_endpoint_put(sctp_ep(rcvr));
357 else 359 else
@@ -363,6 +365,7 @@ done:
363static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) 365static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
364{ 366{
365 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 367 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
368 struct sctp_transport *t = chunk->transport;
366 struct sctp_ep_common *rcvr = chunk->rcvr; 369 struct sctp_ep_common *rcvr = chunk->rcvr;
367 int ret; 370 int ret;
368 371
@@ -373,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
373 * from us 376 * from us
374 */ 377 */
375 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 378 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
376 sctp_association_hold(sctp_assoc(rcvr)); 379 sctp_transport_hold(t);
377 else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 380 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
378 sctp_endpoint_hold(sctp_ep(rcvr)); 381 sctp_endpoint_hold(sctp_ep(rcvr));
379 else 382 else
@@ -537,15 +540,15 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
537 return sk; 540 return sk;
538 541
539out: 542out:
540 sctp_association_put(asoc); 543 sctp_transport_put(transport);
541 return NULL; 544 return NULL;
542} 545}
543 546
544/* Common cleanup code for icmp/icmpv6 error handler. */ 547/* Common cleanup code for icmp/icmpv6 error handler. */
545void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) 548void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
546{ 549{
547 bh_unlock_sock(sk); 550 bh_unlock_sock(sk);
548 sctp_association_put(asoc); 551 sctp_transport_put(t);
549} 552}
550 553
551/* 554/*
@@ -641,7 +644,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
641 } 644 }
642 645
643out_unlock: 646out_unlock:
644 sctp_err_finish(sk, asoc); 647 sctp_err_finish(sk, transport);
645} 648}
646 649
647/* 650/*
@@ -952,11 +955,8 @@ static struct sctp_association *__sctp_lookup_association(
952 goto out; 955 goto out;
953 956
954 asoc = t->asoc; 957 asoc = t->asoc;
955 sctp_association_hold(asoc);
956 *pt = t; 958 *pt = t;
957 959
958 sctp_transport_put(t);
959
960out: 960out:
961 return asoc; 961 return asoc;
962} 962}
@@ -986,7 +986,7 @@ int sctp_has_association(struct net *net,
986 struct sctp_transport *transport; 986 struct sctp_transport *transport;
987 987
988 if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { 988 if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
989 sctp_association_put(asoc); 989 sctp_transport_put(transport);
990 return 1; 990 return 1;
991 } 991 }
992 992
@@ -1021,7 +1021,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
1021 struct sctphdr *sh = sctp_hdr(skb); 1021 struct sctphdr *sh = sctp_hdr(skb);
1022 union sctp_params params; 1022 union sctp_params params;
1023 sctp_init_chunk_t *init; 1023 sctp_init_chunk_t *init;
1024 struct sctp_transport *transport;
1025 struct sctp_af *af; 1024 struct sctp_af *af;
1026 1025
1027 /* 1026 /*
@@ -1052,7 +1051,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
1052 1051
1053 af->from_addr_param(paddr, params.addr, sh->source, 0); 1052 af->from_addr_param(paddr, params.addr, sh->source, 0);
1054 1053
1055 asoc = __sctp_lookup_association(net, laddr, paddr, &transport); 1054 asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1056 if (asoc) 1055 if (asoc)
1057 return asoc; 1056 return asoc;
1058 } 1057 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f473779e8b1c..176af3080a2b 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -198,7 +198,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
198 } 198 }
199 199
200out_unlock: 200out_unlock:
201 sctp_err_finish(sk, asoc); 201 sctp_err_finish(sk, transport);
202out: 202out:
203 if (likely(idev != NULL)) 203 if (likely(idev != NULL))
204 in6_dev_put(idev); 204 in6_dev_put(idev);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9fbb6feb8c27..f23ad913dc7a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1214,9 +1214,12 @@ static int __sctp_connect(struct sock *sk,
1214 1214
1215 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1215 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1216 1216
1217 err = sctp_wait_for_connect(asoc, &timeo); 1217 if (assoc_id)
1218 if ((err == 0 || err == -EINPROGRESS) && assoc_id)
1219 *assoc_id = asoc->assoc_id; 1218 *assoc_id = asoc->assoc_id;
1219 err = sctp_wait_for_connect(asoc, &timeo);
1220 /* Note: the asoc may be freed after the return of
1221 * sctp_wait_for_connect.
1222 */
1220 1223
1221 /* Don't free association on exit. */ 1224 /* Don't free association on exit. */
1222 asoc = NULL; 1225 asoc = NULL;
@@ -4282,19 +4285,18 @@ static void sctp_shutdown(struct sock *sk, int how)
4282{ 4285{
4283 struct net *net = sock_net(sk); 4286 struct net *net = sock_net(sk);
4284 struct sctp_endpoint *ep; 4287 struct sctp_endpoint *ep;
4285 struct sctp_association *asoc;
4286 4288
4287 if (!sctp_style(sk, TCP)) 4289 if (!sctp_style(sk, TCP))
4288 return; 4290 return;
4289 4291
4290 if (how & SEND_SHUTDOWN) { 4292 ep = sctp_sk(sk)->ep;
4293 if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
4294 struct sctp_association *asoc;
4295
4291 sk->sk_state = SCTP_SS_CLOSING; 4296 sk->sk_state = SCTP_SS_CLOSING;
4292 ep = sctp_sk(sk)->ep; 4297 asoc = list_entry(ep->asocs.next,
4293 if (!list_empty(&ep->asocs)) { 4298 struct sctp_association, asocs);
4294 asoc = list_entry(ep->asocs.next, 4299 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4295 struct sctp_association, asocs);
4296 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4297 }
4298 } 4300 }
4299} 4301}
4300 4302
@@ -4480,12 +4482,9 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
4480 if (!transport || !sctp_transport_hold(transport)) 4482 if (!transport || !sctp_transport_hold(transport))
4481 goto out; 4483 goto out;
4482 4484
4483 sctp_association_hold(transport->asoc);
4484 sctp_transport_put(transport);
4485
4486 rcu_read_unlock(); 4485 rcu_read_unlock();
4487 err = cb(transport, p); 4486 err = cb(transport, p);
4488 sctp_association_put(transport->asoc); 4487 sctp_transport_put(transport);
4489 4488
4490out: 4489out:
4491 return err; 4490 return err;
diff --git a/net/socket.c b/net/socket.c
index 4ce33c35e606..f9e26c68c3cf 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2064,6 +2064,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2064 if (err) 2064 if (err)
2065 break; 2065 break;
2066 ++datagrams; 2066 ++datagrams;
2067 if (msg_data_left(&msg_sys))
2068 break;
2067 cond_resched(); 2069 cond_resched();
2068 } 2070 }
2069 2071
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index d8bd97a5a7c9..3dfd769dc5b5 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1616,7 +1616,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
1616{ 1616{
1617 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1617 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1618 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1618 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1619 __be32 seq; 1619 __be32 *seq = NULL;
1620 struct kvec iov; 1620 struct kvec iov;
1621 struct xdr_buf verf_buf; 1621 struct xdr_buf verf_buf;
1622 struct xdr_netobj mic; 1622 struct xdr_netobj mic;
@@ -1631,9 +1631,12 @@ gss_validate(struct rpc_task *task, __be32 *p)
1631 goto out_bad; 1631 goto out_bad;
1632 if (flav != RPC_AUTH_GSS) 1632 if (flav != RPC_AUTH_GSS)
1633 goto out_bad; 1633 goto out_bad;
1634 seq = htonl(task->tk_rqstp->rq_seqno); 1634 seq = kmalloc(4, GFP_NOFS);
1635 iov.iov_base = &seq; 1635 if (!seq)
1636 iov.iov_len = sizeof(seq); 1636 goto out_bad;
1637 *seq = htonl(task->tk_rqstp->rq_seqno);
1638 iov.iov_base = seq;
1639 iov.iov_len = 4;
1637 xdr_buf_from_iov(&iov, &verf_buf); 1640 xdr_buf_from_iov(&iov, &verf_buf);
1638 mic.data = (u8 *)p; 1641 mic.data = (u8 *)p;
1639 mic.len = len; 1642 mic.len = len;
@@ -1653,11 +1656,13 @@ gss_validate(struct rpc_task *task, __be32 *p)
1653 gss_put_ctx(ctx); 1656 gss_put_ctx(ctx);
1654 dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n", 1657 dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
1655 task->tk_pid, __func__); 1658 task->tk_pid, __func__);
1659 kfree(seq);
1656 return p + XDR_QUADLEN(len); 1660 return p + XDR_QUADLEN(len);
1657out_bad: 1661out_bad:
1658 gss_put_ctx(ctx); 1662 gss_put_ctx(ctx);
1659 dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__, 1663 dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
1660 PTR_ERR(ret)); 1664 PTR_ERR(ret));
1665 kfree(seq);
1661 return ret; 1666 return ret;
1662} 1667}
1663 1668
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 244245bcbbd2..90115ceefd49 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -166,8 +166,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
166 unsigned int usage, struct xdr_netobj *cksumout) 166 unsigned int usage, struct xdr_netobj *cksumout)
167{ 167{
168 struct scatterlist sg[1]; 168 struct scatterlist sg[1];
169 int err; 169 int err = -1;
170 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 170 u8 *checksumdata;
171 u8 rc4salt[4]; 171 u8 rc4salt[4];
172 struct crypto_ahash *md5; 172 struct crypto_ahash *md5;
173 struct crypto_ahash *hmac_md5; 173 struct crypto_ahash *hmac_md5;
@@ -187,23 +187,22 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
187 return GSS_S_FAILURE; 187 return GSS_S_FAILURE;
188 } 188 }
189 189
190 checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
191 if (!checksumdata)
192 return GSS_S_FAILURE;
193
190 md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 194 md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
191 if (IS_ERR(md5)) 195 if (IS_ERR(md5))
192 return GSS_S_FAILURE; 196 goto out_free_cksum;
193 197
194 hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, 198 hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
195 CRYPTO_ALG_ASYNC); 199 CRYPTO_ALG_ASYNC);
196 if (IS_ERR(hmac_md5)) { 200 if (IS_ERR(hmac_md5))
197 crypto_free_ahash(md5); 201 goto out_free_md5;
198 return GSS_S_FAILURE;
199 }
200 202
201 req = ahash_request_alloc(md5, GFP_KERNEL); 203 req = ahash_request_alloc(md5, GFP_KERNEL);
202 if (!req) { 204 if (!req)
203 crypto_free_ahash(hmac_md5); 205 goto out_free_hmac_md5;
204 crypto_free_ahash(md5);
205 return GSS_S_FAILURE;
206 }
207 206
208 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 207 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
209 208
@@ -232,11 +231,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
232 231
233 ahash_request_free(req); 232 ahash_request_free(req);
234 req = ahash_request_alloc(hmac_md5, GFP_KERNEL); 233 req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
235 if (!req) { 234 if (!req)
236 crypto_free_ahash(hmac_md5); 235 goto out_free_hmac_md5;
237 crypto_free_ahash(md5);
238 return GSS_S_FAILURE;
239 }
240 236
241 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 237 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
242 238
@@ -258,8 +254,12 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
258 cksumout->len = kctx->gk5e->cksumlength; 254 cksumout->len = kctx->gk5e->cksumlength;
259out: 255out:
260 ahash_request_free(req); 256 ahash_request_free(req);
261 crypto_free_ahash(md5); 257out_free_hmac_md5:
262 crypto_free_ahash(hmac_md5); 258 crypto_free_ahash(hmac_md5);
259out_free_md5:
260 crypto_free_ahash(md5);
261out_free_cksum:
262 kfree(checksumdata);
263 return err ? GSS_S_FAILURE : 0; 263 return err ? GSS_S_FAILURE : 0;
264} 264}
265 265
@@ -276,8 +276,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
276 struct crypto_ahash *tfm; 276 struct crypto_ahash *tfm;
277 struct ahash_request *req; 277 struct ahash_request *req;
278 struct scatterlist sg[1]; 278 struct scatterlist sg[1];
279 int err; 279 int err = -1;
280 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 280 u8 *checksumdata;
281 unsigned int checksumlen; 281 unsigned int checksumlen;
282 282
283 if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) 283 if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
@@ -291,15 +291,17 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
291 return GSS_S_FAILURE; 291 return GSS_S_FAILURE;
292 } 292 }
293 293
294 checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
295 if (checksumdata == NULL)
296 return GSS_S_FAILURE;
297
294 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 298 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
295 if (IS_ERR(tfm)) 299 if (IS_ERR(tfm))
296 return GSS_S_FAILURE; 300 goto out_free_cksum;
297 301
298 req = ahash_request_alloc(tfm, GFP_KERNEL); 302 req = ahash_request_alloc(tfm, GFP_KERNEL);
299 if (!req) { 303 if (!req)
300 crypto_free_ahash(tfm); 304 goto out_free_ahash;
301 return GSS_S_FAILURE;
302 }
303 305
304 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 306 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
305 307
@@ -349,7 +351,10 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
349 cksumout->len = kctx->gk5e->cksumlength; 351 cksumout->len = kctx->gk5e->cksumlength;
350out: 352out:
351 ahash_request_free(req); 353 ahash_request_free(req);
354out_free_ahash:
352 crypto_free_ahash(tfm); 355 crypto_free_ahash(tfm);
356out_free_cksum:
357 kfree(checksumdata);
353 return err ? GSS_S_FAILURE : 0; 358 return err ? GSS_S_FAILURE : 0;
354} 359}
355 360
@@ -368,8 +373,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
368 struct crypto_ahash *tfm; 373 struct crypto_ahash *tfm;
369 struct ahash_request *req; 374 struct ahash_request *req;
370 struct scatterlist sg[1]; 375 struct scatterlist sg[1];
371 int err; 376 int err = -1;
372 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 377 u8 *checksumdata;
373 unsigned int checksumlen; 378 unsigned int checksumlen;
374 379
375 if (kctx->gk5e->keyed_cksum == 0) { 380 if (kctx->gk5e->keyed_cksum == 0) {
@@ -383,16 +388,18 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
383 return GSS_S_FAILURE; 388 return GSS_S_FAILURE;
384 } 389 }
385 390
391 checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
392 if (!checksumdata)
393 return GSS_S_FAILURE;
394
386 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 395 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
387 if (IS_ERR(tfm)) 396 if (IS_ERR(tfm))
388 return GSS_S_FAILURE; 397 goto out_free_cksum;
389 checksumlen = crypto_ahash_digestsize(tfm); 398 checksumlen = crypto_ahash_digestsize(tfm);
390 399
391 req = ahash_request_alloc(tfm, GFP_KERNEL); 400 req = ahash_request_alloc(tfm, GFP_KERNEL);
392 if (!req) { 401 if (!req)
393 crypto_free_ahash(tfm); 402 goto out_free_ahash;
394 return GSS_S_FAILURE;
395 }
396 403
397 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 404 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
398 405
@@ -433,7 +440,10 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
433 } 440 }
434out: 441out:
435 ahash_request_free(req); 442 ahash_request_free(req);
443out_free_ahash:
436 crypto_free_ahash(tfm); 444 crypto_free_ahash(tfm);
445out_free_cksum:
446 kfree(checksumdata);
437 return err ? GSS_S_FAILURE : 0; 447 return err ? GSS_S_FAILURE : 0;
438} 448}
439 449
@@ -666,14 +676,17 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
666 u32 ret; 676 u32 ret;
667 struct scatterlist sg[1]; 677 struct scatterlist sg[1];
668 SKCIPHER_REQUEST_ON_STACK(req, cipher); 678 SKCIPHER_REQUEST_ON_STACK(req, cipher);
669 u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2]; 679 u8 *data;
670 struct page **save_pages; 680 struct page **save_pages;
671 u32 len = buf->len - offset; 681 u32 len = buf->len - offset;
672 682
673 if (len > ARRAY_SIZE(data)) { 683 if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
674 WARN_ON(0); 684 WARN_ON(0);
675 return -ENOMEM; 685 return -ENOMEM;
676 } 686 }
687 data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
688 if (!data)
689 return -ENOMEM;
677 690
678 /* 691 /*
679 * For encryption, we want to read from the cleartext 692 * For encryption, we want to read from the cleartext
@@ -708,6 +721,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
708 ret = write_bytes_to_xdr_buf(buf, offset, data, len); 721 ret = write_bytes_to_xdr_buf(buf, offset, data, len);
709 722
710out: 723out:
724 kfree(data);
711 return ret; 725 return ret;
712} 726}
713 727
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d67f7e1bc82d..45662d7f0943 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -718,30 +718,37 @@ gss_write_null_verf(struct svc_rqst *rqstp)
718static int 718static int
719gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) 719gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
720{ 720{
721 __be32 xdr_seq; 721 __be32 *xdr_seq;
722 u32 maj_stat; 722 u32 maj_stat;
723 struct xdr_buf verf_data; 723 struct xdr_buf verf_data;
724 struct xdr_netobj mic; 724 struct xdr_netobj mic;
725 __be32 *p; 725 __be32 *p;
726 struct kvec iov; 726 struct kvec iov;
727 int err = -1;
727 728
728 svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS); 729 svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
729 xdr_seq = htonl(seq); 730 xdr_seq = kmalloc(4, GFP_KERNEL);
731 if (!xdr_seq)
732 return -1;
733 *xdr_seq = htonl(seq);
730 734
731 iov.iov_base = &xdr_seq; 735 iov.iov_base = xdr_seq;
732 iov.iov_len = sizeof(xdr_seq); 736 iov.iov_len = 4;
733 xdr_buf_from_iov(&iov, &verf_data); 737 xdr_buf_from_iov(&iov, &verf_data);
734 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; 738 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
735 mic.data = (u8 *)(p + 1); 739 mic.data = (u8 *)(p + 1);
736 maj_stat = gss_get_mic(ctx_id, &verf_data, &mic); 740 maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
737 if (maj_stat != GSS_S_COMPLETE) 741 if (maj_stat != GSS_S_COMPLETE)
738 return -1; 742 goto out;
739 *p++ = htonl(mic.len); 743 *p++ = htonl(mic.len);
740 memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len); 744 memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
741 p += XDR_QUADLEN(mic.len); 745 p += XDR_QUADLEN(mic.len);
742 if (!xdr_ressize_check(rqstp, p)) 746 if (!xdr_ressize_check(rqstp, p))
743 return -1; 747 goto out;
744 return 0; 748 err = 0;
749out:
750 kfree(xdr_seq);
751 return err;
745} 752}
746 753
747struct gss_domain { 754struct gss_domain {
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 34dd7b26ee5f..62a482790937 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2753,14 +2753,18 @@ EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
2753 2753
2754void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) 2754void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
2755{ 2755{
2756 rcu_read_lock();
2756 xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 2757 xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
2758 rcu_read_unlock();
2757} 2759}
2758EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put); 2760EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);
2759 2761
2760void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 2762void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
2761{ 2763{
2764 rcu_read_lock();
2762 rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), 2765 rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
2763 xprt); 2766 xprt);
2767 rcu_read_unlock();
2764} 2768}
2765EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); 2769EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);
2766 2770
@@ -2770,9 +2774,8 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
2770 struct rpc_xprt_switch *xps; 2774 struct rpc_xprt_switch *xps;
2771 bool ret; 2775 bool ret;
2772 2776
2773 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
2774
2775 rcu_read_lock(); 2777 rcu_read_lock();
2778 xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
2776 ret = rpc_xprt_switch_has_addr(xps, sap); 2779 ret = rpc_xprt_switch_has_addr(xps, sap);
2777 rcu_read_unlock(); 2780 rcu_read_unlock();
2778 return ret; 2781 return ret;
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 210949562786..26b26beef2d4 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -44,18 +44,20 @@
44 * being done. 44 * being done.
45 * 45 *
46 * When the underlying transport disconnects, MRs are left in one of 46 * When the underlying transport disconnects, MRs are left in one of
47 * three states: 47 * four states:
48 * 48 *
49 * INVALID: The MR was not in use before the QP entered ERROR state. 49 * INVALID: The MR was not in use before the QP entered ERROR state.
50 * (Or, the LOCAL_INV WR has not completed or flushed yet).
51 *
52 * STALE: The MR was being registered or unregistered when the QP
53 * entered ERROR state, and the pending WR was flushed.
54 * 50 *
55 * VALID: The MR was registered before the QP entered ERROR state. 51 * VALID: The MR was registered before the QP entered ERROR state.
56 * 52 *
57 * When frwr_op_map encounters STALE and VALID MRs, they are recovered 53 * FLUSHED_FR: The MR was being registered when the QP entered ERROR
58 * with ib_dereg_mr and then are re-initialized. Beause MR recovery 54 * state, and the pending WR was flushed.
55 *
56 * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
57 * state, and the pending WR was flushed.
58 *
59 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
60 * with ib_dereg_mr and then are re-initialized. Because MR recovery
59 * allocates fresh resources, it is deferred to a workqueue, and the 61 * allocates fresh resources, it is deferred to a workqueue, and the
60 * recovered MRs are placed back on the rb_mws list when recovery is 62 * recovered MRs are placed back on the rb_mws list when recovery is
61 * complete. frwr_op_map allocates another MR for the current RPC while 63 * complete. frwr_op_map allocates another MR for the current RPC while
@@ -177,12 +179,15 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
177static void 179static void
178frwr_op_recover_mr(struct rpcrdma_mw *mw) 180frwr_op_recover_mr(struct rpcrdma_mw *mw)
179{ 181{
182 enum rpcrdma_frmr_state state = mw->frmr.fr_state;
180 struct rpcrdma_xprt *r_xprt = mw->mw_xprt; 183 struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
181 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 184 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
182 int rc; 185 int rc;
183 186
184 rc = __frwr_reset_mr(ia, mw); 187 rc = __frwr_reset_mr(ia, mw);
185 ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir); 188 if (state != FRMR_FLUSHED_LI)
189 ib_dma_unmap_sg(ia->ri_device,
190 mw->mw_sg, mw->mw_nents, mw->mw_dir);
186 if (rc) 191 if (rc)
187 goto out_release; 192 goto out_release;
188 193
@@ -262,10 +267,8 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
262} 267}
263 268
264static void 269static void
265__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr, 270__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
266 const char *wr)
267{ 271{
268 frmr->fr_state = FRMR_IS_STALE;
269 if (wc->status != IB_WC_WR_FLUSH_ERR) 272 if (wc->status != IB_WC_WR_FLUSH_ERR)
270 pr_err("rpcrdma: %s: %s (%u/0x%x)\n", 273 pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
271 wr, ib_wc_status_msg(wc->status), 274 wr, ib_wc_status_msg(wc->status),
@@ -288,7 +291,8 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
288 if (wc->status != IB_WC_SUCCESS) { 291 if (wc->status != IB_WC_SUCCESS) {
289 cqe = wc->wr_cqe; 292 cqe = wc->wr_cqe;
290 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 293 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
291 __frwr_sendcompletion_flush(wc, frmr, "fastreg"); 294 frmr->fr_state = FRMR_FLUSHED_FR;
295 __frwr_sendcompletion_flush(wc, "fastreg");
292 } 296 }
293} 297}
294 298
@@ -308,7 +312,8 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
308 if (wc->status != IB_WC_SUCCESS) { 312 if (wc->status != IB_WC_SUCCESS) {
309 cqe = wc->wr_cqe; 313 cqe = wc->wr_cqe;
310 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 314 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
311 __frwr_sendcompletion_flush(wc, frmr, "localinv"); 315 frmr->fr_state = FRMR_FLUSHED_LI;
316 __frwr_sendcompletion_flush(wc, "localinv");
312 } 317 }
313} 318}
314 319
@@ -328,8 +333,10 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
328 /* WARNING: Only wr_cqe and status are reliable at this point */ 333 /* WARNING: Only wr_cqe and status are reliable at this point */
329 cqe = wc->wr_cqe; 334 cqe = wc->wr_cqe;
330 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 335 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
331 if (wc->status != IB_WC_SUCCESS) 336 if (wc->status != IB_WC_SUCCESS) {
332 __frwr_sendcompletion_flush(wc, frmr, "localinv"); 337 frmr->fr_state = FRMR_FLUSHED_LI;
338 __frwr_sendcompletion_flush(wc, "localinv");
339 }
333 complete(&frmr->fr_linv_done); 340 complete(&frmr->fr_linv_done);
334} 341}
335 342
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 2d8545c34095..20027f8de129 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -177,18 +177,26 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
177 return -EINVAL; 177 return -EINVAL;
178 } 178 }
179 179
180 /* svc_rdma_sendto releases this page */
180 page = alloc_page(RPCRDMA_DEF_GFP); 181 page = alloc_page(RPCRDMA_DEF_GFP);
181 if (!page) 182 if (!page)
182 return -ENOMEM; 183 return -ENOMEM;
183
184 rqst->rq_buffer = page_address(page); 184 rqst->rq_buffer = page_address(page);
185
186 rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
187 if (!rqst->rq_rbuffer) {
188 put_page(page);
189 return -ENOMEM;
190 }
185 return 0; 191 return 0;
186} 192}
187 193
188static void 194static void
189xprt_rdma_bc_free(struct rpc_task *task) 195xprt_rdma_bc_free(struct rpc_task *task)
190{ 196{
191 /* No-op: ctxt and page have already been freed. */ 197 struct rpc_rqst *rqst = task->tk_rqstp;
198
199 kfree(rqst->rq_rbuffer);
192} 200}
193 201
194static int 202static int
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0d35b761c883..6e1bba358203 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -216,7 +216,8 @@ struct rpcrdma_rep {
216enum rpcrdma_frmr_state { 216enum rpcrdma_frmr_state {
217 FRMR_IS_INVALID, /* ready to be used */ 217 FRMR_IS_INVALID, /* ready to be used */
218 FRMR_IS_VALID, /* in use */ 218 FRMR_IS_VALID, /* in use */
219 FRMR_IS_STALE, /* failed completion */ 219 FRMR_FLUSHED_FR, /* flushed FASTREG WR */
220 FRMR_FLUSHED_LI, /* flushed LOCALINV WR */
220}; 221};
221 222
222struct rpcrdma_frmr { 223struct rpcrdma_frmr {
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7178d0aa7861..af392d9b9cec 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2563,6 +2563,7 @@ static int bc_malloc(struct rpc_task *task)
2563 buf->len = PAGE_SIZE; 2563 buf->len = PAGE_SIZE;
2564 2564
2565 rqst->rq_buffer = buf->data; 2565 rqst->rq_buffer = buf->data;
2566 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2566 return 0; 2567 return 0;
2567} 2568}
2568 2569
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 87620183910e..6a705d0ff889 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2812,7 +2812,8 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2812 i++; 2812 i++;
2813 } 2813 }
2814 for ( ; i < len; i++) 2814 for ( ; i < len; i++)
2815 seq_putc(seq, u->addr->name->sun_path[i]); 2815 seq_putc(seq, u->addr->name->sun_path[i] ?:
2816 '@');
2816 } 2817 }
2817 unix_state_unlock(s); 2818 unix_state_unlock(s);
2818 seq_putc(seq, '\n'); 2819 seq_putc(seq, '\n');
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 5c53fdb67ca7..ac87f9c068ae 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -26,6 +26,7 @@ hostprogs-y += xdp2
26hostprogs-y += test_current_task_under_cgroup 26hostprogs-y += test_current_task_under_cgroup
27hostprogs-y += trace_event 27hostprogs-y += trace_event
28hostprogs-y += sampleip 28hostprogs-y += sampleip
29hostprogs-y += tc_l2_redirect
29 30
30sock_example-objs := sock_example.o libbpf.o 31sock_example-objs := sock_example.o libbpf.o
31fds_example-objs := bpf_load.o libbpf.o fds_example.o 32fds_example-objs := bpf_load.o libbpf.o fds_example.o
@@ -53,6 +54,7 @@ test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \
53 test_current_task_under_cgroup_user.o 54 test_current_task_under_cgroup_user.o
54trace_event-objs := bpf_load.o libbpf.o trace_event_user.o 55trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
55sampleip-objs := bpf_load.o libbpf.o sampleip_user.o 56sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
57tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o
56 58
57# Tell kbuild to always build the programs 59# Tell kbuild to always build the programs
58always := $(hostprogs-y) 60always := $(hostprogs-y)
@@ -69,6 +71,7 @@ always += test_probe_write_user_kern.o
69always += trace_output_kern.o 71always += trace_output_kern.o
70always += tcbpf1_kern.o 72always += tcbpf1_kern.o
71always += tcbpf2_kern.o 73always += tcbpf2_kern.o
74always += tc_l2_redirect_kern.o
72always += lathist_kern.o 75always += lathist_kern.o
73always += offwaketime_kern.o 76always += offwaketime_kern.o
74always += spintest_kern.o 77always += spintest_kern.o
@@ -108,6 +111,7 @@ HOSTLOADLIBES_xdp2 += -lelf
108HOSTLOADLIBES_test_current_task_under_cgroup += -lelf 111HOSTLOADLIBES_test_current_task_under_cgroup += -lelf
109HOSTLOADLIBES_trace_event += -lelf 112HOSTLOADLIBES_trace_event += -lelf
110HOSTLOADLIBES_sampleip += -lelf 113HOSTLOADLIBES_sampleip += -lelf
114HOSTLOADLIBES_tc_l2_redirect += -l elf
111 115
112# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: 116# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
113# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang 117# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh
new file mode 100755
index 000000000000..80a05591a140
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect.sh
@@ -0,0 +1,173 @@
1#!/bin/bash
2
3[[ -z $TC ]] && TC='tc'
4[[ -z $IP ]] && IP='ip'
5
6REDIRECT_USER='./tc_l2_redirect'
7REDIRECT_BPF='./tc_l2_redirect_kern.o'
8
9RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter)
10IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding)
11
12function config_common {
13 local tun_type=$1
14
15 $IP netns add ns1
16 $IP netns add ns2
17 $IP link add ve1 type veth peer name vens1
18 $IP link add ve2 type veth peer name vens2
19 $IP link set dev ve1 up
20 $IP link set dev ve2 up
21 $IP link set dev ve1 mtu 1500
22 $IP link set dev ve2 mtu 1500
23 $IP link set dev vens1 netns ns1
24 $IP link set dev vens2 netns ns2
25
26 $IP -n ns1 link set dev lo up
27 $IP -n ns1 link set dev vens1 up
28 $IP -n ns1 addr add 10.1.1.101/24 dev vens1
29 $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad
30 $IP -n ns1 route add default via 10.1.1.1 dev vens1
31 $IP -n ns1 route add default via 2401:db01::1 dev vens1
32
33 $IP -n ns2 link set dev lo up
34 $IP -n ns2 link set dev vens2 up
35 $IP -n ns2 addr add 10.2.1.102/24 dev vens2
36 $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad
37 $IP -n ns2 addr add 10.10.1.102 dev lo
38 $IP -n ns2 addr add 2401:face::66/64 dev lo nodad
39 $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1
40 $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1
41 $IP -n ns2 link set dev ipt2 up
42 $IP -n ns2 link set dev ip6t2 up
43 $IP netns exec ns2 $TC qdisc add dev vens2 clsact
44 $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip
45 if [[ $tun_type == "ipip" ]]; then
46 $IP -n ns2 route add 10.1.1.0/24 dev ipt2
47 $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
48 $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0
49 else
50 $IP -n ns2 route add 10.1.1.0/24 dev ip6t2
51 $IP -n ns2 route add 2401:db01::/64 dev ip6t2
52 $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
53 $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0
54 fi
55
56 $IP addr add 10.1.1.1/24 dev ve1
57 $IP addr add 2401:db01::1/64 dev ve1 nodad
58 $IP addr add 10.2.1.1/24 dev ve2
59 $IP addr add 2401:db02::1/64 dev ve2 nodad
60
61 $TC qdisc add dev ve2 clsact
62 $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward
63
64 sysctl -q -w net.ipv4.conf.all.rp_filter=0
65 sysctl -q -w net.ipv6.conf.all.forwarding=1
66}
67
68function cleanup {
69 set +e
70 [[ -z $DEBUG ]] || set +x
71 $IP netns delete ns1 >& /dev/null
72 $IP netns delete ns2 >& /dev/null
73 $IP link del ve1 >& /dev/null
74 $IP link del ve2 >& /dev/null
75 $IP link del ipt >& /dev/null
76 $IP link del ip6t >& /dev/null
77 sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER
78 sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING
79 rm -f /sys/fs/bpf/tc/globals/tun_iface
80 [[ -z $DEBUG ]] || set -x
81 set -e
82}
83
84function l2_to_ipip {
85 echo -n "l2_to_ipip $1: "
86
87 local dir=$1
88
89 config_common ipip
90
91 $IP link add ipt type ipip external
92 $IP link set dev ipt up
93 sysctl -q -w net.ipv4.conf.ipt.rp_filter=0
94 sysctl -q -w net.ipv4.conf.ipt.forwarding=1
95
96 if [[ $dir == "egress" ]]; then
97 $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
98 $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
99 sysctl -q -w net.ipv4.conf.ve1.forwarding=1
100 else
101 $TC qdisc add dev ve1 clsact
102 $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
103 fi
104
105 $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex)
106
107 $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
108
109 if [[ $dir == "egress" ]]; then
110 # test direct egress to ve2 (i.e. not forwarding from
111 # ve1 to ve2).
112 ping -c1 10.10.1.102 >& /dev/null
113 fi
114
115 cleanup
116
117 echo "OK"
118}
119
120function l2_to_ip6tnl {
121 echo -n "l2_to_ip6tnl $1: "
122
123 local dir=$1
124
125 config_common ip6tnl
126
127 $IP link add ip6t type ip6tnl mode any external
128 $IP link set dev ip6t up
129 sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0
130 sysctl -q -w net.ipv4.conf.ip6t.forwarding=1
131
132 if [[ $dir == "egress" ]]; then
133 $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
134 $IP route add 2401:face::/64 via 2401:db02::66 dev ve2
135 $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
136 sysctl -q -w net.ipv4.conf.ve1.forwarding=1
137 else
138 $TC qdisc add dev ve1 clsact
139 $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
140 fi
141
142 $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex)
143
144 $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
145 $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null
146
147 if [[ $dir == "egress" ]]; then
148 # test direct egress to ve2 (i.e. not forwarding from
149 # ve1 to ve2).
150 ping -c1 10.10.1.102 >& /dev/null
151 ping -6 -c1 2401:face::66 >& /dev/null
152 fi
153
154 cleanup
155
156 echo "OK"
157}
158
159cleanup
160test_names="l2_to_ipip l2_to_ip6tnl"
161test_dirs="ingress egress"
162if [[ $# -ge 2 ]]; then
163 test_names=$1
164 test_dirs=$2
165elif [[ $# -ge 1 ]]; then
166 test_names=$1
167fi
168
169for t in $test_names; do
170 for d in $test_dirs; do
171 $t $d
172 done
173done
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
new file mode 100644
index 000000000000..92a44729dbe4
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -0,0 +1,236 @@
1/* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <uapi/linux/bpf.h>
8#include <uapi/linux/if_ether.h>
9#include <uapi/linux/if_packet.h>
10#include <uapi/linux/ip.h>
11#include <uapi/linux/ipv6.h>
12#include <uapi/linux/in.h>
13#include <uapi/linux/tcp.h>
14#include <uapi/linux/filter.h>
15#include <uapi/linux/pkt_cls.h>
16#include <net/ipv6.h>
17#include "bpf_helpers.h"
18
19#define _htonl __builtin_bswap32
20
21#define PIN_GLOBAL_NS 2
22struct bpf_elf_map {
23 __u32 type;
24 __u32 size_key;
25 __u32 size_value;
26 __u32 max_elem;
27 __u32 flags;
28 __u32 id;
29 __u32 pinning;
30};
31
32/* copy of 'struct ethhdr' without __packed */
33struct eth_hdr {
34 unsigned char h_dest[ETH_ALEN];
35 unsigned char h_source[ETH_ALEN];
36 unsigned short h_proto;
37};
38
39struct bpf_elf_map SEC("maps") tun_iface = {
40 .type = BPF_MAP_TYPE_ARRAY,
41 .size_key = sizeof(int),
42 .size_value = sizeof(int),
43 .pinning = PIN_GLOBAL_NS,
44 .max_elem = 1,
45};
46
47static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr)
48{
49 if (eth_proto == htons(ETH_P_IP))
50 return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100);
51 else if (eth_proto == htons(ETH_P_IPV6))
52 return (daddr == _htonl(0x2401face));
53
54 return false;
55}
56
57SEC("l2_to_iptun_ingress_forward")
58int _l2_to_iptun_ingress_forward(struct __sk_buff *skb)
59{
60 struct bpf_tunnel_key tkey = {};
61 void *data = (void *)(long)skb->data;
62 struct eth_hdr *eth = data;
63 void *data_end = (void *)(long)skb->data_end;
64 int key = 0, *ifindex;
65
66 int ret;
67
68 if (data + sizeof(*eth) > data_end)
69 return TC_ACT_OK;
70
71 ifindex = bpf_map_lookup_elem(&tun_iface, &key);
72 if (!ifindex)
73 return TC_ACT_OK;
74
75 if (eth->h_proto == htons(ETH_P_IP)) {
76 char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n";
77 struct iphdr *iph = data + sizeof(*eth);
78
79 if (data + sizeof(*eth) + sizeof(*iph) > data_end)
80 return TC_ACT_OK;
81
82 if (iph->protocol != IPPROTO_IPIP)
83 return TC_ACT_OK;
84
85 bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex,
86 _htonl(iph->daddr));
87 return bpf_redirect(*ifindex, BPF_F_INGRESS);
88 } else if (eth->h_proto == htons(ETH_P_IPV6)) {
89 char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n";
90 struct ipv6hdr *ip6h = data + sizeof(*eth);
91
92 if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
93 return TC_ACT_OK;
94
95 if (ip6h->nexthdr != IPPROTO_IPIP &&
96 ip6h->nexthdr != IPPROTO_IPV6)
97 return TC_ACT_OK;
98
99 bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex,
100 _htonl(ip6h->daddr.s6_addr32[0]),
101 _htonl(ip6h->daddr.s6_addr32[3]));
102 return bpf_redirect(*ifindex, BPF_F_INGRESS);
103 }
104
105 return TC_ACT_OK;
106}
107
108SEC("l2_to_iptun_ingress_redirect")
109int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb)
110{
111 struct bpf_tunnel_key tkey = {};
112 void *data = (void *)(long)skb->data;
113 struct eth_hdr *eth = data;
114 void *data_end = (void *)(long)skb->data_end;
115 int key = 0, *ifindex;
116
117 int ret;
118
119 if (data + sizeof(*eth) > data_end)
120 return TC_ACT_OK;
121
122 ifindex = bpf_map_lookup_elem(&tun_iface, &key);
123 if (!ifindex)
124 return TC_ACT_OK;
125
126 if (eth->h_proto == htons(ETH_P_IP)) {
127 char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
128 struct iphdr *iph = data + sizeof(*eth);
129 __be32 daddr = iph->daddr;
130
131 if (data + sizeof(*eth) + sizeof(*iph) > data_end)
132 return TC_ACT_OK;
133
134 if (!is_vip_addr(eth->h_proto, daddr))
135 return TC_ACT_OK;
136
137 bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex);
138 } else {
139 return TC_ACT_OK;
140 }
141
142 tkey.tunnel_id = 10000;
143 tkey.tunnel_ttl = 64;
144 tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */
145 bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0);
146 return bpf_redirect(*ifindex, 0);
147}
148
149SEC("l2_to_ip6tun_ingress_redirect")
150int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb)
151{
152 struct bpf_tunnel_key tkey = {};
153 void *data = (void *)(long)skb->data;
154 struct eth_hdr *eth = data;
155 void *data_end = (void *)(long)skb->data_end;
156 int key = 0, *ifindex;
157
158 if (data + sizeof(*eth) > data_end)
159 return TC_ACT_OK;
160
161 ifindex = bpf_map_lookup_elem(&tun_iface, &key);
162 if (!ifindex)
163 return TC_ACT_OK;
164
165 if (eth->h_proto == htons(ETH_P_IP)) {
166 char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
167 struct iphdr *iph = data + sizeof(*eth);
168
169 if (data + sizeof(*eth) + sizeof(*iph) > data_end)
170 return TC_ACT_OK;
171
172 if (!is_vip_addr(eth->h_proto, iph->daddr))
173 return TC_ACT_OK;
174
175 bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr),
176 *ifindex);
177 } else if (eth->h_proto == htons(ETH_P_IPV6)) {
178 char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n";
179 struct ipv6hdr *ip6h = data + sizeof(*eth);
180
181 if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
182 return TC_ACT_OK;
183
184 if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
185 return TC_ACT_OK;
186
187 bpf_trace_printk(fmt6, sizeof(fmt6),
188 _htonl(ip6h->daddr.s6_addr32[0]), *ifindex);
189 } else {
190 return TC_ACT_OK;
191 }
192
193 tkey.tunnel_id = 10000;
194 tkey.tunnel_ttl = 64;
195 /* 2401:db02:0:0:0:0:0:66 */
196 tkey.remote_ipv6[0] = _htonl(0x2401db02);
197 tkey.remote_ipv6[1] = 0;
198 tkey.remote_ipv6[2] = 0;
199 tkey.remote_ipv6[3] = _htonl(0x00000066);
200 bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6);
201 return bpf_redirect(*ifindex, 0);
202}
203
204SEC("drop_non_tun_vip")
205int _drop_non_tun_vip(struct __sk_buff *skb)
206{
207 struct bpf_tunnel_key tkey = {};
208 void *data = (void *)(long)skb->data;
209 struct eth_hdr *eth = data;
210 void *data_end = (void *)(long)skb->data_end;
211
212 if (data + sizeof(*eth) > data_end)
213 return TC_ACT_OK;
214
215 if (eth->h_proto == htons(ETH_P_IP)) {
216 struct iphdr *iph = data + sizeof(*eth);
217
218 if (data + sizeof(*eth) + sizeof(*iph) > data_end)
219 return TC_ACT_OK;
220
221 if (is_vip_addr(eth->h_proto, iph->daddr))
222 return TC_ACT_SHOT;
223 } else if (eth->h_proto == htons(ETH_P_IPV6)) {
224 struct ipv6hdr *ip6h = data + sizeof(*eth);
225
226 if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
227 return TC_ACT_OK;
228
229 if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
230 return TC_ACT_SHOT;
231 }
232
233 return TC_ACT_OK;
234}
235
236char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c
new file mode 100644
index 000000000000..4013c5337b91
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_user.c
@@ -0,0 +1,73 @@
1/* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <linux/unistd.h>
8#include <linux/bpf.h>
9
10#include <stdlib.h>
11#include <stdio.h>
12#include <unistd.h>
13#include <string.h>
14#include <errno.h>
15
16#include "libbpf.h"
17
18static void usage(void)
19{
20 printf("Usage: tc_l2_ipip_redirect [...]\n");
21 printf(" -U <file> Update an already pinned BPF array\n");
22 printf(" -i <ifindex> Interface index\n");
23 printf(" -h Display this help\n");
24}
25
26int main(int argc, char **argv)
27{
28 const char *pinned_file = NULL;
29 int ifindex = -1;
30 int array_key = 0;
31 int array_fd = -1;
32 int ret = -1;
33 int opt;
34
35 while ((opt = getopt(argc, argv, "F:U:i:")) != -1) {
36 switch (opt) {
37 /* General args */
38 case 'U':
39 pinned_file = optarg;
40 break;
41 case 'i':
42 ifindex = atoi(optarg);
43 break;
44 default:
45 usage();
46 goto out;
47 }
48 }
49
50 if (ifindex < 0 || !pinned_file) {
51 usage();
52 goto out;
53 }
54
55 array_fd = bpf_obj_get(pinned_file);
56 if (array_fd < 0) {
57 fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
58 pinned_file, strerror(errno), errno);
59 goto out;
60 }
61
62 /* bpf_tunnel_key.remote_ipv4 expects host byte orders */
63 ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0);
64 if (ret) {
65 perror("bpf_update_elem");
66 goto out;
67 }
68
69out:
70 if (array_fd != -1)
71 close(array_fd);
72 return ret;
73}
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 53449a6ff6aa..7c321a603b07 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -36,6 +36,7 @@ warning-2 += -Wshadow
36warning-2 += $(call cc-option, -Wlogical-op) 36warning-2 += $(call cc-option, -Wlogical-op)
37warning-2 += $(call cc-option, -Wmissing-field-initializers) 37warning-2 += $(call cc-option, -Wmissing-field-initializers)
38warning-2 += $(call cc-option, -Wsign-compare) 38warning-2 += $(call cc-option, -Wsign-compare)
39warning-2 += $(call cc-option, -Wmaybe-uninitialized)
39 40
40warning-3 := -Wbad-function-cast 41warning-3 := -Wbad-function-cast
41warning-3 += -Wcast-qual 42warning-3 += -Wcast-qual
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index dd779c40c8e6..3b1b13818d59 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -17,4 +17,8 @@ endif
17ifdef CONFIG_UBSAN_NULL 17ifdef CONFIG_UBSAN_NULL
18 CFLAGS_UBSAN += $(call cc-option, -fsanitize=null) 18 CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
19endif 19endif
20
21 # -fsanitize=* options makes GCC less smart than usual and
22 # increase number of 'maybe-uninitialized false-positives
23 CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
20endif 24endif
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 19f5adfd877d..d9ff038c1b28 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -8,6 +8,9 @@
8# of the GNU General Public License, incorporated herein by reference. 8# of the GNU General Public License, incorporated herein by reference.
9 9
10import sys, os, re 10import sys, os, re
11from signal import signal, SIGPIPE, SIG_DFL
12
13signal(SIGPIPE, SIG_DFL)
11 14
12if len(sys.argv) != 3: 15if len(sys.argv) != 3:
13 sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0]) 16 sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0])
diff --git a/scripts/gcc-plugins/cyc_complexity_plugin.c b/scripts/gcc-plugins/cyc_complexity_plugin.c
index 34df974c6ba3..8af7db06122d 100644
--- a/scripts/gcc-plugins/cyc_complexity_plugin.c
+++ b/scripts/gcc-plugins/cyc_complexity_plugin.c
@@ -20,7 +20,7 @@
20 20
21#include "gcc-common.h" 21#include "gcc-common.h"
22 22
23int plugin_is_GPL_compatible; 23__visible int plugin_is_GPL_compatible;
24 24
25static struct plugin_info cyc_complexity_plugin_info = { 25static struct plugin_info cyc_complexity_plugin_info = {
26 .version = "20160225", 26 .version = "20160225",
@@ -49,7 +49,7 @@ static unsigned int cyc_complexity_execute(void)
49 49
50#include "gcc-generate-gimple-pass.h" 50#include "gcc-generate-gimple-pass.h"
51 51
52int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) 52__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
53{ 53{
54 const char * const plugin_name = plugin_info->base_name; 54 const char * const plugin_name = plugin_info->base_name;
55 struct register_pass_info cyc_complexity_pass_info; 55 struct register_pass_info cyc_complexity_pass_info;
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 172850bcd0d9..950fd2e64bb7 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -130,6 +130,7 @@ extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
130#endif 130#endif
131 131
132#define __unused __attribute__((__unused__)) 132#define __unused __attribute__((__unused__))
133#define __visible __attribute__((visibility("default")))
133 134
134#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node)) 135#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
135#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node)) 136#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index ff1939b804ae..8160f1c1b56e 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -77,7 +77,7 @@
77 77
78#include "gcc-common.h" 78#include "gcc-common.h"
79 79
80int plugin_is_GPL_compatible; 80__visible int plugin_is_GPL_compatible;
81 81
82static GTY(()) tree latent_entropy_decl; 82static GTY(()) tree latent_entropy_decl;
83 83
@@ -340,7 +340,7 @@ static enum tree_code get_op(tree *rhs)
340 break; 340 break;
341 } 341 }
342 if (rhs) 342 if (rhs)
343 *rhs = build_int_cstu(unsigned_intDI_type_node, random_const); 343 *rhs = build_int_cstu(long_unsigned_type_node, random_const);
344 return op; 344 return op;
345} 345}
346 346
@@ -372,7 +372,7 @@ static void __perturb_latent_entropy(gimple_stmt_iterator *gsi,
372 enum tree_code op; 372 enum tree_code op;
373 373
374 /* 1. create temporary copy of latent_entropy */ 374 /* 1. create temporary copy of latent_entropy */
375 temp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy"); 375 temp = create_var(long_unsigned_type_node, "temp_latent_entropy");
376 376
377 /* 2. read... */ 377 /* 2. read... */
378 add_referenced_var(latent_entropy_decl); 378 add_referenced_var(latent_entropy_decl);
@@ -459,13 +459,13 @@ static void init_local_entropy(basic_block bb, tree local_entropy)
459 gsi_insert_before(&gsi, call, GSI_NEW_STMT); 459 gsi_insert_before(&gsi, call, GSI_NEW_STMT);
460 update_stmt(call); 460 update_stmt(call);
461 461
462 udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr); 462 udi_frame_addr = fold_convert(long_unsigned_type_node, frame_addr);
463 assign = gimple_build_assign(local_entropy, udi_frame_addr); 463 assign = gimple_build_assign(local_entropy, udi_frame_addr);
464 gsi_insert_after(&gsi, assign, GSI_NEW_STMT); 464 gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
465 update_stmt(assign); 465 update_stmt(assign);
466 466
467 /* 3. create temporary copy of latent_entropy */ 467 /* 3. create temporary copy of latent_entropy */
468 tmp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy"); 468 tmp = create_var(long_unsigned_type_node, "temp_latent_entropy");
469 469
470 /* 4. read the global entropy variable into local entropy */ 470 /* 4. read the global entropy variable into local entropy */
471 add_referenced_var(latent_entropy_decl); 471 add_referenced_var(latent_entropy_decl);
@@ -480,7 +480,7 @@ static void init_local_entropy(basic_block bb, tree local_entropy)
480 update_stmt(assign); 480 update_stmt(assign);
481 481
482 rand_cst = get_random_const(); 482 rand_cst = get_random_const();
483 rand_const = build_int_cstu(unsigned_intDI_type_node, rand_cst); 483 rand_const = build_int_cstu(long_unsigned_type_node, rand_cst);
484 op = get_op(NULL); 484 op = get_op(NULL);
485 assign = create_assign(op, local_entropy, local_entropy, rand_const); 485 assign = create_assign(op, local_entropy, local_entropy, rand_const);
486 gsi_insert_after(&gsi, assign, GSI_NEW_STMT); 486 gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
@@ -529,7 +529,7 @@ static unsigned int latent_entropy_execute(void)
529 } 529 }
530 530
531 /* 1. create the local entropy variable */ 531 /* 1. create the local entropy variable */
532 local_entropy = create_var(unsigned_intDI_type_node, "local_entropy"); 532 local_entropy = create_var(long_unsigned_type_node, "local_entropy");
533 533
534 /* 2. initialize the local entropy variable */ 534 /* 2. initialize the local entropy variable */
535 init_local_entropy(bb, local_entropy); 535 init_local_entropy(bb, local_entropy);
@@ -561,10 +561,9 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
561 if (in_lto_p) 561 if (in_lto_p)
562 return; 562 return;
563 563
564 /* extern volatile u64 latent_entropy */ 564 /* extern volatile unsigned long latent_entropy */
565 gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64); 565 quals = TYPE_QUALS(long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
566 quals = TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE; 566 type = build_qualified_type(long_unsigned_type_node, quals);
567 type = build_qualified_type(long_long_unsigned_type_node, quals);
568 id = get_identifier("latent_entropy"); 567 id = get_identifier("latent_entropy");
569 latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type); 568 latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type);
570 569
@@ -584,8 +583,8 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
584 | TODO_update_ssa 583 | TODO_update_ssa
585#include "gcc-generate-gimple-pass.h" 584#include "gcc-generate-gimple-pass.h"
586 585
587int plugin_init(struct plugin_name_args *plugin_info, 586__visible int plugin_init(struct plugin_name_args *plugin_info,
588 struct plugin_gcc_version *version) 587 struct plugin_gcc_version *version)
589{ 588{
590 bool enabled = true; 589 bool enabled = true;
591 const char * const plugin_name = plugin_info->base_name; 590 const char * const plugin_name = plugin_info->base_name;
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
index aedd6113cb73..7ea0b3f50739 100644
--- a/scripts/gcc-plugins/sancov_plugin.c
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -21,7 +21,7 @@
21 21
22#include "gcc-common.h" 22#include "gcc-common.h"
23 23
24int plugin_is_GPL_compatible; 24__visible int plugin_is_GPL_compatible;
25 25
26tree sancov_fndecl; 26tree sancov_fndecl;
27 27
@@ -86,7 +86,7 @@ static void sancov_start_unit(void __unused *gcc_data, void __unused *user_data)
86#endif 86#endif
87} 87}
88 88
89int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) 89__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
90{ 90{
91 int i; 91 int i;
92 struct register_pass_info sancov_plugin_pass_info; 92 struct register_pass_info sancov_plugin_pass_info;
diff --git a/sound/core/info.c b/sound/core/info.c
index 895362a696c9..8ab72e0f5932 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -325,10 +325,15 @@ static ssize_t snd_info_text_entry_write(struct file *file,
325 size_t next; 325 size_t next;
326 int err = 0; 326 int err = 0;
327 327
328 if (!entry->c.text.write)
329 return -EIO;
328 pos = *offset; 330 pos = *offset;
329 if (!valid_pos(pos, count)) 331 if (!valid_pos(pos, count))
330 return -EIO; 332 return -EIO;
331 next = pos + count; 333 next = pos + count;
334 /* don't handle too large text inputs */
335 if (next > 16 * 1024)
336 return -EIO;
332 mutex_lock(&entry->access); 337 mutex_lock(&entry->access);
333 buf = data->wbuffer; 338 buf = data->wbuffer;
334 if (!buf) { 339 if (!buf) {
@@ -366,7 +371,9 @@ static int snd_info_seq_show(struct seq_file *seq, void *p)
366 struct snd_info_private_data *data = seq->private; 371 struct snd_info_private_data *data = seq->private;
367 struct snd_info_entry *entry = data->entry; 372 struct snd_info_entry *entry = data->entry;
368 373
369 if (entry->c.text.read) { 374 if (!entry->c.text.read) {
375 return -EIO;
376 } else {
370 data->rbuffer->buffer = (char *)seq; /* XXX hack! */ 377 data->rbuffer->buffer = (char *)seq; /* XXX hack! */
371 entry->c.text.read(entry, data->rbuffer); 378 entry->c.text.read(entry, data->rbuffer);
372 } 379 }
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 18baea2f7d65..84f86745c30e 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"),
148}; 148};
149 149
150static const struct snd_soc_dapm_route cs4270_dapm_routes[] = { 150static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
151 { "Capture", NULL, "AINA" }, 151 { "Capture", NULL, "AINL" },
152 { "Capture", NULL, "AINB" }, 152 { "Capture", NULL, "AINR" },
153 153
154 { "AOUTA", NULL, "Playback" }, 154 { "AOUTL", NULL, "Playback" },
155 { "AOUTB", NULL, "Playback" }, 155 { "AOUTR", NULL, "Playback" },
156}; 156};
157 157
158/** 158/**
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 1152aa5e7c39..cf37936bfe3a 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -880,7 +880,8 @@ static const struct snd_soc_dapm_widget da7219_dapm_widgets[] = {
880 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), 880 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
881 881
882 /* DAI */ 882 /* DAI */
883 SND_SOC_DAPM_AIF_OUT("DAIOUT", "Capture", 0, SND_SOC_NOPM, 0, 0), 883 SND_SOC_DAPM_AIF_OUT("DAIOUT", "Capture", 0, DA7219_DAI_TDM_CTRL,
884 DA7219_DAI_OE_SHIFT, DA7219_NO_INVERT),
884 SND_SOC_DAPM_AIF_IN("DAIIN", "Playback", 0, SND_SOC_NOPM, 0, 0), 885 SND_SOC_DAPM_AIF_IN("DAIIN", "Playback", 0, SND_SOC_NOPM, 0, 0),
885 886
886 /* Output Muxes */ 887 /* Output Muxes */
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index b904492d7744..90b5948e0ff3 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -364,7 +364,12 @@ static int hdmi_of_xlate_dai_name(struct snd_soc_component *component,
364 struct of_phandle_args *args, 364 struct of_phandle_args *args,
365 const char **dai_name) 365 const char **dai_name)
366{ 366{
367 int id = args->args[0]; 367 int id;
368
369 if (args->args_count)
370 id = args->args[0];
371 else
372 id = 0;
368 373
369 if (id < ARRAY_SIZE(hdmi_dai_name)) { 374 if (id < ARRAY_SIZE(hdmi_dai_name)) {
370 *dai_name = hdmi_dai_name[id]; 375 *dai_name = hdmi_dai_name[id];
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 55558643166f..2db8179047ae 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -249,6 +249,11 @@ static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic)
249 snd_soc_dapm_force_enable_pin(dapm, "LDO1"); 249 snd_soc_dapm_force_enable_pin(dapm, "LDO1");
250 snd_soc_dapm_sync(dapm); 250 snd_soc_dapm_sync(dapm);
251 251
252 regmap_update_bits(rt298->regmap,
253 RT298_POWER_CTRL1, 0x1001, 0);
254 regmap_update_bits(rt298->regmap,
255 RT298_POWER_CTRL2, 0x4, 0x4);
256
252 regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24); 257 regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24);
253 msleep(50); 258 msleep(50);
254 259
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 01a18d88f1eb..00ff2788879e 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -1547,11 +1547,11 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
1547 msleep(sleep_time[i]); 1547 msleep(sleep_time[i]);
1548 val = snd_soc_read(codec, RT5663_EM_JACK_TYPE_2) & 1548 val = snd_soc_read(codec, RT5663_EM_JACK_TYPE_2) &
1549 0x0003; 1549 0x0003;
1550 dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n",
1551 __func__, val, sleep_time[i]);
1550 i++; 1552 i++;
1551 if (val == 0x1 || val == 0x2 || val == 0x3) 1553 if (val == 0x1 || val == 0x2 || val == 0x3)
1552 break; 1554 break;
1553 dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n",
1554 __func__, val, sleep_time[i]);
1555 } 1555 }
1556 dev_dbg(codec->dev, "%s val = %d\n", __func__, val); 1556 dev_dbg(codec->dev, "%s val = %d\n", __func__, val);
1557 switch (val) { 1557 switch (val) {
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index 7b31ee9b82bc..d6e00c77edcd 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -424,7 +424,7 @@ static const struct snd_soc_dai_ops stih407_dac_ops = {
424static const struct regmap_config stih407_sas_regmap = { 424static const struct regmap_config stih407_sas_regmap = {
425 .reg_bits = 32, 425 .reg_bits = 32,
426 .val_bits = 32, 426 .val_bits = 32,
427 427 .fast_io = true,
428 .max_register = STIH407_AUDIO_DAC_CTRL, 428 .max_register = STIH407_AUDIO_DAC_CTRL,
429 .reg_defaults = stih407_sas_reg_defaults, 429 .reg_defaults = stih407_sas_reg_defaults,
430 .num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults), 430 .num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults),
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index df5e5cb33baa..810369f687d7 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -341,20 +341,9 @@ static int tas571x_set_bias_level(struct snd_soc_codec *codec,
341 return ret; 341 return ret;
342 } 342 }
343 } 343 }
344
345 gpiod_set_value(priv->pdn_gpio, 0);
346 usleep_range(5000, 6000);
347
348 regcache_cache_only(priv->regmap, false);
349 ret = regcache_sync(priv->regmap);
350 if (ret)
351 return ret;
352 } 344 }
353 break; 345 break;
354 case SND_SOC_BIAS_OFF: 346 case SND_SOC_BIAS_OFF:
355 regcache_cache_only(priv->regmap, true);
356 gpiod_set_value(priv->pdn_gpio, 1);
357
358 if (!IS_ERR(priv->mclk)) 347 if (!IS_ERR(priv->mclk))
359 clk_disable_unprepare(priv->mclk); 348 clk_disable_unprepare(priv->mclk);
360 break; 349 break;
@@ -401,16 +390,6 @@ static const struct snd_kcontrol_new tas5711_controls[] = {
401 TAS571X_SOFT_MUTE_REG, 390 TAS571X_SOFT_MUTE_REG,
402 TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT, 391 TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
403 1, 1), 392 1, 1),
404
405 SOC_DOUBLE_R_RANGE("CH1 Mixer Volume",
406 TAS5717_CH1_LEFT_CH_MIX_REG,
407 TAS5717_CH1_RIGHT_CH_MIX_REG,
408 16, 0, 0x80, 0),
409
410 SOC_DOUBLE_R_RANGE("CH2 Mixer Volume",
411 TAS5717_CH2_LEFT_CH_MIX_REG,
412 TAS5717_CH2_RIGHT_CH_MIX_REG,
413 16, 0, 0x80, 0),
414}; 393};
415 394
416static const struct regmap_range tas571x_readonly_regs_range[] = { 395static const struct regmap_range tas571x_readonly_regs_range[] = {
@@ -488,6 +467,16 @@ static const struct snd_kcontrol_new tas5717_controls[] = {
488 TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT, 467 TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
489 1, 1), 468 1, 1),
490 469
470 SOC_DOUBLE_R_RANGE("CH1 Mixer Volume",
471 TAS5717_CH1_LEFT_CH_MIX_REG,
472 TAS5717_CH1_RIGHT_CH_MIX_REG,
473 16, 0, 0x80, 0),
474
475 SOC_DOUBLE_R_RANGE("CH2 Mixer Volume",
476 TAS5717_CH2_LEFT_CH_MIX_REG,
477 TAS5717_CH2_RIGHT_CH_MIX_REG,
478 16, 0, 0x80, 0),
479
491 /* 480 /*
492 * The biquads are named according to the register names. 481 * The biquads are named according to the register names.
493 * Please note that TI's TAS57xx Graphical Development Environment 482 * Please note that TI's TAS57xx Graphical Development Environment
@@ -747,13 +736,14 @@ static int tas571x_i2c_probe(struct i2c_client *client,
747 /* pulse the active low reset line for ~100us */ 736 /* pulse the active low reset line for ~100us */
748 usleep_range(100, 200); 737 usleep_range(100, 200);
749 gpiod_set_value(priv->reset_gpio, 0); 738 gpiod_set_value(priv->reset_gpio, 0);
750 usleep_range(12000, 20000); 739 usleep_range(13500, 20000);
751 } 740 }
752 741
753 ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0); 742 ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0);
754 if (ret) 743 if (ret)
755 return ret; 744 return ret;
756 745
746 usleep_range(50000, 60000);
757 747
758 memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver)); 748 memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver));
759 priv->codec_driver.component_driver.controls = priv->chip->controls; 749 priv->codec_driver.component_driver.controls = priv->chip->controls;
@@ -770,9 +760,6 @@ static int tas571x_i2c_probe(struct i2c_client *client,
770 return ret; 760 return ret;
771 } 761 }
772 762
773 regcache_cache_only(priv->regmap, true);
774 gpiod_set_value(priv->pdn_gpio, 1);
775
776 return snd_soc_register_codec(&client->dev, &priv->codec_driver, 763 return snd_soc_register_codec(&client->dev, &priv->codec_driver,
777 &tas571x_dai, 1); 764 &tas571x_dai, 1);
778} 765}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 26eb5a0a5575..fd5d1e091038 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -47,6 +47,7 @@ config SND_SOC_INTEL_SST_MATCH
47 47
48config SND_SOC_INTEL_HASWELL 48config SND_SOC_INTEL_HASWELL
49 tristate 49 tristate
50 select SND_SOC_INTEL_SST_FIRMWARE
50 51
51config SND_SOC_INTEL_BAYTRAIL 52config SND_SOC_INTEL_BAYTRAIL
52 tristate 53 tristate
@@ -56,7 +57,6 @@ config SND_SOC_INTEL_HASWELL_MACH
56 depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM 57 depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
57 depends on DW_DMAC_CORE 58 depends on DW_DMAC_CORE
58 select SND_SOC_INTEL_SST 59 select SND_SOC_INTEL_SST
59 select SND_SOC_INTEL_SST_FIRMWARE
60 select SND_SOC_INTEL_HASWELL 60 select SND_SOC_INTEL_HASWELL
61 select SND_SOC_RT5640 61 select SND_SOC_RT5640
62 help 62 help
@@ -138,7 +138,6 @@ config SND_SOC_INTEL_BROADWELL_MACH
138 I2C_DESIGNWARE_PLATFORM 138 I2C_DESIGNWARE_PLATFORM
139 depends on DW_DMAC_CORE 139 depends on DW_DMAC_CORE
140 select SND_SOC_INTEL_SST 140 select SND_SOC_INTEL_SST
141 select SND_SOC_INTEL_SST_FIRMWARE
142 select SND_SOC_INTEL_HASWELL 141 select SND_SOC_INTEL_HASWELL
143 select SND_SOC_RT286 142 select SND_SOC_RT286
144 help 143 help
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index ba5c0d71720a..0a88537ca58a 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -416,6 +416,7 @@ static const struct dmi_system_id cht_table[] = {
416 DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"), 416 DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
417 }, 417 },
418 }, 418 },
419 { }
419}; 420};
420 421
421 422
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 6532b8f0ab2f..865a21e557cc 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -130,8 +130,8 @@ static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
130 */ 130 */
131 ret = snd_soc_card_jack_new(rtd->card, "Headset Jack", 131 ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
132 SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | 132 SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
133 SND_JACK_BTN_2 | SND_JACK_BTN_3, &broxton_headset, 133 SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
134 NULL, 0); 134 &broxton_headset, NULL, 0);
135 if (ret) { 135 if (ret) {
136 dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret); 136 dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
137 return ret; 137 return ret;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 2989c164dafe..06fa5e85dd0e 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -674,7 +674,7 @@ static int skl_probe(struct pci_dev *pci,
674 674
675 if (skl->nhlt == NULL) { 675 if (skl->nhlt == NULL) {
676 err = -ENODEV; 676 err = -ENODEV;
677 goto out_free; 677 goto out_display_power_off;
678 } 678 }
679 679
680 skl_nhlt_update_topology_bin(skl); 680 skl_nhlt_update_topology_bin(skl);
@@ -746,6 +746,9 @@ out_mach_free:
746 skl_machine_device_unregister(skl); 746 skl_machine_device_unregister(skl);
747out_nhlt_free: 747out_nhlt_free:
748 skl_nhlt_free(skl->nhlt); 748 skl_nhlt_free(skl->nhlt);
749out_display_power_off:
750 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
751 snd_hdac_display_power(bus, false);
749out_free: 752out_free:
750 skl->init_failed = 1; 753 skl->init_failed = 1;
751 skl_free(ebus); 754 skl_free(ebus);
@@ -785,8 +788,7 @@ static void skl_remove(struct pci_dev *pci)
785 788
786 release_firmware(skl->tplg); 789 release_firmware(skl->tplg);
787 790
788 if (pci_dev_run_wake(pci)) 791 pm_runtime_get_noresume(&pci->dev);
789 pm_runtime_get_noresume(&pci->dev);
790 792
791 /* codec removal, invoke bus_device_remove */ 793 /* codec removal, invoke bus_device_remove */
792 snd_hdac_ext_bus_device_remove(ebus); 794 snd_hdac_ext_bus_device_remove(ebus);
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index f2bf8661dd21..823b5a236d8d 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -208,7 +208,7 @@ config SND_PXA2XX_SOC_IMOTE2
208 208
209config SND_MMP_SOC_BROWNSTONE 209config SND_MMP_SOC_BROWNSTONE
210 tristate "SoC Audio support for Marvell Brownstone" 210 tristate "SoC Audio support for Marvell Brownstone"
211 depends on SND_MMP_SOC && MACH_BROWNSTONE 211 depends on SND_MMP_SOC && MACH_BROWNSTONE && I2C
212 select SND_MMP_SOC_SSPA 212 select SND_MMP_SOC_SSPA
213 select MFD_WM8994 213 select MFD_WM8994
214 select SND_SOC_WM8994 214 select SND_SOC_WM8994
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 3cde9fb977fa..eff3f9a8b685 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -586,3 +586,6 @@ int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
586 return 0; 586 return 0;
587} 587}
588EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove); 588EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);
589
590MODULE_DESCRIPTION("QTi LPASS CPU Driver");
591MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index e2ff538a8aa5..b392e51de94d 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -61,7 +61,41 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
61{ 61{
62 struct snd_pcm_runtime *runtime = substream->runtime; 62 struct snd_pcm_runtime *runtime = substream->runtime;
63 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; 63 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
64 int ret; 64 struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
65 struct lpass_data *drvdata =
66 snd_soc_platform_get_drvdata(soc_runtime->platform);
67 struct lpass_variant *v = drvdata->variant;
68 int ret, dma_ch, dir = substream->stream;
69 struct lpass_pcm_data *data;
70
71 data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
72 if (!data)
73 return -ENOMEM;
74
75 data->i2s_port = cpu_dai->driver->id;
76 runtime->private_data = data;
77
78 dma_ch = 0;
79 if (v->alloc_dma_channel)
80 dma_ch = v->alloc_dma_channel(drvdata, dir);
81 if (dma_ch < 0)
82 return dma_ch;
83
84 drvdata->substream[dma_ch] = substream;
85
86 ret = regmap_write(drvdata->lpaif_map,
87 LPAIF_DMACTL_REG(v, dma_ch, dir), 0);
88 if (ret) {
89 dev_err(soc_runtime->dev,
90 "%s() error writing to rdmactl reg: %d\n",
91 __func__, ret);
92 return ret;
93 }
94
95 if (dir == SNDRV_PCM_STREAM_PLAYBACK)
96 data->rdma_ch = dma_ch;
97 else
98 data->wrdma_ch = dma_ch;
65 99
66 snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware); 100 snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
67 101
@@ -80,13 +114,40 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
80 return 0; 114 return 0;
81} 115}
82 116
117static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
118{
119 struct snd_pcm_runtime *runtime = substream->runtime;
120 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
121 struct lpass_data *drvdata =
122 snd_soc_platform_get_drvdata(soc_runtime->platform);
123 struct lpass_variant *v = drvdata->variant;
124 struct lpass_pcm_data *data;
125 int dma_ch, dir = substream->stream;
126
127 data = runtime->private_data;
128 v = drvdata->variant;
129
130 if (dir == SNDRV_PCM_STREAM_PLAYBACK)
131 dma_ch = data->rdma_ch;
132 else
133 dma_ch = data->wrdma_ch;
134
135 drvdata->substream[dma_ch] = NULL;
136
137 if (v->free_dma_channel)
138 v->free_dma_channel(drvdata, dma_ch);
139
140 return 0;
141}
142
83static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream, 143static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
84 struct snd_pcm_hw_params *params) 144 struct snd_pcm_hw_params *params)
85{ 145{
86 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; 146 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
87 struct lpass_data *drvdata = 147 struct lpass_data *drvdata =
88 snd_soc_platform_get_drvdata(soc_runtime->platform); 148 snd_soc_platform_get_drvdata(soc_runtime->platform);
89 struct lpass_pcm_data *pcm_data = drvdata->private_data; 149 struct snd_pcm_runtime *rt = substream->runtime;
150 struct lpass_pcm_data *pcm_data = rt->private_data;
90 struct lpass_variant *v = drvdata->variant; 151 struct lpass_variant *v = drvdata->variant;
91 snd_pcm_format_t format = params_format(params); 152 snd_pcm_format_t format = params_format(params);
92 unsigned int channels = params_channels(params); 153 unsigned int channels = params_channels(params);
@@ -179,7 +240,8 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
179 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; 240 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
180 struct lpass_data *drvdata = 241 struct lpass_data *drvdata =
181 snd_soc_platform_get_drvdata(soc_runtime->platform); 242 snd_soc_platform_get_drvdata(soc_runtime->platform);
182 struct lpass_pcm_data *pcm_data = drvdata->private_data; 243 struct snd_pcm_runtime *rt = substream->runtime;
244 struct lpass_pcm_data *pcm_data = rt->private_data;
183 struct lpass_variant *v = drvdata->variant; 245 struct lpass_variant *v = drvdata->variant;
184 unsigned int reg; 246 unsigned int reg;
185 int ret; 247 int ret;
@@ -203,7 +265,8 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
203 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; 265 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
204 struct lpass_data *drvdata = 266 struct lpass_data *drvdata =
205 snd_soc_platform_get_drvdata(soc_runtime->platform); 267 snd_soc_platform_get_drvdata(soc_runtime->platform);
206 struct lpass_pcm_data *pcm_data = drvdata->private_data; 268 struct snd_pcm_runtime *rt = substream->runtime;
269 struct lpass_pcm_data *pcm_data = rt->private_data;
207 struct lpass_variant *v = drvdata->variant; 270 struct lpass_variant *v = drvdata->variant;
208 int ret, ch, dir = substream->stream; 271 int ret, ch, dir = substream->stream;
209 272
@@ -257,7 +320,8 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
257 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; 320 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
258 struct lpass_data *drvdata = 321 struct lpass_data *drvdata =
259 snd_soc_platform_get_drvdata(soc_runtime->platform); 322 snd_soc_platform_get_drvdata(soc_runtime->platform);
260 struct lpass_pcm_data *pcm_data = drvdata->private_data; 323 struct snd_pcm_runtime *rt = substream->runtime;
324 struct lpass_pcm_data *pcm_data = rt->private_data;
261 struct lpass_variant *v = drvdata->variant; 325 struct lpass_variant *v = drvdata->variant;
262 int ret, ch, dir = substream->stream; 326 int ret, ch, dir = substream->stream;
263 327
@@ -333,7 +397,8 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
333 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; 397 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
334 struct lpass_data *drvdata = 398 struct lpass_data *drvdata =
335 snd_soc_platform_get_drvdata(soc_runtime->platform); 399 snd_soc_platform_get_drvdata(soc_runtime->platform);
336 struct lpass_pcm_data *pcm_data = drvdata->private_data; 400 struct snd_pcm_runtime *rt = substream->runtime;
401 struct lpass_pcm_data *pcm_data = rt->private_data;
337 struct lpass_variant *v = drvdata->variant; 402 struct lpass_variant *v = drvdata->variant;
338 unsigned int base_addr, curr_addr; 403 unsigned int base_addr, curr_addr;
339 int ret, ch, dir = substream->stream; 404 int ret, ch, dir = substream->stream;
@@ -374,6 +439,7 @@ static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
374 439
375static const struct snd_pcm_ops lpass_platform_pcm_ops = { 440static const struct snd_pcm_ops lpass_platform_pcm_ops = {
376 .open = lpass_platform_pcmops_open, 441 .open = lpass_platform_pcmops_open,
442 .close = lpass_platform_pcmops_close,
377 .ioctl = snd_pcm_lib_ioctl, 443 .ioctl = snd_pcm_lib_ioctl,
378 .hw_params = lpass_platform_pcmops_hw_params, 444 .hw_params = lpass_platform_pcmops_hw_params,
379 .hw_free = lpass_platform_pcmops_hw_free, 445 .hw_free = lpass_platform_pcmops_hw_free,
@@ -470,117 +536,45 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
470{ 536{
471 struct snd_pcm *pcm = soc_runtime->pcm; 537 struct snd_pcm *pcm = soc_runtime->pcm;
472 struct snd_pcm_substream *psubstream, *csubstream; 538 struct snd_pcm_substream *psubstream, *csubstream;
473 struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
474 struct lpass_data *drvdata =
475 snd_soc_platform_get_drvdata(soc_runtime->platform);
476 struct lpass_variant *v = drvdata->variant;
477 int ret = -EINVAL; 539 int ret = -EINVAL;
478 struct lpass_pcm_data *data;
479 size_t size = lpass_platform_pcm_hardware.buffer_bytes_max; 540 size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
480 541
481 data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
482 if (!data)
483 return -ENOMEM;
484
485 data->i2s_port = cpu_dai->driver->id;
486 drvdata->private_data = data;
487
488 psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; 542 psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
489 if (psubstream) { 543 if (psubstream) {
490 if (v->alloc_dma_channel)
491 data->rdma_ch = v->alloc_dma_channel(drvdata,
492 SNDRV_PCM_STREAM_PLAYBACK);
493
494 if (data->rdma_ch < 0)
495 return data->rdma_ch;
496
497 drvdata->substream[data->rdma_ch] = psubstream;
498
499 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, 544 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
500 soc_runtime->platform->dev, 545 soc_runtime->platform->dev,
501 size, &psubstream->dma_buffer); 546 size, &psubstream->dma_buffer);
502 if (ret)
503 goto playback_alloc_err;
504
505 ret = regmap_write(drvdata->lpaif_map,
506 LPAIF_RDMACTL_REG(v, data->rdma_ch), 0);
507 if (ret) { 547 if (ret) {
508 dev_err(soc_runtime->dev, 548 dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
509 "%s() error writing to rdmactl reg: %d\n", 549 return ret;
510 __func__, ret);
511 goto capture_alloc_err;
512 } 550 }
513 } 551 }
514 552
515 csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; 553 csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
516 if (csubstream) { 554 if (csubstream) {
517 if (v->alloc_dma_channel)
518 data->wrdma_ch = v->alloc_dma_channel(drvdata,
519 SNDRV_PCM_STREAM_CAPTURE);
520
521 if (data->wrdma_ch < 0) {
522 ret = data->wrdma_ch;
523 goto capture_alloc_err;
524 }
525
526 drvdata->substream[data->wrdma_ch] = csubstream;
527
528 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, 555 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
529 soc_runtime->platform->dev, 556 soc_runtime->platform->dev,
530 size, &csubstream->dma_buffer); 557 size, &csubstream->dma_buffer);
531 if (ret)
532 goto capture_alloc_err;
533
534 ret = regmap_write(drvdata->lpaif_map,
535 LPAIF_WRDMACTL_REG(v, data->wrdma_ch), 0);
536 if (ret) { 558 if (ret) {
537 dev_err(soc_runtime->dev, 559 dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
538 "%s() error writing to wrdmactl reg: %d\n", 560 if (psubstream)
539 __func__, ret); 561 snd_dma_free_pages(&psubstream->dma_buffer);
540 goto capture_reg_err; 562 return ret;
541 } 563 }
564
542 } 565 }
543 566
544 return 0; 567 return 0;
545
546capture_reg_err:
547 if (csubstream)
548 snd_dma_free_pages(&csubstream->dma_buffer);
549
550capture_alloc_err:
551 if (psubstream)
552 snd_dma_free_pages(&psubstream->dma_buffer);
553
554 playback_alloc_err:
555 dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
556
557 return ret;
558} 568}
559 569
560static void lpass_platform_pcm_free(struct snd_pcm *pcm) 570static void lpass_platform_pcm_free(struct snd_pcm *pcm)
561{ 571{
562 struct snd_soc_pcm_runtime *rt;
563 struct lpass_data *drvdata;
564 struct lpass_pcm_data *data;
565 struct lpass_variant *v;
566 struct snd_pcm_substream *substream; 572 struct snd_pcm_substream *substream;
567 int ch, i; 573 int i;
568 574
569 for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) { 575 for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
570 substream = pcm->streams[i].substream; 576 substream = pcm->streams[i].substream;
571 if (substream) { 577 if (substream) {
572 rt = substream->private_data;
573 drvdata = snd_soc_platform_get_drvdata(rt->platform);
574 data = drvdata->private_data;
575
576 ch = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
577 ? data->rdma_ch
578 : data->wrdma_ch;
579 v = drvdata->variant;
580 drvdata->substream[ch] = NULL;
581 if (v->free_dma_channel)
582 v->free_dma_channel(drvdata, ch);
583
584 snd_dma_free_pages(&substream->dma_buffer); 578 snd_dma_free_pages(&substream->dma_buffer);
585 substream->dma_buffer.area = NULL; 579 substream->dma_buffer.area = NULL;
586 substream->dma_buffer.addr = 0; 580 substream->dma_buffer.addr = 0;
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 35b3cea8207d..924971b6ded5 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -59,7 +59,6 @@ struct lpass_data {
59 struct clk *pcnoc_mport_clk; 59 struct clk *pcnoc_mport_clk;
60 struct clk *pcnoc_sway_clk; 60 struct clk *pcnoc_sway_clk;
61 61
62 void *private_data;
63}; 62};
64 63
65/* Vairant data per each SOC */ 64/* Vairant data per each SOC */
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 97d6700b1009..cbc0023c2bc8 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -383,11 +383,6 @@ static int s3c_ac97_probe(struct platform_device *pdev)
383 goto err4; 383 goto err4;
384 } 384 }
385 385
386 ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component,
387 s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
388 if (ret)
389 goto err5;
390
391 ret = samsung_asoc_dma_platform_register(&pdev->dev, 386 ret = samsung_asoc_dma_platform_register(&pdev->dev,
392 ac97_pdata->dma_filter, 387 ac97_pdata->dma_filter,
393 NULL, NULL); 388 NULL, NULL);
@@ -396,6 +391,11 @@ static int s3c_ac97_probe(struct platform_device *pdev)
396 goto err5; 391 goto err5;
397 } 392 }
398 393
394 ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component,
395 s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
396 if (ret)
397 goto err5;
398
399 return 0; 399 return 0;
400err5: 400err5:
401 free_irq(irq_res->start, NULL); 401 free_irq(irq_res->start, NULL);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 7e32cf4581f8..7825bff45ae3 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1237,14 +1237,14 @@ static int samsung_i2s_probe(struct platform_device *pdev)
1237 dev_err(&pdev->dev, "Unable to get drvdata\n"); 1237 dev_err(&pdev->dev, "Unable to get drvdata\n");
1238 return -EFAULT; 1238 return -EFAULT;
1239 } 1239 }
1240 ret = devm_snd_soc_register_component(&sec_dai->pdev->dev, 1240 ret = samsung_asoc_dma_platform_register(&pdev->dev,
1241 &samsung_i2s_component, 1241 sec_dai->filter, "tx-sec", NULL);
1242 &sec_dai->i2s_dai_drv, 1);
1243 if (ret != 0) 1242 if (ret != 0)
1244 return ret; 1243 return ret;
1245 1244
1246 return samsung_asoc_dma_platform_register(&pdev->dev, 1245 return devm_snd_soc_register_component(&sec_dai->pdev->dev,
1247 sec_dai->filter, "tx-sec", NULL); 1246 &samsung_i2s_component,
1247 &sec_dai->i2s_dai_drv, 1);
1248 } 1248 }
1249 1249
1250 pri_dai = i2s_alloc_dai(pdev, false); 1250 pri_dai = i2s_alloc_dai(pdev, false);
@@ -1314,6 +1314,11 @@ static int samsung_i2s_probe(struct platform_device *pdev)
1314 if (quirks & QUIRK_PRI_6CHAN) 1314 if (quirks & QUIRK_PRI_6CHAN)
1315 pri_dai->i2s_dai_drv.playback.channels_max = 6; 1315 pri_dai->i2s_dai_drv.playback.channels_max = 6;
1316 1316
1317 ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
1318 NULL, NULL);
1319 if (ret < 0)
1320 goto err_disable_clk;
1321
1317 if (quirks & QUIRK_SEC_DAI) { 1322 if (quirks & QUIRK_SEC_DAI) {
1318 sec_dai = i2s_alloc_dai(pdev, true); 1323 sec_dai = i2s_alloc_dai(pdev, true);
1319 if (!sec_dai) { 1324 if (!sec_dai) {
@@ -1353,10 +1358,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
1353 if (ret < 0) 1358 if (ret < 0)
1354 goto err_free_dai; 1359 goto err_free_dai;
1355 1360
1356 ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
1357 NULL, NULL);
1358 if (ret < 0)
1359 goto err_free_dai;
1360 1361
1361 pm_runtime_enable(&pdev->dev); 1362 pm_runtime_enable(&pdev->dev);
1362 1363
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 43e367a9acc3..c484985812ed 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -565,24 +565,25 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
565 pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id]; 565 pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
566 pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id]; 566 pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
567 567
568 ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
569 NULL, NULL);
570 if (ret) {
571 dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
572 goto err5;
573 }
574
568 pm_runtime_enable(&pdev->dev); 575 pm_runtime_enable(&pdev->dev);
569 576
570 ret = devm_snd_soc_register_component(&pdev->dev, &s3c_pcm_component, 577 ret = devm_snd_soc_register_component(&pdev->dev, &s3c_pcm_component,
571 &s3c_pcm_dai[pdev->id], 1); 578 &s3c_pcm_dai[pdev->id], 1);
572 if (ret != 0) { 579 if (ret != 0) {
573 dev_err(&pdev->dev, "failed to get register DAI: %d\n", ret); 580 dev_err(&pdev->dev, "failed to get register DAI: %d\n", ret);
574 goto err5; 581 goto err6;
575 }
576
577 ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
578 NULL, NULL);
579 if (ret) {
580 dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
581 goto err5;
582 } 582 }
583 583
584 return 0; 584 return 0;
585 585err6:
586 pm_runtime_disable(&pdev->dev);
586err5: 587err5:
587 clk_disable_unprepare(pcm->pclk); 588 clk_disable_unprepare(pcm->pclk);
588err4: 589err4:
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 3e89fbc0c51d..0a4718207e6e 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -168,19 +168,19 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
168 s3c2412_i2s_pcm_stereo_in.addr = res->start + S3C2412_IISRXD; 168 s3c2412_i2s_pcm_stereo_in.addr = res->start + S3C2412_IISRXD;
169 s3c2412_i2s_pcm_stereo_in.filter_data = pdata->dma_capture; 169 s3c2412_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
170 170
171 ret = s3c_i2sv2_register_component(&pdev->dev, -1, 171 ret = samsung_asoc_dma_platform_register(&pdev->dev,
172 &s3c2412_i2s_component, 172 pdata->dma_filter,
173 &s3c2412_i2s_dai); 173 NULL, NULL);
174 if (ret) { 174 if (ret) {
175 pr_err("failed to register the dai\n"); 175 pr_err("failed to register the DMA: %d\n", ret);
176 return ret; 176 return ret;
177 } 177 }
178 178
179 ret = samsung_asoc_dma_platform_register(&pdev->dev, 179 ret = s3c_i2sv2_register_component(&pdev->dev, -1,
180 pdata->dma_filter, 180 &s3c2412_i2s_component,
181 NULL, NULL); 181 &s3c2412_i2s_dai);
182 if (ret) 182 if (ret)
183 pr_err("failed to register the DMA: %d\n", ret); 183 pr_err("failed to register the dai\n");
184 184
185 return ret; 185 return ret;
186} 186}
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index c78a936a3099..9052f6a7073e 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -474,18 +474,18 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
474 s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO; 474 s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO;
475 s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture; 475 s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
476 476
477 ret = devm_snd_soc_register_component(&pdev->dev, 477 ret = samsung_asoc_dma_platform_register(&pdev->dev,
478 &s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1); 478 pdata->dma_filter,
479 NULL, NULL);
479 if (ret) { 480 if (ret) {
480 pr_err("failed to register the dai\n"); 481 pr_err("failed to register the dma: %d\n", ret);
481 return ret; 482 return ret;
482 } 483 }
483 484
484 ret = samsung_asoc_dma_platform_register(&pdev->dev, 485 ret = devm_snd_soc_register_component(&pdev->dev,
485 pdata->dma_filter, 486 &s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1);
486 NULL, NULL);
487 if (ret) 487 if (ret)
488 pr_err("failed to register the dma: %d\n", ret); 488 pr_err("failed to register the dai\n");
489 489
490 return ret; 490 return ret;
491} 491}
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 26c1fbed4d35..779504f54bc0 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -416,15 +416,6 @@ static int spdif_probe(struct platform_device *pdev)
416 goto err3; 416 goto err3;
417 } 417 }
418 418
419 dev_set_drvdata(&pdev->dev, spdif);
420
421 ret = devm_snd_soc_register_component(&pdev->dev,
422 &samsung_spdif_component, &samsung_spdif_dai, 1);
423 if (ret != 0) {
424 dev_err(&pdev->dev, "fail to register dai\n");
425 goto err4;
426 }
427
428 spdif_stereo_out.addr_width = 2; 419 spdif_stereo_out.addr_width = 2;
429 spdif_stereo_out.addr = mem_res->start + DATA_OUTBUF; 420 spdif_stereo_out.addr = mem_res->start + DATA_OUTBUF;
430 filter = NULL; 421 filter = NULL;
@@ -432,7 +423,6 @@ static int spdif_probe(struct platform_device *pdev)
432 spdif_stereo_out.filter_data = spdif_pdata->dma_playback; 423 spdif_stereo_out.filter_data = spdif_pdata->dma_playback;
433 filter = spdif_pdata->dma_filter; 424 filter = spdif_pdata->dma_filter;
434 } 425 }
435
436 spdif->dma_playback = &spdif_stereo_out; 426 spdif->dma_playback = &spdif_stereo_out;
437 427
438 ret = samsung_asoc_dma_platform_register(&pdev->dev, filter, 428 ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
@@ -442,6 +432,15 @@ static int spdif_probe(struct platform_device *pdev)
442 goto err4; 432 goto err4;
443 } 433 }
444 434
435 dev_set_drvdata(&pdev->dev, spdif);
436
437 ret = devm_snd_soc_register_component(&pdev->dev,
438 &samsung_spdif_component, &samsung_spdif_dai, 1);
439 if (ret != 0) {
440 dev_err(&pdev->dev, "fail to register dai\n");
441 goto err4;
442 }
443
445 return 0; 444 return 0;
446err4: 445err4:
447 iounmap(spdif->regs); 446 iounmap(spdif->regs);
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 1bc8ebc2528e..ad54d4cf58ad 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -614,7 +614,11 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
614 iec958->status[3] = ucontrol->value.iec958.status[3]; 614 iec958->status[3] = ucontrol->value.iec958.status[3];
615 mutex_unlock(&player->ctrl_lock); 615 mutex_unlock(&player->ctrl_lock);
616 616
617 uni_player_set_channel_status(player, NULL); 617 if (player->substream && player->substream->runtime)
618 uni_player_set_channel_status(player,
619 player->substream->runtime);
620 else
621 uni_player_set_channel_status(player, NULL);
618 622
619 return 0; 623 return 0;
620} 624}
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index e047ec06d538..56ed9472e89f 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -765,11 +765,11 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
765 765
766 card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); 766 card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
767 if (!card) 767 if (!card)
768 return NULL; 768 return ERR_PTR(-ENOMEM);
769 769
770 card->dai_link = sun4i_codec_create_link(dev, &card->num_links); 770 card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
771 if (!card->dai_link) 771 if (!card->dai_link)
772 return NULL; 772 return ERR_PTR(-ENOMEM);
773 773
774 card->dev = dev; 774 card->dev = dev;
775 card->name = "sun4i-codec"; 775 card->name = "sun4i-codec";
@@ -829,12 +829,6 @@ static int sun4i_codec_probe(struct platform_device *pdev)
829 return PTR_ERR(scodec->clk_module); 829 return PTR_ERR(scodec->clk_module);
830 } 830 }
831 831
832 /* Enable the bus clock */
833 if (clk_prepare_enable(scodec->clk_apb)) {
834 dev_err(&pdev->dev, "Failed to enable the APB clock\n");
835 return -EINVAL;
836 }
837
838 scodec->gpio_pa = devm_gpiod_get_optional(&pdev->dev, "allwinner,pa", 832 scodec->gpio_pa = devm_gpiod_get_optional(&pdev->dev, "allwinner,pa",
839 GPIOD_OUT_LOW); 833 GPIOD_OUT_LOW);
840 if (IS_ERR(scodec->gpio_pa)) { 834 if (IS_ERR(scodec->gpio_pa)) {
@@ -844,6 +838,12 @@ static int sun4i_codec_probe(struct platform_device *pdev)
844 return ret; 838 return ret;
845 } 839 }
846 840
841 /* Enable the bus clock */
842 if (clk_prepare_enable(scodec->clk_apb)) {
843 dev_err(&pdev->dev, "Failed to enable the APB clock\n");
844 return -EINVAL;
845 }
846
847 /* DMA configuration for TX FIFO */ 847 /* DMA configuration for TX FIFO */
848 scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA; 848 scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA;
849 scodec->playback_dma_data.maxburst = 4; 849 scodec->playback_dma_data.maxburst = 4;
@@ -876,7 +876,8 @@ static int sun4i_codec_probe(struct platform_device *pdev)
876 } 876 }
877 877
878 card = sun4i_codec_create_card(&pdev->dev); 878 card = sun4i_codec_create_card(&pdev->dev);
879 if (!card) { 879 if (IS_ERR(card)) {
880 ret = PTR_ERR(card);
880 dev_err(&pdev->dev, "Failed to create our card\n"); 881 dev_err(&pdev->dev, "Failed to create our card\n");
881 goto err_unregister_codec; 882 goto err_unregister_codec;
882 } 883 }
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 4ffff7be9299..a53fef0c673b 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1337,8 +1337,8 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
1337 } 1337 }
1338 1338
1339 if (first) { 1339 if (first) {
1340 ui_browser__printf(&browser->b, "%c", folded_sign); 1340 ui_browser__printf(&browser->b, "%c ", folded_sign);
1341 width--; 1341 width -= 2;
1342 first = false; 1342 first = false;
1343 } else { 1343 } else {
1344 ui_browser__printf(&browser->b, " "); 1344 ui_browser__printf(&browser->b, " ");
@@ -1361,8 +1361,10 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
1361 width -= hpp.buf - s; 1361 width -= hpp.buf - s;
1362 } 1362 }
1363 1363
1364 ui_browser__write_nstring(&browser->b, "", hierarchy_indent); 1364 if (!first) {
1365 width -= hierarchy_indent; 1365 ui_browser__write_nstring(&browser->b, "", hierarchy_indent);
1366 width -= hierarchy_indent;
1367 }
1366 1368
1367 if (column >= browser->b.horiz_scroll) { 1369 if (column >= browser->b.horiz_scroll) {
1368 char s[2048]; 1370 char s[2048];
@@ -1381,7 +1383,13 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
1381 } 1383 }
1382 1384
1383 perf_hpp_list__for_each_format(entry->hpp_list, fmt) { 1385 perf_hpp_list__for_each_format(entry->hpp_list, fmt) {
1384 ui_browser__write_nstring(&browser->b, "", 2); 1386 if (first) {
1387 ui_browser__printf(&browser->b, "%c ", folded_sign);
1388 first = false;
1389 } else {
1390 ui_browser__write_nstring(&browser->b, "", 2);
1391 }
1392
1385 width -= 2; 1393 width -= 2;
1386 1394
1387 /* 1395 /*
@@ -1555,10 +1563,11 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
1555 int indent = hists->nr_hpp_node - 2; 1563 int indent = hists->nr_hpp_node - 2;
1556 bool first_node, first_col; 1564 bool first_node, first_col;
1557 1565
1558 ret = scnprintf(buf, size, " "); 1566 ret = scnprintf(buf, size, " ");
1559 if (advance_hpp_check(&dummy_hpp, ret)) 1567 if (advance_hpp_check(&dummy_hpp, ret))
1560 return ret; 1568 return ret;
1561 1569
1570 first_node = true;
1562 /* the first hpp_list_node is for overhead columns */ 1571 /* the first hpp_list_node is for overhead columns */
1563 fmt_node = list_first_entry(&hists->hpp_formats, 1572 fmt_node = list_first_entry(&hists->hpp_formats,
1564 struct perf_hpp_list_node, list); 1573 struct perf_hpp_list_node, list);
@@ -1573,12 +1582,16 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
1573 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " "); 1582 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " ");
1574 if (advance_hpp_check(&dummy_hpp, ret)) 1583 if (advance_hpp_check(&dummy_hpp, ret))
1575 break; 1584 break;
1585
1586 first_node = false;
1576 } 1587 }
1577 1588
1578 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", 1589 if (!first_node) {
1579 indent * HIERARCHY_INDENT, ""); 1590 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
1580 if (advance_hpp_check(&dummy_hpp, ret)) 1591 indent * HIERARCHY_INDENT, "");
1581 return ret; 1592 if (advance_hpp_check(&dummy_hpp, ret))
1593 return ret;
1594 }
1582 1595
1583 first_node = true; 1596 first_node = true;
1584 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) { 1597 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
@@ -2076,8 +2089,21 @@ void hist_browser__init(struct hist_browser *browser,
2076 browser->b.use_navkeypressed = true; 2089 browser->b.use_navkeypressed = true;
2077 browser->show_headers = symbol_conf.show_hist_headers; 2090 browser->show_headers = symbol_conf.show_hist_headers;
2078 2091
2079 hists__for_each_format(hists, fmt) 2092 if (symbol_conf.report_hierarchy) {
2093 struct perf_hpp_list_node *fmt_node;
2094
2095 /* count overhead columns (in the first node) */
2096 fmt_node = list_first_entry(&hists->hpp_formats,
2097 struct perf_hpp_list_node, list);
2098 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
2099 ++browser->b.columns;
2100
2101 /* add a single column for whole hierarchy sort keys*/
2080 ++browser->b.columns; 2102 ++browser->b.columns;
2103 } else {
2104 hists__for_each_format(hists, fmt)
2105 ++browser->b.columns;
2106 }
2081 2107
2082 hists__reset_column_width(hists); 2108 hists__reset_column_width(hists);
2083} 2109}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index b02992efb513..a69f027368ef 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1600,18 +1600,18 @@ static void hists__hierarchy_output_resort(struct hists *hists,
1600 if (prog) 1600 if (prog)
1601 ui_progress__update(prog, 1); 1601 ui_progress__update(prog, 1);
1602 1602
1603 hists->nr_entries++;
1604 if (!he->filtered) {
1605 hists->nr_non_filtered_entries++;
1606 hists__calc_col_len(hists, he);
1607 }
1608
1603 if (!he->leaf) { 1609 if (!he->leaf) {
1604 hists__hierarchy_output_resort(hists, prog, 1610 hists__hierarchy_output_resort(hists, prog,
1605 &he->hroot_in, 1611 &he->hroot_in,
1606 &he->hroot_out, 1612 &he->hroot_out,
1607 min_callchain_hits, 1613 min_callchain_hits,
1608 use_callchain); 1614 use_callchain);
1609 hists->nr_entries++;
1610 if (!he->filtered) {
1611 hists->nr_non_filtered_entries++;
1612 hists__calc_col_len(hists, he);
1613 }
1614
1615 continue; 1615 continue;
1616 } 1616 }
1617 1617
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index b4bf76971dc9..1eef0aed6423 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -296,7 +296,7 @@ int cmd_freq_set(int argc, char **argv)
296 struct cpufreq_affected_cpus *cpus; 296 struct cpufreq_affected_cpus *cpus;
297 297
298 if (!bitmask_isbitset(cpus_chosen, cpu) || 298 if (!bitmask_isbitset(cpus_chosen, cpu) ||
299 cpupower_is_cpu_online(cpu)) 299 cpupower_is_cpu_online(cpu) != 1)
300 continue; 300 continue;
301 301
302 cpus = cpufreq_get_related_cpus(cpu); 302 cpus = cpufreq_get_related_cpus(cpu);
@@ -316,10 +316,7 @@ int cmd_freq_set(int argc, char **argv)
316 cpu <= bitmask_last(cpus_chosen); cpu++) { 316 cpu <= bitmask_last(cpus_chosen); cpu++) {
317 317
318 if (!bitmask_isbitset(cpus_chosen, cpu) || 318 if (!bitmask_isbitset(cpus_chosen, cpu) ||
319 cpupower_is_cpu_online(cpu)) 319 cpupower_is_cpu_online(cpu) != 1)
320 continue;
321
322 if (cpupower_is_cpu_online(cpu) != 1)
323 continue; 320 continue;
324 321
325 printf(_("Setting cpu: %d\n"), cpu); 322 printf(_("Setting cpu: %d\n"), cpu);
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index 877a8a4721b6..c012edbdb13b 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -3,8 +3,8 @@ all:
3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring 3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
4 4
5CFLAGS += -Wall 5CFLAGS += -Wall
6CFLAGS += -pthread -O2 -ggdb 6CFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
7LDFLAGS += -pthread -O2 -ggdb 7LDFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
8 8
9main.o: main.c main.h 9main.o: main.c main.h
10ring.o: ring.c main.h 10ring.o: ring.c main.h
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
index 147abb452a6c..f31353fac541 100644
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -96,7 +96,13 @@ void set_affinity(const char *arg)
96 assert(!ret); 96 assert(!ret);
97} 97}
98 98
99static void run_guest(void) 99void poll_used(void)
100{
101 while (used_empty())
102 busy_wait();
103}
104
105static void __attribute__((__flatten__)) run_guest(void)
100{ 106{
101 int completed_before; 107 int completed_before;
102 int completed = 0; 108 int completed = 0;
@@ -141,7 +147,7 @@ static void run_guest(void)
141 assert(completed <= bufs); 147 assert(completed <= bufs);
142 assert(started <= bufs); 148 assert(started <= bufs);
143 if (do_sleep) { 149 if (do_sleep) {
144 if (enable_call()) 150 if (used_empty() && enable_call())
145 wait_for_call(); 151 wait_for_call();
146 } else { 152 } else {
147 poll_used(); 153 poll_used();
@@ -149,7 +155,13 @@ static void run_guest(void)
149 } 155 }
150} 156}
151 157
152static void run_host(void) 158void poll_avail(void)
159{
160 while (avail_empty())
161 busy_wait();
162}
163
164static void __attribute__((__flatten__)) run_host(void)
153{ 165{
154 int completed_before; 166 int completed_before;
155 int completed = 0; 167 int completed = 0;
@@ -160,7 +172,7 @@ static void run_host(void)
160 172
161 for (;;) { 173 for (;;) {
162 if (do_sleep) { 174 if (do_sleep) {
163 if (enable_kick()) 175 if (avail_empty() && enable_kick())
164 wait_for_kick(); 176 wait_for_kick();
165 } else { 177 } else {
166 poll_avail(); 178 poll_avail();
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 16917acb0ade..34e63cc4c572 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -56,15 +56,15 @@ void alloc_ring(void);
56int add_inbuf(unsigned, void *, void *); 56int add_inbuf(unsigned, void *, void *);
57void *get_buf(unsigned *, void **); 57void *get_buf(unsigned *, void **);
58void disable_call(); 58void disable_call();
59bool used_empty();
59bool enable_call(); 60bool enable_call();
60void kick_available(); 61void kick_available();
61void poll_used();
62/* host side */ 62/* host side */
63void disable_kick(); 63void disable_kick();
64bool avail_empty();
64bool enable_kick(); 65bool enable_kick();
65bool use_buf(unsigned *, void **); 66bool use_buf(unsigned *, void **);
66void call_used(); 67void call_used();
67void poll_avail();
68 68
69/* implemented by main */ 69/* implemented by main */
70extern bool do_sleep; 70extern bool do_sleep;
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
index eda2f4824130..b8d1c1daac7c 100644
--- a/tools/virtio/ringtest/noring.c
+++ b/tools/virtio/ringtest/noring.c
@@ -24,8 +24,9 @@ void *get_buf(unsigned *lenp, void **bufp)
24 return "Buffer"; 24 return "Buffer";
25} 25}
26 26
27void poll_used(void) 27bool used_empty()
28{ 28{
29 return false;
29} 30}
30 31
31void disable_call() 32void disable_call()
@@ -54,8 +55,9 @@ bool enable_kick()
54 assert(0); 55 assert(0);
55} 56}
56 57
57void poll_avail(void) 58bool avail_empty()
58{ 59{
60 return false;
59} 61}
60 62
61bool use_buf(unsigned *lenp, void **bufp) 63bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index bd2ad1d3b7a9..635b07b4fdd3 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -133,18 +133,9 @@ void *get_buf(unsigned *lenp, void **bufp)
133 return datap; 133 return datap;
134} 134}
135 135
136void poll_used(void) 136bool used_empty()
137{ 137{
138 void *b; 138 return (tailcnt == headcnt || __ptr_ring_full(&array));
139
140 do {
141 if (tailcnt == headcnt || __ptr_ring_full(&array)) {
142 b = NULL;
143 barrier();
144 } else {
145 b = "Buffer\n";
146 }
147 } while (!b);
148} 139}
149 140
150void disable_call() 141void disable_call()
@@ -173,14 +164,9 @@ bool enable_kick()
173 assert(0); 164 assert(0);
174} 165}
175 166
176void poll_avail(void) 167bool avail_empty()
177{ 168{
178 void *b; 169 return !__ptr_ring_peek(&array);
179
180 do {
181 barrier();
182 b = __ptr_ring_peek(&array);
183 } while (!b);
184} 170}
185 171
186bool use_buf(unsigned *lenp, void **bufp) 172bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
index c25c8d248b6b..747c5dd47be8 100644
--- a/tools/virtio/ringtest/ring.c
+++ b/tools/virtio/ringtest/ring.c
@@ -163,12 +163,11 @@ void *get_buf(unsigned *lenp, void **bufp)
163 return datap; 163 return datap;
164} 164}
165 165
166void poll_used(void) 166bool used_empty()
167{ 167{
168 unsigned head = (ring_size - 1) & guest.last_used_idx; 168 unsigned head = (ring_size - 1) & guest.last_used_idx;
169 169
170 while (ring[head].flags & DESC_HW) 170 return (ring[head].flags & DESC_HW);
171 busy_wait();
172} 171}
173 172
174void disable_call() 173void disable_call()
@@ -180,13 +179,11 @@ void disable_call()
180 179
181bool enable_call() 180bool enable_call()
182{ 181{
183 unsigned head = (ring_size - 1) & guest.last_used_idx;
184
185 event->call_index = guest.last_used_idx; 182 event->call_index = guest.last_used_idx;
186 /* Flush call index write */ 183 /* Flush call index write */
187 /* Barrier D (for pairing) */ 184 /* Barrier D (for pairing) */
188 smp_mb(); 185 smp_mb();
189 return ring[head].flags & DESC_HW; 186 return used_empty();
190} 187}
191 188
192void kick_available(void) 189void kick_available(void)
@@ -213,20 +210,17 @@ void disable_kick()
213 210
214bool enable_kick() 211bool enable_kick()
215{ 212{
216 unsigned head = (ring_size - 1) & host.used_idx;
217
218 event->kick_index = host.used_idx; 213 event->kick_index = host.used_idx;
219 /* Barrier C (for pairing) */ 214 /* Barrier C (for pairing) */
220 smp_mb(); 215 smp_mb();
221 return !(ring[head].flags & DESC_HW); 216 return avail_empty();
222} 217}
223 218
224void poll_avail(void) 219bool avail_empty()
225{ 220{
226 unsigned head = (ring_size - 1) & host.used_idx; 221 unsigned head = (ring_size - 1) & host.used_idx;
227 222
228 while (!(ring[head].flags & DESC_HW)) 223 return !(ring[head].flags & DESC_HW);
229 busy_wait();
230} 224}
231 225
232bool use_buf(unsigned *lenp, void **bufp) 226bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
index 761866212aac..bbc3043b2fb1 100644
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -194,24 +194,16 @@ void *get_buf(unsigned *lenp, void **bufp)
194 return datap; 194 return datap;
195} 195}
196 196
197void poll_used(void) 197bool used_empty()
198{ 198{
199 unsigned short last_used_idx = guest.last_used_idx;
199#ifdef RING_POLL 200#ifdef RING_POLL
200 unsigned head = (ring_size - 1) & guest.last_used_idx; 201 unsigned short head = last_used_idx & (ring_size - 1);
202 unsigned index = ring.used->ring[head].id;
201 203
202 for (;;) { 204 return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
203 unsigned index = ring.used->ring[head].id;
204
205 if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
206 busy_wait();
207 else
208 break;
209 }
210#else 205#else
211 unsigned head = guest.last_used_idx; 206 return ring.used->idx == last_used_idx;
212
213 while (ring.used->idx == head)
214 busy_wait();
215#endif 207#endif
216} 208}
217 209
@@ -224,22 +216,11 @@ void disable_call()
224 216
225bool enable_call() 217bool enable_call()
226{ 218{
227 unsigned short last_used_idx; 219 vring_used_event(&ring) = guest.last_used_idx;
228
229 vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
230 /* Flush call index write */ 220 /* Flush call index write */
231 /* Barrier D (for pairing) */ 221 /* Barrier D (for pairing) */
232 smp_mb(); 222 smp_mb();
233#ifdef RING_POLL 223 return used_empty();
234 {
235 unsigned short head = last_used_idx & (ring_size - 1);
236 unsigned index = ring.used->ring[head].id;
237
238 return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
239 }
240#else
241 return ring.used->idx == last_used_idx;
242#endif
243} 224}
244 225
245void kick_available(void) 226void kick_available(void)
@@ -266,36 +247,21 @@ void disable_kick()
266 247
267bool enable_kick() 248bool enable_kick()
268{ 249{
269 unsigned head = host.used_idx; 250 vring_avail_event(&ring) = host.used_idx;
270
271 vring_avail_event(&ring) = head;
272 /* Barrier C (for pairing) */ 251 /* Barrier C (for pairing) */
273 smp_mb(); 252 smp_mb();
274#ifdef RING_POLL 253 return avail_empty();
275 {
276 unsigned index = ring.avail->ring[head & (ring_size - 1)];
277
278 return (index ^ head ^ 0x8000) & ~(ring_size - 1);
279 }
280#else
281 return head == ring.avail->idx;
282#endif
283} 254}
284 255
285void poll_avail(void) 256bool avail_empty()
286{ 257{
287 unsigned head = host.used_idx; 258 unsigned head = host.used_idx;
288#ifdef RING_POLL 259#ifdef RING_POLL
289 for (;;) { 260 unsigned index = ring.avail->ring[head & (ring_size - 1)];
290 unsigned index = ring.avail->ring[head & (ring_size - 1)]; 261
291 if ((index ^ head ^ 0x8000) & ~(ring_size - 1)) 262 return ((index ^ head ^ 0x8000) & ~(ring_size - 1));
292 busy_wait();
293 else
294 break;
295 }
296#else 263#else
297 while (ring.avail->idx == head) 264 return head == ring.avail->idx;
298 busy_wait();
299#endif 265#endif
300} 266}
301 267
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index e18b30ddcdce..ebe1b9fa3c4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
453 return container_of(dev, struct vgic_io_device, dev); 453 return container_of(dev, struct vgic_io_device, dev);
454} 454}
455 455
456static bool check_region(const struct vgic_register_region *region, 456static bool check_region(const struct kvm *kvm,
457 const struct vgic_register_region *region,
457 gpa_t addr, int len) 458 gpa_t addr, int len)
458{ 459{
459 if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1) 460 int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
460 return true; 461
461 if ((region->access_flags & VGIC_ACCESS_32bit) && 462 switch (len) {
462 len == sizeof(u32) && !(addr & 3)) 463 case sizeof(u8):
463 return true; 464 flags = VGIC_ACCESS_8bit;
464 if ((region->access_flags & VGIC_ACCESS_64bit) && 465 break;
465 len == sizeof(u64) && !(addr & 7)) 466 case sizeof(u32):
466 return true; 467 flags = VGIC_ACCESS_32bit;
468 break;
469 case sizeof(u64):
470 flags = VGIC_ACCESS_64bit;
471 break;
472 default:
473 return false;
474 }
475
476 if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
477 if (!region->bits_per_irq)
478 return true;
479
480 /* Do we access a non-allocated IRQ? */
481 return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
482 }
467 483
468 return false; 484 return false;
469} 485}
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
477 493
478 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, 494 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
479 addr - iodev->base_addr); 495 addr - iodev->base_addr);
480 if (!region || !check_region(region, addr, len)) { 496 if (!region || !check_region(vcpu->kvm, region, addr, len)) {
481 memset(val, 0, len); 497 memset(val, 0, len);
482 return 0; 498 return 0;
483 } 499 }
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
510 526
511 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, 527 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
512 addr - iodev->base_addr); 528 addr - iodev->base_addr);
513 if (!region) 529 if (!region || !check_region(vcpu->kvm, region, addr, len))
514 return 0;
515
516 if (!check_region(region, addr, len))
517 return 0; 530 return 0;
518 531
519 switch (iodev->iodev_type) { 532 switch (iodev->iodev_type) {
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 4c34d39d44a0..84961b4e4422 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
50#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1) 50#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
51 51
52/* 52/*
53 * (addr & mask) gives us the byte offset for the INT ID, so we want to 53 * (addr & mask) gives us the _byte_ offset for the INT ID.
54 * divide this with 'bytes per irq' to get the INT ID, which is given 54 * We multiply this by 8 the get the _bit_ offset, then divide this by
55 * by '(bits) / 8'. But we do this with fixed-point-arithmetic and 55 * the number of bits to learn the actual INT ID.
56 * take advantage of the fact that division by a fraction equals 56 * But instead of a division (which requires a "long long div" implementation),
57 * multiplication with the inverted fraction, and scale up both the 57 * we shift by the binary logarithm of <bits>.
58 * numerator and denominator with 8 to support at most 64 bits per IRQ: 58 * This assumes that <bits> is a power of two.
59 */ 59 */
60#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \ 60#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
61 64 / (bits) / 8) 61 8 >> ilog2(bits))
62 62
63/* 63/*
64 * Some VGIC registers store per-IRQ information, with a different number 64 * Some VGIC registers store per-IRQ information, with a different number
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 2893d5ba523a..6440b56ec90e 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -273,6 +273,18 @@ retry:
273 * no more work for us to do. 273 * no more work for us to do.
274 */ 274 */
275 spin_unlock(&irq->irq_lock); 275 spin_unlock(&irq->irq_lock);
276
277 /*
278 * We have to kick the VCPU here, because we could be
279 * queueing an edge-triggered interrupt for which we
280 * get no EOI maintenance interrupt. In that case,
281 * while the IRQ is already on the VCPU's AP list, the
282 * VCPU could have EOI'ed the original interrupt and
283 * won't see this one until it exits for some other
284 * reason.
285 */
286 if (vcpu)
287 kvm_vcpu_kick(vcpu);
276 return false; 288 return false;
277 } 289 }
278 290
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f397e9b20370..a29786dd9522 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,6 +42,7 @@
42 42
43#ifdef CONFIG_HAVE_KVM_IRQFD 43#ifdef CONFIG_HAVE_KVM_IRQFD
44 44
45static struct workqueue_struct *irqfd_cleanup_wq;
45 46
46static void 47static void
47irqfd_inject(struct work_struct *work) 48irqfd_inject(struct work_struct *work)
@@ -167,7 +168,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
167 168
168 list_del_init(&irqfd->list); 169 list_del_init(&irqfd->list);
169 170
170 schedule_work(&irqfd->shutdown); 171 queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
171} 172}
172 173
173int __attribute__((weak)) kvm_arch_set_irq_inatomic( 174int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -554,7 +555,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
554 * so that we guarantee there will not be any more interrupts on this 555 * so that we guarantee there will not be any more interrupts on this
555 * gsi once this deassign function returns. 556 * gsi once this deassign function returns.
556 */ 557 */
557 flush_work(&irqfd->shutdown); 558 flush_workqueue(irqfd_cleanup_wq);
558 559
559 return 0; 560 return 0;
560} 561}
@@ -591,7 +592,7 @@ kvm_irqfd_release(struct kvm *kvm)
591 * Block until we know all outstanding shutdown jobs have completed 592 * Block until we know all outstanding shutdown jobs have completed
592 * since we do not take a kvm* reference. 593 * since we do not take a kvm* reference.
593 */ 594 */
594 flush_work(&irqfd->shutdown); 595 flush_workqueue(irqfd_cleanup_wq);
595 596
596} 597}
597 598
@@ -621,8 +622,23 @@ void kvm_irq_routing_update(struct kvm *kvm)
621 spin_unlock_irq(&kvm->irqfds.lock); 622 spin_unlock_irq(&kvm->irqfds.lock);
622} 623}
623 624
625/*
626 * create a host-wide workqueue for issuing deferred shutdown requests
627 * aggregated from all vm* instances. We need our own isolated
628 * queue to ease flushing work items when a VM exits.
629 */
630int kvm_irqfd_init(void)
631{
632 irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
633 if (!irqfd_cleanup_wq)
634 return -ENOMEM;
635
636 return 0;
637}
638
624void kvm_irqfd_exit(void) 639void kvm_irqfd_exit(void)
625{ 640{
641 destroy_workqueue(irqfd_cleanup_wq);
626} 642}
627#endif 643#endif
628 644
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2907b7b78654..5c360347a1e9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3844,7 +3844,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3844 * kvm_arch_init makes sure there's at most one caller 3844 * kvm_arch_init makes sure there's at most one caller
3845 * for architectures that support multiple implementations, 3845 * for architectures that support multiple implementations,
3846 * like intel and amd on x86. 3846 * like intel and amd on x86.
3847 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
3848 * conflicts in case kvm is already setup for another implementation.
3847 */ 3849 */
3850 r = kvm_irqfd_init();
3851 if (r)
3852 goto out_irqfd;
3848 3853
3849 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3854 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
3850 r = -ENOMEM; 3855 r = -ENOMEM;
@@ -3926,6 +3931,7 @@ out_free_0a:
3926 free_cpumask_var(cpus_hardware_enabled); 3931 free_cpumask_var(cpus_hardware_enabled);
3927out_free_0: 3932out_free_0:
3928 kvm_irqfd_exit(); 3933 kvm_irqfd_exit();
3934out_irqfd:
3929 kvm_arch_exit(); 3935 kvm_arch_exit();
3930out_fail: 3936out_fail:
3931 return r; 3937 return r;