aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--CREDITS4
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt62
-rw-r--r--Documentation/admin-guide/security-bugs.rst21
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/core-api/xarray.rst52
-rw-r--r--Documentation/devicetree/bindings/net/can/holt_hi311x.txt2
-rw-r--r--Documentation/devicetree/bindings/net/can/rcar_can.txt28
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt31
-rw-r--r--Documentation/devicetree/bindings/spi/spi-uniphier.txt14
-rw-r--r--Documentation/input/event-codes.rst11
-rw-r--r--Documentation/media/uapi/v4l/dev-meta.rst2
-rw-r--r--Documentation/media/uapi/v4l/vidioc-g-fmt.rst5
-rw-r--r--Documentation/networking/rxrpc.txt17
-rw-r--r--Documentation/userspace-api/spec_ctrl.rst9
-rw-r--r--Documentation/x86/boot.txt32
-rw-r--r--MAINTAINERS174
-rw-r--r--Makefile4
-rw-r--r--arch/arm/boot/dts/am3517-evm.dts2
-rw-r--r--arch/arm/boot/dts/am3517-som.dtsi2
-rw-r--r--arch/arm/boot/dts/imx51-zii-rdu1.dts6
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi2
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts2
-rw-r--r--arch/arm/boot/dts/rk3288-veyron.dtsi6
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi2
-rw-r--r--arch/arm/kernel/ftrace.c17
-rw-r--r--arch/arm/mach-davinci/da830.c4
-rw-r--r--arch/arm/mach-davinci/da850.c4
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c40
-rw-r--r--arch/arm/mach-davinci/dm355.c32
-rw-r--r--arch/arm/mach-davinci/dm365.c37
-rw-r--r--arch/arm/mach-davinci/dm644x.c22
-rw-r--r--arch/arm/mach-davinci/dm646x.c12
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c3
-rw-r--r--arch/arm/mach-omap2/prm44xx.c2
-rw-r--r--arch/arm64/Kconfig25
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi2
-rw-r--r--arch/arm64/include/asm/ftrace.h13
-rw-r--r--arch/arm64/include/asm/sysreg.h4
-rw-r--r--arch/arm64/include/asm/tlbflush.h4
-rw-r--r--arch/arm64/kernel/cpu_errata.c20
-rw-r--r--arch/arm64/kernel/cpufeature.c2
-rw-r--r--arch/arm64/kernel/ftrace.c15
-rw-r--r--arch/arm64/net/bpf_jit_comp.c26
-rw-r--r--arch/ia64/include/asm/numa.h4
-rw-r--r--arch/ia64/kernel/acpi.c6
-rw-r--r--arch/ia64/mm/numa.c6
-rw-r--r--arch/microblaze/kernel/ftrace.c15
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig1
-rw-r--r--arch/mips/include/asm/syscall.h2
-rw-r--r--arch/mips/kernel/ftrace.c14
-rw-r--r--arch/mips/kernel/setup.c1
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/loongson64/loongson-3/numa.c12
-rw-r--r--arch/mips/ralink/mt7620.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c11
-rw-r--r--arch/nds32/kernel/ftrace.c18
-rw-r--r--arch/parisc/kernel/ftrace.c17
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c15
-rw-r--r--arch/powerpc/kvm/book3s_hv.c1
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c57
-rw-r--r--arch/riscv/Makefile15
-rw-r--r--arch/riscv/boot/.gitignore2
-rw-r--r--arch/riscv/boot/Makefile33
-rw-r--r--arch/riscv/boot/install.sh60
-rw-r--r--arch/riscv/include/asm/module.h1
-rw-r--r--arch/riscv/include/asm/uaccess.h4
-rw-r--r--arch/riscv/include/asm/unistd.h5
-rw-r--r--arch/riscv/include/uapi/asm/unistd.h (renamed from arch/riscv/include/uapi/asm/syscalls.h)26
-rw-r--r--arch/riscv/kernel/cpu.c9
-rw-r--r--arch/riscv/kernel/ftrace.c14
-rw-r--r--arch/riscv/kernel/head.S10
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kernel/ftrace.c13
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/mm/pgalloc.c1
-rw-r--r--arch/sh/kernel/ftrace.c16
-rw-r--r--arch/sparc/kernel/ftrace.c11
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c97
-rw-r--r--arch/x86/Kconfig12
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/boot/header.S6
-rw-r--r--arch/x86/events/core.c20
-rw-r--r--arch/x86/events/intel/core.c56
-rw-r--r--arch/x86/events/perf_event.h13
-rw-r--r--arch/x86/include/asm/fpu/internal.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/msr-index.h5
-rw-r--r--arch/x86/include/asm/nospec-branch.h26
-rw-r--r--arch/x86/include/asm/spec-ctrl.h20
-rw-r--r--arch/x86/include/asm/switch_to.h3
-rw-r--r--arch/x86/include/asm/thread_info.h20
-rw-r--r--arch/x86/include/asm/tlbflush.h8
-rw-r--r--arch/x86/include/asm/x86_init.h2
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h7
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/cpu/bugs.c525
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c19
-rw-r--r--arch/x86/kernel/fpu/signal.c4
-rw-r--r--arch/x86/kernel/ftrace.c15
-rw-r--r--arch/x86/kernel/head32.c1
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/process.c101
-rw-r--r--arch/x86/kernel/process.h39
-rw-r--r--arch/x86/kernel/process_32.c10
-rw-r--r--arch/x86/kernel/process_64.c10
-rw-r--r--arch/x86/kernel/setup.c17
-rw-r--r--arch/x86/kvm/lapic.c7
-rw-r--r--arch/x86/kvm/mmu.c27
-rw-r--r--arch/x86/kvm/svm.c44
-rw-r--r--arch/x86/kvm/vmx.c98
-rw-r--r--arch/x86/kvm/x86.c10
-rw-r--r--arch/x86/mm/tlb.c115
-rw-r--r--arch/x86/xen/enlighten.c78
-rw-r--r--arch/x86/xen/multicalls.c35
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--arch/x86/xen/spinlock.c7
-rw-r--r--arch/xtensa/kernel/asm-offsets.c16
-rw-r--r--arch/xtensa/kernel/process.c5
-rw-r--r--arch/xtensa/kernel/ptrace.c42
-rw-r--r--block/blk-merge.c2
-rw-r--r--drivers/acpi/acpi_platform.c1
-rw-r--r--drivers/acpi/acpica/exserial.c21
-rw-r--r--drivers/acpi/arm64/iort.c2
-rw-r--r--drivers/android/binder.c21
-rw-r--r--drivers/android/binder_alloc.c16
-rw-r--r--drivers/android/binder_alloc.h3
-rw-r--r--drivers/atm/firestream.c4
-rw-r--r--drivers/base/devres.c10
-rw-r--r--drivers/cpufreq/ti-cpufreq.c26
-rw-r--r--drivers/dma-buf/udmabuf.c1
-rw-r--r--drivers/dma/at_hdmac.c10
-rw-r--r--drivers/firmware/efi/efi.c36
-rw-r--r--drivers/fsi/Kconfig1
-rw-r--r--drivers/fsi/fsi-scom.c1
-rw-r--r--drivers/gnss/serial.c3
-rw-r--r--drivers/gnss/sirf.c3
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-mockup.c6
-rw-r--r--drivers/gpio/gpio-pxa.c4
-rw-r--r--drivers/gpio/gpiolib.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c39
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c16
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c25
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c30
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c21
-rw-r--r--drivers/gpu/drm/ast/ast_main.c3
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c39
-rw-r--r--drivers/gpu/drm/drm_auth.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h8
-rw-r--r--drivers/gpu/drm/i915/intel_display.c39
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c41
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c27
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c1
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c4
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c21
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c15
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-input.c47
-rw-r--r--drivers/hid/hid-logitech-hidpp.c309
-rw-r--r--drivers/hid/hid-multitouch.c6
-rw-r--r--drivers/hid/hid-quirks.c3
-rw-r--r--drivers/hid/hid-sensor-custom.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c13
-rw-r--r--drivers/hid/hid-steam.c154
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hid/uhid.c25
-rw-r--r--drivers/hv/channel.c8
-rw-r--r--drivers/hv/hv_kvp.c26
-rw-r--r--drivers/hwmon/ina2xx.c6
-rw-r--r--drivers/hwmon/mlxreg-fan.c2
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c6
-rw-r--r--drivers/hwmon/w83795.c2
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c5
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c5
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c3
-rw-r--r--drivers/iio/light/hid-sensor-als.c8
-rw-r--r--drivers/iio/light/hid-sensor-prox.c8
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c8
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c12
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c8
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c8
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c3
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c6
-rw-r--r--drivers/infiniband/core/umem_odp.c20
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c128
-rw-r--r--drivers/infiniband/hw/mlx5/main.c29
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c22
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c7
-rw-r--r--drivers/iommu/amd_iommu_init.c3
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/intel-svm.c2
-rw-r--r--drivers/iommu/ipmmu-vmsa.c3
-rw-r--r--drivers/media/cec/cec-adap.c49
-rw-r--r--drivers/media/i2c/tc358743.c1
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c6
-rw-r--r--drivers/media/platform/omap3isp/isp.c3
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c2
-rw-r--r--drivers/media/platform/vim2m.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c43
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c4
-rw-r--r--drivers/misc/atmel-ssc.c2
-rw-r--r--drivers/misc/mic/scif/scif_rma.c2
-rw-r--r--drivers/misc/sgi-gru/grukdump.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c86
-rw-r--r--drivers/mtd/nand/bbt.c3
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c11
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c32
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c19
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c159
-rw-r--r--drivers/net/can/dev.c48
-rw-r--r--drivers/net/can/flexcan.c108
-rw-r--r--drivers/net/can/rcar/rcar_can.c5
-rw-r--r--drivers/net/can/rx-offload.c51
-rw-r--r--drivers/net/can/spi/hi311x.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c10
-rw-r--r--drivers/net/can/usb/ucan.c7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c23
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c70
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c18
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c4
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/cortina/gemini.c2
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c72
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c14
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c4
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c44
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c50
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h5
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/phy/mdio-gpio.c10
-rw-r--r--drivers/net/phy/mscc.c14
-rw-r--r--drivers/net/phy/phy_device.c8
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/ipheth.c10
-rw-r--r--drivers/net/virtio_net.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c17
-rw-r--r--drivers/nvme/host/core.c8
-rw-r--r--drivers/nvme/host/fc.c75
-rw-r--r--drivers/nvme/host/nvme.h3
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvmem/core.c10
-rw-r--r--drivers/opp/of.c6
-rw-r--r--drivers/opp/ti-opp-supply.c6
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c10
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c1
-rw-r--r--drivers/pci/pci.c24
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c20
-rw-r--r--drivers/phy/socionext/Kconfig3
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c10
-rw-r--r--drivers/s390/crypto/ap_bus.c8
-rw-r--r--drivers/s390/crypto/ap_bus.h1
-rw-r--r--drivers/s390/crypto/ap_queue.c15
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c1
-rw-r--r--drivers/s390/net/ism_drv.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c9
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h6
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c3
-rw-r--r--drivers/slimbus/slimbus.h6
-rw-r--r--drivers/spi/spi-mt65xx.c4
-rw-r--r--drivers/spi/spi-omap2-mcspi.c37
-rw-r--r--drivers/staging/comedi/comedi.h39
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c24
-rw-r--r--drivers/staging/most/core.c2
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c3
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c7
-rw-r--r--drivers/thunderbolt/switch.c40
-rw-r--r--drivers/uio/uio.c7
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/core/quirks.c17
-rw-r--r--drivers/usb/dwc2/pci.c1
-rw-r--r--drivers/usb/dwc3/core.c1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c13
-rw-r--r--drivers/usb/gadget/function/f_fs.c26
-rw-r--r--drivers/usb/gadget/function/u_ether.c11
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c88
-rw-r--r--drivers/usb/host/xhci-histb.c6
-rw-r--r--drivers/usb/host/xhci-hub.c66
-rw-r--r--drivers/usb/host/xhci-mtk.c6
-rw-r--r--drivers/usb/host/xhci-pci.c6
-rw-r--r--drivers/usb/host/xhci-plat.c6
-rw-r--r--drivers/usb/host/xhci-ring.c45
-rw-r--r--drivers/usb/host/xhci-tegra.c1
-rw-r--r--drivers/usb/host/xhci.c2
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/appledisplay.c1
-rw-r--r--drivers/usb/storage/unusual_realtek.h10
-rw-r--r--drivers/xen/balloon.c65
-rw-r--r--drivers/xen/pvcalls-front.c4
-rw-r--r--drivers/xen/xlate_mmu.c1
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/fs_probe.c39
-rw-r--r--fs/afs/inode.c18
-rw-r--r--fs/afs/internal.h9
-rw-r--r--fs/afs/misc.c52
-rw-r--r--fs/afs/rotate.c53
-rw-r--r--fs/afs/rxrpc.c11
-rw-r--r--fs/afs/vl_probe.c45
-rw-r--r--fs/afs/vl_rotate.c50
-rw-r--r--fs/aio.c1
-rw-r--r--fs/btrfs/disk-io.c11
-rw-r--r--fs/btrfs/file.c24
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/relocation.c1
-rw-r--r--fs/btrfs/send.c11
-rw-r--r--fs/btrfs/super.c1
-rw-r--r--fs/cachefiles/namei.c8
-rw-r--r--fs/cachefiles/rdwr.c9
-rw-r--r--fs/cachefiles/xattr.c3
-rw-r--r--fs/dax.c60
-rw-r--r--fs/direct-io.c4
-rw-r--r--fs/exec.c5
-rw-r--r--fs/exportfs/expfs.c3
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext2/xattr.c2
-rw-r--r--fs/fscache/object.c3
-rw-r--r--fs/hfs/btree.c3
-rw-r--r--fs/hfsplus/btree.c3
-rw-r--r--fs/iomap.c53
-rw-r--r--fs/nfs/callback_proc.c22
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c21
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h4
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c19
-rw-r--r--fs/nfs/nfs42proc.c19
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4state.c16
-rw-r--r--fs/nilfs2/btnode.c4
-rw-r--r--fs/ocfs2/export.c2
-rw-r--r--fs/ocfs2/move_extents.c47
-rw-r--r--fs/pstore/ram.c15
-rw-r--r--fs/read_write.c15
-rw-r--r--fs/sysv/inode.c2
-rw-r--r--fs/udf/super.c16
-rw-r--r--fs/udf/unicode.c14
-rw-r--r--fs/userfaultfd.c15
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c5
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c11
-rw-r--r--fs/xfs/xfs_bmap_util.c10
-rw-r--r--fs/xfs/xfs_bmap_util.h3
-rw-r--r--fs/xfs/xfs_buf_item.c28
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_reflink.c18
-rw-r--r--fs/xfs/xfs_trace.h5
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/rx-offload.h7
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/fscache-cache.h3
-rw-r--r--include/linux/ftrace.h4
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/hid.h28
-rw-r--r--include/linux/mlx5/mlx5_ifc.h12
-rw-r--r--include/linux/net_dim.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h13
-rw-r--r--include/linux/platform_data/gpio-davinci.h2
-rw-r--r--include/linux/psi.h3
-rw-r--r--include/linux/pstore.h5
-rw-r--r--include/linux/ptrace.h17
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sched/smt.h20
-rw-r--r--include/linux/skbuff.h18
-rw-r--r--include/linux/tcp.h1
-rw-r--r--include/linux/tracehook.h4
-rw-r--r--include/linux/tracepoint.h6
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/xarray.h267
-rw-r--r--include/media/v4l2-mem2mem.h2
-rw-r--r--include/net/af_rxrpc.h3
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h2
-rw-r--r--include/net/sctp/sctp.h12
-rw-r--r--include/sound/soc.h2
-rw-r--r--include/trace/events/rxrpc.h2
-rw-r--r--include/trace/events/sched.h12
-rw-r--r--include/uapi/linux/input-event-codes.h10
-rw-r--r--include/uapi/linux/prctl.h1
-rw-r--r--include/uapi/linux/v4l2-controls.h5
-rw-r--r--include/xen/balloon.h5
-rw-r--r--init/Kconfig9
-rw-r--r--init/initramfs.c22
-rw-r--r--kernel/bpf/core.c34
-rw-r--r--kernel/bpf/local_storage.c3
-rw-r--r--kernel/bpf/queue_stack_maps.c16
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cpu.c15
-rw-r--r--kernel/dma/swiotlb.c3
-rw-r--r--kernel/events/uprobes.c12
-rw-r--r--kernel/kcov.c4
-rw-r--r--kernel/ptrace.c10
-rw-r--r--kernel/sched/core.c19
-rw-r--r--kernel/sched/psi.c30
-rw-r--r--kernel/sched/sched.h4
-rw-r--r--kernel/sched/stats.h8
-rw-r--r--kernel/stackleak.c4
-rw-r--r--kernel/trace/bpf_trace.c8
-rw-r--r--kernel/trace/ftrace.c7
-rw-r--r--kernel/trace/trace.h57
-rw-r--r--kernel/trace/trace_functions_graph.c53
-rw-r--r--kernel/trace/trace_irqsoff.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--lib/debugobjects.c5
-rw-r--r--lib/iov_iter.c38
-rw-r--r--lib/test_firmware.c1
-rw-r--r--lib/test_hexdump.c2
-rw-r--r--lib/test_kmod.c1
-rw-r--r--lib/test_xarray.c50
-rw-r--r--lib/xarray.c139
-rw-r--r--mm/gup.c3
-rw-r--r--mm/huge_memory.c43
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/khugepaged.c140
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/rmap.c13
-rw-r--r--mm/shmem.c43
-rw-r--r--mm/truncate.c8
-rw-r--r--mm/userfaultfd.c62
-rw-r--r--net/batman-adv/bat_v_elp.c6
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/bridge/br_private.h7
-rw-r--r--net/bridge/br_vlan.c3
-rw-r--r--net/can/raw.c15
-rw-r--r--net/ceph/messenger.c12
-rw-r--r--net/core/dev.c11
-rw-r--r--net/core/filter.c5
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_masquerade_ipv4.c38
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c4
-rw-r--r--net/ipv4/tcp_input.c31
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/tcp_timer.c12
-rw-r--r--net/ipv6/addrconf.c19
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/netfilter.c3
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c8
-rw-r--r--net/ipv6/netfilter/nf_nat_masquerade_ipv6.c49
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c4
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/l2tp/l2tp_core.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/nf_conncount.c44
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c14
-rw-r--r--net/netfilter/nf_tables_api.c46
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c15
-rw-r--r--net/netfilter/nft_compat.c3
-rw-r--r--net/netfilter/nft_flow_offload.c5
-rw-r--r--net/netfilter/xt_RATEEST.c10
-rw-r--r--net/netfilter/xt_hashlimit.c9
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/rxrpc/af_rxrpc.c27
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c36
-rw-r--r--net/sched/sch_fq.c31
-rw-r--r--net/sctp/output.c25
-rw-r--r--net/sctp/socket.c26
-rw-r--r--net/sctp/stream.c1
-rw-r--r--net/smc/af_smc.c11
-rw-r--r--net/smc/smc_cdc.c26
-rw-r--r--net/smc/smc_cdc.h60
-rw-r--r--net/smc/smc_core.c20
-rw-r--r--net/smc/smc_core.h5
-rw-r--r--net/smc/smc_ism.c43
-rw-r--r--net/smc/smc_ism.h1
-rw-r--r--net/smc/smc_wr.c4
-rw-r--r--net/socket.c2
-rw-r--r--net/tipc/discover.c19
-rw-r--r--net/tipc/net.c45
-rw-r--r--net/tipc/net.h2
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/socket.c15
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/unifdef.c4
-rw-r--r--security/selinux/nlmsgtab.c13
-rw-r--r--sound/core/control.c80
-rw-r--r--sound/core/oss/pcm_oss.c6
-rw-r--r--sound/core/oss/pcm_plugin.c6
-rw-r--r--sound/isa/wss/wss_lib.c2
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c5
-rw-r--r--sound/pci/hda/patch_realtek.c37
-rw-r--r--sound/soc/codecs/hdac_hdmi.c11
-rw-r--r--sound/soc/codecs/pcm186x.h2
-rw-r--r--sound/soc/codecs/pcm3060.c12
-rw-r--r--sound/soc/codecs/wm_adsp.c37
-rw-r--r--sound/soc/intel/Kconfig26
-rw-r--r--sound/soc/intel/boards/Kconfig24
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c32
-rw-r--r--sound/soc/intel/skylake/skl.c32
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c67
-rw-r--r--sound/soc/omap/omap-dmic.c9
-rw-r--r--sound/soc/omap/omap-mcbsp.c6
-rw-r--r--sound/soc/omap/omap-mcpdm.c43
-rw-r--r--sound/soc/qcom/common.c9
-rw-r--r--sound/soc/qcom/qdsp6/q6afe-dai.c208
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c16
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c33
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c19
-rw-r--r--sound/soc/rockchip/rockchip_pcm.c1
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/soc-acpi.c10
-rw-r--r--sound/soc/soc-core.c1
-rw-r--r--sound/soc/stm/stm32_sai_sub.c2
-rw-r--r--sound/soc/sunxi/Kconfig2
-rw-r--r--sound/soc/sunxi/sun8i-codec.c12
-rw-r--r--sound/sparc/cs4231.c8
-rw-r--r--sound/usb/quirks-table.h10
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst11
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst9
-rw-r--r--tools/bpf/bpftool/common.c17
-rw-r--r--tools/bpf/bpftool/main.h2
-rw-r--r--tools/bpf/bpftool/prog.c13
-rw-r--r--tools/include/uapi/linux/pkt_cls.h612
-rw-r--r--tools/include/uapi/linux/prctl.h1
-rw-r--r--tools/include/uapi/linux/tc_act/tc_bpf.h37
-rw-r--r--tools/objtool/elf.c19
-rw-r--r--tools/power/cpupower/Makefile12
-rw-r--r--tools/power/cpupower/bench/Makefile2
-rw-r--r--tools/power/cpupower/debug/x86_64/Makefile4
-rw-r--r--tools/power/cpupower/lib/cpufreq.c2
-rw-r--r--tools/power/cpupower/lib/cpuidle.c2
-rw-r--r--tools/power/cpupower/lib/cpupower.c4
-rw-r--r--tools/power/cpupower/lib/cpupower_intern.h2
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c5
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c19
-rw-r--r--tools/testing/selftests/netfilter/Makefile6
-rw-r--r--tools/testing/selftests/netfilter/config2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_trans_stress.sh78
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c9
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py18
639 files changed, 7490 insertions, 3917 deletions
diff --git a/CREDITS b/CREDITS
index 84cbec4c6211..c9273393fe14 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2204,6 +2204,10 @@ S: Post Office Box 371
2204S: North Little Rock, Arkansas 72115 2204S: North Little Rock, Arkansas 72115
2205S: USA 2205S: USA
2206 2206
2207N: Christopher Li
2208E: sparse@chrisli.org
2209D: Sparse maintainer 2009 - 2018
2210
2207N: Stephan Linz 2211N: Stephan Linz
2208E: linz@mazet.de 2212E: linz@mazet.de
2209E: Stephan.Linz@gmx.de 2213E: Stephan.Linz@gmx.de
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 5463d5a4d85c..aefd358a5ca3 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3505,6 +3505,10 @@
3505 before loading. 3505 before loading.
3506 See Documentation/blockdev/ramdisk.txt. 3506 See Documentation/blockdev/ramdisk.txt.
3507 3507
3508 psi= [KNL] Enable or disable pressure stall information
3509 tracking.
3510 Format: <bool>
3511
3508 psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to 3512 psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
3509 probe for; one of (bare|imps|exps|lifebook|any). 3513 probe for; one of (bare|imps|exps|lifebook|any).
3510 psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports 3514 psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports
@@ -4195,9 +4199,13 @@
4195 4199
4196 spectre_v2= [X86] Control mitigation of Spectre variant 2 4200 spectre_v2= [X86] Control mitigation of Spectre variant 2
4197 (indirect branch speculation) vulnerability. 4201 (indirect branch speculation) vulnerability.
4202 The default operation protects the kernel from
4203 user space attacks.
4198 4204
4199 on - unconditionally enable 4205 on - unconditionally enable, implies
4200 off - unconditionally disable 4206 spectre_v2_user=on
4207 off - unconditionally disable, implies
4208 spectre_v2_user=off
4201 auto - kernel detects whether your CPU model is 4209 auto - kernel detects whether your CPU model is
4202 vulnerable 4210 vulnerable
4203 4211
@@ -4207,6 +4215,12 @@
4207 CONFIG_RETPOLINE configuration option, and the 4215 CONFIG_RETPOLINE configuration option, and the
4208 compiler with which the kernel was built. 4216 compiler with which the kernel was built.
4209 4217
4218 Selecting 'on' will also enable the mitigation
4219 against user space to user space task attacks.
4220
4221 Selecting 'off' will disable both the kernel and
4222 the user space protections.
4223
4210 Specific mitigations can also be selected manually: 4224 Specific mitigations can also be selected manually:
4211 4225
4212 retpoline - replace indirect branches 4226 retpoline - replace indirect branches
@@ -4216,6 +4230,48 @@
4216 Not specifying this option is equivalent to 4230 Not specifying this option is equivalent to
4217 spectre_v2=auto. 4231 spectre_v2=auto.
4218 4232
4233 spectre_v2_user=
4234 [X86] Control mitigation of Spectre variant 2
4235 (indirect branch speculation) vulnerability between
4236 user space tasks
4237
4238 on - Unconditionally enable mitigations. Is
4239 enforced by spectre_v2=on
4240
4241 off - Unconditionally disable mitigations. Is
4242 enforced by spectre_v2=off
4243
4244 prctl - Indirect branch speculation is enabled,
4245 but mitigation can be enabled via prctl
4246 per thread. The mitigation control state
4247 is inherited on fork.
4248
4249 prctl,ibpb
4250 - Like "prctl" above, but only STIBP is
4251 controlled per thread. IBPB is issued
4252 always when switching between different user
4253 space processes.
4254
4255 seccomp
4256 - Same as "prctl" above, but all seccomp
4257 threads will enable the mitigation unless
4258 they explicitly opt out.
4259
4260 seccomp,ibpb
4261 - Like "seccomp" above, but only STIBP is
4262 controlled per thread. IBPB is issued
4263 always when switching between different
4264 user space processes.
4265
4266 auto - Kernel selects the mitigation depending on
4267 the available CPU features and vulnerability.
4268
4269 Default mitigation:
4270 If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
4271
4272 Not specifying this option is equivalent to
4273 spectre_v2_user=auto.
4274
4219 spec_store_bypass_disable= 4275 spec_store_bypass_disable=
4220 [HW] Control Speculative Store Bypass (SSB) Disable mitigation 4276 [HW] Control Speculative Store Bypass (SSB) Disable mitigation
4221 (Speculative Store Bypass vulnerability) 4277 (Speculative Store Bypass vulnerability)
@@ -4714,6 +4770,8 @@
4714 prevent spurious wakeup); 4770 prevent spurious wakeup);
4715 n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a 4771 n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a
4716 pause after every control message); 4772 pause after every control message);
4773 o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
4774 delay after resetting its port);
4717 Example: quirks=0781:5580:bk,0a5c:5834:gij 4775 Example: quirks=0781:5580:bk,0a5c:5834:gij
4718 4776
4719 usbhid.mousepoll= 4777 usbhid.mousepoll=
diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst
index 164bf71149fd..30187d49dc2c 100644
--- a/Documentation/admin-guide/security-bugs.rst
+++ b/Documentation/admin-guide/security-bugs.rst
@@ -32,16 +32,17 @@ Disclosure and embargoed information
32The security list is not a disclosure channel. For that, see Coordination 32The security list is not a disclosure channel. For that, see Coordination
33below. 33below.
34 34
35Once a robust fix has been developed, our preference is to release the 35Once a robust fix has been developed, the release process starts. Fixes
36fix in a timely fashion, treating it no differently than any of the other 36for publicly known bugs are released immediately.
37thousands of changes and fixes the Linux kernel project releases every 37
38month. 38Although our preference is to release fixes for publicly undisclosed bugs
39 39as soon as they become available, this may be postponed at the request of
40However, at the request of the reporter, we will postpone releasing the 40the reporter or an affected party for up to 7 calendar days from the start
41fix for up to 5 business days after the date of the report or after the 41of the release process, with an exceptional extension to 14 calendar days
42embargo has lifted; whichever comes first. The only exception to that 42if it is agreed that the criticality of the bug requires more time. The
43rule is if the bug is publicly known, in which case the preference is to 43only valid reason for deferring the publication of a fix is to accommodate
44release the fix as soon as it's available. 44the logistics of QA and large scale rollouts which require release
45coordination.
45 46
46Whilst embargoed information may be shared with trusted individuals in 47Whilst embargoed information may be shared with trusted individuals in
47order to develop a fix, such information will not be published alongside 48order to develop a fix, such information will not be published alongside
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 76ccded8b74c..8f9577621144 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -57,6 +57,7 @@ stable kernels.
57| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | 57| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
58| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 | 58| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
59| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 | 59| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
60| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
60| ARM | MMU-500 | #841119,#826419 | N/A | 61| ARM | MMU-500 | #841119,#826419 | N/A |
61| | | | | 62| | | | |
62| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | 63| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index a4e705108f42..dbe96cb5558e 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -74,7 +74,8 @@ using :c:func:`xa_load`. xa_store will overwrite any entry with the
74new entry and return the previous entry stored at that index. You can 74new entry and return the previous entry stored at that index. You can
75use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a 75use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a
76``NULL`` entry. There is no difference between an entry that has never 76``NULL`` entry. There is no difference between an entry that has never
77been stored to and one that has most recently had ``NULL`` stored to it. 77been stored to, one that has been erased and one that has most recently
78had ``NULL`` stored to it.
78 79
79You can conditionally replace an entry at an index by using 80You can conditionally replace an entry at an index by using
80:c:func:`xa_cmpxchg`. Like :c:func:`cmpxchg`, it will only succeed if 81:c:func:`xa_cmpxchg`. Like :c:func:`cmpxchg`, it will only succeed if
@@ -105,23 +106,44 @@ may result in the entry being marked at some, but not all of the other
105indices. Storing into one index may result in the entry retrieved by 106indices. Storing into one index may result in the entry retrieved by
106some, but not all of the other indices changing. 107some, but not all of the other indices changing.
107 108
109Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
110will not need to allocate memory. The :c:func:`xa_reserve` function
111will store a reserved entry at the indicated index. Users of the normal
112API will see this entry as containing ``NULL``. If you do not need to
113use the reserved entry, you can call :c:func:`xa_release` to remove the
114unused entry. If another user has stored to the entry in the meantime,
115:c:func:`xa_release` will do nothing; if instead you want the entry to
116become ``NULL``, you should use :c:func:`xa_erase`.
117
118If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
119will return ``true``.
120
108Finally, you can remove all entries from an XArray by calling 121Finally, you can remove all entries from an XArray by calling
109:c:func:`xa_destroy`. If the XArray entries are pointers, you may wish 122:c:func:`xa_destroy`. If the XArray entries are pointers, you may wish
110to free the entries first. You can do this by iterating over all present 123to free the entries first. You can do this by iterating over all present
111entries in the XArray using the :c:func:`xa_for_each` iterator. 124entries in the XArray using the :c:func:`xa_for_each` iterator.
112 125
113ID assignment 126Allocating XArrays
114------------- 127------------------
128
129If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or
130initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
131the XArray changes to track whether entries are in use or not.
115 132
116You can call :c:func:`xa_alloc` to store the entry at any unused index 133You can call :c:func:`xa_alloc` to store the entry at any unused index
117in the XArray. If you need to modify the array from interrupt context, 134in the XArray. If you need to modify the array from interrupt context,
118you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable 135you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable
119interrupts while allocating the ID. Unlike :c:func:`xa_store`, allocating 136interrupts while allocating the ID.
120a ``NULL`` pointer does not delete an entry. Instead it reserves an 137
121entry like :c:func:`xa_reserve` and you can release it using either 138Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert`
122:c:func:`xa_erase` or :c:func:`xa_release`. To use ID assignment, the 139will mark the entry as being allocated. Unlike a normal XArray, storing
123XArray must be defined with :c:func:`DEFINE_XARRAY_ALLOC`, or initialised 140``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`.
124by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`, 141To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if
142you only want to free the entry if it's ``NULL``).
143
144You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
145is used to track whether an entry is free or not. The other marks are
146available for your use.
125 147
126Memory allocation 148Memory allocation
127----------------- 149-----------------
@@ -158,6 +180,8 @@ Takes RCU read lock:
158 180
159Takes xa_lock internally: 181Takes xa_lock internally:
160 * :c:func:`xa_store` 182 * :c:func:`xa_store`
183 * :c:func:`xa_store_bh`
184 * :c:func:`xa_store_irq`
161 * :c:func:`xa_insert` 185 * :c:func:`xa_insert`
162 * :c:func:`xa_erase` 186 * :c:func:`xa_erase`
163 * :c:func:`xa_erase_bh` 187 * :c:func:`xa_erase_bh`
@@ -167,6 +191,9 @@ Takes xa_lock internally:
167 * :c:func:`xa_alloc` 191 * :c:func:`xa_alloc`
168 * :c:func:`xa_alloc_bh` 192 * :c:func:`xa_alloc_bh`
169 * :c:func:`xa_alloc_irq` 193 * :c:func:`xa_alloc_irq`
194 * :c:func:`xa_reserve`
195 * :c:func:`xa_reserve_bh`
196 * :c:func:`xa_reserve_irq`
170 * :c:func:`xa_destroy` 197 * :c:func:`xa_destroy`
171 * :c:func:`xa_set_mark` 198 * :c:func:`xa_set_mark`
172 * :c:func:`xa_clear_mark` 199 * :c:func:`xa_clear_mark`
@@ -177,6 +204,7 @@ Assumes xa_lock held on entry:
177 * :c:func:`__xa_erase` 204 * :c:func:`__xa_erase`
178 * :c:func:`__xa_cmpxchg` 205 * :c:func:`__xa_cmpxchg`
179 * :c:func:`__xa_alloc` 206 * :c:func:`__xa_alloc`
207 * :c:func:`__xa_reserve`
180 * :c:func:`__xa_set_mark` 208 * :c:func:`__xa_set_mark`
181 * :c:func:`__xa_clear_mark` 209 * :c:func:`__xa_clear_mark`
182 210
@@ -234,7 +262,8 @@ Sharing the XArray with interrupt context is also possible, either
234using :c:func:`xa_lock_irqsave` in both the interrupt handler and process 262using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
235context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock` 263context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
236in the interrupt handler. Some of the more common patterns have helper 264in the interrupt handler. Some of the more common patterns have helper
237functions such as :c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`. 265functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
266:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
238 267
239Sometimes you need to protect access to the XArray with a mutex because 268Sometimes you need to protect access to the XArray with a mutex because
240that lock sits above another mutex in the locking hierarchy. That does 269that lock sits above another mutex in the locking hierarchy. That does
@@ -322,7 +351,8 @@ to :c:func:`xas_retry`, and retry the operation if it returns ``true``.
322 - :c:func:`xa_is_zero` 351 - :c:func:`xa_is_zero`
323 - Zero entries appear as ``NULL`` through the Normal API, but occupy 352 - Zero entries appear as ``NULL`` through the Normal API, but occupy
324 an entry in the XArray which can be used to reserve the index for 353 an entry in the XArray which can be used to reserve the index for
325 future use. 354 future use. This is used by allocating XArrays for allocated entries
355 which are ``NULL``.
326 356
327Other internal entries may be added in the future. As far as possible, they 357Other internal entries may be added in the future. As far as possible, they
328will be handled by :c:func:`xas_retry`. 358will be handled by :c:func:`xas_retry`.
diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
index 903a78da65be..3a9926f99937 100644
--- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
+++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
@@ -17,7 +17,7 @@ Example:
17 reg = <1>; 17 reg = <1>;
18 clocks = <&clk32m>; 18 clocks = <&clk32m>;
19 interrupt-parent = <&gpio4>; 19 interrupt-parent = <&gpio4>;
20 interrupts = <13 IRQ_TYPE_EDGE_RISING>; 20 interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
21 vdd-supply = <&reg5v0>; 21 vdd-supply = <&reg5v0>;
22 xceiver-supply = <&reg5v0>; 22 xceiver-supply = <&reg5v0>;
23 }; 23 };
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index cc4372842bf3..9936b9ee67c3 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -5,6 +5,7 @@ Required properties:
5- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC. 5- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
6 "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC. 6 "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
7 "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC. 7 "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
8 "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
8 "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC. 9 "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
9 "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC. 10 "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
10 "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC. 11 "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
@@ -14,26 +15,32 @@ Required properties:
14 "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC. 15 "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
15 "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC. 16 "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
16 "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC. 17 "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
18 "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC.
17 "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device. 19 "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
18 "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1 20 "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
19 compatible device. 21 compatible device.
20 "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device. 22 "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2
23 compatible device.
21 When compatible with the generic version, nodes must list the 24 When compatible with the generic version, nodes must list the
22 SoC-specific version corresponding to the platform first 25 SoC-specific version corresponding to the platform first
23 followed by the generic version. 26 followed by the generic version.
24 27
25- reg: physical base address and size of the R-Car CAN register map. 28- reg: physical base address and size of the R-Car CAN register map.
26- interrupts: interrupt specifier for the sole interrupt. 29- interrupts: interrupt specifier for the sole interrupt.
27- clocks: phandles and clock specifiers for 3 CAN clock inputs. 30- clocks: phandles and clock specifiers for 2 CAN clock inputs for RZ/G2
28- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk". 31 devices.
32 phandles and clock specifiers for 3 CAN clock inputs for every other
33 SoC.
34- clock-names: 2 clock input name strings for RZ/G2: "clkp1", "can_clk".
35 3 clock input name strings for every other SoC: "clkp1", "clkp2",
36 "can_clk".
29- pinctrl-0: pin control group to be used for this controller. 37- pinctrl-0: pin control group to be used for this controller.
30- pinctrl-names: must be "default". 38- pinctrl-names: must be "default".
31 39
32Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796" 40Required properties for R8A7795, R8A7796 and R8A77965:
33compatible: 41For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
34In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock 42be used by both CAN and CAN FD controller at the same time. It needs to be
35and can be used by both CAN and CAN FD controller at the same time. It needs to 43scaled to maximum frequency if any of these controllers use it. This is done
36be scaled to maximum frequency if any of these controllers use it. This is done
37using the below properties: 44using the below properties:
38 45
39- assigned-clocks: phandle of clkp2(CANFD) clock. 46- assigned-clocks: phandle of clkp2(CANFD) clock.
@@ -42,8 +49,9 @@ using the below properties:
42Optional properties: 49Optional properties:
43- renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are: 50- renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
44 <0x0> (default) : Peripheral clock (clkp1) 51 <0x0> (default) : Peripheral clock (clkp1)
45 <0x1> : Peripheral clock (clkp2) 52 <0x1> : Peripheral clock (clkp2) (not supported by
46 <0x3> : Externally input clock 53 RZ/G2 devices)
54 <0x3> : External input clock
47 55
48Example 56Example
49------- 57-------
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index 3ceeb8de1196..35694c0c376b 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -7,7 +7,7 @@ limitations.
7Current Binding 7Current Binding
8--------------- 8---------------
9 9
10Switches are true Linux devices and can be probes by any means. Once 10Switches are true Linux devices and can be probed by any means. Once
11probed, they register to the DSA framework, passing a node 11probed, they register to the DSA framework, passing a node
12pointer. This node is expected to fulfil the following binding, and 12pointer. This node is expected to fulfil the following binding, and
13may contain additional properties as required by the device it is 13may contain additional properties as required by the device it is
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index adf20b2bdf71..fbc198d5dd39 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -40,24 +40,36 @@ Required properties:
40 "ref" for 19.2 MHz ref clk, 40 "ref" for 19.2 MHz ref clk,
41 "com_aux" for phy common block aux clock, 41 "com_aux" for phy common block aux clock,
42 "ref_aux" for phy reference aux clock, 42 "ref_aux" for phy reference aux clock,
43
44 For "qcom,ipq8074-qmp-pcie-phy": no clocks are listed.
43 For "qcom,msm8996-qmp-pcie-phy" must contain: 45 For "qcom,msm8996-qmp-pcie-phy" must contain:
44 "aux", "cfg_ahb", "ref". 46 "aux", "cfg_ahb", "ref".
45 For "qcom,msm8996-qmp-usb3-phy" must contain: 47 For "qcom,msm8996-qmp-usb3-phy" must contain:
46 "aux", "cfg_ahb", "ref". 48 "aux", "cfg_ahb", "ref".
47 For "qcom,qmp-v3-usb3-phy" must contain: 49 For "qcom,sdm845-qmp-usb3-phy" must contain:
50 "aux", "cfg_ahb", "ref", "com_aux".
51 For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
48 "aux", "cfg_ahb", "ref", "com_aux". 52 "aux", "cfg_ahb", "ref", "com_aux".
53 For "qcom,sdm845-qmp-ufs-phy" must contain:
54 "ref", "ref_aux".
49 55
50 - resets: a list of phandles and reset controller specifier pairs, 56 - resets: a list of phandles and reset controller specifier pairs,
51 one for each entry in reset-names. 57 one for each entry in reset-names.
52 - reset-names: "phy" for reset of phy block, 58 - reset-names: "phy" for reset of phy block,
53 "common" for phy common block reset, 59 "common" for phy common block reset,
54 "cfg" for phy's ahb cfg block reset (Optional). 60 "cfg" for phy's ahb cfg block reset.
61
62 For "qcom,ipq8074-qmp-pcie-phy" must contain:
63 "phy", "common".
55 For "qcom,msm8996-qmp-pcie-phy" must contain: 64 For "qcom,msm8996-qmp-pcie-phy" must contain:
56 "phy", "common", "cfg". 65 "phy", "common", "cfg".
57 For "qcom,msm8996-qmp-usb3-phy" must contain 66 For "qcom,msm8996-qmp-usb3-phy" must contain
58 "phy", "common". 67 "phy", "common".
59 For "qcom,ipq8074-qmp-pcie-phy" must contain: 68 For "qcom,sdm845-qmp-usb3-phy" must contain:
60 "phy", "common". 69 "phy", "common".
70 For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
71 "phy", "common".
72 For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
61 73
62 - vdda-phy-supply: Phandle to a regulator supply to PHY core block. 74 - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
63 - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block. 75 - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
@@ -79,9 +91,10 @@ Required properties for child node:
79 91
80 - #phy-cells: must be 0 92 - #phy-cells: must be 0
81 93
94Required properties for child node of pcie and usb3 qmp phys:
82 - clocks: a list of phandles and clock-specifier pairs, 95 - clocks: a list of phandles and clock-specifier pairs,
83 one for each entry in clock-names. 96 one for each entry in clock-names.
84 - clock-names: Must contain following for pcie and usb qmp phys: 97 - clock-names: Must contain following:
85 "pipe<lane-number>" for pipe clock specific to each lane. 98 "pipe<lane-number>" for pipe clock specific to each lane.
86 - clock-output-names: Name of the PHY clock that will be the parent for 99 - clock-output-names: Name of the PHY clock that will be the parent for
87 the above pipe clock. 100 the above pipe clock.
@@ -91,9 +104,11 @@ Required properties for child node:
91 (or) 104 (or)
92 "pcie20_phy1_pipe_clk" 105 "pcie20_phy1_pipe_clk"
93 106
107Required properties for child node of PHYs with lane reset, AKA:
108 "qcom,msm8996-qmp-pcie-phy"
94 - resets: a list of phandles and reset controller specifier pairs, 109 - resets: a list of phandles and reset controller specifier pairs,
95 one for each entry in reset-names. 110 one for each entry in reset-names.
96 - reset-names: Must contain following for pcie qmp phys: 111 - reset-names: Must contain following:
97 "lane<lane-number>" for reset specific to each lane. 112 "lane<lane-number>" for reset specific to each lane.
98 113
99Example: 114Example:
diff --git a/Documentation/devicetree/bindings/spi/spi-uniphier.txt b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
index 504a4ecfc7b1..b04e66a52de5 100644
--- a/Documentation/devicetree/bindings/spi/spi-uniphier.txt
+++ b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
@@ -5,18 +5,20 @@ UniPhier SoCs have SCSSI which supports SPI single channel.
5Required properties: 5Required properties:
6 - compatible: should be "socionext,uniphier-scssi" 6 - compatible: should be "socionext,uniphier-scssi"
7 - reg: address and length of the spi master registers 7 - reg: address and length of the spi master registers
8 - #address-cells: must be <1>, see spi-bus.txt 8 - interrupts: a single interrupt specifier
9 - #size-cells: must be <0>, see spi-bus.txt 9 - pinctrl-names: should be "default"
10 - clocks: A phandle to the clock for the device. 10 - pinctrl-0: pin control state for the default mode
11 - resets: A phandle to the reset control for the device. 11 - clocks: a phandle to the clock for the device
12 - resets: a phandle to the reset control for the device
12 13
13Example: 14Example:
14 15
15spi0: spi@54006000 { 16spi0: spi@54006000 {
16 compatible = "socionext,uniphier-scssi"; 17 compatible = "socionext,uniphier-scssi";
17 reg = <0x54006000 0x100>; 18 reg = <0x54006000 0x100>;
18 #address-cells = <1>; 19 interrupts = <0 39 4>;
19 #size-cells = <0>; 20 pinctrl-names = "default";
21 pinctrl-0 = <&pinctrl_spi0>;
20 clocks = <&peri_clk 11>; 22 clocks = <&peri_clk 11>;
21 resets = <&peri_rst 11>; 23 resets = <&peri_rst 11>;
22}; 24};
diff --git a/Documentation/input/event-codes.rst b/Documentation/input/event-codes.rst
index cef220c176a4..a8c0873beb95 100644
--- a/Documentation/input/event-codes.rst
+++ b/Documentation/input/event-codes.rst
@@ -190,16 +190,7 @@ A few EV_REL codes have special meanings:
190* REL_WHEEL, REL_HWHEEL: 190* REL_WHEEL, REL_HWHEEL:
191 191
192 - These codes are used for vertical and horizontal scroll wheels, 192 - These codes are used for vertical and horizontal scroll wheels,
193 respectively. The value is the number of "notches" moved on the wheel, the 193 respectively.
194 physical size of which varies by device. For high-resolution wheels (which
195 report multiple events for each notch of movement, or do not have notches)
196 this may be an approximation based on the high-resolution scroll events.
197
198* REL_WHEEL_HI_RES:
199
200 - If a vertical scroll wheel supports high-resolution scrolling, this code
201 will be emitted in addition to REL_WHEEL. The value is the (approximate)
202 distance travelled by the user's finger, in microns.
203 194
204EV_ABS 195EV_ABS
205------ 196------
diff --git a/Documentation/media/uapi/v4l/dev-meta.rst b/Documentation/media/uapi/v4l/dev-meta.rst
index f7ac8d0d3af1..b65dc078abeb 100644
--- a/Documentation/media/uapi/v4l/dev-meta.rst
+++ b/Documentation/media/uapi/v4l/dev-meta.rst
@@ -40,7 +40,7 @@ To use the :ref:`format` ioctls applications set the ``type`` field of the
40the desired operation. Both drivers and applications must set the remainder of 40the desired operation. Both drivers and applications must set the remainder of
41the :c:type:`v4l2_format` structure to 0. 41the :c:type:`v4l2_format` structure to 0.
42 42
43.. _v4l2-meta-format: 43.. c:type:: v4l2_meta_format
44 44
45.. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}| 45.. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}|
46 46
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
index 3ead350e099f..9ea494a8faca 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
@@ -133,6 +133,11 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical
133 - Definition of a data format, see :ref:`pixfmt`, used by SDR 133 - Definition of a data format, see :ref:`pixfmt`, used by SDR
134 capture and output devices. 134 capture and output devices.
135 * - 135 * -
136 - struct :c:type:`v4l2_meta_format`
137 - ``meta``
138 - Definition of a metadata format, see :ref:`meta-formats`, used by
139 metadata capture devices.
140 * -
136 - __u8 141 - __u8
137 - ``raw_data``\ [200] 142 - ``raw_data``\ [200]
138 - Place holder for future extensions. 143 - Place holder for future extensions.
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 605e00cdd6be..89f1302d593a 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1056,18 +1056,23 @@ The kernel interface functions are as follows:
1056 1056
1057 u32 rxrpc_kernel_check_life(struct socket *sock, 1057 u32 rxrpc_kernel_check_life(struct socket *sock,
1058 struct rxrpc_call *call); 1058 struct rxrpc_call *call);
1059 void rxrpc_kernel_probe_life(struct socket *sock,
1060 struct rxrpc_call *call);
1059 1061
1060 This returns a number that is updated when ACKs are received from the peer 1062 The first function returns a number that is updated when ACKs are received
1061 (notably including PING RESPONSE ACKs which we can elicit by sending PING 1063 from the peer (notably including PING RESPONSE ACKs which we can elicit by
1062 ACKs to see if the call still exists on the server). The caller should 1064 sending PING ACKs to see if the call still exists on the server). The
1063 compare the numbers of two calls to see if the call is still alive after 1065 caller should compare the numbers of two calls to see if the call is still
1064 waiting for a suitable interval. 1066 alive after waiting for a suitable interval.
1065 1067
1066 This allows the caller to work out if the server is still contactable and 1068 This allows the caller to work out if the server is still contactable and
1067 if the call is still alive on the server whilst waiting for the server to 1069 if the call is still alive on the server whilst waiting for the server to
1068 process a client operation. 1070 process a client operation.
1069 1071
1070 This function may transmit a PING ACK. 1072 The second function causes a ping ACK to be transmitted to try to provoke
1073 the peer into responding, which would then cause the value returned by the
1074 first function to change. Note that this must be called in TASK_RUNNING
1075 state.
1071 1076
1072 (*) Get reply timestamp. 1077 (*) Get reply timestamp.
1073 1078
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
index 32f3d55c54b7..c4dbe6f7cdae 100644
--- a/Documentation/userspace-api/spec_ctrl.rst
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -92,3 +92,12 @@ Speculation misfeature controls
92 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); 92 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
93 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); 93 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
94 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); 94 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
95
96- PR_SPEC_INDIR_BRANCH: Indirect Branch Speculation in User Processes
97 (Mitigate Spectre V2 style attacks against user processes)
98
99 Invocations:
100 * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
101 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
102 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
103 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 7727db8f94bc..5e9b826b5f62 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -61,18 +61,6 @@ Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
61 to struct boot_params for loading bzImage and ramdisk 61 to struct boot_params for loading bzImage and ramdisk
62 above 4G in 64bit. 62 above 4G in 64bit.
63 63
64Protocol 2.13: (Kernel 3.14) Support 32- and 64-bit flags being set in
65 xloadflags to support booting a 64-bit kernel from 32-bit
66 EFI
67
68Protocol 2.14: (Kernel 4.20) Added acpi_rsdp_addr holding the physical
69 address of the ACPI RSDP table.
70 The bootloader updates version with:
71 0x8000 | min(kernel-version, bootloader-version)
72 kernel-version being the protocol version supported by
73 the kernel and bootloader-version the protocol version
74 supported by the bootloader.
75
76**** MEMORY LAYOUT 64**** MEMORY LAYOUT
77 65
78The traditional memory map for the kernel loader, used for Image or 66The traditional memory map for the kernel loader, used for Image or
@@ -209,7 +197,6 @@ Offset Proto Name Meaning
2090258/8 2.10+ pref_address Preferred loading address 1970258/8 2.10+ pref_address Preferred loading address
2100260/4 2.10+ init_size Linear memory required during initialization 1980260/4 2.10+ init_size Linear memory required during initialization
2110264/4 2.11+ handover_offset Offset of handover entry point 1990264/4 2.11+ handover_offset Offset of handover entry point
2120268/8 2.14+ acpi_rsdp_addr Physical address of RSDP table
213 200
214(1) For backwards compatibility, if the setup_sects field contains 0, the 201(1) For backwards compatibility, if the setup_sects field contains 0, the
215 real value is 4. 202 real value is 4.
@@ -322,7 +309,7 @@ Protocol: 2.00+
322 Contains the magic number "HdrS" (0x53726448). 309 Contains the magic number "HdrS" (0x53726448).
323 310
324Field name: version 311Field name: version
325Type: modify 312Type: read
326Offset/size: 0x206/2 313Offset/size: 0x206/2
327Protocol: 2.00+ 314Protocol: 2.00+
328 315
@@ -330,12 +317,6 @@ Protocol: 2.00+
330 e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version 317 e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
331 10.17. 318 10.17.
332 319
333 Up to protocol version 2.13 this information is only read by the
334 bootloader. From protocol version 2.14 onwards the bootloader will
335 write the used protocol version or-ed with 0x8000 to the field. The
336 used protocol version will be the minimum of the supported protocol
337 versions of the bootloader and the kernel.
338
339Field name: realmode_swtch 320Field name: realmode_swtch
340Type: modify (optional) 321Type: modify (optional)
341Offset/size: 0x208/4 322Offset/size: 0x208/4
@@ -763,17 +744,6 @@ Offset/size: 0x264/4
763 744
764 See EFI HANDOVER PROTOCOL below for more details. 745 See EFI HANDOVER PROTOCOL below for more details.
765 746
766Field name: acpi_rsdp_addr
767Type: write
768Offset/size: 0x268/8
769Protocol: 2.14+
770
771 This field can be set by the boot loader to tell the kernel the
772 physical address of the ACPI RSDP table.
773
774 A value of 0 indicates the kernel should fall back to the standard
775 methods to locate the RSDP.
776
777 747
778**** THE IMAGE CHECKSUM 748**** THE IMAGE CHECKSUM
779 749
diff --git a/MAINTAINERS b/MAINTAINERS
index b755a89fa325..6682420421c1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -180,6 +180,7 @@ F: drivers/net/hamradio/6pack.c
180 180
1818169 10/100/1000 GIGABIT ETHERNET DRIVER 1818169 10/100/1000 GIGABIT ETHERNET DRIVER
182M: Realtek linux nic maintainers <nic_swsd@realtek.com> 182M: Realtek linux nic maintainers <nic_swsd@realtek.com>
183M: Heiner Kallweit <hkallweit1@gmail.com>
183L: netdev@vger.kernel.org 184L: netdev@vger.kernel.org
184S: Maintained 185S: Maintained
185F: drivers/net/ethernet/realtek/r8169.c 186F: drivers/net/ethernet/realtek/r8169.c
@@ -717,7 +718,7 @@ F: include/linux/mfd/altera-a10sr.h
717F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h 718F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
718 719
719ALTERA TRIPLE SPEED ETHERNET DRIVER 720ALTERA TRIPLE SPEED ETHERNET DRIVER
720M: Vince Bridgers <vbridger@opensource.altera.com> 721M: Thor Thayer <thor.thayer@linux.intel.com>
721L: netdev@vger.kernel.org 722L: netdev@vger.kernel.org
722L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) 723L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
723S: Maintained 724S: Maintained
@@ -1922,7 +1923,6 @@ ARM/QUALCOMM SUPPORT
1922M: Andy Gross <andy.gross@linaro.org> 1923M: Andy Gross <andy.gross@linaro.org>
1923M: David Brown <david.brown@linaro.org> 1924M: David Brown <david.brown@linaro.org>
1924L: linux-arm-msm@vger.kernel.org 1925L: linux-arm-msm@vger.kernel.org
1925L: linux-soc@vger.kernel.org
1926S: Maintained 1926S: Maintained
1927F: Documentation/devicetree/bindings/soc/qcom/ 1927F: Documentation/devicetree/bindings/soc/qcom/
1928F: arch/arm/boot/dts/qcom-*.dts 1928F: arch/arm/boot/dts/qcom-*.dts
@@ -2490,7 +2490,7 @@ F: drivers/net/wireless/ath/*
2490ATHEROS ATH5K WIRELESS DRIVER 2490ATHEROS ATH5K WIRELESS DRIVER
2491M: Jiri Slaby <jirislaby@gmail.com> 2491M: Jiri Slaby <jirislaby@gmail.com>
2492M: Nick Kossifidis <mickflemm@gmail.com> 2492M: Nick Kossifidis <mickflemm@gmail.com>
2493M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com> 2493M: Luis Chamberlain <mcgrof@kernel.org>
2494L: linux-wireless@vger.kernel.org 2494L: linux-wireless@vger.kernel.org
2495W: http://wireless.kernel.org/en/users/Drivers/ath5k 2495W: http://wireless.kernel.org/en/users/Drivers/ath5k
2496S: Maintained 2496S: Maintained
@@ -2800,7 +2800,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
2800T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git 2800T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
2801Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 2801Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
2802S: Supported 2802S: Supported
2803F: arch/x86/net/bpf_jit* 2803F: arch/*/net/*
2804F: Documentation/networking/filter.txt 2804F: Documentation/networking/filter.txt
2805F: Documentation/bpf/ 2805F: Documentation/bpf/
2806F: include/linux/bpf* 2806F: include/linux/bpf*
@@ -2820,6 +2820,67 @@ F: tools/bpf/
2820F: tools/lib/bpf/ 2820F: tools/lib/bpf/
2821F: tools/testing/selftests/bpf/ 2821F: tools/testing/selftests/bpf/
2822 2822
2823BPF JIT for ARM
2824M: Shubham Bansal <illusionist.neo@gmail.com>
2825L: netdev@vger.kernel.org
2826S: Maintained
2827F: arch/arm/net/
2828
2829BPF JIT for ARM64
2830M: Daniel Borkmann <daniel@iogearbox.net>
2831M: Alexei Starovoitov <ast@kernel.org>
2832M: Zi Shen Lim <zlim.lnx@gmail.com>
2833L: netdev@vger.kernel.org
2834S: Supported
2835F: arch/arm64/net/
2836
2837BPF JIT for MIPS (32-BIT AND 64-BIT)
2838M: Paul Burton <paul.burton@mips.com>
2839L: netdev@vger.kernel.org
2840S: Maintained
2841F: arch/mips/net/
2842
2843BPF JIT for NFP NICs
2844M: Jakub Kicinski <jakub.kicinski@netronome.com>
2845L: netdev@vger.kernel.org
2846S: Supported
2847F: drivers/net/ethernet/netronome/nfp/bpf/
2848
2849BPF JIT for POWERPC (32-BIT AND 64-BIT)
2850M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
2851M: Sandipan Das <sandipan@linux.ibm.com>
2852L: netdev@vger.kernel.org
2853S: Maintained
2854F: arch/powerpc/net/
2855
2856BPF JIT for S390
2857M: Martin Schwidefsky <schwidefsky@de.ibm.com>
2858M: Heiko Carstens <heiko.carstens@de.ibm.com>
2859L: netdev@vger.kernel.org
2860S: Maintained
2861F: arch/s390/net/
2862X: arch/s390/net/pnet.c
2863
2864BPF JIT for SPARC (32-BIT AND 64-BIT)
2865M: David S. Miller <davem@davemloft.net>
2866L: netdev@vger.kernel.org
2867S: Maintained
2868F: arch/sparc/net/
2869
2870BPF JIT for X86 32-BIT
2871M: Wang YanQing <udknight@gmail.com>
2872L: netdev@vger.kernel.org
2873S: Maintained
2874F: arch/x86/net/bpf_jit_comp32.c
2875
2876BPF JIT for X86 64-BIT
2877M: Alexei Starovoitov <ast@kernel.org>
2878M: Daniel Borkmann <daniel@iogearbox.net>
2879L: netdev@vger.kernel.org
2880S: Supported
2881F: arch/x86/net/
2882X: arch/x86/net/bpf_jit_comp32.c
2883
2823BROADCOM B44 10/100 ETHERNET DRIVER 2884BROADCOM B44 10/100 ETHERNET DRIVER
2824M: Michael Chan <michael.chan@broadcom.com> 2885M: Michael Chan <michael.chan@broadcom.com>
2825L: netdev@vger.kernel.org 2886L: netdev@vger.kernel.org
@@ -2860,7 +2921,7 @@ F: drivers/staging/vc04_services
2860BROADCOM BCM47XX MIPS ARCHITECTURE 2921BROADCOM BCM47XX MIPS ARCHITECTURE
2861M: Hauke Mehrtens <hauke@hauke-m.de> 2922M: Hauke Mehrtens <hauke@hauke-m.de>
2862M: Rafał Miłecki <zajec5@gmail.com> 2923M: Rafał Miłecki <zajec5@gmail.com>
2863L: linux-mips@linux-mips.org 2924L: linux-mips@vger.kernel.org
2864S: Maintained 2925S: Maintained
2865F: Documentation/devicetree/bindings/mips/brcm/ 2926F: Documentation/devicetree/bindings/mips/brcm/
2866F: arch/mips/bcm47xx/* 2927F: arch/mips/bcm47xx/*
@@ -2869,7 +2930,6 @@ F: arch/mips/include/asm/mach-bcm47xx/*
2869BROADCOM BCM5301X ARM ARCHITECTURE 2930BROADCOM BCM5301X ARM ARCHITECTURE
2870M: Hauke Mehrtens <hauke@hauke-m.de> 2931M: Hauke Mehrtens <hauke@hauke-m.de>
2871M: Rafał Miłecki <zajec5@gmail.com> 2932M: Rafał Miłecki <zajec5@gmail.com>
2872M: Jon Mason <jonmason@broadcom.com>
2873M: bcm-kernel-feedback-list@broadcom.com 2933M: bcm-kernel-feedback-list@broadcom.com
2874L: linux-arm-kernel@lists.infradead.org 2934L: linux-arm-kernel@lists.infradead.org
2875S: Maintained 2935S: Maintained
@@ -2924,7 +2984,7 @@ F: drivers/cpufreq/bmips-cpufreq.c
2924BROADCOM BMIPS MIPS ARCHITECTURE 2984BROADCOM BMIPS MIPS ARCHITECTURE
2925M: Kevin Cernekee <cernekee@gmail.com> 2985M: Kevin Cernekee <cernekee@gmail.com>
2926M: Florian Fainelli <f.fainelli@gmail.com> 2986M: Florian Fainelli <f.fainelli@gmail.com>
2927L: linux-mips@linux-mips.org 2987L: linux-mips@vger.kernel.org
2928T: git git://github.com/broadcom/stblinux.git 2988T: git git://github.com/broadcom/stblinux.git
2929S: Maintained 2989S: Maintained
2930F: arch/mips/bmips/* 2990F: arch/mips/bmips/*
@@ -3015,7 +3075,6 @@ F: drivers/net/ethernet/broadcom/genet/
3015BROADCOM IPROC ARM ARCHITECTURE 3075BROADCOM IPROC ARM ARCHITECTURE
3016M: Ray Jui <rjui@broadcom.com> 3076M: Ray Jui <rjui@broadcom.com>
3017M: Scott Branden <sbranden@broadcom.com> 3077M: Scott Branden <sbranden@broadcom.com>
3018M: Jon Mason <jonmason@broadcom.com>
3019M: bcm-kernel-feedback-list@broadcom.com 3078M: bcm-kernel-feedback-list@broadcom.com
3020L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 3079L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
3021T: git git://github.com/broadcom/cygnus-linux.git 3080T: git git://github.com/broadcom/cygnus-linux.git
@@ -3062,7 +3121,7 @@ F: include/uapi/rdma/bnxt_re-abi.h
3062 3121
3063BROADCOM NVRAM DRIVER 3122BROADCOM NVRAM DRIVER
3064M: Rafał Miłecki <zajec5@gmail.com> 3123M: Rafał Miłecki <zajec5@gmail.com>
3065L: linux-mips@linux-mips.org 3124L: linux-mips@vger.kernel.org
3066S: Maintained 3125S: Maintained
3067F: drivers/firmware/broadcom/* 3126F: drivers/firmware/broadcom/*
3068 3127
@@ -3276,6 +3335,12 @@ F: include/uapi/linux/caif/
3276F: include/net/caif/ 3335F: include/net/caif/
3277F: net/caif/ 3336F: net/caif/
3278 3337
3338CAKE QDISC
3339M: Toke Høiland-Jørgensen <toke@toke.dk>
3340L: cake@lists.bufferbloat.net (moderated for non-subscribers)
3341S: Maintained
3342F: net/sched/sch_cake.c
3343
3279CALGARY x86-64 IOMMU 3344CALGARY x86-64 IOMMU
3280M: Muli Ben-Yehuda <mulix@mulix.org> 3345M: Muli Ben-Yehuda <mulix@mulix.org>
3281M: Jon Mason <jdmason@kudzu.us> 3346M: Jon Mason <jdmason@kudzu.us>
@@ -4158,7 +4223,7 @@ F: net/decnet/
4158 4223
4159DECSTATION PLATFORM SUPPORT 4224DECSTATION PLATFORM SUPPORT
4160M: "Maciej W. Rozycki" <macro@linux-mips.org> 4225M: "Maciej W. Rozycki" <macro@linux-mips.org>
4161L: linux-mips@linux-mips.org 4226L: linux-mips@vger.kernel.org
4162W: http://www.linux-mips.org/wiki/DECstation 4227W: http://www.linux-mips.org/wiki/DECstation
4163S: Maintained 4228S: Maintained
4164F: arch/mips/dec/ 4229F: arch/mips/dec/
@@ -5249,7 +5314,7 @@ EDAC-CAVIUM OCTEON
5249M: Ralf Baechle <ralf@linux-mips.org> 5314M: Ralf Baechle <ralf@linux-mips.org>
5250M: David Daney <david.daney@cavium.com> 5315M: David Daney <david.daney@cavium.com>
5251L: linux-edac@vger.kernel.org 5316L: linux-edac@vger.kernel.org
5252L: linux-mips@linux-mips.org 5317L: linux-mips@vger.kernel.org
5253S: Supported 5318S: Supported
5254F: drivers/edac/octeon_edac* 5319F: drivers/edac/octeon_edac*
5255 5320
@@ -5528,6 +5593,7 @@ F: net/bridge/
5528ETHERNET PHY LIBRARY 5593ETHERNET PHY LIBRARY
5529M: Andrew Lunn <andrew@lunn.ch> 5594M: Andrew Lunn <andrew@lunn.ch>
5530M: Florian Fainelli <f.fainelli@gmail.com> 5595M: Florian Fainelli <f.fainelli@gmail.com>
5596M: Heiner Kallweit <hkallweit1@gmail.com>
5531L: netdev@vger.kernel.org 5597L: netdev@vger.kernel.org
5532S: Maintained 5598S: Maintained
5533F: Documentation/ABI/testing/sysfs-bus-mdio 5599F: Documentation/ABI/testing/sysfs-bus-mdio
@@ -5766,7 +5832,7 @@ F: include/uapi/linux/firewire*.h
5766F: tools/firewire/ 5832F: tools/firewire/
5767 5833
5768FIRMWARE LOADER (request_firmware) 5834FIRMWARE LOADER (request_firmware)
5769M: Luis R. Rodriguez <mcgrof@kernel.org> 5835M: Luis Chamberlain <mcgrof@kernel.org>
5770L: linux-kernel@vger.kernel.org 5836L: linux-kernel@vger.kernel.org
5771S: Maintained 5837S: Maintained
5772F: Documentation/firmware_class/ 5838F: Documentation/firmware_class/
@@ -6299,6 +6365,7 @@ F: tools/testing/selftests/gpio/
6299 6365
6300GPIO SUBSYSTEM 6366GPIO SUBSYSTEM
6301M: Linus Walleij <linus.walleij@linaro.org> 6367M: Linus Walleij <linus.walleij@linaro.org>
6368M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
6302L: linux-gpio@vger.kernel.org 6369L: linux-gpio@vger.kernel.org
6303T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git 6370T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
6304S: Maintained 6371S: Maintained
@@ -7436,6 +7503,20 @@ S: Maintained
7436F: Documentation/fb/intelfb.txt 7503F: Documentation/fb/intelfb.txt
7437F: drivers/video/fbdev/intelfb/ 7504F: drivers/video/fbdev/intelfb/
7438 7505
7506INTEL GPIO DRIVERS
7507M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7508L: linux-gpio@vger.kernel.org
7509S: Maintained
7510T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
7511F: drivers/gpio/gpio-ich.c
7512F: drivers/gpio/gpio-intel-mid.c
7513F: drivers/gpio/gpio-lynxpoint.c
7514F: drivers/gpio/gpio-merrifield.c
7515F: drivers/gpio/gpio-ml-ioh.c
7516F: drivers/gpio/gpio-pch.c
7517F: drivers/gpio/gpio-sch.c
7518F: drivers/gpio/gpio-sodaville.c
7519
7439INTEL GVT-g DRIVERS (Intel GPU Virtualization) 7520INTEL GVT-g DRIVERS (Intel GPU Virtualization)
7440M: Zhenyu Wang <zhenyuw@linux.intel.com> 7521M: Zhenyu Wang <zhenyuw@linux.intel.com>
7441M: Zhi Wang <zhi.a.wang@intel.com> 7522M: Zhi Wang <zhi.a.wang@intel.com>
@@ -7446,12 +7527,6 @@ T: git https://github.com/intel/gvt-linux.git
7446S: Supported 7527S: Supported
7447F: drivers/gpu/drm/i915/gvt/ 7528F: drivers/gpu/drm/i915/gvt/
7448 7529
7449INTEL PMIC GPIO DRIVER
7450R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7451S: Maintained
7452F: drivers/gpio/gpio-*cove.c
7453F: drivers/gpio/gpio-msic.c
7454
7455INTEL HID EVENT DRIVER 7530INTEL HID EVENT DRIVER
7456M: Alex Hung <alex.hung@canonical.com> 7531M: Alex Hung <alex.hung@canonical.com>
7457L: platform-driver-x86@vger.kernel.org 7532L: platform-driver-x86@vger.kernel.org
@@ -7539,12 +7614,6 @@ W: https://01.org/linux-acpi
7539S: Supported 7614S: Supported
7540F: drivers/platform/x86/intel_menlow.c 7615F: drivers/platform/x86/intel_menlow.c
7541 7616
7542INTEL MERRIFIELD GPIO DRIVER
7543M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7544L: linux-gpio@vger.kernel.org
7545S: Maintained
7546F: drivers/gpio/gpio-merrifield.c
7547
7548INTEL MIC DRIVERS (mic) 7617INTEL MIC DRIVERS (mic)
7549M: Sudeep Dutt <sudeep.dutt@intel.com> 7618M: Sudeep Dutt <sudeep.dutt@intel.com>
7550M: Ashutosh Dixit <ashutosh.dixit@intel.com> 7619M: Ashutosh Dixit <ashutosh.dixit@intel.com>
@@ -7577,6 +7646,13 @@ F: drivers/platform/x86/intel_punit_ipc.c
7577F: arch/x86/include/asm/intel_pmc_ipc.h 7646F: arch/x86/include/asm/intel_pmc_ipc.h
7578F: arch/x86/include/asm/intel_punit_ipc.h 7647F: arch/x86/include/asm/intel_punit_ipc.h
7579 7648
7649INTEL PMIC GPIO DRIVERS
7650M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7651S: Maintained
7652T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
7653F: drivers/gpio/gpio-*cove.c
7654F: drivers/gpio/gpio-msic.c
7655
7580INTEL MULTIFUNCTION PMIC DEVICE DRIVERS 7656INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
7581R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7657R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7582S: Maintained 7658S: Maintained
@@ -7685,7 +7761,7 @@ F: Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
7685 7761
7686IOC3 ETHERNET DRIVER 7762IOC3 ETHERNET DRIVER
7687M: Ralf Baechle <ralf@linux-mips.org> 7763M: Ralf Baechle <ralf@linux-mips.org>
7688L: linux-mips@linux-mips.org 7764L: linux-mips@vger.kernel.org
7689S: Maintained 7765S: Maintained
7690F: drivers/net/ethernet/sgi/ioc3-eth.c 7766F: drivers/net/ethernet/sgi/ioc3-eth.c
7691 7767
@@ -8056,7 +8132,7 @@ F: tools/testing/selftests/
8056F: Documentation/dev-tools/kselftest* 8132F: Documentation/dev-tools/kselftest*
8057 8133
8058KERNEL USERMODE HELPER 8134KERNEL USERMODE HELPER
8059M: "Luis R. Rodriguez" <mcgrof@kernel.org> 8135M: Luis Chamberlain <mcgrof@kernel.org>
8060L: linux-kernel@vger.kernel.org 8136L: linux-kernel@vger.kernel.org
8061S: Maintained 8137S: Maintained
8062F: kernel/umh.c 8138F: kernel/umh.c
@@ -8113,7 +8189,7 @@ F: arch/arm64/kvm/
8113 8189
8114KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) 8190KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
8115M: James Hogan <jhogan@kernel.org> 8191M: James Hogan <jhogan@kernel.org>
8116L: linux-mips@linux-mips.org 8192L: linux-mips@vger.kernel.org
8117S: Supported 8193S: Supported
8118F: arch/mips/include/uapi/asm/kvm* 8194F: arch/mips/include/uapi/asm/kvm*
8119F: arch/mips/include/asm/kvm* 8195F: arch/mips/include/asm/kvm*
@@ -8232,7 +8308,7 @@ F: mm/kmemleak.c
8232F: mm/kmemleak-test.c 8308F: mm/kmemleak-test.c
8233 8309
8234KMOD KERNEL MODULE LOADER - USERMODE HELPER 8310KMOD KERNEL MODULE LOADER - USERMODE HELPER
8235M: "Luis R. Rodriguez" <mcgrof@kernel.org> 8311M: Luis Chamberlain <mcgrof@kernel.org>
8236L: linux-kernel@vger.kernel.org 8312L: linux-kernel@vger.kernel.org
8237S: Maintained 8313S: Maintained
8238F: kernel/kmod.c 8314F: kernel/kmod.c
@@ -8286,7 +8362,7 @@ F: drivers/net/dsa/lantiq_gswip.c
8286 8362
8287LANTIQ MIPS ARCHITECTURE 8363LANTIQ MIPS ARCHITECTURE
8288M: John Crispin <john@phrozen.org> 8364M: John Crispin <john@phrozen.org>
8289L: linux-mips@linux-mips.org 8365L: linux-mips@vger.kernel.org
8290S: Maintained 8366S: Maintained
8291F: arch/mips/lantiq 8367F: arch/mips/lantiq
8292F: drivers/soc/lantiq 8368F: drivers/soc/lantiq
@@ -8849,7 +8925,7 @@ S: Maintained
8849 8925
8850MARDUK (CREATOR CI40) DEVICE TREE SUPPORT 8926MARDUK (CREATOR CI40) DEVICE TREE SUPPORT
8851M: Rahul Bedarkar <rahulbedarkar89@gmail.com> 8927M: Rahul Bedarkar <rahulbedarkar89@gmail.com>
8852L: linux-mips@linux-mips.org 8928L: linux-mips@vger.kernel.org
8853S: Maintained 8929S: Maintained
8854F: arch/mips/boot/dts/img/pistachio_marduk.dts 8930F: arch/mips/boot/dts/img/pistachio_marduk.dts
8855 8931
@@ -9808,7 +9884,7 @@ F: drivers/dma/at_xdmac.c
9808 9884
9809MICROSEMI MIPS SOCS 9885MICROSEMI MIPS SOCS
9810M: Alexandre Belloni <alexandre.belloni@bootlin.com> 9886M: Alexandre Belloni <alexandre.belloni@bootlin.com>
9811L: linux-mips@linux-mips.org 9887L: linux-mips@vger.kernel.org
9812S: Maintained 9888S: Maintained
9813F: arch/mips/generic/board-ocelot.c 9889F: arch/mips/generic/board-ocelot.c
9814F: arch/mips/configs/generic/board-ocelot.config 9890F: arch/mips/configs/generic/board-ocelot.config
@@ -9848,7 +9924,7 @@ MIPS
9848M: Ralf Baechle <ralf@linux-mips.org> 9924M: Ralf Baechle <ralf@linux-mips.org>
9849M: Paul Burton <paul.burton@mips.com> 9925M: Paul Burton <paul.burton@mips.com>
9850M: James Hogan <jhogan@kernel.org> 9926M: James Hogan <jhogan@kernel.org>
9851L: linux-mips@linux-mips.org 9927L: linux-mips@vger.kernel.org
9852W: http://www.linux-mips.org/ 9928W: http://www.linux-mips.org/
9853T: git git://git.linux-mips.org/pub/scm/ralf/linux.git 9929T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
9854T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git 9930T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
@@ -9861,7 +9937,7 @@ F: drivers/platform/mips/
9861 9937
9862MIPS BOSTON DEVELOPMENT BOARD 9938MIPS BOSTON DEVELOPMENT BOARD
9863M: Paul Burton <paul.burton@mips.com> 9939M: Paul Burton <paul.burton@mips.com>
9864L: linux-mips@linux-mips.org 9940L: linux-mips@vger.kernel.org
9865S: Maintained 9941S: Maintained
9866F: Documentation/devicetree/bindings/clock/img,boston-clock.txt 9942F: Documentation/devicetree/bindings/clock/img,boston-clock.txt
9867F: arch/mips/boot/dts/img/boston.dts 9943F: arch/mips/boot/dts/img/boston.dts
@@ -9871,7 +9947,7 @@ F: include/dt-bindings/clock/boston-clock.h
9871 9947
9872MIPS GENERIC PLATFORM 9948MIPS GENERIC PLATFORM
9873M: Paul Burton <paul.burton@mips.com> 9949M: Paul Burton <paul.burton@mips.com>
9874L: linux-mips@linux-mips.org 9950L: linux-mips@vger.kernel.org
9875S: Supported 9951S: Supported
9876F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt 9952F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt
9877F: arch/mips/generic/ 9953F: arch/mips/generic/
@@ -9879,7 +9955,7 @@ F: arch/mips/tools/generic-board-config.sh
9879 9955
9880MIPS/LOONGSON1 ARCHITECTURE 9956MIPS/LOONGSON1 ARCHITECTURE
9881M: Keguang Zhang <keguang.zhang@gmail.com> 9957M: Keguang Zhang <keguang.zhang@gmail.com>
9882L: linux-mips@linux-mips.org 9958L: linux-mips@vger.kernel.org
9883S: Maintained 9959S: Maintained
9884F: arch/mips/loongson32/ 9960F: arch/mips/loongson32/
9885F: arch/mips/include/asm/mach-loongson32/ 9961F: arch/mips/include/asm/mach-loongson32/
@@ -9888,7 +9964,7 @@ F: drivers/*/*/*loongson1*
9888 9964
9889MIPS/LOONGSON2 ARCHITECTURE 9965MIPS/LOONGSON2 ARCHITECTURE
9890M: Jiaxun Yang <jiaxun.yang@flygoat.com> 9966M: Jiaxun Yang <jiaxun.yang@flygoat.com>
9891L: linux-mips@linux-mips.org 9967L: linux-mips@vger.kernel.org
9892S: Maintained 9968S: Maintained
9893F: arch/mips/loongson64/fuloong-2e/ 9969F: arch/mips/loongson64/fuloong-2e/
9894F: arch/mips/loongson64/lemote-2f/ 9970F: arch/mips/loongson64/lemote-2f/
@@ -9898,7 +9974,7 @@ F: drivers/*/*/*loongson2*
9898 9974
9899MIPS/LOONGSON3 ARCHITECTURE 9975MIPS/LOONGSON3 ARCHITECTURE
9900M: Huacai Chen <chenhc@lemote.com> 9976M: Huacai Chen <chenhc@lemote.com>
9901L: linux-mips@linux-mips.org 9977L: linux-mips@vger.kernel.org
9902S: Maintained 9978S: Maintained
9903F: arch/mips/loongson64/ 9979F: arch/mips/loongson64/
9904F: arch/mips/include/asm/mach-loongson64/ 9980F: arch/mips/include/asm/mach-loongson64/
@@ -9908,7 +9984,7 @@ F: drivers/*/*/*loongson3*
9908 9984
9909MIPS RINT INSTRUCTION EMULATION 9985MIPS RINT INSTRUCTION EMULATION
9910M: Aleksandar Markovic <aleksandar.markovic@mips.com> 9986M: Aleksandar Markovic <aleksandar.markovic@mips.com>
9911L: linux-mips@linux-mips.org 9987L: linux-mips@vger.kernel.org
9912S: Supported 9988S: Supported
9913F: arch/mips/math-emu/sp_rint.c 9989F: arch/mips/math-emu/sp_rint.c
9914F: arch/mips/math-emu/dp_rint.c 9990F: arch/mips/math-emu/dp_rint.c
@@ -10893,7 +10969,7 @@ F: include/linux/platform_data/i2c-omap.h
10893 10969
10894ONION OMEGA2+ BOARD 10970ONION OMEGA2+ BOARD
10895M: Harvey Hunt <harveyhuntnexus@gmail.com> 10971M: Harvey Hunt <harveyhuntnexus@gmail.com>
10896L: linux-mips@linux-mips.org 10972L: linux-mips@vger.kernel.org
10897S: Maintained 10973S: Maintained
10898F: arch/mips/boot/dts/ralink/omega2p.dts 10974F: arch/mips/boot/dts/ralink/omega2p.dts
10899 10975
@@ -11802,7 +11878,7 @@ F: drivers/pinctrl/spear/
11802 11878
11803PISTACHIO SOC SUPPORT 11879PISTACHIO SOC SUPPORT
11804M: James Hartley <james.hartley@sondrel.com> 11880M: James Hartley <james.hartley@sondrel.com>
11805L: linux-mips@linux-mips.org 11881L: linux-mips@vger.kernel.org
11806S: Odd Fixes 11882S: Odd Fixes
11807F: arch/mips/pistachio/ 11883F: arch/mips/pistachio/
11808F: arch/mips/include/asm/mach-pistachio/ 11884F: arch/mips/include/asm/mach-pistachio/
@@ -11982,7 +12058,7 @@ F: kernel/printk/
11982F: include/linux/printk.h 12058F: include/linux/printk.h
11983 12059
11984PRISM54 WIRELESS DRIVER 12060PRISM54 WIRELESS DRIVER
11985M: "Luis R. Rodriguez" <mcgrof@gmail.com> 12061M: Luis Chamberlain <mcgrof@kernel.org>
11986L: linux-wireless@vger.kernel.org 12062L: linux-wireless@vger.kernel.org
11987W: http://wireless.kernel.org/en/users/Drivers/p54 12063W: http://wireless.kernel.org/en/users/Drivers/p54
11988S: Obsolete 12064S: Obsolete
@@ -11996,9 +12072,10 @@ S: Maintained
11996F: fs/proc/ 12072F: fs/proc/
11997F: include/linux/proc_fs.h 12073F: include/linux/proc_fs.h
11998F: tools/testing/selftests/proc/ 12074F: tools/testing/selftests/proc/
12075F: Documentation/filesystems/proc.txt
11999 12076
12000PROC SYSCTL 12077PROC SYSCTL
12001M: "Luis R. Rodriguez" <mcgrof@kernel.org> 12078M: Luis Chamberlain <mcgrof@kernel.org>
12002M: Kees Cook <keescook@chromium.org> 12079M: Kees Cook <keescook@chromium.org>
12003L: linux-kernel@vger.kernel.org 12080L: linux-kernel@vger.kernel.org
12004L: linux-fsdevel@vger.kernel.org 12081L: linux-fsdevel@vger.kernel.org
@@ -12461,7 +12538,7 @@ F: drivers/media/usb/rainshadow-cec/*
12461 12538
12462RALINK MIPS ARCHITECTURE 12539RALINK MIPS ARCHITECTURE
12463M: John Crispin <john@phrozen.org> 12540M: John Crispin <john@phrozen.org>
12464L: linux-mips@linux-mips.org 12541L: linux-mips@vger.kernel.org
12465S: Maintained 12542S: Maintained
12466F: arch/mips/ralink 12543F: arch/mips/ralink
12467 12544
@@ -12481,7 +12558,7 @@ F: drivers/block/brd.c
12481 12558
12482RANCHU VIRTUAL BOARD FOR MIPS 12559RANCHU VIRTUAL BOARD FOR MIPS
12483M: Miodrag Dinic <miodrag.dinic@mips.com> 12560M: Miodrag Dinic <miodrag.dinic@mips.com>
12484L: linux-mips@linux-mips.org 12561L: linux-mips@vger.kernel.org
12485S: Supported 12562S: Supported
12486F: arch/mips/generic/board-ranchu.c 12563F: arch/mips/generic/board-ranchu.c
12487F: arch/mips/configs/generic/board-ranchu.config 12564F: arch/mips/configs/generic/board-ranchu.config
@@ -13931,6 +14008,7 @@ S: Supported
13931F: Documentation/devicetree/bindings/sound/ 14008F: Documentation/devicetree/bindings/sound/
13932F: Documentation/sound/soc/ 14009F: Documentation/sound/soc/
13933F: sound/soc/ 14010F: sound/soc/
14011F: include/dt-bindings/sound/
13934F: include/sound/soc* 14012F: include/sound/soc*
13935 14013
13936SOUNDWIRE SUBSYSTEM 14014SOUNDWIRE SUBSYSTEM
@@ -13978,11 +14056,10 @@ F: drivers/tty/serial/sunzilog.h
13978F: drivers/tty/vcc.c 14056F: drivers/tty/vcc.c
13979 14057
13980SPARSE CHECKER 14058SPARSE CHECKER
13981M: "Christopher Li" <sparse@chrisli.org> 14059M: "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
13982L: linux-sparse@vger.kernel.org 14060L: linux-sparse@vger.kernel.org
13983W: https://sparse.wiki.kernel.org/ 14061W: https://sparse.wiki.kernel.org/
13984T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git 14062T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
13985T: git git://git.kernel.org/pub/scm/devel/sparse/chrisl/sparse.git
13986S: Maintained 14063S: Maintained
13987F: include/linux/compiler.h 14064F: include/linux/compiler.h
13988 14065
@@ -14079,6 +14156,7 @@ F: Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt
14079 14156
14080STABLE BRANCH 14157STABLE BRANCH
14081M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 14158M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
14159M: Sasha Levin <sashal@kernel.org>
14082L: stable@vger.kernel.org 14160L: stable@vger.kernel.org
14083S: Supported 14161S: Supported
14084F: Documentation/process/stable-kernel-rules.rst 14162F: Documentation/process/stable-kernel-rules.rst
@@ -15216,7 +15294,7 @@ F: arch/um/os-Linux/drivers/
15216TURBOCHANNEL SUBSYSTEM 15294TURBOCHANNEL SUBSYSTEM
15217M: "Maciej W. Rozycki" <macro@linux-mips.org> 15295M: "Maciej W. Rozycki" <macro@linux-mips.org>
15218M: Ralf Baechle <ralf@linux-mips.org> 15296M: Ralf Baechle <ralf@linux-mips.org>
15219L: linux-mips@linux-mips.org 15297L: linux-mips@vger.kernel.org
15220Q: http://patchwork.linux-mips.org/project/linux-mips/list/ 15298Q: http://patchwork.linux-mips.org/project/linux-mips/list/
15221S: Maintained 15299S: Maintained
15222F: drivers/tc/ 15300F: drivers/tc/
@@ -16037,7 +16115,7 @@ F: drivers/net/vmxnet3/
16037 16115
16038VOCORE VOCORE2 BOARD 16116VOCORE VOCORE2 BOARD
16039M: Harvey Hunt <harveyhuntnexus@gmail.com> 16117M: Harvey Hunt <harveyhuntnexus@gmail.com>
16040L: linux-mips@linux-mips.org 16118L: linux-mips@vger.kernel.org
16041S: Maintained 16119S: Maintained
16042F: arch/mips/boot/dts/ralink/vocore2.dts 16120F: arch/mips/boot/dts/ralink/vocore2.dts
16043 16121
diff --git a/Makefile b/Makefile
index ddbf627cad8f..e9fd22c8445e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 20 3PATCHLEVEL = 20
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc3 5EXTRAVERSION = -rc5
6NAME = "People's Front" 6NAME = Shy Crocodile
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
9# To see a list of typical targets execute "make help" 9# To see a list of typical targets execute "make help"
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index d4d33cd7adad..1e2bb68231ad 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -228,7 +228,7 @@
228 vmmc-supply = <&vmmc_fixed>; 228 vmmc-supply = <&vmmc_fixed>;
229 bus-width = <4>; 229 bus-width = <4>;
230 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ 230 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
231 cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */ 231 cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */
232}; 232};
233 233
234&mmc3 { 234&mmc3 {
diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi
index dae6e458e59f..b1c988eed87c 100644
--- a/arch/arm/boot/dts/am3517-som.dtsi
+++ b/arch/arm/boot/dts/am3517-som.dtsi
@@ -163,7 +163,7 @@
163 compatible = "ti,wl1271"; 163 compatible = "ti,wl1271";
164 reg = <2>; 164 reg = <2>;
165 interrupt-parent = <&gpio6>; 165 interrupt-parent = <&gpio6>;
166 interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */ 166 interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */
167 ref-clock-frequency = <26000000>; 167 ref-clock-frequency = <26000000>;
168 tcxo-clock-frequency = <26000000>; 168 tcxo-clock-frequency = <26000000>;
169 }; 169 };
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index e45a15ceb94b..69d753cac89a 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -492,12 +492,6 @@
492 pinctrl-0 = <&pinctrl_i2c2>; 492 pinctrl-0 = <&pinctrl_i2c2>;
493 status = "okay"; 493 status = "okay";
494 494
495 eeprom@50 {
496 compatible = "atmel,24c04";
497 pagesize = <16>;
498 reg = <0x50>;
499 };
500
501 hpa1: amp@60 { 495 hpa1: amp@60 {
502 compatible = "ti,tpa6130a2"; 496 compatible = "ti,tpa6130a2";
503 reg = <0x60>; 497 reg = <0x60>;
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index ac343330d0c8..98b682a8080c 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -129,7 +129,7 @@
129}; 129};
130 130
131&mmc3 { 131&mmc3 {
132 interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; 132 interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
133 pinctrl-0 = <&mmc3_pins &wl127x_gpio>; 133 pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
134 pinctrl-names = "default"; 134 pinctrl-names = "default";
135 vmmc-supply = <&wl12xx_vmmc>; 135 vmmc-supply = <&wl12xx_vmmc>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 9d5d53fbe9c0..c39cf2ca54da 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -35,7 +35,7 @@
35 * jumpering combinations for the long run. 35 * jumpering combinations for the long run.
36 */ 36 */
37&mmc3 { 37&mmc3 {
38 interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; 38 interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
39 pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>; 39 pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>;
40 pinctrl-names = "default"; 40 pinctrl-names = "default";
41 vmmc-supply = <&wl12xx_vmmc>; 41 vmmc-supply = <&wl12xx_vmmc>;
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 2075120cfc4d..d8bf939a3aff 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -10,7 +10,11 @@
10#include "rk3288.dtsi" 10#include "rk3288.dtsi"
11 11
12/ { 12/ {
13 memory@0 { 13 /*
14 * The default coreboot on veyron devices ignores memory@0 nodes
15 * and would instead create another memory node.
16 */
17 memory {
14 device_type = "memory"; 18 device_type = "memory";
15 reg = <0x0 0x0 0x0 0x80000000>; 19 reg = <0x0 0x0 0x0 0x80000000>;
16 }; 20 };
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 843052f14f1c..dd0dda6ed44b 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -314,7 +314,7 @@
314 0x1 0x0 0x60000000 0x10000000 314 0x1 0x0 0x60000000 0x10000000
315 0x2 0x0 0x70000000 0x10000000 315 0x2 0x0 0x70000000 0x10000000
316 0x3 0x0 0x80000000 0x10000000>; 316 0x3 0x0 0x80000000 0x10000000>;
317 clocks = <&mck>; 317 clocks = <&h32ck>;
318 status = "disabled"; 318 status = "disabled";
319 319
320 nand_controller: nand-controller { 320 nand_controller: nand-controller {
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 0142fcfcc3d3..bda949fd84e8 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -183,9 +183,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
183 unsigned long frame_pointer) 183 unsigned long frame_pointer)
184{ 184{
185 unsigned long return_hooker = (unsigned long) &return_to_handler; 185 unsigned long return_hooker = (unsigned long) &return_to_handler;
186 struct ftrace_graph_ent trace;
187 unsigned long old; 186 unsigned long old;
188 int err;
189 187
190 if (unlikely(atomic_read(&current->tracing_graph_pause))) 188 if (unlikely(atomic_read(&current->tracing_graph_pause)))
191 return; 189 return;
@@ -193,21 +191,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
193 old = *parent; 191 old = *parent;
194 *parent = return_hooker; 192 *parent = return_hooker;
195 193
196 trace.func = self_addr; 194 if (function_graph_enter(old, self_addr, frame_pointer, NULL))
197 trace.depth = current->curr_ret_stack + 1;
198
199 /* Only trace if the calling function expects to */
200 if (!ftrace_graph_entry(&trace)) {
201 *parent = old; 195 *parent = old;
202 return;
203 }
204
205 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
206 frame_pointer, NULL);
207 if (err == -EBUSY) {
208 *parent = old;
209 return;
210 }
211} 196}
212 197
213#ifdef CONFIG_DYNAMIC_FTRACE 198#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 0bc5bd2665df..2cc9fe4c3a91 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -759,7 +759,9 @@ static struct davinci_id da830_ids[] = {
759}; 759};
760 760
761static struct davinci_gpio_platform_data da830_gpio_platform_data = { 761static struct davinci_gpio_platform_data da830_gpio_platform_data = {
762 .ngpio = 128, 762 .no_auto_base = true,
763 .base = 0,
764 .ngpio = 128,
763}; 765};
764 766
765int __init da830_register_gpio(void) 767int __init da830_register_gpio(void)
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 4528bbf0c861..e7b78df2bfef 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -719,7 +719,9 @@ int __init da850_register_vpif_capture(struct vpif_capture_config
719} 719}
720 720
721static struct davinci_gpio_platform_data da850_gpio_platform_data = { 721static struct davinci_gpio_platform_data da850_gpio_platform_data = {
722 .ngpio = 144, 722 .no_auto_base = true,
723 .base = 0,
724 .ngpio = 144,
723}; 725};
724 726
725int __init da850_register_gpio(void) 727int __init da850_register_gpio(void)
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 1fd3619f6a09..cf78da5ab054 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -701,6 +701,46 @@ static struct resource da8xx_gpio_resources[] = {
701 }, 701 },
702 { /* interrupt */ 702 { /* interrupt */
703 .start = IRQ_DA8XX_GPIO0, 703 .start = IRQ_DA8XX_GPIO0,
704 .end = IRQ_DA8XX_GPIO0,
705 .flags = IORESOURCE_IRQ,
706 },
707 {
708 .start = IRQ_DA8XX_GPIO1,
709 .end = IRQ_DA8XX_GPIO1,
710 .flags = IORESOURCE_IRQ,
711 },
712 {
713 .start = IRQ_DA8XX_GPIO2,
714 .end = IRQ_DA8XX_GPIO2,
715 .flags = IORESOURCE_IRQ,
716 },
717 {
718 .start = IRQ_DA8XX_GPIO3,
719 .end = IRQ_DA8XX_GPIO3,
720 .flags = IORESOURCE_IRQ,
721 },
722 {
723 .start = IRQ_DA8XX_GPIO4,
724 .end = IRQ_DA8XX_GPIO4,
725 .flags = IORESOURCE_IRQ,
726 },
727 {
728 .start = IRQ_DA8XX_GPIO5,
729 .end = IRQ_DA8XX_GPIO5,
730 .flags = IORESOURCE_IRQ,
731 },
732 {
733 .start = IRQ_DA8XX_GPIO6,
734 .end = IRQ_DA8XX_GPIO6,
735 .flags = IORESOURCE_IRQ,
736 },
737 {
738 .start = IRQ_DA8XX_GPIO7,
739 .end = IRQ_DA8XX_GPIO7,
740 .flags = IORESOURCE_IRQ,
741 },
742 {
743 .start = IRQ_DA8XX_GPIO8,
704 .end = IRQ_DA8XX_GPIO8, 744 .end = IRQ_DA8XX_GPIO8,
705 .flags = IORESOURCE_IRQ, 745 .flags = IORESOURCE_IRQ,
706 }, 746 },
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9f7d38d12c88..4c6e0bef4509 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -548,12 +548,44 @@ static struct resource dm355_gpio_resources[] = {
548 }, 548 },
549 { /* interrupt */ 549 { /* interrupt */
550 .start = IRQ_DM355_GPIOBNK0, 550 .start = IRQ_DM355_GPIOBNK0,
551 .end = IRQ_DM355_GPIOBNK0,
552 .flags = IORESOURCE_IRQ,
553 },
554 {
555 .start = IRQ_DM355_GPIOBNK1,
556 .end = IRQ_DM355_GPIOBNK1,
557 .flags = IORESOURCE_IRQ,
558 },
559 {
560 .start = IRQ_DM355_GPIOBNK2,
561 .end = IRQ_DM355_GPIOBNK2,
562 .flags = IORESOURCE_IRQ,
563 },
564 {
565 .start = IRQ_DM355_GPIOBNK3,
566 .end = IRQ_DM355_GPIOBNK3,
567 .flags = IORESOURCE_IRQ,
568 },
569 {
570 .start = IRQ_DM355_GPIOBNK4,
571 .end = IRQ_DM355_GPIOBNK4,
572 .flags = IORESOURCE_IRQ,
573 },
574 {
575 .start = IRQ_DM355_GPIOBNK5,
576 .end = IRQ_DM355_GPIOBNK5,
577 .flags = IORESOURCE_IRQ,
578 },
579 {
580 .start = IRQ_DM355_GPIOBNK6,
551 .end = IRQ_DM355_GPIOBNK6, 581 .end = IRQ_DM355_GPIOBNK6,
552 .flags = IORESOURCE_IRQ, 582 .flags = IORESOURCE_IRQ,
553 }, 583 },
554}; 584};
555 585
556static struct davinci_gpio_platform_data dm355_gpio_platform_data = { 586static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
587 .no_auto_base = true,
588 .base = 0,
557 .ngpio = 104, 589 .ngpio = 104,
558}; 590};
559 591
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index abcf2a5ed89b..01fb2b0c82de 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -267,12 +267,49 @@ static struct resource dm365_gpio_resources[] = {
267 }, 267 },
268 { /* interrupt */ 268 { /* interrupt */
269 .start = IRQ_DM365_GPIO0, 269 .start = IRQ_DM365_GPIO0,
270 .end = IRQ_DM365_GPIO0,
271 .flags = IORESOURCE_IRQ,
272 },
273 {
274 .start = IRQ_DM365_GPIO1,
275 .end = IRQ_DM365_GPIO1,
276 .flags = IORESOURCE_IRQ,
277 },
278 {
279 .start = IRQ_DM365_GPIO2,
280 .end = IRQ_DM365_GPIO2,
281 .flags = IORESOURCE_IRQ,
282 },
283 {
284 .start = IRQ_DM365_GPIO3,
285 .end = IRQ_DM365_GPIO3,
286 .flags = IORESOURCE_IRQ,
287 },
288 {
289 .start = IRQ_DM365_GPIO4,
290 .end = IRQ_DM365_GPIO4,
291 .flags = IORESOURCE_IRQ,
292 },
293 {
294 .start = IRQ_DM365_GPIO5,
295 .end = IRQ_DM365_GPIO5,
296 .flags = IORESOURCE_IRQ,
297 },
298 {
299 .start = IRQ_DM365_GPIO6,
300 .end = IRQ_DM365_GPIO6,
301 .flags = IORESOURCE_IRQ,
302 },
303 {
304 .start = IRQ_DM365_GPIO7,
270 .end = IRQ_DM365_GPIO7, 305 .end = IRQ_DM365_GPIO7,
271 .flags = IORESOURCE_IRQ, 306 .flags = IORESOURCE_IRQ,
272 }, 307 },
273}; 308};
274 309
275static struct davinci_gpio_platform_data dm365_gpio_platform_data = { 310static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
311 .no_auto_base = true,
312 .base = 0,
276 .ngpio = 104, 313 .ngpio = 104,
277 .gpio_unbanked = 8, 314 .gpio_unbanked = 8,
278}; 315};
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 0720da7809a6..38f92b7d413e 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -492,12 +492,34 @@ static struct resource dm644_gpio_resources[] = {
492 }, 492 },
493 { /* interrupt */ 493 { /* interrupt */
494 .start = IRQ_GPIOBNK0, 494 .start = IRQ_GPIOBNK0,
495 .end = IRQ_GPIOBNK0,
496 .flags = IORESOURCE_IRQ,
497 },
498 {
499 .start = IRQ_GPIOBNK1,
500 .end = IRQ_GPIOBNK1,
501 .flags = IORESOURCE_IRQ,
502 },
503 {
504 .start = IRQ_GPIOBNK2,
505 .end = IRQ_GPIOBNK2,
506 .flags = IORESOURCE_IRQ,
507 },
508 {
509 .start = IRQ_GPIOBNK3,
510 .end = IRQ_GPIOBNK3,
511 .flags = IORESOURCE_IRQ,
512 },
513 {
514 .start = IRQ_GPIOBNK4,
495 .end = IRQ_GPIOBNK4, 515 .end = IRQ_GPIOBNK4,
496 .flags = IORESOURCE_IRQ, 516 .flags = IORESOURCE_IRQ,
497 }, 517 },
498}; 518};
499 519
500static struct davinci_gpio_platform_data dm644_gpio_platform_data = { 520static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
521 .no_auto_base = true,
522 .base = 0,
501 .ngpio = 71, 523 .ngpio = 71,
502}; 524};
503 525
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 6bd2ed069d0d..7dc54b2a610f 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -442,12 +442,24 @@ static struct resource dm646x_gpio_resources[] = {
442 }, 442 },
443 { /* interrupt */ 443 { /* interrupt */
444 .start = IRQ_DM646X_GPIOBNK0, 444 .start = IRQ_DM646X_GPIOBNK0,
445 .end = IRQ_DM646X_GPIOBNK0,
446 .flags = IORESOURCE_IRQ,
447 },
448 {
449 .start = IRQ_DM646X_GPIOBNK1,
450 .end = IRQ_DM646X_GPIOBNK1,
451 .flags = IORESOURCE_IRQ,
452 },
453 {
454 .start = IRQ_DM646X_GPIOBNK2,
445 .end = IRQ_DM646X_GPIOBNK2, 455 .end = IRQ_DM646X_GPIOBNK2,
446 .flags = IORESOURCE_IRQ, 456 .flags = IORESOURCE_IRQ,
447 }, 457 },
448}; 458};
449 459
450static struct davinci_gpio_platform_data dm646x_gpio_platform_data = { 460static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
461 .no_auto_base = true,
462 .base = 0,
451 .ngpio = 43, 463 .ngpio = 43,
452}; 464};
453 465
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 3d191fd52910..17886744dbe6 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -750,6 +750,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
750 struct modem_private_data *priv = port->private_data; 750 struct modem_private_data *priv = port->private_data;
751 int ret; 751 int ret;
752 752
753 if (!priv)
754 return;
755
753 if (IS_ERR(priv->regulator)) 756 if (IS_ERR(priv->regulator))
754 return; 757 return;
755 758
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 7b95729e8359..38a1be6c3694 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
351 * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and 351 * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
352 * omap44xx_prm_reconfigure_io_chain() must be called. No return value. 352 * omap44xx_prm_reconfigure_io_chain() must be called. No return value.
353 */ 353 */
354static void __init omap44xx_prm_enable_io_wakeup(void) 354static void omap44xx_prm_enable_io_wakeup(void)
355{ 355{
356 s32 inst = omap4_prmst_get_prm_dev_inst(); 356 s32 inst = omap4_prmst_get_prm_dev_inst();
357 357
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 787d7850e064..ea2ab0330e3a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -497,6 +497,24 @@ config ARM64_ERRATUM_1188873
497 497
498 If unsure, say Y. 498 If unsure, say Y.
499 499
500config ARM64_ERRATUM_1286807
501 bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
502 default y
503 select ARM64_WORKAROUND_REPEAT_TLBI
504 help
505 This option adds workaround for ARM Cortex-A76 erratum 1286807
506
507 On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
508 address for a cacheable mapping of a location is being
509 accessed by a core while another core is remapping the virtual
510 address to a new physical page using the recommended
511 break-before-make sequence, then under very rare circumstances
512 TLBI+DSB completes before a read using the translation being
513 invalidated has been observed by other observers. The
514 workaround repeats the TLBI+DSB operation.
515
516 If unsure, say Y.
517
500config CAVIUM_ERRATUM_22375 518config CAVIUM_ERRATUM_22375
501 bool "Cavium erratum 22375, 24313" 519 bool "Cavium erratum 22375, 24313"
502 default y 520 default y
@@ -566,9 +584,16 @@ config QCOM_FALKOR_ERRATUM_1003
566 is unchanged. Work around the erratum by invalidating the walk cache 584 is unchanged. Work around the erratum by invalidating the walk cache
567 entries for the trampoline before entering the kernel proper. 585 entries for the trampoline before entering the kernel proper.
568 586
587config ARM64_WORKAROUND_REPEAT_TLBI
588 bool
589 help
590 Enable the repeat TLBI workaround for Falkor erratum 1009 and
591 Cortex-A76 erratum 1286807.
592
569config QCOM_FALKOR_ERRATUM_1009 593config QCOM_FALKOR_ERRATUM_1009
570 bool "Falkor E1009: Prematurely complete a DSB after a TLBI" 594 bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
571 default y 595 default y
596 select ARM64_WORKAROUND_REPEAT_TLBI
572 help 597 help
573 On Falkor v1, the CPU may prematurely complete a DSB following a 598 On Falkor v1, the CPU may prematurely complete a DSB following a
574 TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation 599 TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index b4276da1fb0d..11fd1fe8bdb5 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -241,3 +241,7 @@
241 }; 241 };
242 }; 242 };
243}; 243};
244
245&tlmm {
246 gpio-reserved-ranges = <0 4>, <81 4>;
247};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index eedfaf8922e2..d667eee4e6d0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -352,6 +352,10 @@
352 status = "okay"; 352 status = "okay";
353}; 353};
354 354
355&tlmm {
356 gpio-reserved-ranges = <0 4>, <81 4>;
357};
358
355&uart9 { 359&uart9 {
356 status = "okay"; 360 status = "okay";
357}; 361};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 2dceeea29b83..1e6a71066c16 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -153,7 +153,7 @@
153}; 153};
154 154
155&pcie0 { 155&pcie0 {
156 ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>; 156 ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>;
157 num-lanes = <4>; 157 num-lanes = <4>;
158 pinctrl-names = "default"; 158 pinctrl-names = "default";
159 pinctrl-0 = <&pcie_clkreqn_cpm>; 159 pinctrl-0 = <&pcie_clkreqn_cpm>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index 6c8c4ab044aa..56abbb08c133 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -57,18 +57,6 @@
57 regulator-always-on; 57 regulator-always-on;
58 vin-supply = <&vcc_sys>; 58 vin-supply = <&vcc_sys>;
59 }; 59 };
60
61 vdd_log: vdd-log {
62 compatible = "pwm-regulator";
63 pwms = <&pwm2 0 25000 0>;
64 regulator-name = "vdd_log";
65 regulator-min-microvolt = <800000>;
66 regulator-max-microvolt = <1400000>;
67 regulator-always-on;
68 regulator-boot-on;
69 vin-supply = <&vcc_sys>;
70 };
71
72}; 60};
73 61
74&cpu_l0 { 62&cpu_l0 {
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index affc3c309353..8d7b47f9dfbf 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -36,7 +36,7 @@
36 36
37 wkup_uart0: serial@42300000 { 37 wkup_uart0: serial@42300000 {
38 compatible = "ti,am654-uart"; 38 compatible = "ti,am654-uart";
39 reg = <0x00 0x42300000 0x00 0x100>; 39 reg = <0x42300000 0x100>;
40 reg-shift = <2>; 40 reg-shift = <2>;
41 reg-io-width = <4>; 41 reg-io-width = <4>;
42 interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>; 42 interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index caa955f10e19..fac54fb050d0 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -56,6 +56,19 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
56{ 56{
57 return is_compat_task(); 57 return is_compat_task();
58} 58}
59
60#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
61
62static inline bool arch_syscall_match_sym_name(const char *sym,
63 const char *name)
64{
65 /*
66 * Since all syscall functions have __arm64_ prefix, we must skip it.
67 * However, as we described above, we decided to ignore compat
68 * syscalls, so we don't care about __arm64_compat_ prefix here.
69 */
70 return !strcmp(sym + 8, name);
71}
59#endif /* ifndef __ASSEMBLY__ */ 72#endif /* ifndef __ASSEMBLY__ */
60 73
61#endif /* __ASM_FTRACE_H */ 74#endif /* __ASM_FTRACE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 0c909c4a932f..842fb9572661 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -468,7 +468,7 @@
468 SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ 468 SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
469 SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) 469 SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
470 470
471#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff 471#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
472#error "Inconsistent SCTLR_EL2 set/clear bits" 472#error "Inconsistent SCTLR_EL2 set/clear bits"
473#endif 473#endif
474 474
@@ -509,7 +509,7 @@
509 SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ 509 SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
510 SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) 510 SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
511 511
512#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff 512#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
513#error "Inconsistent SCTLR_EL1 set/clear bits" 513#error "Inconsistent SCTLR_EL1 set/clear bits"
514#endif 514#endif
515 515
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c3c0387aee18..5dfd23897dea 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -41,14 +41,14 @@
41 ALTERNATIVE("nop\n nop", \ 41 ALTERNATIVE("nop\n nop", \
42 "dsb ish\n tlbi " #op, \ 42 "dsb ish\n tlbi " #op, \
43 ARM64_WORKAROUND_REPEAT_TLBI, \ 43 ARM64_WORKAROUND_REPEAT_TLBI, \
44 CONFIG_QCOM_FALKOR_ERRATUM_1009) \ 44 CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
45 : : ) 45 : : )
46 46
47#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \ 47#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \
48 ALTERNATIVE("nop\n nop", \ 48 ALTERNATIVE("nop\n nop", \
49 "dsb ish\n tlbi " #op ", %0", \ 49 "dsb ish\n tlbi " #op ", %0", \
50 ARM64_WORKAROUND_REPEAT_TLBI, \ 50 ARM64_WORKAROUND_REPEAT_TLBI, \
51 CONFIG_QCOM_FALKOR_ERRATUM_1009) \ 51 CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
52 : : "r" (arg)) 52 : : "r" (arg))
53 53
54#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg) 54#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a509e35132d2..6ad715d67df8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -570,6 +570,20 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
570 570
571#endif 571#endif
572 572
573#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
574
575static const struct midr_range arm64_repeat_tlbi_cpus[] = {
576#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
577 MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
578#endif
579#ifdef CONFIG_ARM64_ERRATUM_1286807
580 MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
581#endif
582 {},
583};
584
585#endif
586
573const struct arm64_cpu_capabilities arm64_errata[] = { 587const struct arm64_cpu_capabilities arm64_errata[] = {
574#if defined(CONFIG_ARM64_ERRATUM_826319) || \ 588#if defined(CONFIG_ARM64_ERRATUM_826319) || \
575 defined(CONFIG_ARM64_ERRATUM_827319) || \ 589 defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -695,11 +709,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
695 .matches = is_kryo_midr, 709 .matches = is_kryo_midr,
696 }, 710 },
697#endif 711#endif
698#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 712#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
699 { 713 {
700 .desc = "Qualcomm Technologies Falkor erratum 1009", 714 .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
701 .capability = ARM64_WORKAROUND_REPEAT_TLBI, 715 .capability = ARM64_WORKAROUND_REPEAT_TLBI,
702 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), 716 ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
703 }, 717 },
704#endif 718#endif
705#ifdef CONFIG_ARM64_ERRATUM_858921 719#ifdef CONFIG_ARM64_ERRATUM_858921
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index af50064dea51..aec5ecb85737 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1333,7 +1333,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
1333 .cpu_enable = cpu_enable_hw_dbm, 1333 .cpu_enable = cpu_enable_hw_dbm,
1334 }, 1334 },
1335#endif 1335#endif
1336#ifdef CONFIG_ARM64_SSBD
1337 { 1336 {
1338 .desc = "CRC32 instructions", 1337 .desc = "CRC32 instructions",
1339 .capability = ARM64_HAS_CRC32, 1338 .capability = ARM64_HAS_CRC32,
@@ -1343,6 +1342,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
1343 .field_pos = ID_AA64ISAR0_CRC32_SHIFT, 1342 .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
1344 .min_field_value = 1, 1343 .min_field_value = 1,
1345 }, 1344 },
1345#ifdef CONFIG_ARM64_SSBD
1346 { 1346 {
1347 .desc = "Speculative Store Bypassing Safe (SSBS)", 1347 .desc = "Speculative Store Bypassing Safe (SSBS)",
1348 .capability = ARM64_SSBS, 1348 .capability = ARM64_SSBS,
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 50986e388d2b..57e962290df3 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -216,8 +216,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
216{ 216{
217 unsigned long return_hooker = (unsigned long)&return_to_handler; 217 unsigned long return_hooker = (unsigned long)&return_to_handler;
218 unsigned long old; 218 unsigned long old;
219 struct ftrace_graph_ent trace;
220 int err;
221 219
222 if (unlikely(atomic_read(&current->tracing_graph_pause))) 220 if (unlikely(atomic_read(&current->tracing_graph_pause)))
223 return; 221 return;
@@ -229,18 +227,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
229 */ 227 */
230 old = *parent; 228 old = *parent;
231 229
232 trace.func = self_addr; 230 if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
233 trace.depth = current->curr_ret_stack + 1;
234
235 /* Only trace if the calling function expects to */
236 if (!ftrace_graph_entry(&trace))
237 return;
238
239 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
240 frame_pointer, NULL);
241 if (err == -EBUSY)
242 return;
243 else
244 *parent = return_hooker; 231 *parent = return_hooker;
245} 232}
246 233
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a6fdaea07c63..89198017e8e6 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -351,7 +351,8 @@ static void build_epilogue(struct jit_ctx *ctx)
351 * >0 - successfully JITed a 16-byte eBPF instruction. 351 * >0 - successfully JITed a 16-byte eBPF instruction.
352 * <0 - failed to JIT. 352 * <0 - failed to JIT.
353 */ 353 */
354static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) 354static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
355 bool extra_pass)
355{ 356{
356 const u8 code = insn->code; 357 const u8 code = insn->code;
357 const u8 dst = bpf2a64[insn->dst_reg]; 358 const u8 dst = bpf2a64[insn->dst_reg];
@@ -625,12 +626,19 @@ emit_cond_jmp:
625 case BPF_JMP | BPF_CALL: 626 case BPF_JMP | BPF_CALL:
626 { 627 {
627 const u8 r0 = bpf2a64[BPF_REG_0]; 628 const u8 r0 = bpf2a64[BPF_REG_0];
628 const u64 func = (u64)__bpf_call_base + imm; 629 bool func_addr_fixed;
630 u64 func_addr;
631 int ret;
629 632
630 if (ctx->prog->is_func) 633 ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
631 emit_addr_mov_i64(tmp, func, ctx); 634 &func_addr, &func_addr_fixed);
635 if (ret < 0)
636 return ret;
637 if (func_addr_fixed)
638 /* We can use optimized emission here. */
639 emit_a64_mov_i64(tmp, func_addr, ctx);
632 else 640 else
633 emit_a64_mov_i64(tmp, func, ctx); 641 emit_addr_mov_i64(tmp, func_addr, ctx);
634 emit(A64_BLR(tmp), ctx); 642 emit(A64_BLR(tmp), ctx);
635 emit(A64_MOV(1, r0, A64_R(0)), ctx); 643 emit(A64_MOV(1, r0, A64_R(0)), ctx);
636 break; 644 break;
@@ -753,7 +761,7 @@ emit_cond_jmp:
753 return 0; 761 return 0;
754} 762}
755 763
756static int build_body(struct jit_ctx *ctx) 764static int build_body(struct jit_ctx *ctx, bool extra_pass)
757{ 765{
758 const struct bpf_prog *prog = ctx->prog; 766 const struct bpf_prog *prog = ctx->prog;
759 int i; 767 int i;
@@ -762,7 +770,7 @@ static int build_body(struct jit_ctx *ctx)
762 const struct bpf_insn *insn = &prog->insnsi[i]; 770 const struct bpf_insn *insn = &prog->insnsi[i];
763 int ret; 771 int ret;
764 772
765 ret = build_insn(insn, ctx); 773 ret = build_insn(insn, ctx, extra_pass);
766 if (ret > 0) { 774 if (ret > 0) {
767 i++; 775 i++;
768 if (ctx->image == NULL) 776 if (ctx->image == NULL)
@@ -858,7 +866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
858 /* 1. Initial fake pass to compute ctx->idx. */ 866 /* 1. Initial fake pass to compute ctx->idx. */
859 867
860 /* Fake pass to fill in ctx->offset. */ 868 /* Fake pass to fill in ctx->offset. */
861 if (build_body(&ctx)) { 869 if (build_body(&ctx, extra_pass)) {
862 prog = orig_prog; 870 prog = orig_prog;
863 goto out_off; 871 goto out_off;
864 } 872 }
@@ -888,7 +896,7 @@ skip_init_ctx:
888 896
889 build_prologue(&ctx, was_classic); 897 build_prologue(&ctx, was_classic);
890 898
891 if (build_body(&ctx)) { 899 if (build_body(&ctx, extra_pass)) {
892 bpf_jit_binary_free(header); 900 bpf_jit_binary_free(header);
893 prog = orig_prog; 901 prog = orig_prog;
894 goto out_off; 902 goto out_off;
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index ebef7f40aabb..c5c253cb9bd6 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -59,7 +59,9 @@ extern struct node_cpuid_s node_cpuid[NR_CPUS];
59 */ 59 */
60 60
61extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; 61extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
62#define node_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)]) 62#define slit_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)])
63extern int __node_distance(int from, int to);
64#define node_distance(from,to) __node_distance(from, to)
63 65
64extern int paddr_to_nid(unsigned long paddr); 66extern int paddr_to_nid(unsigned long paddr);
65 67
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 1dacbf5e9e09..41eb281709da 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -578,8 +578,8 @@ void __init acpi_numa_fixup(void)
578 if (!slit_table) { 578 if (!slit_table) {
579 for (i = 0; i < MAX_NUMNODES; i++) 579 for (i = 0; i < MAX_NUMNODES; i++)
580 for (j = 0; j < MAX_NUMNODES; j++) 580 for (j = 0; j < MAX_NUMNODES; j++)
581 node_distance(i, j) = i == j ? LOCAL_DISTANCE : 581 slit_distance(i, j) = i == j ?
582 REMOTE_DISTANCE; 582 LOCAL_DISTANCE : REMOTE_DISTANCE;
583 return; 583 return;
584 } 584 }
585 585
@@ -592,7 +592,7 @@ void __init acpi_numa_fixup(void)
592 if (!pxm_bit_test(j)) 592 if (!pxm_bit_test(j))
593 continue; 593 continue;
594 node_to = pxm_to_node(j); 594 node_to = pxm_to_node(j);
595 node_distance(node_from, node_to) = 595 slit_distance(node_from, node_to) =
596 slit_table->entry[i * slit_table->locality_count + j]; 596 slit_table->entry[i * slit_table->locality_count + j];
597 } 597 }
598 } 598 }
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 3861d6e32d5f..a03803506b0c 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -36,6 +36,12 @@ struct node_cpuid_s node_cpuid[NR_CPUS] =
36 */ 36 */
37u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; 37u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
38 38
39int __node_distance(int from, int to)
40{
41 return slit_distance(from, to);
42}
43EXPORT_SYMBOL(__node_distance);
44
39/* Identify which cnode a physical address resides on */ 45/* Identify which cnode a physical address resides on */
40int 46int
41paddr_to_nid(unsigned long paddr) 47paddr_to_nid(unsigned long paddr)
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index d57563c58a26..224eea40e1ee 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -22,8 +22,7 @@
22void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) 22void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
23{ 23{
24 unsigned long old; 24 unsigned long old;
25 int faulted, err; 25 int faulted;
26 struct ftrace_graph_ent trace;
27 unsigned long return_hooker = (unsigned long) 26 unsigned long return_hooker = (unsigned long)
28 &return_to_handler; 27 &return_to_handler;
29 28
@@ -63,18 +62,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
63 return; 62 return;
64 } 63 }
65 64
66 err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); 65 if (function_graph_enter(old, self_addr, 0, NULL))
67 if (err == -EBUSY) {
68 *parent = old; 66 *parent = old;
69 return;
70 }
71
72 trace.func = self_addr;
73 /* Only trace if the calling function expects to */
74 if (!ftrace_graph_entry(&trace)) {
75 current->curr_ret_stack--;
76 *parent = old;
77 }
78} 67}
79#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 68#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
80 69
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index 490b12af103c..c52d0efacd14 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
140CONFIG_RTC_DRV_DS1307=y 140CONFIG_RTC_DRV_DS1307=y
141CONFIG_STAGING=y 141CONFIG_STAGING=y
142CONFIG_OCTEON_ETHERNET=y 142CONFIG_OCTEON_ETHERNET=y
143CONFIG_OCTEON_USB=y
143# CONFIG_IOMMU_SUPPORT is not set 144# CONFIG_IOMMU_SUPPORT is not set
144CONFIG_RAS=y 145CONFIG_RAS=y
145CONFIG_EXT4_FS=y 146CONFIG_EXT4_FS=y
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 0170602a1e4e..6cf8ffb5367e 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -73,7 +73,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
73#ifdef CONFIG_64BIT 73#ifdef CONFIG_64BIT
74 case 4: case 5: case 6: case 7: 74 case 4: case 5: case 6: case 7:
75#ifdef CONFIG_MIPS32_O32 75#ifdef CONFIG_MIPS32_O32
76 if (test_thread_flag(TIF_32BIT_REGS)) 76 if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
77 return get_user(*arg, (int *)usp + n); 77 return get_user(*arg, (int *)usp + n);
78 else 78 else
79#endif 79#endif
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 7f3dfdbc3657..b122cbb4aad1 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -322,7 +322,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
322 unsigned long fp) 322 unsigned long fp)
323{ 323{
324 unsigned long old_parent_ra; 324 unsigned long old_parent_ra;
325 struct ftrace_graph_ent trace;
326 unsigned long return_hooker = (unsigned long) 325 unsigned long return_hooker = (unsigned long)
327 &return_to_handler; 326 &return_to_handler;
328 int faulted, insns; 327 int faulted, insns;
@@ -369,12 +368,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
369 if (unlikely(faulted)) 368 if (unlikely(faulted))
370 goto out; 369 goto out;
371 370
372 if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
373 NULL) == -EBUSY) {
374 *parent_ra_addr = old_parent_ra;
375 return;
376 }
377
378 /* 371 /*
379 * Get the recorded ip of the current mcount calling site in the 372 * Get the recorded ip of the current mcount calling site in the
380 * __mcount_loc section, which will be used to filter the function 373 * __mcount_loc section, which will be used to filter the function
@@ -382,13 +375,10 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
382 */ 375 */
383 376
384 insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; 377 insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
385 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); 378 self_ra -= (MCOUNT_INSN_SIZE * insns);
386 379
387 /* Only trace if the calling function expects to */ 380 if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
388 if (!ftrace_graph_entry(&trace)) {
389 current->curr_ret_stack--;
390 *parent_ra_addr = old_parent_ra; 381 *parent_ra_addr = old_parent_ra;
391 }
392 return; 382 return;
393out: 383out:
394 ftrace_graph_stop(); 384 ftrace_graph_stop();
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index ea09ed6a80a9..8c6c48ed786a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p)
794 794
795 /* call board setup routine */ 795 /* call board setup routine */
796 plat_mem_setup(); 796 plat_mem_setup();
797 memblock_set_bottom_up(true);
797 798
798 /* 799 /*
799 * Make sure all kernel memory is in the maps. The "UP" and 800 * Make sure all kernel memory is in the maps. The "UP" and
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0f852e1b5891..15e103c6d799 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2260,10 +2260,8 @@ void __init trap_init(void)
2260 unsigned long size = 0x200 + VECTORSPACING*64; 2260 unsigned long size = 0x200 + VECTORSPACING*64;
2261 phys_addr_t ebase_pa; 2261 phys_addr_t ebase_pa;
2262 2262
2263 memblock_set_bottom_up(true);
2264 ebase = (unsigned long) 2263 ebase = (unsigned long)
2265 memblock_alloc_from(size, 1 << fls(size), 0); 2264 memblock_alloc_from(size, 1 << fls(size), 0);
2266 memblock_set_bottom_up(false);
2267 2265
2268 /* 2266 /*
2269 * Try to ensure ebase resides in KSeg0 if possible. 2267 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2307,6 +2305,7 @@ void __init trap_init(void)
2307 if (board_ebase_setup) 2305 if (board_ebase_setup)
2308 board_ebase_setup(); 2306 board_ebase_setup();
2309 per_cpu_trap_init(true); 2307 per_cpu_trap_init(true);
2308 memblock_set_bottom_up(false);
2310 2309
2311 /* 2310 /*
2312 * Copy the generic exception handlers to their final destination. 2311 * Copy the generic exception handlers to their final destination.
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 622761878cd1..60bf0a1cb757 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -231,6 +231,8 @@ static __init void prom_meminit(void)
231 cpumask_clear(&__node_data[(node)]->cpumask); 231 cpumask_clear(&__node_data[(node)]->cpumask);
232 } 232 }
233 } 233 }
234 max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
235
234 for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { 236 for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
235 node = cpu / loongson_sysconf.cores_per_node; 237 node = cpu / loongson_sysconf.cores_per_node;
236 if (node >= num_online_nodes()) 238 if (node >= num_online_nodes())
@@ -248,19 +250,9 @@ static __init void prom_meminit(void)
248 250
249void __init paging_init(void) 251void __init paging_init(void)
250{ 252{
251 unsigned node;
252 unsigned long zones_size[MAX_NR_ZONES] = {0, }; 253 unsigned long zones_size[MAX_NR_ZONES] = {0, };
253 254
254 pagetable_init(); 255 pagetable_init();
255
256 for_each_online_node(node) {
257 unsigned long start_pfn, end_pfn;
258
259 get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
260
261 if (end_pfn > max_low_pfn)
262 max_low_pfn = end_pfn;
263 }
264#ifdef CONFIG_ZONE_DMA32 256#ifdef CONFIG_ZONE_DMA32
265 zones_size[ZONE_DMA32] = MAX_DMA32_PFN; 257 zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
266#endif 258#endif
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 41b71c4352c2..c1ce6f43642b 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -84,7 +84,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = {
84}; 84};
85static struct rt2880_pmx_func nd_sd_grp[] = { 85static struct rt2880_pmx_func nd_sd_grp[] = {
86 FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), 86 FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
87 FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15) 87 FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
88}; 88};
89 89
90static struct rt2880_pmx_group mt7620a_pinmux_data[] = { 90static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index d8b8444d6795..813d13f92957 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -435,6 +435,7 @@ void __init prom_meminit(void)
435 435
436 mlreset(); 436 mlreset();
437 szmem(); 437 szmem();
438 max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
438 439
439 for (node = 0; node < MAX_COMPACT_NODES; node++) { 440 for (node = 0; node < MAX_COMPACT_NODES; node++) {
440 if (node_online(node)) { 441 if (node_online(node)) {
@@ -455,18 +456,8 @@ extern void setup_zero_pages(void);
455void __init paging_init(void) 456void __init paging_init(void)
456{ 457{
457 unsigned long zones_size[MAX_NR_ZONES] = {0, }; 458 unsigned long zones_size[MAX_NR_ZONES] = {0, };
458 unsigned node;
459 459
460 pagetable_init(); 460 pagetable_init();
461
462 for_each_online_node(node) {
463 unsigned long start_pfn, end_pfn;
464
465 get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
466
467 if (end_pfn > max_low_pfn)
468 max_low_pfn = end_pfn;
469 }
470 zones_size[ZONE_NORMAL] = max_low_pfn; 461 zones_size[ZONE_NORMAL] = max_low_pfn;
471 free_area_init_nodes(zones_size); 462 free_area_init_nodes(zones_size);
472} 463}
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index a0a9679ad5de..8a41372551ff 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -211,29 +211,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
211 unsigned long frame_pointer) 211 unsigned long frame_pointer)
212{ 212{
213 unsigned long return_hooker = (unsigned long)&return_to_handler; 213 unsigned long return_hooker = (unsigned long)&return_to_handler;
214 struct ftrace_graph_ent trace;
215 unsigned long old; 214 unsigned long old;
216 int err;
217 215
218 if (unlikely(atomic_read(&current->tracing_graph_pause))) 216 if (unlikely(atomic_read(&current->tracing_graph_pause)))
219 return; 217 return;
220 218
221 old = *parent; 219 old = *parent;
222 220
223 trace.func = self_addr; 221 if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
224 trace.depth = current->curr_ret_stack + 1; 222 *parent = return_hooker;
225
226 /* Only trace if the calling function expects to */
227 if (!ftrace_graph_entry(&trace))
228 return;
229
230 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
231 frame_pointer, NULL);
232
233 if (err == -EBUSY)
234 return;
235
236 *parent = return_hooker;
237} 223}
238 224
239noinline void ftrace_graph_caller(void) 225noinline void ftrace_graph_caller(void)
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 6fa8535d3cce..e46a4157a894 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -30,7 +30,6 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
30 unsigned long self_addr) 30 unsigned long self_addr)
31{ 31{
32 unsigned long old; 32 unsigned long old;
33 struct ftrace_graph_ent trace;
34 extern int parisc_return_to_handler; 33 extern int parisc_return_to_handler;
35 34
36 if (unlikely(ftrace_graph_is_dead())) 35 if (unlikely(ftrace_graph_is_dead()))
@@ -41,19 +40,9 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
41 40
42 old = *parent; 41 old = *parent;
43 42
44 trace.func = self_addr; 43 if (!function_graph_enter(old, self_addr, 0, NULL))
45 trace.depth = current->curr_ret_stack + 1; 44 /* activate parisc_return_to_handler() as return point */
46 45 *parent = (unsigned long) &parisc_return_to_handler;
47 /* Only trace if the calling function expects to */
48 if (!ftrace_graph_entry(&trace))
49 return;
50
51 if (ftrace_push_return_trace(old, self_addr, &trace.depth,
52 0, NULL) == -EBUSY)
53 return;
54
55 /* activate parisc_return_to_handler() as return point */
56 *parent = (unsigned long) &parisc_return_to_handler;
57} 46}
58#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 47#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
59 48
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 4bf051d3e21e..b65c8a34ad6e 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -950,7 +950,6 @@ int ftrace_disable_ftrace_graph_caller(void)
950 */ 950 */
951unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) 951unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
952{ 952{
953 struct ftrace_graph_ent trace;
954 unsigned long return_hooker; 953 unsigned long return_hooker;
955 954
956 if (unlikely(ftrace_graph_is_dead())) 955 if (unlikely(ftrace_graph_is_dead()))
@@ -961,18 +960,8 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
961 960
962 return_hooker = ppc_function_entry(return_to_handler); 961 return_hooker = ppc_function_entry(return_to_handler);
963 962
964 trace.func = ip; 963 if (!function_graph_enter(parent, ip, 0, NULL))
965 trace.depth = current->curr_ret_stack + 1; 964 parent = return_hooker;
966
967 /* Only trace if the calling function expects to */
968 if (!ftrace_graph_entry(&trace))
969 goto out;
970
971 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
972 NULL) == -EBUSY)
973 goto out;
974
975 parent = return_hooker;
976out: 965out:
977 return parent; 966 return parent;
978} 967}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index d65b961661fb..a56f8413758a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -983,6 +983,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
983 ret = kvmhv_enter_nested_guest(vcpu); 983 ret = kvmhv_enter_nested_guest(vcpu);
984 if (ret == H_INTERRUPT) { 984 if (ret == H_INTERRUPT) {
985 kvmppc_set_gpr(vcpu, 3, 0); 985 kvmppc_set_gpr(vcpu, 3, 0);
986 vcpu->arch.hcall_needed = 0;
986 return -EINTR; 987 return -EINTR;
987 } 988 }
988 break; 989 break;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 50b129785aee..17482f5de3e2 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
166 PPC_BLR(); 166 PPC_BLR();
167} 167}
168 168
169static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func) 169static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
170 u64 func)
171{
172#ifdef PPC64_ELF_ABI_v1
173 /* func points to the function descriptor */
174 PPC_LI64(b2p[TMP_REG_2], func);
175 /* Load actual entry point from function descriptor */
176 PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
177 /* ... and move it to LR */
178 PPC_MTLR(b2p[TMP_REG_1]);
179 /*
180 * Load TOC from function descriptor at offset 8.
181 * We can clobber r2 since we get called through a
182 * function pointer (so caller will save/restore r2)
183 * and since we don't use a TOC ourself.
184 */
185 PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
186#else
187 /* We can clobber r12 */
188 PPC_FUNC_ADDR(12, func);
189 PPC_MTLR(12);
190#endif
191 PPC_BLRL();
192}
193
194static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
195 u64 func)
170{ 196{
171 unsigned int i, ctx_idx = ctx->idx; 197 unsigned int i, ctx_idx = ctx->idx;
172 198
@@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
273{ 299{
274 const struct bpf_insn *insn = fp->insnsi; 300 const struct bpf_insn *insn = fp->insnsi;
275 int flen = fp->len; 301 int flen = fp->len;
276 int i; 302 int i, ret;
277 303
278 /* Start of epilogue code - will only be valid 2nd pass onwards */ 304 /* Start of epilogue code - will only be valid 2nd pass onwards */
279 u32 exit_addr = addrs[flen]; 305 u32 exit_addr = addrs[flen];
@@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
284 u32 src_reg = b2p[insn[i].src_reg]; 310 u32 src_reg = b2p[insn[i].src_reg];
285 s16 off = insn[i].off; 311 s16 off = insn[i].off;
286 s32 imm = insn[i].imm; 312 s32 imm = insn[i].imm;
313 bool func_addr_fixed;
314 u64 func_addr;
287 u64 imm64; 315 u64 imm64;
288 u8 *func;
289 u32 true_cond; 316 u32 true_cond;
290 u32 tmp_idx; 317 u32 tmp_idx;
291 318
@@ -711,23 +738,15 @@ emit_clear:
711 case BPF_JMP | BPF_CALL: 738 case BPF_JMP | BPF_CALL:
712 ctx->seen |= SEEN_FUNC; 739 ctx->seen |= SEEN_FUNC;
713 740
714 /* bpf function call */ 741 ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
715 if (insn[i].src_reg == BPF_PSEUDO_CALL) 742 &func_addr, &func_addr_fixed);
716 if (!extra_pass) 743 if (ret < 0)
717 func = NULL; 744 return ret;
718 else if (fp->aux->func && off < fp->aux->func_cnt)
719 /* use the subprog id from the off
720 * field to lookup the callee address
721 */
722 func = (u8 *) fp->aux->func[off]->bpf_func;
723 else
724 return -EINVAL;
725 /* kernel helper call */
726 else
727 func = (u8 *) __bpf_call_base + imm;
728
729 bpf_jit_emit_func_call(image, ctx, (u64)func);
730 745
746 if (func_addr_fixed)
747 bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
748 else
749 bpf_jit_emit_func_call_rel(image, ctx, func_addr);
731 /* move return value from r3 to BPF_REG_0 */ 750 /* move return value from r3 to BPF_REG_0 */
732 PPC_MR(b2p[BPF_REG_0], 3); 751 PPC_MR(b2p[BPF_REG_0], 3);
733 break; 752 break;
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 4af153a182b0..4b594f2e4f7e 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -71,6 +71,10 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
71# arch specific predefines for sparse 71# arch specific predefines for sparse
72CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) 72CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
73 73
74# Default target when executing plain make
75boot := arch/riscv/boot
76KBUILD_IMAGE := $(boot)/Image.gz
77
74head-y := arch/riscv/kernel/head.o 78head-y := arch/riscv/kernel/head.o
75 79
76core-y += arch/riscv/kernel/ arch/riscv/mm/ 80core-y += arch/riscv/kernel/ arch/riscv/mm/
@@ -81,4 +85,13 @@ PHONY += vdso_install
81vdso_install: 85vdso_install:
82 $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ 86 $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
83 87
84all: vmlinux 88all: Image.gz
89
90Image: vmlinux
91 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
92
93Image.%: Image
94 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
95
96zinstall install:
97 $(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
new file mode 100644
index 000000000000..8dab0bb6ae66
--- /dev/null
+++ b/arch/riscv/boot/.gitignore
@@ -0,0 +1,2 @@
1Image
2Image.gz
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
new file mode 100644
index 000000000000..0990a9fdbe5d
--- /dev/null
+++ b/arch/riscv/boot/Makefile
@@ -0,0 +1,33 @@
1#
2# arch/riscv/boot/Makefile
3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
7# This file is subject to the terms and conditions of the GNU General Public
8# License. See the file "COPYING" in the main directory of this archive
9# for more details.
10#
11# Copyright (C) 2018, Anup Patel.
12# Author: Anup Patel <anup@brainfault.org>
13#
14# Based on the ia64 and arm64 boot/Makefile.
15#
16
17OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
18
19targets := Image
20
21$(obj)/Image: vmlinux FORCE
22 $(call if_changed,objcopy)
23
24$(obj)/Image.gz: $(obj)/Image FORCE
25 $(call if_changed,gzip)
26
27install:
28 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
29 $(obj)/Image System.map "$(INSTALL_PATH)"
30
31zinstall:
32 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
33 $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh
new file mode 100644
index 000000000000..18c39159c0ff
--- /dev/null
+++ b/arch/riscv/boot/install.sh
@@ -0,0 +1,60 @@
1#!/bin/sh
2#
3# arch/riscv/boot/install.sh
4#
5# This file is subject to the terms and conditions of the GNU General Public
6# License. See the file "COPYING" in the main directory of this archive
7# for more details.
8#
9# Copyright (C) 1995 by Linus Torvalds
10#
11# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
12# Adapted from code in arch/i386/boot/install.sh by Russell King
13#
14# "make install" script for the RISC-V Linux port
15#
16# Arguments:
17# $1 - kernel version
18# $2 - kernel image file
19# $3 - kernel map file
20# $4 - default install path (blank if root directory)
21#
22
23verify () {
24 if [ ! -f "$1" ]; then
25 echo "" 1>&2
26 echo " *** Missing file: $1" 1>&2
27 echo ' *** You need to run "make" before "make install".' 1>&2
28 echo "" 1>&2
29 exit 1
30 fi
31}
32
33# Make sure the files actually exist
34verify "$2"
35verify "$3"
36
37# User may have a custom install script
38if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
39if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
40
41if [ "$(basename $2)" = "Image.gz" ]; then
42# Compressed install
43 echo "Installing compressed kernel"
44 base=vmlinuz
45else
46# Normal install
47 echo "Installing normal kernel"
48 base=vmlinux
49fi
50
51if [ -f $4/$base-$1 ]; then
52 mv $4/$base-$1 $4/$base-$1.old
53fi
54cat $2 > $4/$base-$1
55
56# Install system map file
57if [ -f $4/System.map-$1 ]; then
58 mv $4/System.map-$1 $4/System.map-$1.old
59fi
60cp $3 $4/System.map-$1
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
index 349df33808c4..cd2af4b013e3 100644
--- a/arch/riscv/include/asm/module.h
+++ b/arch/riscv/include/asm/module.h
@@ -8,6 +8,7 @@
8 8
9#define MODULE_ARCH_VERMAGIC "riscv" 9#define MODULE_ARCH_VERMAGIC "riscv"
10 10
11struct module;
11u64 module_emit_got_entry(struct module *mod, u64 val); 12u64 module_emit_got_entry(struct module *mod, u64 val);
12u64 module_emit_plt_entry(struct module *mod, u64 val); 13u64 module_emit_plt_entry(struct module *mod, u64 val);
13 14
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 473cfc84e412..8c3e3e3c8be1 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to,
400static inline unsigned long 400static inline unsigned long
401raw_copy_from_user(void *to, const void __user *from, unsigned long n) 401raw_copy_from_user(void *to, const void __user *from, unsigned long n)
402{ 402{
403 return __asm_copy_to_user(to, from, n); 403 return __asm_copy_from_user(to, from, n);
404} 404}
405 405
406static inline unsigned long 406static inline unsigned long
407raw_copy_to_user(void __user *to, const void *from, unsigned long n) 407raw_copy_to_user(void __user *to, const void *from, unsigned long n)
408{ 408{
409 return __asm_copy_from_user(to, from, n); 409 return __asm_copy_to_user(to, from, n);
410} 410}
411 411
412extern long strncpy_from_user(char *dest, const char __user *src, long count); 412extern long strncpy_from_user(char *dest, const char __user *src, long count);
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index eff7aa9aa163..fef96f117b4d 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -13,10 +13,9 @@
13 13
14/* 14/*
15 * There is explicitly no include guard here because this file is expected to 15 * There is explicitly no include guard here because this file is expected to
16 * be included multiple times. See uapi/asm/syscalls.h for more info. 16 * be included multiple times.
17 */ 17 */
18 18
19#define __ARCH_WANT_NEW_STAT
20#define __ARCH_WANT_SYS_CLONE 19#define __ARCH_WANT_SYS_CLONE
20
21#include <uapi/asm/unistd.h> 21#include <uapi/asm/unistd.h>
22#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/unistd.h
index 206dc4b0f6ea..1f3bd3ebbb0d 100644
--- a/arch/riscv/include/uapi/asm/syscalls.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -1,13 +1,25 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (C) 2017-2018 SiFive 3 * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
4 */ 16 */
5 17
6/* 18#ifdef __LP64__
7 * There is explicitly no include guard here because this file is expected to 19#define __ARCH_WANT_NEW_STAT
8 * be included multiple times in order to define the syscall macros via 20#endif /* __LP64__ */
9 * __SYSCALL. 21
10 */ 22#include <asm-generic/unistd.h>
11 23
12/* 24/*
13 * Allows the instruction cache to be flushed from userspace. Despite RISC-V 25 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 3a5a2ee31547..b4a7d4427fbb 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node)
64 64
65static void print_isa(struct seq_file *f, const char *orig_isa) 65static void print_isa(struct seq_file *f, const char *orig_isa)
66{ 66{
67 static const char *ext = "mafdc"; 67 static const char *ext = "mafdcsu";
68 const char *isa = orig_isa; 68 const char *isa = orig_isa;
69 const char *e; 69 const char *e;
70 70
@@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa)
88 /* 88 /*
89 * Check the rest of the ISA string for valid extensions, printing those 89 * Check the rest of the ISA string for valid extensions, printing those
90 * we find. RISC-V ISA strings define an order, so we only print the 90 * we find. RISC-V ISA strings define an order, so we only print the
91 * extension bits when they're in order. 91 * extension bits when they're in order. Hide the supervisor (S)
92 * extension from userspace as it's not accessible from there.
92 */ 93 */
93 for (e = ext; *e != '\0'; ++e) { 94 for (e = ext; *e != '\0'; ++e) {
94 if (isa[0] == e[0]) { 95 if (isa[0] == e[0]) {
95 seq_write(f, isa, 1); 96 if (isa[0] != 's')
97 seq_write(f, isa, 1);
98
96 isa++; 99 isa++;
97 } 100 }
98 } 101 }
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 1157b6b52d25..c433f6d3dd64 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
132{ 132{
133 unsigned long return_hooker = (unsigned long)&return_to_handler; 133 unsigned long return_hooker = (unsigned long)&return_to_handler;
134 unsigned long old; 134 unsigned long old;
135 struct ftrace_graph_ent trace;
136 int err; 135 int err;
137 136
138 if (unlikely(atomic_read(&current->tracing_graph_pause))) 137 if (unlikely(atomic_read(&current->tracing_graph_pause)))
@@ -144,17 +143,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
144 */ 143 */
145 old = *parent; 144 old = *parent;
146 145
147 trace.func = self_addr; 146 if (function_graph_enter(old, self_addr, frame_pointer, parent))
148 trace.depth = current->curr_ret_stack + 1; 147 *parent = return_hooker;
149
150 if (!ftrace_graph_entry(&trace))
151 return;
152
153 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
154 frame_pointer, parent);
155 if (err == -EBUSY)
156 return;
157 *parent = return_hooker;
158} 148}
159 149
160#ifdef CONFIG_DYNAMIC_FTRACE 150#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 711190d473d4..fe884cd69abd 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -44,6 +44,16 @@ ENTRY(_start)
44 amoadd.w a3, a2, (a3) 44 amoadd.w a3, a2, (a3)
45 bnez a3, .Lsecondary_start 45 bnez a3, .Lsecondary_start
46 46
47 /* Clear BSS for flat non-ELF images */
48 la a3, __bss_start
49 la a4, __bss_stop
50 ble a4, a3, clear_bss_done
51clear_bss:
52 REG_S zero, (a3)
53 add a3, a3, RISCV_SZPTR
54 blt a3, a4, clear_bss
55clear_bss_done:
56
47 /* Save hart ID and DTB physical address */ 57 /* Save hart ID and DTB physical address */
48 mv s0, a0 58 mv s0, a0
49 mv s1, a1 59 mv s1, a1
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index ece84991609c..65df1dfdc303 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
74 *(.sbss*) 74 *(.sbss*)
75 } 75 }
76 76
77 BSS_SECTION(0, 0, 0) 77 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
78 78
79 EXCEPTION_TABLE(0x10) 79 EXCEPTION_TABLE(0x10)
80 NOTES 80 NOTES
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 84be7f02d0c2..39b13d71a8fe 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -203,22 +203,13 @@ device_initcall(ftrace_plt_init);
203 */ 203 */
204unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) 204unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
205{ 205{
206 struct ftrace_graph_ent trace;
207
208 if (unlikely(ftrace_graph_is_dead())) 206 if (unlikely(ftrace_graph_is_dead()))
209 goto out; 207 goto out;
210 if (unlikely(atomic_read(&current->tracing_graph_pause))) 208 if (unlikely(atomic_read(&current->tracing_graph_pause)))
211 goto out; 209 goto out;
212 ip -= MCOUNT_INSN_SIZE; 210 ip -= MCOUNT_INSN_SIZE;
213 trace.func = ip; 211 if (!function_graph_enter(parent, ip, 0, NULL))
214 trace.depth = current->curr_ret_stack + 1; 212 parent = (unsigned long) return_to_handler;
215 /* Only trace if the calling function expects to. */
216 if (!ftrace_graph_entry(&trace))
217 goto out;
218 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
219 NULL) == -EBUSY)
220 goto out;
221 parent = (unsigned long) return_to_handler;
222out: 213out:
223 return parent; 214 return parent;
224} 215}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 74091fd3101e..d5523adeddbf 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event)
346 break; 346 break;
347 347
348 case PERF_TYPE_HARDWARE: 348 case PERF_TYPE_HARDWARE:
349 if (is_sampling_event(event)) /* No sampling support */
350 return -ENOENT;
349 ev = attr->config; 351 ev = attr->config;
350 /* Count user space (problem-state) only */ 352 /* Count user space (problem-state) only */
351 if (!attr->exclude_user && attr->exclude_kernel) { 353 if (!attr->exclude_user && attr->exclude_kernel) {
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 814f26520aa2..6791562779ee 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -131,6 +131,7 @@ void crst_table_downgrade(struct mm_struct *mm)
131 } 131 }
132 132
133 pgd = mm->pgd; 133 pgd = mm->pgd;
134 mm_dec_nr_pmds(mm);
134 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); 135 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
135 mm->context.asce_limit = _REGION3_SIZE; 136 mm->context.asce_limit = _REGION3_SIZE;
136 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 137 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 96dd9f7da250..1b04270e5460 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -321,8 +321,7 @@ int ftrace_disable_ftrace_graph_caller(void)
321void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) 321void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
322{ 322{
323 unsigned long old; 323 unsigned long old;
324 int faulted, err; 324 int faulted;
325 struct ftrace_graph_ent trace;
326 unsigned long return_hooker = (unsigned long)&return_to_handler; 325 unsigned long return_hooker = (unsigned long)&return_to_handler;
327 326
328 if (unlikely(ftrace_graph_is_dead())) 327 if (unlikely(ftrace_graph_is_dead()))
@@ -365,18 +364,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
365 return; 364 return;
366 } 365 }
367 366
368 err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); 367 if (function_graph_enter(old, self_addr, 0, NULL))
369 if (err == -EBUSY) {
370 __raw_writel(old, parent); 368 __raw_writel(old, parent);
371 return;
372 }
373
374 trace.func = self_addr;
375
376 /* Only trace if the calling function expects to */
377 if (!ftrace_graph_entry(&trace)) {
378 current->curr_ret_stack--;
379 __raw_writel(old, parent);
380 }
381} 369}
382#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 370#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 915dda4ae412..684b84ce397f 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -126,20 +126,11 @@ unsigned long prepare_ftrace_return(unsigned long parent,
126 unsigned long frame_pointer) 126 unsigned long frame_pointer)
127{ 127{
128 unsigned long return_hooker = (unsigned long) &return_to_handler; 128 unsigned long return_hooker = (unsigned long) &return_to_handler;
129 struct ftrace_graph_ent trace;
130 129
131 if (unlikely(atomic_read(&current->tracing_graph_pause))) 130 if (unlikely(atomic_read(&current->tracing_graph_pause)))
132 return parent + 8UL; 131 return parent + 8UL;
133 132
134 trace.func = self_addr; 133 if (function_graph_enter(parent, self_addr, frame_pointer, NULL))
135 trace.depth = current->curr_ret_stack + 1;
136
137 /* Only trace if the calling function expects to */
138 if (!ftrace_graph_entry(&trace))
139 return parent + 8UL;
140
141 if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
142 frame_pointer, NULL) == -EBUSY)
143 return parent + 8UL; 134 return parent + 8UL;
144 135
145 return return_hooker; 136 return return_hooker;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 222785af550b..5fda4f7bf15d 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -791,7 +791,7 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
791} 791}
792 792
793/* Just skip the save instruction and the ctx register move. */ 793/* Just skip the save instruction and the ctx register move. */
794#define BPF_TAILCALL_PROLOGUE_SKIP 16 794#define BPF_TAILCALL_PROLOGUE_SKIP 32
795#define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128) 795#define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128)
796 796
797static void build_prologue(struct jit_ctx *ctx) 797static void build_prologue(struct jit_ctx *ctx)
@@ -824,9 +824,15 @@ static void build_prologue(struct jit_ctx *ctx)
824 const u8 vfp = bpf2sparc[BPF_REG_FP]; 824 const u8 vfp = bpf2sparc[BPF_REG_FP];
825 825
826 emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx); 826 emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx);
827 } else {
828 emit_nop(ctx);
827 } 829 }
828 830
829 emit_reg_move(I0, O0, ctx); 831 emit_reg_move(I0, O0, ctx);
832 emit_reg_move(I1, O1, ctx);
833 emit_reg_move(I2, O2, ctx);
834 emit_reg_move(I3, O3, ctx);
835 emit_reg_move(I4, O4, ctx);
830 /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */ 836 /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */
831} 837}
832 838
@@ -1270,6 +1276,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1270 const u8 tmp2 = bpf2sparc[TMP_REG_2]; 1276 const u8 tmp2 = bpf2sparc[TMP_REG_2];
1271 u32 opcode = 0, rs2; 1277 u32 opcode = 0, rs2;
1272 1278
1279 if (insn->dst_reg == BPF_REG_FP)
1280 ctx->saw_frame_pointer = true;
1281
1273 ctx->tmp_2_used = true; 1282 ctx->tmp_2_used = true;
1274 emit_loadimm(imm, tmp2, ctx); 1283 emit_loadimm(imm, tmp2, ctx);
1275 1284
@@ -1308,6 +1317,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1308 const u8 tmp = bpf2sparc[TMP_REG_1]; 1317 const u8 tmp = bpf2sparc[TMP_REG_1];
1309 u32 opcode = 0, rs2; 1318 u32 opcode = 0, rs2;
1310 1319
1320 if (insn->dst_reg == BPF_REG_FP)
1321 ctx->saw_frame_pointer = true;
1322
1311 switch (BPF_SIZE(code)) { 1323 switch (BPF_SIZE(code)) {
1312 case BPF_W: 1324 case BPF_W:
1313 opcode = ST32; 1325 opcode = ST32;
@@ -1340,6 +1352,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1340 const u8 tmp2 = bpf2sparc[TMP_REG_2]; 1352 const u8 tmp2 = bpf2sparc[TMP_REG_2];
1341 const u8 tmp3 = bpf2sparc[TMP_REG_3]; 1353 const u8 tmp3 = bpf2sparc[TMP_REG_3];
1342 1354
1355 if (insn->dst_reg == BPF_REG_FP)
1356 ctx->saw_frame_pointer = true;
1357
1343 ctx->tmp_1_used = true; 1358 ctx->tmp_1_used = true;
1344 ctx->tmp_2_used = true; 1359 ctx->tmp_2_used = true;
1345 ctx->tmp_3_used = true; 1360 ctx->tmp_3_used = true;
@@ -1360,6 +1375,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1360 const u8 tmp2 = bpf2sparc[TMP_REG_2]; 1375 const u8 tmp2 = bpf2sparc[TMP_REG_2];
1361 const u8 tmp3 = bpf2sparc[TMP_REG_3]; 1376 const u8 tmp3 = bpf2sparc[TMP_REG_3];
1362 1377
1378 if (insn->dst_reg == BPF_REG_FP)
1379 ctx->saw_frame_pointer = true;
1380
1363 ctx->tmp_1_used = true; 1381 ctx->tmp_1_used = true;
1364 ctx->tmp_2_used = true; 1382 ctx->tmp_2_used = true;
1365 ctx->tmp_3_used = true; 1383 ctx->tmp_3_used = true;
@@ -1425,12 +1443,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1425 struct bpf_prog *tmp, *orig_prog = prog; 1443 struct bpf_prog *tmp, *orig_prog = prog;
1426 struct sparc64_jit_data *jit_data; 1444 struct sparc64_jit_data *jit_data;
1427 struct bpf_binary_header *header; 1445 struct bpf_binary_header *header;
1446 u32 prev_image_size, image_size;
1428 bool tmp_blinded = false; 1447 bool tmp_blinded = false;
1429 bool extra_pass = false; 1448 bool extra_pass = false;
1430 struct jit_ctx ctx; 1449 struct jit_ctx ctx;
1431 u32 image_size;
1432 u8 *image_ptr; 1450 u8 *image_ptr;
1433 int pass; 1451 int pass, i;
1434 1452
1435 if (!prog->jit_requested) 1453 if (!prog->jit_requested)
1436 return orig_prog; 1454 return orig_prog;
@@ -1461,61 +1479,82 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1461 header = jit_data->header; 1479 header = jit_data->header;
1462 extra_pass = true; 1480 extra_pass = true;
1463 image_size = sizeof(u32) * ctx.idx; 1481 image_size = sizeof(u32) * ctx.idx;
1482 prev_image_size = image_size;
1483 pass = 1;
1464 goto skip_init_ctx; 1484 goto skip_init_ctx;
1465 } 1485 }
1466 1486
1467 memset(&ctx, 0, sizeof(ctx)); 1487 memset(&ctx, 0, sizeof(ctx));
1468 ctx.prog = prog; 1488 ctx.prog = prog;
1469 1489
1470 ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL); 1490 ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL);
1471 if (ctx.offset == NULL) { 1491 if (ctx.offset == NULL) {
1472 prog = orig_prog; 1492 prog = orig_prog;
1473 goto out_off; 1493 goto out_off;
1474 } 1494 }
1475 1495
1476 /* Fake pass to detect features used, and get an accurate assessment 1496 /* Longest sequence emitted is for bswap32, 12 instructions. Pre-cook
1477 * of what the final image size will be. 1497 * the offset array so that we converge faster.
1478 */ 1498 */
1479 if (build_body(&ctx)) { 1499 for (i = 0; i < prog->len; i++)
1480 prog = orig_prog; 1500 ctx.offset[i] = i * (12 * 4);
1481 goto out_off;
1482 }
1483 build_prologue(&ctx);
1484 build_epilogue(&ctx);
1485
1486 /* Now we know the actual image size. */
1487 image_size = sizeof(u32) * ctx.idx;
1488 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1489 sizeof(u32), jit_fill_hole);
1490 if (header == NULL) {
1491 prog = orig_prog;
1492 goto out_off;
1493 }
1494 1501
1495 ctx.image = (u32 *)image_ptr; 1502 prev_image_size = ~0U;
1496skip_init_ctx: 1503 for (pass = 1; pass < 40; pass++) {
1497 for (pass = 1; pass < 3; pass++) {
1498 ctx.idx = 0; 1504 ctx.idx = 0;
1499 1505
1500 build_prologue(&ctx); 1506 build_prologue(&ctx);
1501
1502 if (build_body(&ctx)) { 1507 if (build_body(&ctx)) {
1503 bpf_jit_binary_free(header);
1504 prog = orig_prog; 1508 prog = orig_prog;
1505 goto out_off; 1509 goto out_off;
1506 } 1510 }
1507
1508 build_epilogue(&ctx); 1511 build_epilogue(&ctx);
1509 1512
1510 if (bpf_jit_enable > 1) 1513 if (bpf_jit_enable > 1)
1511 pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass, 1514 pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass,
1512 image_size - (ctx.idx * 4), 1515 ctx.idx * 4,
1513 ctx.tmp_1_used ? '1' : ' ', 1516 ctx.tmp_1_used ? '1' : ' ',
1514 ctx.tmp_2_used ? '2' : ' ', 1517 ctx.tmp_2_used ? '2' : ' ',
1515 ctx.tmp_3_used ? '3' : ' ', 1518 ctx.tmp_3_used ? '3' : ' ',
1516 ctx.saw_frame_pointer ? 'F' : ' ', 1519 ctx.saw_frame_pointer ? 'F' : ' ',
1517 ctx.saw_call ? 'C' : ' ', 1520 ctx.saw_call ? 'C' : ' ',
1518 ctx.saw_tail_call ? 'T' : ' '); 1521 ctx.saw_tail_call ? 'T' : ' ');
1522
1523 if (ctx.idx * 4 == prev_image_size)
1524 break;
1525 prev_image_size = ctx.idx * 4;
1526 cond_resched();
1527 }
1528
1529 /* Now we know the actual image size. */
1530 image_size = sizeof(u32) * ctx.idx;
1531 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1532 sizeof(u32), jit_fill_hole);
1533 if (header == NULL) {
1534 prog = orig_prog;
1535 goto out_off;
1536 }
1537
1538 ctx.image = (u32 *)image_ptr;
1539skip_init_ctx:
1540 ctx.idx = 0;
1541
1542 build_prologue(&ctx);
1543
1544 if (build_body(&ctx)) {
1545 bpf_jit_binary_free(header);
1546 prog = orig_prog;
1547 goto out_off;
1548 }
1549
1550 build_epilogue(&ctx);
1551
1552 if (ctx.idx * 4 != prev_image_size) {
1553 pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n",
1554 prev_image_size, ctx.idx * 4);
1555 bpf_jit_binary_free(header);
1556 prog = orig_prog;
1557 goto out_off;
1519 } 1558 }
1520 1559
1521 if (bpf_jit_enable > 1) 1560 if (bpf_jit_enable > 1)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9d734f3c8234..8689e794a43c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,10 +444,6 @@ config RETPOLINE
444 branches. Requires a compiler with -mindirect-branch=thunk-extern 444 branches. Requires a compiler with -mindirect-branch=thunk-extern
445 support for full protection. The kernel may run slower. 445 support for full protection. The kernel may run slower.
446 446
447 Without compiler support, at least indirect branches in assembler
448 code are eliminated. Since this includes the syscall entry path,
449 it is not entirely pointless.
450
451config INTEL_RDT 447config INTEL_RDT
452 bool "Intel Resource Director Technology support" 448 bool "Intel Resource Director Technology support"
453 depends on X86 && CPU_SUP_INTEL 449 depends on X86 && CPU_SUP_INTEL
@@ -1004,13 +1000,7 @@ config NR_CPUS
1004 to the kernel image. 1000 to the kernel image.
1005 1001
1006config SCHED_SMT 1002config SCHED_SMT
1007 bool "SMT (Hyperthreading) scheduler support" 1003 def_bool y if SMP
1008 depends on SMP
1009 ---help---
1010 SMT scheduler support improves the CPU scheduler's decision making
1011 when dealing with Intel Pentium 4 chips with HyperThreading at a
1012 cost of slightly increased overhead in some places. If unsure say
1013 N here.
1014 1004
1015config SCHED_MC 1005config SCHED_MC
1016 def_bool y 1006 def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 88398fdf8129..f5d7f4134524 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -220,9 +220,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
220 220
221# Avoid indirect branches in kernel to deal with Spectre 221# Avoid indirect branches in kernel to deal with Spectre
222ifdef CONFIG_RETPOLINE 222ifdef CONFIG_RETPOLINE
223ifneq ($(RETPOLINE_CFLAGS),) 223ifeq ($(RETPOLINE_CFLAGS),)
224 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE 224 $(error You are building kernel with non-retpoline compiler, please update your compiler.)
225endif 225endif
226 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
226endif 227endif
227 228
228archscripts: scripts_basic 229archscripts: scripts_basic
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 4c881c850125..850b8762e889 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -300,7 +300,7 @@ _start:
300 # Part 2 of the header, from the old setup.S 300 # Part 2 of the header, from the old setup.S
301 301
302 .ascii "HdrS" # header signature 302 .ascii "HdrS" # header signature
303 .word 0x020e # header version number (>= 0x0105) 303 .word 0x020d # header version number (>= 0x0105)
304 # or else old loadlin-1.5 will fail) 304 # or else old loadlin-1.5 will fail)
305 .globl realmode_swtch 305 .globl realmode_swtch
306realmode_swtch: .word 0, 0 # default_switch, SETUPSEG 306realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -558,10 +558,6 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
558init_size: .long INIT_SIZE # kernel initialization size 558init_size: .long INIT_SIZE # kernel initialization size
559handover_offset: .long 0 # Filled in by build.c 559handover_offset: .long 0 # Filled in by build.c
560 560
561acpi_rsdp_addr: .quad 0 # 64-bit physical pointer to the
562 # ACPI RSDP table, added with
563 # version 2.14
564
565# End of setup header ##################################################### 561# End of setup header #####################################################
566 562
567 .section ".entrytext", "ax" 563 .section ".entrytext", "ax"
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 106911b603bd..374a19712e20 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event *event)
438 if (config == -1LL) 438 if (config == -1LL)
439 return -EINVAL; 439 return -EINVAL;
440 440
441 /*
442 * Branch tracing:
443 */
444 if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
445 !attr->freq && hwc->sample_period == 1) {
446 /* BTS is not supported by this architecture. */
447 if (!x86_pmu.bts_active)
448 return -EOPNOTSUPP;
449
450 /* BTS is currently only allowed for user-mode. */
451 if (!attr->exclude_kernel)
452 return -EOPNOTSUPP;
453
454 /* disallow bts if conflicting events are present */
455 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
456 return -EBUSY;
457
458 event->destroy = hw_perf_lbr_event_destroy;
459 }
460
461 hwc->config |= config; 441 hwc->config |= config;
462 442
463 return 0; 443 return 0;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index af8bea9d4006..ecc3e34ca955 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2474,16 +2474,7 @@ done:
2474static struct event_constraint * 2474static struct event_constraint *
2475intel_bts_constraints(struct perf_event *event) 2475intel_bts_constraints(struct perf_event *event)
2476{ 2476{
2477 struct hw_perf_event *hwc = &event->hw; 2477 if (unlikely(intel_pmu_has_bts(event)))
2478 unsigned int hw_event, bts_event;
2479
2480 if (event->attr.freq)
2481 return NULL;
2482
2483 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
2484 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
2485
2486 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
2487 return &bts_constraint; 2478 return &bts_constraint;
2488 2479
2489 return NULL; 2480 return NULL;
@@ -3102,6 +3093,43 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3102 return flags; 3093 return flags;
3103} 3094}
3104 3095
3096static int intel_pmu_bts_config(struct perf_event *event)
3097{
3098 struct perf_event_attr *attr = &event->attr;
3099
3100 if (unlikely(intel_pmu_has_bts(event))) {
3101 /* BTS is not supported by this architecture. */
3102 if (!x86_pmu.bts_active)
3103 return -EOPNOTSUPP;
3104
3105 /* BTS is currently only allowed for user-mode. */
3106 if (!attr->exclude_kernel)
3107 return -EOPNOTSUPP;
3108
3109 /* BTS is not allowed for precise events. */
3110 if (attr->precise_ip)
3111 return -EOPNOTSUPP;
3112
3113 /* disallow bts if conflicting events are present */
3114 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3115 return -EBUSY;
3116
3117 event->destroy = hw_perf_lbr_event_destroy;
3118 }
3119
3120 return 0;
3121}
3122
3123static int core_pmu_hw_config(struct perf_event *event)
3124{
3125 int ret = x86_pmu_hw_config(event);
3126
3127 if (ret)
3128 return ret;
3129
3130 return intel_pmu_bts_config(event);
3131}
3132
3105static int intel_pmu_hw_config(struct perf_event *event) 3133static int intel_pmu_hw_config(struct perf_event *event)
3106{ 3134{
3107 int ret = x86_pmu_hw_config(event); 3135 int ret = x86_pmu_hw_config(event);
@@ -3109,6 +3137,10 @@ static int intel_pmu_hw_config(struct perf_event *event)
3109 if (ret) 3137 if (ret)
3110 return ret; 3138 return ret;
3111 3139
3140 ret = intel_pmu_bts_config(event);
3141 if (ret)
3142 return ret;
3143
3112 if (event->attr.precise_ip) { 3144 if (event->attr.precise_ip) {
3113 if (!event->attr.freq) { 3145 if (!event->attr.freq) {
3114 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 3146 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
@@ -3131,7 +3163,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
3131 /* 3163 /*
3132 * BTS is set up earlier in this path, so don't account twice 3164 * BTS is set up earlier in this path, so don't account twice
3133 */ 3165 */
3134 if (!intel_pmu_has_bts(event)) { 3166 if (!unlikely(intel_pmu_has_bts(event))) {
3135 /* disallow lbr if conflicting events are present */ 3167 /* disallow lbr if conflicting events are present */
3136 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3168 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3137 return -EBUSY; 3169 return -EBUSY;
@@ -3600,7 +3632,7 @@ static __initconst const struct x86_pmu core_pmu = {
3600 .enable_all = core_pmu_enable_all, 3632 .enable_all = core_pmu_enable_all,
3601 .enable = core_pmu_enable_event, 3633 .enable = core_pmu_enable_event,
3602 .disable = x86_pmu_disable_event, 3634 .disable = x86_pmu_disable_event,
3603 .hw_config = x86_pmu_hw_config, 3635 .hw_config = core_pmu_hw_config,
3604 .schedule_events = x86_schedule_events, 3636 .schedule_events = x86_schedule_events,
3605 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 3637 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3606 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 3638 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index adae087cecdd..78d7b7031bfc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -859,11 +859,16 @@ static inline int amd_pmu_init(void)
859 859
860static inline bool intel_pmu_has_bts(struct perf_event *event) 860static inline bool intel_pmu_has_bts(struct perf_event *event)
861{ 861{
862 if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && 862 struct hw_perf_event *hwc = &event->hw;
863 !event->attr.freq && event->hw.sample_period == 1) 863 unsigned int hw_event, bts_event;
864 return true; 864
865 if (event->attr.freq)
866 return false;
867
868 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
869 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
865 870
866 return false; 871 return hw_event == bts_event && hwc->sample_period == 1;
867} 872}
868 873
869int intel_pmu_save_and_restart(struct perf_event *event); 874int intel_pmu_save_and_restart(struct perf_event *event);
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 5f7290e6e954..69dcdf195b61 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -226,7 +226,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
226 "3: movl $-2,%[err]\n\t" \ 226 "3: movl $-2,%[err]\n\t" \
227 "jmp 2b\n\t" \ 227 "jmp 2b\n\t" \
228 ".popsection\n\t" \ 228 ".popsection\n\t" \
229 _ASM_EXTABLE_UA(1b, 3b) \ 229 _ASM_EXTABLE(1b, 3b) \
230 : [err] "=r" (err) \ 230 : [err] "=r" (err) \
231 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ 231 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
232 : "memory") 232 : "memory")
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55e51ff7e421..fbda5a917c5b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1094,7 +1094,8 @@ struct kvm_x86_ops {
1094 bool (*has_wbinvd_exit)(void); 1094 bool (*has_wbinvd_exit)(void);
1095 1095
1096 u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); 1096 u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
1097 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); 1097 /* Returns actual tsc_offset set in active VMCS */
1098 u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
1098 1099
1099 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); 1100 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
1100 1101
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 80f4a4f38c79..c8f73efb4ece 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -41,9 +41,10 @@
41 41
42#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ 42#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
43#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ 43#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
44#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ 44#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
45#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
45#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ 46#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
46#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ 47#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
47 48
48#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ 49#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
49#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ 50#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 80dc14422495..032b6009baab 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -3,6 +3,8 @@
3#ifndef _ASM_X86_NOSPEC_BRANCH_H_ 3#ifndef _ASM_X86_NOSPEC_BRANCH_H_
4#define _ASM_X86_NOSPEC_BRANCH_H_ 4#define _ASM_X86_NOSPEC_BRANCH_H_
5 5
6#include <linux/static_key.h>
7
6#include <asm/alternative.h> 8#include <asm/alternative.h>
7#include <asm/alternative-asm.h> 9#include <asm/alternative-asm.h>
8#include <asm/cpufeatures.h> 10#include <asm/cpufeatures.h>
@@ -162,11 +164,12 @@
162 _ASM_PTR " 999b\n\t" \ 164 _ASM_PTR " 999b\n\t" \
163 ".popsection\n\t" 165 ".popsection\n\t"
164 166
165#if defined(CONFIG_X86_64) && defined(RETPOLINE) 167#ifdef CONFIG_RETPOLINE
168#ifdef CONFIG_X86_64
166 169
167/* 170/*
168 * Since the inline asm uses the %V modifier which is only in newer GCC, 171 * Inline asm uses the %V modifier which is only in newer GCC
169 * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. 172 * which is ensured when CONFIG_RETPOLINE is defined.
170 */ 173 */
171# define CALL_NOSPEC \ 174# define CALL_NOSPEC \
172 ANNOTATE_NOSPEC_ALTERNATIVE \ 175 ANNOTATE_NOSPEC_ALTERNATIVE \
@@ -181,7 +184,7 @@
181 X86_FEATURE_RETPOLINE_AMD) 184 X86_FEATURE_RETPOLINE_AMD)
182# define THUNK_TARGET(addr) [thunk_target] "r" (addr) 185# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
183 186
184#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) 187#else /* CONFIG_X86_32 */
185/* 188/*
186 * For i386 we use the original ret-equivalent retpoline, because 189 * For i386 we use the original ret-equivalent retpoline, because
187 * otherwise we'll run out of registers. We don't care about CET 190 * otherwise we'll run out of registers. We don't care about CET
@@ -211,6 +214,7 @@
211 X86_FEATURE_RETPOLINE_AMD) 214 X86_FEATURE_RETPOLINE_AMD)
212 215
213# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) 216# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
217#endif
214#else /* No retpoline for C / inline asm */ 218#else /* No retpoline for C / inline asm */
215# define CALL_NOSPEC "call *%[thunk_target]\n" 219# define CALL_NOSPEC "call *%[thunk_target]\n"
216# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) 220# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
@@ -219,13 +223,19 @@
219/* The Spectre V2 mitigation variants */ 223/* The Spectre V2 mitigation variants */
220enum spectre_v2_mitigation { 224enum spectre_v2_mitigation {
221 SPECTRE_V2_NONE, 225 SPECTRE_V2_NONE,
222 SPECTRE_V2_RETPOLINE_MINIMAL,
223 SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
224 SPECTRE_V2_RETPOLINE_GENERIC, 226 SPECTRE_V2_RETPOLINE_GENERIC,
225 SPECTRE_V2_RETPOLINE_AMD, 227 SPECTRE_V2_RETPOLINE_AMD,
226 SPECTRE_V2_IBRS_ENHANCED, 228 SPECTRE_V2_IBRS_ENHANCED,
227}; 229};
228 230
231/* The indirect branch speculation control variants */
232enum spectre_v2_user_mitigation {
233 SPECTRE_V2_USER_NONE,
234 SPECTRE_V2_USER_STRICT,
235 SPECTRE_V2_USER_PRCTL,
236 SPECTRE_V2_USER_SECCOMP,
237};
238
229/* The Speculative Store Bypass disable variants */ 239/* The Speculative Store Bypass disable variants */
230enum ssb_mitigation { 240enum ssb_mitigation {
231 SPEC_STORE_BYPASS_NONE, 241 SPEC_STORE_BYPASS_NONE,
@@ -303,6 +313,10 @@ do { \
303 preempt_enable(); \ 313 preempt_enable(); \
304} while (0) 314} while (0)
305 315
316DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
317DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
318DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
319
306#endif /* __ASSEMBLY__ */ 320#endif /* __ASSEMBLY__ */
307 321
308/* 322/*
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index ae7c2c5cd7f0..5393babc0598 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
53 return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); 53 return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
54} 54}
55 55
56static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
57{
58 BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
59 return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
60}
61
56static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) 62static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
57{ 63{
58 BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); 64 BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
59 return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); 65 return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
60} 66}
61 67
68static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
69{
70 BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
71 return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
72}
73
62static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) 74static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
63{ 75{
64 return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; 76 return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
@@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void);
70static inline void speculative_store_bypass_ht_init(void) { } 82static inline void speculative_store_bypass_ht_init(void) { }
71#endif 83#endif
72 84
73extern void speculative_store_bypass_update(unsigned long tif); 85extern void speculation_ctrl_update(unsigned long tif);
74 86extern void speculation_ctrl_update_current(void);
75static inline void speculative_store_bypass_update_current(void)
76{
77 speculative_store_bypass_update(current_thread_info()->flags);
78}
79 87
80#endif 88#endif
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 36bd243843d6..7cf1a270d891 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
11 11
12__visible struct task_struct *__switch_to(struct task_struct *prev, 12__visible struct task_struct *__switch_to(struct task_struct *prev,
13 struct task_struct *next); 13 struct task_struct *next);
14struct tss_struct;
15void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
16 struct tss_struct *tss);
17 14
18/* This runs runs on the previous thread's stack. */ 15/* This runs runs on the previous thread's stack. */
19static inline void prepare_switch_to(struct task_struct *next) 16static inline void prepare_switch_to(struct task_struct *next)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2ff2a30a264f..82b73b75d67c 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,10 +79,12 @@ struct thread_info {
79#define TIF_SIGPENDING 2 /* signal pending */ 79#define TIF_SIGPENDING 2 /* signal pending */
80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
82#define TIF_SSBD 5 /* Reduced data speculation */ 82#define TIF_SSBD 5 /* Speculative store bypass disable */
83#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 83#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
84#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 84#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
85#define TIF_SECCOMP 8 /* secure computing */ 85#define TIF_SECCOMP 8 /* secure computing */
86#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
87#define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */
86#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ 88#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
87#define TIF_UPROBE 12 /* breakpointed or singlestepping */ 89#define TIF_UPROBE 12 /* breakpointed or singlestepping */
88#define TIF_PATCH_PENDING 13 /* pending live patching update */ 90#define TIF_PATCH_PENDING 13 /* pending live patching update */
@@ -110,6 +112,8 @@ struct thread_info {
110#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 112#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
111#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 113#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
112#define _TIF_SECCOMP (1 << TIF_SECCOMP) 114#define _TIF_SECCOMP (1 << TIF_SECCOMP)
115#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
116#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
113#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) 117#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
114#define _TIF_UPROBE (1 << TIF_UPROBE) 118#define _TIF_UPROBE (1 << TIF_UPROBE)
115#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) 119#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
@@ -145,8 +149,18 @@ struct thread_info {
145 _TIF_FSCHECK) 149 _TIF_FSCHECK)
146 150
147/* flags to check in __switch_to() */ 151/* flags to check in __switch_to() */
148#define _TIF_WORK_CTXSW \ 152#define _TIF_WORK_CTXSW_BASE \
149 (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) 153 (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
154 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
155
156/*
157 * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
158 */
159#ifdef CONFIG_SMP
160# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
161#else
162# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE)
163#endif
150 164
151#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) 165#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
152#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) 166#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d760611cfc35..f4204bf377fc 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -169,10 +169,14 @@ struct tlb_state {
169 169
170#define LOADED_MM_SWITCHING ((struct mm_struct *)1) 170#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
171 171
172 /* Last user mm for optimizing IBPB */
173 union {
174 struct mm_struct *last_user_mm;
175 unsigned long last_user_mm_ibpb;
176 };
177
172 u16 loaded_mm_asid; 178 u16 loaded_mm_asid;
173 u16 next_asid; 179 u16 next_asid;
174 /* last user mm's ctx id */
175 u64 last_ctx_id;
176 180
177 /* 181 /*
178 * We can be in one of several states: 182 * We can be in one of several states:
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 0f842104862c..b85a7c54c6a1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -303,6 +303,4 @@ extern void x86_init_noop(void);
303extern void x86_init_uint_noop(unsigned int unused); 303extern void x86_init_uint_noop(unsigned int unused);
304extern bool x86_pnpbios_disabled(void); 304extern bool x86_pnpbios_disabled(void);
305 305
306void x86_verify_bootdata_version(void);
307
308#endif 306#endif
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 22f89d040ddd..60733f137e9a 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -16,9 +16,6 @@
16#define RAMDISK_PROMPT_FLAG 0x8000 16#define RAMDISK_PROMPT_FLAG 0x8000
17#define RAMDISK_LOAD_FLAG 0x4000 17#define RAMDISK_LOAD_FLAG 0x4000
18 18
19/* version flags */
20#define VERSION_WRITTEN 0x8000
21
22/* loadflags */ 19/* loadflags */
23#define LOADED_HIGH (1<<0) 20#define LOADED_HIGH (1<<0)
24#define KASLR_FLAG (1<<1) 21#define KASLR_FLAG (1<<1)
@@ -89,7 +86,6 @@ struct setup_header {
89 __u64 pref_address; 86 __u64 pref_address;
90 __u32 init_size; 87 __u32 init_size;
91 __u32 handover_offset; 88 __u32 handover_offset;
92 __u64 acpi_rsdp_addr;
93} __attribute__((packed)); 89} __attribute__((packed));
94 90
95struct sys_desc_table { 91struct sys_desc_table {
@@ -159,7 +155,8 @@ struct boot_params {
159 __u8 _pad2[4]; /* 0x054 */ 155 __u8 _pad2[4]; /* 0x054 */
160 __u64 tboot_addr; /* 0x058 */ 156 __u64 tboot_addr; /* 0x058 */
161 struct ist_info ist_info; /* 0x060 */ 157 struct ist_info ist_info; /* 0x060 */
162 __u8 _pad3[16]; /* 0x070 */ 158 __u64 acpi_rsdp_addr; /* 0x070 */
159 __u8 _pad3[8]; /* 0x078 */
163 __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */ 160 __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
164 __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */ 161 __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
165 struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */ 162 struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 92c76bf97ad8..06635fbca81c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1776,5 +1776,5 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
1776 1776
1777u64 x86_default_get_root_pointer(void) 1777u64 x86_default_get_root_pointer(void)
1778{ 1778{
1779 return boot_params.hdr.acpi_rsdp_addr; 1779 return boot_params.acpi_rsdp_addr;
1780} 1780}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c37e66e493bf..500278f5308e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/nospec.h> 15#include <linux/nospec.h>
16#include <linux/prctl.h> 16#include <linux/prctl.h>
17#include <linux/sched/smt.h>
17 18
18#include <asm/spec-ctrl.h> 19#include <asm/spec-ctrl.h>
19#include <asm/cmdline.h> 20#include <asm/cmdline.h>
@@ -53,6 +54,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
53u64 __ro_after_init x86_amd_ls_cfg_base; 54u64 __ro_after_init x86_amd_ls_cfg_base;
54u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; 55u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
55 56
57/* Control conditional STIPB in switch_to() */
58DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
59/* Control conditional IBPB in switch_mm() */
60DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
61/* Control unconditional IBPB in switch_mm() */
62DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
63
56void __init check_bugs(void) 64void __init check_bugs(void)
57{ 65{
58 identify_boot_cpu(); 66 identify_boot_cpu();
@@ -123,31 +131,6 @@ void __init check_bugs(void)
123#endif 131#endif
124} 132}
125 133
126/* The kernel command line selection */
127enum spectre_v2_mitigation_cmd {
128 SPECTRE_V2_CMD_NONE,
129 SPECTRE_V2_CMD_AUTO,
130 SPECTRE_V2_CMD_FORCE,
131 SPECTRE_V2_CMD_RETPOLINE,
132 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
133 SPECTRE_V2_CMD_RETPOLINE_AMD,
134};
135
136static const char *spectre_v2_strings[] = {
137 [SPECTRE_V2_NONE] = "Vulnerable",
138 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
139 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
140 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
141 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
142 [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
143};
144
145#undef pr_fmt
146#define pr_fmt(fmt) "Spectre V2 : " fmt
147
148static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
149 SPECTRE_V2_NONE;
150
151void 134void
152x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) 135x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
153{ 136{
@@ -169,6 +152,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
169 static_cpu_has(X86_FEATURE_AMD_SSBD)) 152 static_cpu_has(X86_FEATURE_AMD_SSBD))
170 hostval |= ssbd_tif_to_spec_ctrl(ti->flags); 153 hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
171 154
155 /* Conditional STIBP enabled? */
156 if (static_branch_unlikely(&switch_to_cond_stibp))
157 hostval |= stibp_tif_to_spec_ctrl(ti->flags);
158
172 if (hostval != guestval) { 159 if (hostval != guestval) {
173 msrval = setguest ? guestval : hostval; 160 msrval = setguest ? guestval : hostval;
174 wrmsrl(MSR_IA32_SPEC_CTRL, msrval); 161 wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -202,7 +189,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
202 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : 189 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
203 ssbd_spec_ctrl_to_tif(hostval); 190 ssbd_spec_ctrl_to_tif(hostval);
204 191
205 speculative_store_bypass_update(tif); 192 speculation_ctrl_update(tif);
206 } 193 }
207} 194}
208EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); 195EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -217,6 +204,15 @@ static void x86_amd_ssb_disable(void)
217 wrmsrl(MSR_AMD64_LS_CFG, msrval); 204 wrmsrl(MSR_AMD64_LS_CFG, msrval);
218} 205}
219 206
207#undef pr_fmt
208#define pr_fmt(fmt) "Spectre V2 : " fmt
209
210static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
211 SPECTRE_V2_NONE;
212
213static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
214 SPECTRE_V2_USER_NONE;
215
220#ifdef RETPOLINE 216#ifdef RETPOLINE
221static bool spectre_v2_bad_module; 217static bool spectre_v2_bad_module;
222 218
@@ -238,67 +234,217 @@ static inline const char *spectre_v2_module_string(void)
238static inline const char *spectre_v2_module_string(void) { return ""; } 234static inline const char *spectre_v2_module_string(void) { return ""; }
239#endif 235#endif
240 236
241static void __init spec2_print_if_insecure(const char *reason) 237static inline bool match_option(const char *arg, int arglen, const char *opt)
242{ 238{
243 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 239 int len = strlen(opt);
244 pr_info("%s selected on command line.\n", reason); 240
241 return len == arglen && !strncmp(arg, opt, len);
245} 242}
246 243
247static void __init spec2_print_if_secure(const char *reason) 244/* The kernel command line selection for spectre v2 */
245enum spectre_v2_mitigation_cmd {
246 SPECTRE_V2_CMD_NONE,
247 SPECTRE_V2_CMD_AUTO,
248 SPECTRE_V2_CMD_FORCE,
249 SPECTRE_V2_CMD_RETPOLINE,
250 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
251 SPECTRE_V2_CMD_RETPOLINE_AMD,
252};
253
254enum spectre_v2_user_cmd {
255 SPECTRE_V2_USER_CMD_NONE,
256 SPECTRE_V2_USER_CMD_AUTO,
257 SPECTRE_V2_USER_CMD_FORCE,
258 SPECTRE_V2_USER_CMD_PRCTL,
259 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
260 SPECTRE_V2_USER_CMD_SECCOMP,
261 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
262};
263
264static const char * const spectre_v2_user_strings[] = {
265 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
266 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
267 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
268 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
269};
270
271static const struct {
272 const char *option;
273 enum spectre_v2_user_cmd cmd;
274 bool secure;
275} v2_user_options[] __initdata = {
276 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
277 { "off", SPECTRE_V2_USER_CMD_NONE, false },
278 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
279 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
280 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
281 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
282 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
283};
284
285static void __init spec_v2_user_print_cond(const char *reason, bool secure)
248{ 286{
249 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 287 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
250 pr_info("%s selected on command line.\n", reason); 288 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
251} 289}
252 290
253static inline bool retp_compiler(void) 291static enum spectre_v2_user_cmd __init
292spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
254{ 293{
255 return __is_defined(RETPOLINE); 294 char arg[20];
295 int ret, i;
296
297 switch (v2_cmd) {
298 case SPECTRE_V2_CMD_NONE:
299 return SPECTRE_V2_USER_CMD_NONE;
300 case SPECTRE_V2_CMD_FORCE:
301 return SPECTRE_V2_USER_CMD_FORCE;
302 default:
303 break;
304 }
305
306 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
307 arg, sizeof(arg));
308 if (ret < 0)
309 return SPECTRE_V2_USER_CMD_AUTO;
310
311 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
312 if (match_option(arg, ret, v2_user_options[i].option)) {
313 spec_v2_user_print_cond(v2_user_options[i].option,
314 v2_user_options[i].secure);
315 return v2_user_options[i].cmd;
316 }
317 }
318
319 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
320 return SPECTRE_V2_USER_CMD_AUTO;
256} 321}
257 322
258static inline bool match_option(const char *arg, int arglen, const char *opt) 323static void __init
324spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
259{ 325{
260 int len = strlen(opt); 326 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
327 bool smt_possible = IS_ENABLED(CONFIG_SMP);
328 enum spectre_v2_user_cmd cmd;
261 329
262 return len == arglen && !strncmp(arg, opt, len); 330 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
331 return;
332
333 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
334 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
335 smt_possible = false;
336
337 cmd = spectre_v2_parse_user_cmdline(v2_cmd);
338 switch (cmd) {
339 case SPECTRE_V2_USER_CMD_NONE:
340 goto set_mode;
341 case SPECTRE_V2_USER_CMD_FORCE:
342 mode = SPECTRE_V2_USER_STRICT;
343 break;
344 case SPECTRE_V2_USER_CMD_PRCTL:
345 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
346 mode = SPECTRE_V2_USER_PRCTL;
347 break;
348 case SPECTRE_V2_USER_CMD_AUTO:
349 case SPECTRE_V2_USER_CMD_SECCOMP:
350 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
351 if (IS_ENABLED(CONFIG_SECCOMP))
352 mode = SPECTRE_V2_USER_SECCOMP;
353 else
354 mode = SPECTRE_V2_USER_PRCTL;
355 break;
356 }
357
358 /* Initialize Indirect Branch Prediction Barrier */
359 if (boot_cpu_has(X86_FEATURE_IBPB)) {
360 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
361
362 switch (cmd) {
363 case SPECTRE_V2_USER_CMD_FORCE:
364 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
365 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
366 static_branch_enable(&switch_mm_always_ibpb);
367 break;
368 case SPECTRE_V2_USER_CMD_PRCTL:
369 case SPECTRE_V2_USER_CMD_AUTO:
370 case SPECTRE_V2_USER_CMD_SECCOMP:
371 static_branch_enable(&switch_mm_cond_ibpb);
372 break;
373 default:
374 break;
375 }
376
377 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
378 static_key_enabled(&switch_mm_always_ibpb) ?
379 "always-on" : "conditional");
380 }
381
382 /* If enhanced IBRS is enabled no STIPB required */
383 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
384 return;
385
386 /*
387 * If SMT is not possible or STIBP is not available clear the STIPB
388 * mode.
389 */
390 if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
391 mode = SPECTRE_V2_USER_NONE;
392set_mode:
393 spectre_v2_user = mode;
394 /* Only print the STIBP mode when SMT possible */
395 if (smt_possible)
396 pr_info("%s\n", spectre_v2_user_strings[mode]);
263} 397}
264 398
399static const char * const spectre_v2_strings[] = {
400 [SPECTRE_V2_NONE] = "Vulnerable",
401 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
402 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
403 [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
404};
405
265static const struct { 406static const struct {
266 const char *option; 407 const char *option;
267 enum spectre_v2_mitigation_cmd cmd; 408 enum spectre_v2_mitigation_cmd cmd;
268 bool secure; 409 bool secure;
269} mitigation_options[] = { 410} mitigation_options[] __initdata = {
270 { "off", SPECTRE_V2_CMD_NONE, false }, 411 { "off", SPECTRE_V2_CMD_NONE, false },
271 { "on", SPECTRE_V2_CMD_FORCE, true }, 412 { "on", SPECTRE_V2_CMD_FORCE, true },
272 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 413 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
273 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, 414 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
274 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 415 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
275 { "auto", SPECTRE_V2_CMD_AUTO, false }, 416 { "auto", SPECTRE_V2_CMD_AUTO, false },
276}; 417};
277 418
419static void __init spec_v2_print_cond(const char *reason, bool secure)
420{
421 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
422 pr_info("%s selected on command line.\n", reason);
423}
424
278static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 425static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
279{ 426{
427 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
280 char arg[20]; 428 char arg[20];
281 int ret, i; 429 int ret, i;
282 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
283 430
284 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) 431 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
285 return SPECTRE_V2_CMD_NONE; 432 return SPECTRE_V2_CMD_NONE;
286 else {
287 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
288 if (ret < 0)
289 return SPECTRE_V2_CMD_AUTO;
290 433
291 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 434 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
292 if (!match_option(arg, ret, mitigation_options[i].option)) 435 if (ret < 0)
293 continue; 436 return SPECTRE_V2_CMD_AUTO;
294 cmd = mitigation_options[i].cmd;
295 break;
296 }
297 437
298 if (i >= ARRAY_SIZE(mitigation_options)) { 438 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
299 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 439 if (!match_option(arg, ret, mitigation_options[i].option))
300 return SPECTRE_V2_CMD_AUTO; 440 continue;
301 } 441 cmd = mitigation_options[i].cmd;
442 break;
443 }
444
445 if (i >= ARRAY_SIZE(mitigation_options)) {
446 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
447 return SPECTRE_V2_CMD_AUTO;
302 } 448 }
303 449
304 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 450 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -316,54 +462,11 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
316 return SPECTRE_V2_CMD_AUTO; 462 return SPECTRE_V2_CMD_AUTO;
317 } 463 }
318 464
319 if (mitigation_options[i].secure) 465 spec_v2_print_cond(mitigation_options[i].option,
320 spec2_print_if_secure(mitigation_options[i].option); 466 mitigation_options[i].secure);
321 else
322 spec2_print_if_insecure(mitigation_options[i].option);
323
324 return cmd; 467 return cmd;
325} 468}
326 469
327static bool stibp_needed(void)
328{
329 if (spectre_v2_enabled == SPECTRE_V2_NONE)
330 return false;
331
332 if (!boot_cpu_has(X86_FEATURE_STIBP))
333 return false;
334
335 return true;
336}
337
338static void update_stibp_msr(void *info)
339{
340 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
341}
342
343void arch_smt_update(void)
344{
345 u64 mask;
346
347 if (!stibp_needed())
348 return;
349
350 mutex_lock(&spec_ctrl_mutex);
351 mask = x86_spec_ctrl_base;
352 if (cpu_smt_control == CPU_SMT_ENABLED)
353 mask |= SPEC_CTRL_STIBP;
354 else
355 mask &= ~SPEC_CTRL_STIBP;
356
357 if (mask != x86_spec_ctrl_base) {
358 pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
359 cpu_smt_control == CPU_SMT_ENABLED ?
360 "Enabling" : "Disabling");
361 x86_spec_ctrl_base = mask;
362 on_each_cpu(update_stibp_msr, NULL, 1);
363 }
364 mutex_unlock(&spec_ctrl_mutex);
365}
366
367static void __init spectre_v2_select_mitigation(void) 470static void __init spectre_v2_select_mitigation(void)
368{ 471{
369 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 472 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -417,14 +520,12 @@ retpoline_auto:
417 pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); 520 pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
418 goto retpoline_generic; 521 goto retpoline_generic;
419 } 522 }
420 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : 523 mode = SPECTRE_V2_RETPOLINE_AMD;
421 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
422 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); 524 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
423 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 525 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
424 } else { 526 } else {
425 retpoline_generic: 527 retpoline_generic:
426 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : 528 mode = SPECTRE_V2_RETPOLINE_GENERIC;
427 SPECTRE_V2_RETPOLINE_MINIMAL;
428 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 529 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
429 } 530 }
430 531
@@ -443,12 +544,6 @@ specv2_set_mode:
443 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 544 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
444 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); 545 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
445 546
446 /* Initialize Indirect Branch Prediction Barrier if supported */
447 if (boot_cpu_has(X86_FEATURE_IBPB)) {
448 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
449 pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
450 }
451
452 /* 547 /*
453 * Retpoline means the kernel is safe because it has no indirect 548 * Retpoline means the kernel is safe because it has no indirect
454 * branches. Enhanced IBRS protects firmware too, so, enable restricted 549 * branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -465,10 +560,67 @@ specv2_set_mode:
465 pr_info("Enabling Restricted Speculation for firmware calls\n"); 560 pr_info("Enabling Restricted Speculation for firmware calls\n");
466 } 561 }
467 562
563 /* Set up IBPB and STIBP depending on the general spectre V2 command */
564 spectre_v2_user_select_mitigation(cmd);
565
468 /* Enable STIBP if appropriate */ 566 /* Enable STIBP if appropriate */
469 arch_smt_update(); 567 arch_smt_update();
470} 568}
471 569
570static void update_stibp_msr(void * __unused)
571{
572 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
573}
574
575/* Update x86_spec_ctrl_base in case SMT state changed. */
576static void update_stibp_strict(void)
577{
578 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
579
580 if (sched_smt_active())
581 mask |= SPEC_CTRL_STIBP;
582
583 if (mask == x86_spec_ctrl_base)
584 return;
585
586 pr_info("Update user space SMT mitigation: STIBP %s\n",
587 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
588 x86_spec_ctrl_base = mask;
589 on_each_cpu(update_stibp_msr, NULL, 1);
590}
591
592/* Update the static key controlling the evaluation of TIF_SPEC_IB */
593static void update_indir_branch_cond(void)
594{
595 if (sched_smt_active())
596 static_branch_enable(&switch_to_cond_stibp);
597 else
598 static_branch_disable(&switch_to_cond_stibp);
599}
600
601void arch_smt_update(void)
602{
603 /* Enhanced IBRS implies STIBP. No update required. */
604 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
605 return;
606
607 mutex_lock(&spec_ctrl_mutex);
608
609 switch (spectre_v2_user) {
610 case SPECTRE_V2_USER_NONE:
611 break;
612 case SPECTRE_V2_USER_STRICT:
613 update_stibp_strict();
614 break;
615 case SPECTRE_V2_USER_PRCTL:
616 case SPECTRE_V2_USER_SECCOMP:
617 update_indir_branch_cond();
618 break;
619 }
620
621 mutex_unlock(&spec_ctrl_mutex);
622}
623
472#undef pr_fmt 624#undef pr_fmt
473#define pr_fmt(fmt) "Speculative Store Bypass: " fmt 625#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
474 626
@@ -483,7 +635,7 @@ enum ssb_mitigation_cmd {
483 SPEC_STORE_BYPASS_CMD_SECCOMP, 635 SPEC_STORE_BYPASS_CMD_SECCOMP,
484}; 636};
485 637
486static const char *ssb_strings[] = { 638static const char * const ssb_strings[] = {
487 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 639 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
488 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 640 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
489 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 641 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
@@ -493,7 +645,7 @@ static const char *ssb_strings[] = {
493static const struct { 645static const struct {
494 const char *option; 646 const char *option;
495 enum ssb_mitigation_cmd cmd; 647 enum ssb_mitigation_cmd cmd;
496} ssb_mitigation_options[] = { 648} ssb_mitigation_options[] __initdata = {
497 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 649 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
498 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 650 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
499 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 651 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
@@ -604,10 +756,25 @@ static void ssb_select_mitigation(void)
604#undef pr_fmt 756#undef pr_fmt
605#define pr_fmt(fmt) "Speculation prctl: " fmt 757#define pr_fmt(fmt) "Speculation prctl: " fmt
606 758
607static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 759static void task_update_spec_tif(struct task_struct *tsk)
608{ 760{
609 bool update; 761 /* Force the update of the real TIF bits */
762 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
610 763
764 /*
765 * Immediately update the speculation control MSRs for the current
766 * task, but for a non-current task delay setting the CPU
767 * mitigation until it is scheduled next.
768 *
769 * This can only happen for SECCOMP mitigation. For PRCTL it's
770 * always the current task.
771 */
772 if (tsk == current)
773 speculation_ctrl_update_current();
774}
775
776static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
777{
611 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 778 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
612 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 779 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
613 return -ENXIO; 780 return -ENXIO;
@@ -618,28 +785,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
618 if (task_spec_ssb_force_disable(task)) 785 if (task_spec_ssb_force_disable(task))
619 return -EPERM; 786 return -EPERM;
620 task_clear_spec_ssb_disable(task); 787 task_clear_spec_ssb_disable(task);
621 update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); 788 task_update_spec_tif(task);
622 break; 789 break;
623 case PR_SPEC_DISABLE: 790 case PR_SPEC_DISABLE:
624 task_set_spec_ssb_disable(task); 791 task_set_spec_ssb_disable(task);
625 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); 792 task_update_spec_tif(task);
626 break; 793 break;
627 case PR_SPEC_FORCE_DISABLE: 794 case PR_SPEC_FORCE_DISABLE:
628 task_set_spec_ssb_disable(task); 795 task_set_spec_ssb_disable(task);
629 task_set_spec_ssb_force_disable(task); 796 task_set_spec_ssb_force_disable(task);
630 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); 797 task_update_spec_tif(task);
631 break; 798 break;
632 default: 799 default:
633 return -ERANGE; 800 return -ERANGE;
634 } 801 }
802 return 0;
803}
635 804
636 /* 805static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
637 * If being set on non-current task, delay setting the CPU 806{
638 * mitigation until it is next scheduled. 807 switch (ctrl) {
639 */ 808 case PR_SPEC_ENABLE:
640 if (task == current && update) 809 if (spectre_v2_user == SPECTRE_V2_USER_NONE)
641 speculative_store_bypass_update_current(); 810 return 0;
642 811 /*
812 * Indirect branch speculation is always disabled in strict
813 * mode.
814 */
815 if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
816 return -EPERM;
817 task_clear_spec_ib_disable(task);
818 task_update_spec_tif(task);
819 break;
820 case PR_SPEC_DISABLE:
821 case PR_SPEC_FORCE_DISABLE:
822 /*
823 * Indirect branch speculation is always allowed when
824 * mitigation is force disabled.
825 */
826 if (spectre_v2_user == SPECTRE_V2_USER_NONE)
827 return -EPERM;
828 if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
829 return 0;
830 task_set_spec_ib_disable(task);
831 if (ctrl == PR_SPEC_FORCE_DISABLE)
832 task_set_spec_ib_force_disable(task);
833 task_update_spec_tif(task);
834 break;
835 default:
836 return -ERANGE;
837 }
643 return 0; 838 return 0;
644} 839}
645 840
@@ -649,6 +844,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
649 switch (which) { 844 switch (which) {
650 case PR_SPEC_STORE_BYPASS: 845 case PR_SPEC_STORE_BYPASS:
651 return ssb_prctl_set(task, ctrl); 846 return ssb_prctl_set(task, ctrl);
847 case PR_SPEC_INDIRECT_BRANCH:
848 return ib_prctl_set(task, ctrl);
652 default: 849 default:
653 return -ENODEV; 850 return -ENODEV;
654 } 851 }
@@ -659,6 +856,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
659{ 856{
660 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 857 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
661 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 858 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
859 if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
860 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
662} 861}
663#endif 862#endif
664 863
@@ -681,11 +880,35 @@ static int ssb_prctl_get(struct task_struct *task)
681 } 880 }
682} 881}
683 882
883static int ib_prctl_get(struct task_struct *task)
884{
885 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
886 return PR_SPEC_NOT_AFFECTED;
887
888 switch (spectre_v2_user) {
889 case SPECTRE_V2_USER_NONE:
890 return PR_SPEC_ENABLE;
891 case SPECTRE_V2_USER_PRCTL:
892 case SPECTRE_V2_USER_SECCOMP:
893 if (task_spec_ib_force_disable(task))
894 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
895 if (task_spec_ib_disable(task))
896 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
897 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
898 case SPECTRE_V2_USER_STRICT:
899 return PR_SPEC_DISABLE;
900 default:
901 return PR_SPEC_NOT_AFFECTED;
902 }
903}
904
684int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 905int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
685{ 906{
686 switch (which) { 907 switch (which) {
687 case PR_SPEC_STORE_BYPASS: 908 case PR_SPEC_STORE_BYPASS:
688 return ssb_prctl_get(task); 909 return ssb_prctl_get(task);
910 case PR_SPEC_INDIRECT_BRANCH:
911 return ib_prctl_get(task);
689 default: 912 default:
690 return -ENODEV; 913 return -ENODEV;
691 } 914 }
@@ -823,7 +1046,7 @@ early_param("l1tf", l1tf_cmdline);
823#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 1046#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
824 1047
825#if IS_ENABLED(CONFIG_KVM_INTEL) 1048#if IS_ENABLED(CONFIG_KVM_INTEL)
826static const char *l1tf_vmx_states[] = { 1049static const char * const l1tf_vmx_states[] = {
827 [VMENTER_L1D_FLUSH_AUTO] = "auto", 1050 [VMENTER_L1D_FLUSH_AUTO] = "auto",
828 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 1051 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
829 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 1052 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
@@ -839,13 +1062,14 @@ static ssize_t l1tf_show_state(char *buf)
839 1062
840 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 1063 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
841 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 1064 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
842 cpu_smt_control == CPU_SMT_ENABLED)) 1065 sched_smt_active())) {
843 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 1066 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
844 l1tf_vmx_states[l1tf_vmx_mitigation]); 1067 l1tf_vmx_states[l1tf_vmx_mitigation]);
1068 }
845 1069
846 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 1070 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
847 l1tf_vmx_states[l1tf_vmx_mitigation], 1071 l1tf_vmx_states[l1tf_vmx_mitigation],
848 cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled"); 1072 sched_smt_active() ? "vulnerable" : "disabled");
849} 1073}
850#else 1074#else
851static ssize_t l1tf_show_state(char *buf) 1075static ssize_t l1tf_show_state(char *buf)
@@ -854,11 +1078,39 @@ static ssize_t l1tf_show_state(char *buf)
854} 1078}
855#endif 1079#endif
856 1080
1081static char *stibp_state(void)
1082{
1083 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
1084 return "";
1085
1086 switch (spectre_v2_user) {
1087 case SPECTRE_V2_USER_NONE:
1088 return ", STIBP: disabled";
1089 case SPECTRE_V2_USER_STRICT:
1090 return ", STIBP: forced";
1091 case SPECTRE_V2_USER_PRCTL:
1092 case SPECTRE_V2_USER_SECCOMP:
1093 if (static_key_enabled(&switch_to_cond_stibp))
1094 return ", STIBP: conditional";
1095 }
1096 return "";
1097}
1098
1099static char *ibpb_state(void)
1100{
1101 if (boot_cpu_has(X86_FEATURE_IBPB)) {
1102 if (static_key_enabled(&switch_mm_always_ibpb))
1103 return ", IBPB: always-on";
1104 if (static_key_enabled(&switch_mm_cond_ibpb))
1105 return ", IBPB: conditional";
1106 return ", IBPB: disabled";
1107 }
1108 return "";
1109}
1110
857static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 1111static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
858 char *buf, unsigned int bug) 1112 char *buf, unsigned int bug)
859{ 1113{
860 int ret;
861
862 if (!boot_cpu_has_bug(bug)) 1114 if (!boot_cpu_has_bug(bug))
863 return sprintf(buf, "Not affected\n"); 1115 return sprintf(buf, "Not affected\n");
864 1116
@@ -876,13 +1128,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
876 return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 1128 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
877 1129
878 case X86_BUG_SPECTRE_V2: 1130 case X86_BUG_SPECTRE_V2:
879 ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], 1131 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
880 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", 1132 ibpb_state(),
881 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 1133 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
882 (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "", 1134 stibp_state(),
883 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", 1135 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
884 spectre_v2_module_string()); 1136 spectre_v2_module_string());
885 return ret;
886 1137
887 case X86_BUG_SPEC_STORE_BYPASS: 1138 case X86_BUG_SPEC_STORE_BYPASS:
888 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); 1139 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index dd33c357548f..e12454e21b8a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -56,7 +56,7 @@
56/* Threshold LVT offset is at MSR0xC0000410[15:12] */ 56/* Threshold LVT offset is at MSR0xC0000410[15:12] */
57#define SMCA_THR_LVT_OFF 0xF000 57#define SMCA_THR_LVT_OFF 0xF000
58 58
59static bool thresholding_en; 59static bool thresholding_irq_en;
60 60
61static const char * const th_names[] = { 61static const char * const th_names[] = {
62 "load_store", 62 "load_store",
@@ -534,9 +534,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
534 534
535set_offset: 535set_offset:
536 offset = setup_APIC_mce_threshold(offset, new); 536 offset = setup_APIC_mce_threshold(offset, new);
537 537 if (offset == new)
538 if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt)) 538 thresholding_irq_en = true;
539 mce_threshold_vector = amd_threshold_interrupt;
540 539
541done: 540done:
542 mce_threshold_block_init(&b, offset); 541 mce_threshold_block_init(&b, offset);
@@ -1357,9 +1356,6 @@ int mce_threshold_remove_device(unsigned int cpu)
1357{ 1356{
1358 unsigned int bank; 1357 unsigned int bank;
1359 1358
1360 if (!thresholding_en)
1361 return 0;
1362
1363 for (bank = 0; bank < mca_cfg.banks; ++bank) { 1359 for (bank = 0; bank < mca_cfg.banks; ++bank) {
1364 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 1360 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
1365 continue; 1361 continue;
@@ -1377,9 +1373,6 @@ int mce_threshold_create_device(unsigned int cpu)
1377 struct threshold_bank **bp; 1373 struct threshold_bank **bp;
1378 int err = 0; 1374 int err = 0;
1379 1375
1380 if (!thresholding_en)
1381 return 0;
1382
1383 bp = per_cpu(threshold_banks, cpu); 1376 bp = per_cpu(threshold_banks, cpu);
1384 if (bp) 1377 if (bp)
1385 return 0; 1378 return 0;
@@ -1408,9 +1401,6 @@ static __init int threshold_init_device(void)
1408{ 1401{
1409 unsigned lcpu = 0; 1402 unsigned lcpu = 0;
1410 1403
1411 if (mce_threshold_vector == amd_threshold_interrupt)
1412 thresholding_en = true;
1413
1414 /* to hit CPUs online before the notifier is up */ 1404 /* to hit CPUs online before the notifier is up */
1415 for_each_online_cpu(lcpu) { 1405 for_each_online_cpu(lcpu) {
1416 int err = mce_threshold_create_device(lcpu); 1406 int err = mce_threshold_create_device(lcpu);
@@ -1419,6 +1409,9 @@ static __init int threshold_init_device(void)
1419 return err; 1409 return err;
1420 } 1410 }
1421 1411
1412 if (thresholding_irq_en)
1413 mce_threshold_vector = amd_threshold_interrupt;
1414
1422 return 0; 1415 return 0;
1423} 1416}
1424/* 1417/*
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 61a949d84dfa..d99a8ee9e185 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
344 sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); 344 sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
345 } 345 }
346 346
347 local_bh_disable();
347 fpu->initialized = 1; 348 fpu->initialized = 1;
348 preempt_disable();
349 fpu__restore(fpu); 349 fpu__restore(fpu);
350 preempt_enable(); 350 local_bh_enable();
351 351
352 return err; 352 return err;
353 } else { 353 } else {
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 01ebcb6f263e..7ee8067cbf45 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -994,7 +994,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
994{ 994{
995 unsigned long old; 995 unsigned long old;
996 int faulted; 996 int faulted;
997 struct ftrace_graph_ent trace;
998 unsigned long return_hooker = (unsigned long) 997 unsigned long return_hooker = (unsigned long)
999 &return_to_handler; 998 &return_to_handler;
1000 999
@@ -1046,19 +1045,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
1046 return; 1045 return;
1047 } 1046 }
1048 1047
1049 trace.func = self_addr; 1048 if (function_graph_enter(old, self_addr, frame_pointer, parent))
1050 trace.depth = current->curr_ret_stack + 1;
1051
1052 /* Only trace if the calling function expects to */
1053 if (!ftrace_graph_entry(&trace)) {
1054 *parent = old; 1049 *parent = old;
1055 return;
1056 }
1057
1058 if (ftrace_push_return_trace(old, self_addr, &trace.depth,
1059 frame_pointer, parent) == -EBUSY) {
1060 *parent = old;
1061 return;
1062 }
1063} 1050}
1064#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 1051#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 76fa3b836598..ec6fefbfd3c0 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -37,7 +37,6 @@ asmlinkage __visible void __init i386_start_kernel(void)
37 cr4_init_shadow(); 37 cr4_init_shadow();
38 38
39 sanitize_boot_params(&boot_params); 39 sanitize_boot_params(&boot_params);
40 x86_verify_bootdata_version();
41 40
42 x86_early_init_platform_quirks(); 41 x86_early_init_platform_quirks();
43 42
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 7663a8eb602b..16b1cbd3a61e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -457,8 +457,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
457 if (!boot_params.hdr.version) 457 if (!boot_params.hdr.version)
458 copy_bootdata(__va(real_mode_data)); 458 copy_bootdata(__va(real_mode_data));
459 459
460 x86_verify_bootdata_version();
461
462 x86_early_init_platform_quirks(); 460 x86_early_init_platform_quirks();
463 461
464 switch (boot_params.hdr.hardware_subarch) { 462 switch (boot_params.hdr.hardware_subarch) {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c93fcfdf1673..7d31192296a8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -40,6 +40,8 @@
40#include <asm/prctl.h> 40#include <asm/prctl.h>
41#include <asm/spec-ctrl.h> 41#include <asm/spec-ctrl.h>
42 42
43#include "process.h"
44
43/* 45/*
44 * per-CPU TSS segments. Threads are completely 'soft' on Linux, 46 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
45 * no more per-task TSS's. The TSS size is kept cacheline-aligned 47 * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -252,11 +254,12 @@ void arch_setup_new_exec(void)
252 enable_cpuid(); 254 enable_cpuid();
253} 255}
254 256
255static inline void switch_to_bitmap(struct tss_struct *tss, 257static inline void switch_to_bitmap(struct thread_struct *prev,
256 struct thread_struct *prev,
257 struct thread_struct *next, 258 struct thread_struct *next,
258 unsigned long tifp, unsigned long tifn) 259 unsigned long tifp, unsigned long tifn)
259{ 260{
261 struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
262
260 if (tifn & _TIF_IO_BITMAP) { 263 if (tifn & _TIF_IO_BITMAP) {
261 /* 264 /*
262 * Copy the relevant range of the IO bitmap. 265 * Copy the relevant range of the IO bitmap.
@@ -395,32 +398,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
395 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); 398 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
396} 399}
397 400
398static __always_inline void intel_set_ssb_state(unsigned long tifn) 401/*
402 * Update the MSRs managing speculation control, during context switch.
403 *
404 * tifp: Previous task's thread flags
405 * tifn: Next task's thread flags
406 */
407static __always_inline void __speculation_ctrl_update(unsigned long tifp,
408 unsigned long tifn)
399{ 409{
400 u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); 410 unsigned long tif_diff = tifp ^ tifn;
411 u64 msr = x86_spec_ctrl_base;
412 bool updmsr = false;
413
414 /*
415 * If TIF_SSBD is different, select the proper mitigation
416 * method. Note that if SSBD mitigation is disabled or permanentely
417 * enabled this branch can't be taken because nothing can set
418 * TIF_SSBD.
419 */
420 if (tif_diff & _TIF_SSBD) {
421 if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
422 amd_set_ssb_virt_state(tifn);
423 } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
424 amd_set_core_ssb_state(tifn);
425 } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
426 static_cpu_has(X86_FEATURE_AMD_SSBD)) {
427 msr |= ssbd_tif_to_spec_ctrl(tifn);
428 updmsr = true;
429 }
430 }
431
432 /*
433 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
434 * otherwise avoid the MSR write.
435 */
436 if (IS_ENABLED(CONFIG_SMP) &&
437 static_branch_unlikely(&switch_to_cond_stibp)) {
438 updmsr |= !!(tif_diff & _TIF_SPEC_IB);
439 msr |= stibp_tif_to_spec_ctrl(tifn);
440 }
401 441
402 wrmsrl(MSR_IA32_SPEC_CTRL, msr); 442 if (updmsr)
443 wrmsrl(MSR_IA32_SPEC_CTRL, msr);
403} 444}
404 445
405static __always_inline void __speculative_store_bypass_update(unsigned long tifn) 446static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
406{ 447{
407 if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) 448 if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
408 amd_set_ssb_virt_state(tifn); 449 if (task_spec_ssb_disable(tsk))
409 else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 450 set_tsk_thread_flag(tsk, TIF_SSBD);
410 amd_set_core_ssb_state(tifn); 451 else
411 else 452 clear_tsk_thread_flag(tsk, TIF_SSBD);
412 intel_set_ssb_state(tifn); 453
454 if (task_spec_ib_disable(tsk))
455 set_tsk_thread_flag(tsk, TIF_SPEC_IB);
456 else
457 clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
458 }
459 /* Return the updated threadinfo flags*/
460 return task_thread_info(tsk)->flags;
413} 461}
414 462
415void speculative_store_bypass_update(unsigned long tif) 463void speculation_ctrl_update(unsigned long tif)
416{ 464{
465 /* Forced update. Make sure all relevant TIF flags are different */
417 preempt_disable(); 466 preempt_disable();
418 __speculative_store_bypass_update(tif); 467 __speculation_ctrl_update(~tif, tif);
419 preempt_enable(); 468 preempt_enable();
420} 469}
421 470
422void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 471/* Called from seccomp/prctl update */
423 struct tss_struct *tss) 472void speculation_ctrl_update_current(void)
473{
474 preempt_disable();
475 speculation_ctrl_update(speculation_ctrl_update_tif(current));
476 preempt_enable();
477}
478
479void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
424{ 480{
425 struct thread_struct *prev, *next; 481 struct thread_struct *prev, *next;
426 unsigned long tifp, tifn; 482 unsigned long tifp, tifn;
@@ -430,7 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
430 486
431 tifn = READ_ONCE(task_thread_info(next_p)->flags); 487 tifn = READ_ONCE(task_thread_info(next_p)->flags);
432 tifp = READ_ONCE(task_thread_info(prev_p)->flags); 488 tifp = READ_ONCE(task_thread_info(prev_p)->flags);
433 switch_to_bitmap(tss, prev, next, tifp, tifn); 489 switch_to_bitmap(prev, next, tifp, tifn);
434 490
435 propagate_user_return_notify(prev_p, next_p); 491 propagate_user_return_notify(prev_p, next_p);
436 492
@@ -451,8 +507,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
451 if ((tifp ^ tifn) & _TIF_NOCPUID) 507 if ((tifp ^ tifn) & _TIF_NOCPUID)
452 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); 508 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
453 509
454 if ((tifp ^ tifn) & _TIF_SSBD) 510 if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
455 __speculative_store_bypass_update(tifn); 511 __speculation_ctrl_update(tifp, tifn);
512 } else {
513 speculation_ctrl_update_tif(prev_p);
514 tifn = speculation_ctrl_update_tif(next_p);
515
516 /* Enforce MSR update to ensure consistent state */
517 __speculation_ctrl_update(~tifn, tifn);
518 }
456} 519}
457 520
458/* 521/*
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
new file mode 100644
index 000000000000..898e97cf6629
--- /dev/null
+++ b/arch/x86/kernel/process.h
@@ -0,0 +1,39 @@
1// SPDX-License-Identifier: GPL-2.0
2//
3// Code shared between 32 and 64 bit
4
5#include <asm/spec-ctrl.h>
6
7void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
8
9/*
10 * This needs to be inline to optimize for the common case where no extra
11 * work needs to be done.
12 */
13static inline void switch_to_extra(struct task_struct *prev,
14 struct task_struct *next)
15{
16 unsigned long next_tif = task_thread_info(next)->flags;
17 unsigned long prev_tif = task_thread_info(prev)->flags;
18
19 if (IS_ENABLED(CONFIG_SMP)) {
20 /*
21 * Avoid __switch_to_xtra() invocation when conditional
22 * STIPB is disabled and the only different bit is
23 * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
24 * in the TIF_WORK_CTXSW masks.
25 */
26 if (!static_branch_likely(&switch_to_cond_stibp)) {
27 prev_tif &= ~_TIF_SPEC_IB;
28 next_tif &= ~_TIF_SPEC_IB;
29 }
30 }
31
32 /*
33 * __switch_to_xtra() handles debug registers, i/o bitmaps,
34 * speculation mitigations etc.
35 */
36 if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
37 prev_tif & _TIF_WORK_CTXSW_PREV))
38 __switch_to_xtra(prev, next);
39}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 5046a3c9dec2..d3e593eb189f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -59,6 +59,8 @@
59#include <asm/intel_rdt_sched.h> 59#include <asm/intel_rdt_sched.h>
60#include <asm/proto.h> 60#include <asm/proto.h>
61 61
62#include "process.h"
63
62void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) 64void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
63{ 65{
64 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 66 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -232,7 +234,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
232 struct fpu *prev_fpu = &prev->fpu; 234 struct fpu *prev_fpu = &prev->fpu;
233 struct fpu *next_fpu = &next->fpu; 235 struct fpu *next_fpu = &next->fpu;
234 int cpu = smp_processor_id(); 236 int cpu = smp_processor_id();
235 struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
236 237
237 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ 238 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
238 239
@@ -264,12 +265,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
264 if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) 265 if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
265 set_iopl_mask(next->iopl); 266 set_iopl_mask(next->iopl);
266 267
267 /* 268 switch_to_extra(prev_p, next_p);
268 * Now maybe handle debug registers and/or IO bitmaps
269 */
270 if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
271 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
272 __switch_to_xtra(prev_p, next_p, tss);
273 269
274 /* 270 /*
275 * Leave lazy mode, flushing any hypercalls made here. 271 * Leave lazy mode, flushing any hypercalls made here.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0e0b4288a4b2..bbfbf017065c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -60,6 +60,8 @@
60#include <asm/unistd_32_ia32.h> 60#include <asm/unistd_32_ia32.h>
61#endif 61#endif
62 62
63#include "process.h"
64
63/* Prints also some state that isn't saved in the pt_regs */ 65/* Prints also some state that isn't saved in the pt_regs */
64void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) 66void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
65{ 67{
@@ -553,7 +555,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
553 struct fpu *prev_fpu = &prev->fpu; 555 struct fpu *prev_fpu = &prev->fpu;
554 struct fpu *next_fpu = &next->fpu; 556 struct fpu *next_fpu = &next->fpu;
555 int cpu = smp_processor_id(); 557 int cpu = smp_processor_id();
556 struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
557 558
558 WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && 559 WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
559 this_cpu_read(irq_count) != -1); 560 this_cpu_read(irq_count) != -1);
@@ -617,12 +618,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
617 /* Reload sp0. */ 618 /* Reload sp0. */
618 update_task_stack(next_p); 619 update_task_stack(next_p);
619 620
620 /* 621 switch_to_extra(prev_p, next_p);
621 * Now maybe reload the debug registers and handle I/O bitmaps
622 */
623 if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
624 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
625 __switch_to_xtra(prev_p, next_p, tss);
626 622
627#ifdef CONFIG_XEN_PV 623#ifdef CONFIG_XEN_PV
628 /* 624 /*
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b74e7bfed6ab..d494b9bfe618 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1280,23 +1280,6 @@ void __init setup_arch(char **cmdline_p)
1280 unwind_init(); 1280 unwind_init();
1281} 1281}
1282 1282
1283/*
1284 * From boot protocol 2.14 onwards we expect the bootloader to set the
1285 * version to "0x8000 | <used version>". In case we find a version >= 2.14
1286 * without the 0x8000 we assume the boot loader supports 2.13 only and
1287 * reset the version accordingly. The 0x8000 flag is removed in any case.
1288 */
1289void __init x86_verify_bootdata_version(void)
1290{
1291 if (boot_params.hdr.version & VERSION_WRITTEN)
1292 boot_params.hdr.version &= ~VERSION_WRITTEN;
1293 else if (boot_params.hdr.version >= 0x020e)
1294 boot_params.hdr.version = 0x020d;
1295
1296 if (boot_params.hdr.version < 0x020e)
1297 boot_params.hdr.acpi_rsdp_addr = 0;
1298}
1299
1300#ifdef CONFIG_X86_32 1283#ifdef CONFIG_X86_32
1301 1284
1302static struct resource video_ram_resource = { 1285static struct resource video_ram_resource = {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 89db20f8cb70..c4533d05c214 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -55,7 +55,7 @@
55#define PRIo64 "o" 55#define PRIo64 "o"
56 56
57/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ 57/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
58#define apic_debug(fmt, arg...) 58#define apic_debug(fmt, arg...) do {} while (0)
59 59
60/* 14 is the version for Xeon and Pentium 8.4.8*/ 60/* 14 is the version for Xeon and Pentium 8.4.8*/
61#define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16)) 61#define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
@@ -576,6 +576,11 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
576 rcu_read_lock(); 576 rcu_read_lock();
577 map = rcu_dereference(kvm->arch.apic_map); 577 map = rcu_dereference(kvm->arch.apic_map);
578 578
579 if (unlikely(!map)) {
580 count = -EOPNOTSUPP;
581 goto out;
582 }
583
579 if (min > map->max_apic_id) 584 if (min > map->max_apic_id)
580 goto out; 585 goto out;
581 /* Bits above cluster_size are masked in the caller. */ 586 /* Bits above cluster_size are masked in the caller. */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cf5f572f2305..7c03c0f35444 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5074,9 +5074,9 @@ static bool need_remote_flush(u64 old, u64 new)
5074} 5074}
5075 5075
5076static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, 5076static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5077 const u8 *new, int *bytes) 5077 int *bytes)
5078{ 5078{
5079 u64 gentry; 5079 u64 gentry = 0;
5080 int r; 5080 int r;
5081 5081
5082 /* 5082 /*
@@ -5088,22 +5088,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5088 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ 5088 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5089 *gpa &= ~(gpa_t)7; 5089 *gpa &= ~(gpa_t)7;
5090 *bytes = 8; 5090 *bytes = 8;
5091 r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
5092 if (r)
5093 gentry = 0;
5094 new = (const u8 *)&gentry;
5095 } 5091 }
5096 5092
5097 switch (*bytes) { 5093 if (*bytes == 4 || *bytes == 8) {
5098 case 4: 5094 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5099 gentry = *(const u32 *)new; 5095 if (r)
5100 break; 5096 gentry = 0;
5101 case 8:
5102 gentry = *(const u64 *)new;
5103 break;
5104 default:
5105 gentry = 0;
5106 break;
5107 } 5097 }
5108 5098
5109 return gentry; 5099 return gentry;
@@ -5207,8 +5197,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5207 5197
5208 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); 5198 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5209 5199
5210 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
5211
5212 /* 5200 /*
5213 * No need to care whether allocation memory is successful 5201 * No need to care whether allocation memory is successful
5214 * or not since pte prefetch is skiped if it does not have 5202 * or not since pte prefetch is skiped if it does not have
@@ -5217,6 +5205,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5217 mmu_topup_memory_caches(vcpu); 5205 mmu_topup_memory_caches(vcpu);
5218 5206
5219 spin_lock(&vcpu->kvm->mmu_lock); 5207 spin_lock(&vcpu->kvm->mmu_lock);
5208
5209 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5210
5220 ++vcpu->kvm->stat.mmu_pte_write; 5211 ++vcpu->kvm->stat.mmu_pte_write;
5221 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); 5212 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5222 5213
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e21ccc46792..cc6467b35a85 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1446,7 +1446,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
1446 return vcpu->arch.tsc_offset; 1446 return vcpu->arch.tsc_offset;
1447} 1447}
1448 1448
1449static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1449static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1450{ 1450{
1451 struct vcpu_svm *svm = to_svm(vcpu); 1451 struct vcpu_svm *svm = to_svm(vcpu);
1452 u64 g_tsc_offset = 0; 1452 u64 g_tsc_offset = 0;
@@ -1464,6 +1464,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1464 svm->vmcb->control.tsc_offset = offset + g_tsc_offset; 1464 svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1465 1465
1466 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 1466 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1467 return svm->vmcb->control.tsc_offset;
1467} 1468}
1468 1469
1469static void avic_init_vmcb(struct vcpu_svm *svm) 1470static void avic_init_vmcb(struct vcpu_svm *svm)
@@ -1664,20 +1665,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1664static int avic_init_access_page(struct kvm_vcpu *vcpu) 1665static int avic_init_access_page(struct kvm_vcpu *vcpu)
1665{ 1666{
1666 struct kvm *kvm = vcpu->kvm; 1667 struct kvm *kvm = vcpu->kvm;
1667 int ret; 1668 int ret = 0;
1668 1669
1670 mutex_lock(&kvm->slots_lock);
1669 if (kvm->arch.apic_access_page_done) 1671 if (kvm->arch.apic_access_page_done)
1670 return 0; 1672 goto out;
1671 1673
1672 ret = x86_set_memory_region(kvm, 1674 ret = __x86_set_memory_region(kvm,
1673 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 1675 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1674 APIC_DEFAULT_PHYS_BASE, 1676 APIC_DEFAULT_PHYS_BASE,
1675 PAGE_SIZE); 1677 PAGE_SIZE);
1676 if (ret) 1678 if (ret)
1677 return ret; 1679 goto out;
1678 1680
1679 kvm->arch.apic_access_page_done = true; 1681 kvm->arch.apic_access_page_done = true;
1680 return 0; 1682out:
1683 mutex_unlock(&kvm->slots_lock);
1684 return ret;
1681} 1685}
1682 1686
1683static int avic_init_backing_page(struct kvm_vcpu *vcpu) 1687static int avic_init_backing_page(struct kvm_vcpu *vcpu)
@@ -2189,21 +2193,31 @@ out:
2189 return ERR_PTR(err); 2193 return ERR_PTR(err);
2190} 2194}
2191 2195
2196static void svm_clear_current_vmcb(struct vmcb *vmcb)
2197{
2198 int i;
2199
2200 for_each_online_cpu(i)
2201 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
2202}
2203
2192static void svm_free_vcpu(struct kvm_vcpu *vcpu) 2204static void svm_free_vcpu(struct kvm_vcpu *vcpu)
2193{ 2205{
2194 struct vcpu_svm *svm = to_svm(vcpu); 2206 struct vcpu_svm *svm = to_svm(vcpu);
2195 2207
2208 /*
2209 * The vmcb page can be recycled, causing a false negative in
2210 * svm_vcpu_load(). So, ensure that no logical CPU has this
2211 * vmcb page recorded as its current vmcb.
2212 */
2213 svm_clear_current_vmcb(svm->vmcb);
2214
2196 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); 2215 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
2197 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); 2216 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
2198 __free_page(virt_to_page(svm->nested.hsave)); 2217 __free_page(virt_to_page(svm->nested.hsave));
2199 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); 2218 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
2200 kvm_vcpu_uninit(vcpu); 2219 kvm_vcpu_uninit(vcpu);
2201 kmem_cache_free(kvm_vcpu_cache, svm); 2220 kmem_cache_free(kvm_vcpu_cache, svm);
2202 /*
2203 * The vmcb page can be recycled, causing a false negative in
2204 * svm_vcpu_load(). So do a full IBPB now.
2205 */
2206 indirect_branch_prediction_barrier();
2207} 2221}
2208 2222
2209static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 2223static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -7149,7 +7163,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7149 .has_wbinvd_exit = svm_has_wbinvd_exit, 7163 .has_wbinvd_exit = svm_has_wbinvd_exit,
7150 7164
7151 .read_l1_tsc_offset = svm_read_l1_tsc_offset, 7165 .read_l1_tsc_offset = svm_read_l1_tsc_offset,
7152 .write_tsc_offset = svm_write_tsc_offset, 7166 .write_l1_tsc_offset = svm_write_l1_tsc_offset,
7153 7167
7154 .set_tdp_cr3 = set_tdp_cr3, 7168 .set_tdp_cr3 = set_tdp_cr3,
7155 7169
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4555077d69ce..02edd9960e9d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -174,6 +174,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
174 * refer SDM volume 3b section 21.6.13 & 22.1.3. 174 * refer SDM volume 3b section 21.6.13 & 22.1.3.
175 */ 175 */
176static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; 176static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
177module_param(ple_gap, uint, 0444);
177 178
178static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; 179static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
179module_param(ple_window, uint, 0444); 180module_param(ple_window, uint, 0444);
@@ -984,6 +985,7 @@ struct vcpu_vmx {
984 struct shared_msr_entry *guest_msrs; 985 struct shared_msr_entry *guest_msrs;
985 int nmsrs; 986 int nmsrs;
986 int save_nmsrs; 987 int save_nmsrs;
988 bool guest_msrs_dirty;
987 unsigned long host_idt_base; 989 unsigned long host_idt_base;
988#ifdef CONFIG_X86_64 990#ifdef CONFIG_X86_64
989 u64 msr_host_kernel_gs_base; 991 u64 msr_host_kernel_gs_base;
@@ -1306,7 +1308,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
1306static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, 1308static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
1307 u16 error_code); 1309 u16 error_code);
1308static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); 1310static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
1309static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, 1311static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
1310 u32 msr, int type); 1312 u32 msr, int type);
1311 1313
1312static DEFINE_PER_CPU(struct vmcs *, vmxarea); 1314static DEFINE_PER_CPU(struct vmcs *, vmxarea);
@@ -1610,12 +1612,6 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
1610{ 1612{
1611 struct vcpu_vmx *vmx = to_vmx(vcpu); 1613 struct vcpu_vmx *vmx = to_vmx(vcpu);
1612 1614
1613 /* We don't support disabling the feature for simplicity. */
1614 if (vmx->nested.enlightened_vmcs_enabled)
1615 return 0;
1616
1617 vmx->nested.enlightened_vmcs_enabled = true;
1618
1619 /* 1615 /*
1620 * vmcs_version represents the range of supported Enlightened VMCS 1616 * vmcs_version represents the range of supported Enlightened VMCS
1621 * versions: lower 8 bits is the minimal version, higher 8 bits is the 1617 * versions: lower 8 bits is the minimal version, higher 8 bits is the
@@ -1625,6 +1621,12 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
1625 if (vmcs_version) 1621 if (vmcs_version)
1626 *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; 1622 *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
1627 1623
1624 /* We don't support disabling the feature for simplicity. */
1625 if (vmx->nested.enlightened_vmcs_enabled)
1626 return 0;
1627
1628 vmx->nested.enlightened_vmcs_enabled = true;
1629
1628 vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; 1630 vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
1629 vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; 1631 vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
1630 vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; 1632 vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
@@ -2897,6 +2899,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2897 2899
2898 vmx->req_immediate_exit = false; 2900 vmx->req_immediate_exit = false;
2899 2901
2902 /*
2903 * Note that guest MSRs to be saved/restored can also be changed
2904 * when guest state is loaded. This happens when guest transitions
2905 * to/from long-mode by setting MSR_EFER.LMA.
2906 */
2907 if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
2908 vmx->guest_msrs_dirty = false;
2909 for (i = 0; i < vmx->save_nmsrs; ++i)
2910 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2911 vmx->guest_msrs[i].data,
2912 vmx->guest_msrs[i].mask);
2913
2914 }
2915
2900 if (vmx->loaded_cpu_state) 2916 if (vmx->loaded_cpu_state)
2901 return; 2917 return;
2902 2918
@@ -2957,11 +2973,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2957 vmcs_writel(HOST_GS_BASE, gs_base); 2973 vmcs_writel(HOST_GS_BASE, gs_base);
2958 host_state->gs_base = gs_base; 2974 host_state->gs_base = gs_base;
2959 } 2975 }
2960
2961 for (i = 0; i < vmx->save_nmsrs; ++i)
2962 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2963 vmx->guest_msrs[i].data,
2964 vmx->guest_msrs[i].mask);
2965} 2976}
2966 2977
2967static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) 2978static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
@@ -3436,6 +3447,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
3436 move_msr_up(vmx, index, save_nmsrs++); 3447 move_msr_up(vmx, index, save_nmsrs++);
3437 3448
3438 vmx->save_nmsrs = save_nmsrs; 3449 vmx->save_nmsrs = save_nmsrs;
3450 vmx->guest_msrs_dirty = true;
3439 3451
3440 if (cpu_has_vmx_msr_bitmap()) 3452 if (cpu_has_vmx_msr_bitmap())
3441 vmx_update_msr_bitmap(&vmx->vcpu); 3453 vmx_update_msr_bitmap(&vmx->vcpu);
@@ -3452,11 +3464,9 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
3452 return vcpu->arch.tsc_offset; 3464 return vcpu->arch.tsc_offset;
3453} 3465}
3454 3466
3455/* 3467static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3456 * writes 'offset' into guest's timestamp counter offset register
3457 */
3458static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3459{ 3468{
3469 u64 active_offset = offset;
3460 if (is_guest_mode(vcpu)) { 3470 if (is_guest_mode(vcpu)) {
3461 /* 3471 /*
3462 * We're here if L1 chose not to trap WRMSR to TSC. According 3472 * We're here if L1 chose not to trap WRMSR to TSC. According
@@ -3464,17 +3474,16 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3464 * set for L2 remains unchanged, and still needs to be added 3474 * set for L2 remains unchanged, and still needs to be added
3465 * to the newly set TSC to get L2's TSC. 3475 * to the newly set TSC to get L2's TSC.
3466 */ 3476 */
3467 struct vmcs12 *vmcs12; 3477 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3468 /* recalculate vmcs02.TSC_OFFSET: */ 3478 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
3469 vmcs12 = get_vmcs12(vcpu); 3479 active_offset += vmcs12->tsc_offset;
3470 vmcs_write64(TSC_OFFSET, offset +
3471 (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
3472 vmcs12->tsc_offset : 0));
3473 } else { 3480 } else {
3474 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 3481 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
3475 vmcs_read64(TSC_OFFSET), offset); 3482 vmcs_read64(TSC_OFFSET), offset);
3476 vmcs_write64(TSC_OFFSET, offset);
3477 } 3483 }
3484
3485 vmcs_write64(TSC_OFFSET, active_offset);
3486 return active_offset;
3478} 3487}
3479 3488
3480/* 3489/*
@@ -5944,7 +5953,7 @@ static void free_vpid(int vpid)
5944 spin_unlock(&vmx_vpid_lock); 5953 spin_unlock(&vmx_vpid_lock);
5945} 5954}
5946 5955
5947static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, 5956static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
5948 u32 msr, int type) 5957 u32 msr, int type)
5949{ 5958{
5950 int f = sizeof(unsigned long); 5959 int f = sizeof(unsigned long);
@@ -5982,7 +5991,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
5982 } 5991 }
5983} 5992}
5984 5993
5985static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, 5994static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
5986 u32 msr, int type) 5995 u32 msr, int type)
5987{ 5996{
5988 int f = sizeof(unsigned long); 5997 int f = sizeof(unsigned long);
@@ -6020,7 +6029,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
6020 } 6029 }
6021} 6030}
6022 6031
6023static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap, 6032static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
6024 u32 msr, int type, bool value) 6033 u32 msr, int type, bool value)
6025{ 6034{
6026 if (value) 6035 if (value)
@@ -8664,8 +8673,6 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
8664 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 8673 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
8665 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 8674 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
8666 8675
8667 vmcs12->hdr.revision_id = evmcs->revision_id;
8668
8669 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ 8676 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
8670 vmcs12->tpr_threshold = evmcs->tpr_threshold; 8677 vmcs12->tpr_threshold = evmcs->tpr_threshold;
8671 vmcs12->guest_rip = evmcs->guest_rip; 8678 vmcs12->guest_rip = evmcs->guest_rip;
@@ -9369,7 +9376,30 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
9369 9376
9370 vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page); 9377 vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
9371 9378
9372 if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) { 9379 /*
9380 * Currently, KVM only supports eVMCS version 1
9381 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this
9382 * value to first u32 field of eVMCS which should specify eVMCS
9383 * VersionNumber.
9384 *
9385 * Guest should be aware of supported eVMCS versions by host by
9386 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
9387 * expected to set this CPUID leaf according to the value
9388 * returned in vmcs_version from nested_enable_evmcs().
9389 *
9390 * However, it turns out that Microsoft Hyper-V fails to comply
9391 * to their own invented interface: When Hyper-V use eVMCS, it
9392 * just sets first u32 field of eVMCS to revision_id specified
9393 * in MSR_IA32_VMX_BASIC. Instead of used eVMCS version number
9394 * which is one of the supported versions specified in
9395 * CPUID.0x4000000A.EAX[0:15].
9396 *
9397 * To overcome Hyper-V bug, we accept here either a supported
9398 * eVMCS version or VMCS12 revision_id as valid values for first
9399 * u32 field of eVMCS.
9400 */
9401 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
9402 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
9373 nested_release_evmcs(vcpu); 9403 nested_release_evmcs(vcpu);
9374 return 0; 9404 return 0;
9375 } 9405 }
@@ -9390,9 +9420,11 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
9390 * present in struct hv_enlightened_vmcs, ...). Make sure there 9420 * present in struct hv_enlightened_vmcs, ...). Make sure there
9391 * are no leftovers. 9421 * are no leftovers.
9392 */ 9422 */
9393 if (from_launch) 9423 if (from_launch) {
9394 memset(vmx->nested.cached_vmcs12, 0, 9424 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9395 sizeof(*vmx->nested.cached_vmcs12)); 9425 memset(vmcs12, 0, sizeof(*vmcs12));
9426 vmcs12->hdr.revision_id = VMCS12_REVISION;
9427 }
9396 9428
9397 } 9429 }
9398 return 1; 9430 return 1;
@@ -15062,7 +15094,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
15062 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 15094 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
15063 15095
15064 .read_l1_tsc_offset = vmx_read_l1_tsc_offset, 15096 .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
15065 .write_tsc_offset = vmx_write_tsc_offset, 15097 .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
15066 15098
15067 .set_tdp_cr3 = vmx_set_cr3, 15099 .set_tdp_cr3 = vmx_set_cr3,
15068 15100
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5cd5647120f2..d02937760c3b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1665,8 +1665,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1665 1665
1666static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1666static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1667{ 1667{
1668 kvm_x86_ops->write_tsc_offset(vcpu, offset); 1668 vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
1669 vcpu->arch.tsc_offset = offset;
1670} 1669}
1671 1670
1672static inline bool kvm_check_tsc_unstable(void) 1671static inline bool kvm_check_tsc_unstable(void)
@@ -1794,7 +1793,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
1794static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 1793static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
1795 s64 adjustment) 1794 s64 adjustment)
1796{ 1795{
1797 kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment); 1796 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
1797 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
1798} 1798}
1799 1799
1800static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 1800static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -6918,6 +6918,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
6918 clock_pairing.nsec = ts.tv_nsec; 6918 clock_pairing.nsec = ts.tv_nsec;
6919 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); 6919 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
6920 clock_pairing.flags = 0; 6920 clock_pairing.flags = 0;
6921 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
6921 6922
6922 ret = 0; 6923 ret = 0;
6923 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, 6924 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
@@ -7455,7 +7456,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
7455 else { 7456 else {
7456 if (vcpu->arch.apicv_active) 7457 if (vcpu->arch.apicv_active)
7457 kvm_x86_ops->sync_pir_to_irr(vcpu); 7458 kvm_x86_ops->sync_pir_to_irr(vcpu);
7458 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 7459 if (ioapic_in_kernel(vcpu->kvm))
7460 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
7459 } 7461 }
7460 7462
7461 if (is_guest_mode(vcpu)) 7463 if (is_guest_mode(vcpu))
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index bddd6b3cee1d..03b6b4c2238d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -7,7 +7,6 @@
7#include <linux/export.h> 7#include <linux/export.h>
8#include <linux/cpu.h> 8#include <linux/cpu.h>
9#include <linux/debugfs.h> 9#include <linux/debugfs.h>
10#include <linux/ptrace.h>
11 10
12#include <asm/tlbflush.h> 11#include <asm/tlbflush.h>
13#include <asm/mmu_context.h> 12#include <asm/mmu_context.h>
@@ -31,6 +30,12 @@
31 */ 30 */
32 31
33/* 32/*
33 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
34 * stored in cpu_tlb_state.last_user_mm_ibpb.
35 */
36#define LAST_USER_MM_IBPB 0x1UL
37
38/*
34 * We get here when we do something requiring a TLB invalidation 39 * We get here when we do something requiring a TLB invalidation
35 * but could not go invalidate all of the contexts. We do the 40 * but could not go invalidate all of the contexts. We do the
36 * necessary invalidation by clearing out the 'ctx_id' which 41 * necessary invalidation by clearing out the 'ctx_id' which
@@ -181,17 +186,87 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
181 } 186 }
182} 187}
183 188
184static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id) 189static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
190{
191 unsigned long next_tif = task_thread_info(next)->flags;
192 unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
193
194 return (unsigned long)next->mm | ibpb;
195}
196
197static void cond_ibpb(struct task_struct *next)
185{ 198{
199 if (!next || !next->mm)
200 return;
201
186 /* 202 /*
187 * Check if the current (previous) task has access to the memory 203 * Both, the conditional and the always IBPB mode use the mm
188 * of the @tsk (next) task. If access is denied, make sure to 204 * pointer to avoid the IBPB when switching between tasks of the
189 * issue a IBPB to stop user->user Spectre-v2 attacks. 205 * same process. Using the mm pointer instead of mm->context.ctx_id
190 * 206 * opens a hypothetical hole vs. mm_struct reuse, which is more or
191 * Note: __ptrace_may_access() returns 0 or -ERRNO. 207 * less impossible to control by an attacker. Aside of that it
208 * would only affect the first schedule so the theoretically
209 * exposed data is not really interesting.
192 */ 210 */
193 return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id && 211 if (static_branch_likely(&switch_mm_cond_ibpb)) {
194 ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB)); 212 unsigned long prev_mm, next_mm;
213
214 /*
215 * This is a bit more complex than the always mode because
216 * it has to handle two cases:
217 *
218 * 1) Switch from a user space task (potential attacker)
219 * which has TIF_SPEC_IB set to a user space task
220 * (potential victim) which has TIF_SPEC_IB not set.
221 *
222 * 2) Switch from a user space task (potential attacker)
223 * which has TIF_SPEC_IB not set to a user space task
224 * (potential victim) which has TIF_SPEC_IB set.
225 *
226 * This could be done by unconditionally issuing IBPB when
227 * a task which has TIF_SPEC_IB set is either scheduled in
228 * or out. Though that results in two flushes when:
229 *
230 * - the same user space task is scheduled out and later
231 * scheduled in again and only a kernel thread ran in
232 * between.
233 *
234 * - a user space task belonging to the same process is
235 * scheduled in after a kernel thread ran in between
236 *
237 * - a user space task belonging to the same process is
238 * scheduled in immediately.
239 *
240 * Optimize this with reasonably small overhead for the
241 * above cases. Mangle the TIF_SPEC_IB bit into the mm
242 * pointer of the incoming task which is stored in
243 * cpu_tlbstate.last_user_mm_ibpb for comparison.
244 */
245 next_mm = mm_mangle_tif_spec_ib(next);
246 prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
247
248 /*
249 * Issue IBPB only if the mm's are different and one or
250 * both have the IBPB bit set.
251 */
252 if (next_mm != prev_mm &&
253 (next_mm | prev_mm) & LAST_USER_MM_IBPB)
254 indirect_branch_prediction_barrier();
255
256 this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
257 }
258
259 if (static_branch_unlikely(&switch_mm_always_ibpb)) {
260 /*
261 * Only flush when switching to a user space task with a
262 * different context than the user space task which ran
263 * last on this CPU.
264 */
265 if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
266 indirect_branch_prediction_barrier();
267 this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
268 }
269 }
195} 270}
196 271
197void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, 272void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
@@ -292,22 +367,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
292 new_asid = prev_asid; 367 new_asid = prev_asid;
293 need_flush = true; 368 need_flush = true;
294 } else { 369 } else {
295 u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
296
297 /* 370 /*
298 * Avoid user/user BTB poisoning by flushing the branch 371 * Avoid user/user BTB poisoning by flushing the branch
299 * predictor when switching between processes. This stops 372 * predictor when switching between processes. This stops
300 * one process from doing Spectre-v2 attacks on another. 373 * one process from doing Spectre-v2 attacks on another.
301 *
302 * As an optimization, flush indirect branches only when
303 * switching into a processes that can't be ptrace by the
304 * current one (as in such case, attacker has much more
305 * convenient way how to tamper with the next process than
306 * branch buffer poisoning).
307 */ 374 */
308 if (static_cpu_has(X86_FEATURE_USE_IBPB) && 375 cond_ibpb(tsk);
309 ibpb_needed(tsk, last_ctx_id))
310 indirect_branch_prediction_barrier();
311 376
312 if (IS_ENABLED(CONFIG_VMAP_STACK)) { 377 if (IS_ENABLED(CONFIG_VMAP_STACK)) {
313 /* 378 /*
@@ -365,14 +430,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
365 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); 430 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
366 } 431 }
367 432
368 /*
369 * Record last user mm's context id, so we can avoid
370 * flushing branch buffer with IBPB if we switch back
371 * to the same user.
372 */
373 if (next != &init_mm)
374 this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
375
376 /* Make sure we write CR3 before loaded_mm. */ 433 /* Make sure we write CR3 before loaded_mm. */
377 barrier(); 434 barrier();
378 435
@@ -441,7 +498,7 @@ void initialize_tlbstate_and_flush(void)
441 write_cr3(build_cr3(mm->pgd, 0)); 498 write_cr3(build_cr3(mm->pgd, 0));
442 499
443 /* Reinitialize tlbstate. */ 500 /* Reinitialize tlbstate. */
444 this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id); 501 this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
445 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); 502 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
446 this_cpu_write(cpu_tlbstate.next_asid, 1); 503 this_cpu_write(cpu_tlbstate.next_asid, 1);
447 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); 504 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e996e8e744cb..750f46ad018a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -10,7 +10,6 @@
10#include <xen/xen.h> 10#include <xen/xen.h>
11#include <xen/features.h> 11#include <xen/features.h>
12#include <xen/page.h> 12#include <xen/page.h>
13#include <xen/interface/memory.h>
14 13
15#include <asm/xen/hypercall.h> 14#include <asm/xen/hypercall.h>
16#include <asm/xen/hypervisor.h> 15#include <asm/xen/hypervisor.h>
@@ -346,80 +345,3 @@ void xen_arch_unregister_cpu(int num)
346} 345}
347EXPORT_SYMBOL(xen_arch_unregister_cpu); 346EXPORT_SYMBOL(xen_arch_unregister_cpu);
348#endif 347#endif
349
350#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
351void __init arch_xen_balloon_init(struct resource *hostmem_resource)
352{
353 struct xen_memory_map memmap;
354 int rc;
355 unsigned int i, last_guest_ram;
356 phys_addr_t max_addr = PFN_PHYS(max_pfn);
357 struct e820_table *xen_e820_table;
358 const struct e820_entry *entry;
359 struct resource *res;
360
361 if (!xen_initial_domain())
362 return;
363
364 xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
365 if (!xen_e820_table)
366 return;
367
368 memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
369 set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
370 rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
371 if (rc) {
372 pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
373 goto out;
374 }
375
376 last_guest_ram = 0;
377 for (i = 0; i < memmap.nr_entries; i++) {
378 if (xen_e820_table->entries[i].addr >= max_addr)
379 break;
380 if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
381 last_guest_ram = i;
382 }
383
384 entry = &xen_e820_table->entries[last_guest_ram];
385 if (max_addr >= entry->addr + entry->size)
386 goto out; /* No unallocated host RAM. */
387
388 hostmem_resource->start = max_addr;
389 hostmem_resource->end = entry->addr + entry->size;
390
391 /*
392 * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
393 * as unavailable. The rest of that region can be used for hotplug-based
394 * ballooning.
395 */
396 for (; i < memmap.nr_entries; i++) {
397 entry = &xen_e820_table->entries[i];
398
399 if (entry->type == E820_TYPE_RAM)
400 continue;
401
402 if (entry->addr >= hostmem_resource->end)
403 break;
404
405 res = kzalloc(sizeof(*res), GFP_KERNEL);
406 if (!res)
407 goto out;
408
409 res->name = "Unavailable host RAM";
410 res->start = entry->addr;
411 res->end = (entry->addr + entry->size < hostmem_resource->end) ?
412 entry->addr + entry->size : hostmem_resource->end;
413 rc = insert_resource(hostmem_resource, res);
414 if (rc) {
415 pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
416 __func__, res->start, res->end, rc);
417 kfree(res);
418 goto out;
419 }
420 }
421
422 out:
423 kfree(xen_e820_table);
424}
425#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 2bce7958ce8b..0766a08bdf45 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -69,6 +69,11 @@ void xen_mc_flush(void)
69 69
70 trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx); 70 trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
71 71
72#if MC_DEBUG
73 memcpy(b->debug, b->entries,
74 b->mcidx * sizeof(struct multicall_entry));
75#endif
76
72 switch (b->mcidx) { 77 switch (b->mcidx) {
73 case 0: 78 case 0:
74 /* no-op */ 79 /* no-op */
@@ -87,32 +92,34 @@ void xen_mc_flush(void)
87 break; 92 break;
88 93
89 default: 94 default:
90#if MC_DEBUG
91 memcpy(b->debug, b->entries,
92 b->mcidx * sizeof(struct multicall_entry));
93#endif
94
95 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0) 95 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
96 BUG(); 96 BUG();
97 for (i = 0; i < b->mcidx; i++) 97 for (i = 0; i < b->mcidx; i++)
98 if (b->entries[i].result < 0) 98 if (b->entries[i].result < 0)
99 ret++; 99 ret++;
100 }
100 101
102 if (WARN_ON(ret)) {
103 pr_err("%d of %d multicall(s) failed: cpu %d\n",
104 ret, b->mcidx, smp_processor_id());
105 for (i = 0; i < b->mcidx; i++) {
106 if (b->entries[i].result < 0) {
101#if MC_DEBUG 107#if MC_DEBUG
102 if (ret) { 108 pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pF\n",
103 printk(KERN_ERR "%d multicall(s) failed: cpu %d\n", 109 i + 1,
104 ret, smp_processor_id());
105 dump_stack();
106 for (i = 0; i < b->mcidx; i++) {
107 printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
108 i+1, b->mcidx,
109 b->debug[i].op, 110 b->debug[i].op,
110 b->debug[i].args[0], 111 b->debug[i].args[0],
111 b->entries[i].result, 112 b->entries[i].result,
112 b->caller[i]); 113 b->caller[i]);
114#else
115 pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n",
116 i + 1,
117 b->entries[i].op,
118 b->entries[i].args[0],
119 b->entries[i].result);
120#endif
113 } 121 }
114 } 122 }
115#endif
116 } 123 }
117 124
118 b->mcidx = 0; 125 b->mcidx = 0;
@@ -126,8 +133,6 @@ void xen_mc_flush(void)
126 b->cbidx = 0; 133 b->cbidx = 0;
127 134
128 local_irq_restore(flags); 135 local_irq_restore(flags);
129
130 WARN_ON(ret);
131} 136}
132 137
133struct multicall_space __xen_mc_entry(size_t args) 138struct multicall_space __xen_mc_entry(size_t args)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1163e33121fb..075ed47993bb 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
808 addr = xen_e820_table.entries[0].addr; 808 addr = xen_e820_table.entries[0].addr;
809 size = xen_e820_table.entries[0].size; 809 size = xen_e820_table.entries[0].size;
810 while (i < xen_e820_table.nr_entries) { 810 while (i < xen_e820_table.nr_entries) {
811 bool discard = false;
811 812
812 chunk_size = size; 813 chunk_size = size;
813 type = xen_e820_table.entries[i].type; 814 type = xen_e820_table.entries[i].type;
@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
823 xen_add_extra_mem(pfn_s, n_pfns); 824 xen_add_extra_mem(pfn_s, n_pfns);
824 xen_max_p2m_pfn = pfn_s + n_pfns; 825 xen_max_p2m_pfn = pfn_s + n_pfns;
825 } else 826 } else
826 type = E820_TYPE_UNUSABLE; 827 discard = true;
827 } 828 }
828 829
829 xen_align_and_add_e820_region(addr, chunk_size, type); 830 if (!discard)
831 xen_align_and_add_e820_region(addr, chunk_size, type);
830 832
831 addr += chunk_size; 833 addr += chunk_size;
832 size -= chunk_size; 834 size -= chunk_size;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 1c8a8816a402..3776122c87cc 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -3,22 +3,17 @@
3 * Split spinlock implementation out into its own file, so it can be 3 * Split spinlock implementation out into its own file, so it can be
4 * compiled in a FTRACE-compatible way. 4 * compiled in a FTRACE-compatible way.
5 */ 5 */
6#include <linux/kernel_stat.h> 6#include <linux/kernel.h>
7#include <linux/spinlock.h> 7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/log2.h>
10#include <linux/gfp.h>
11#include <linux/slab.h> 8#include <linux/slab.h>
12#include <linux/atomic.h> 9#include <linux/atomic.h>
13 10
14#include <asm/paravirt.h> 11#include <asm/paravirt.h>
15#include <asm/qspinlock.h> 12#include <asm/qspinlock.h>
16 13
17#include <xen/interface/xen.h>
18#include <xen/events.h> 14#include <xen/events.h>
19 15
20#include "xen-ops.h" 16#include "xen-ops.h"
21#include "debugfs.h"
22 17
23static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; 18static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
24static DEFINE_PER_CPU(char *, irq_name); 19static DEFINE_PER_CPU(char *, irq_name);
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 67904f55f188..120dd746a147 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -94,14 +94,14 @@ int main(void)
94 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); 94 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
95 DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); 95 DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
96#if XTENSA_HAVE_COPROCESSORS 96#if XTENSA_HAVE_COPROCESSORS
97 DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp)); 97 DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
98 DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp)); 98 DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
99 DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp)); 99 DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
100 DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp)); 100 DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
101 DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp)); 101 DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
102 DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp)); 102 DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
103 DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp)); 103 DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
104 DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp)); 104 DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
105#endif 105#endif
106 DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); 106 DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
107 DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); 107 DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 483dcfb6e681..4bb68133a72a 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -94,18 +94,21 @@ void coprocessor_release_all(struct thread_info *ti)
94 94
95void coprocessor_flush_all(struct thread_info *ti) 95void coprocessor_flush_all(struct thread_info *ti)
96{ 96{
97 unsigned long cpenable; 97 unsigned long cpenable, old_cpenable;
98 int i; 98 int i;
99 99
100 preempt_disable(); 100 preempt_disable();
101 101
102 RSR_CPENABLE(old_cpenable);
102 cpenable = ti->cpenable; 103 cpenable = ti->cpenable;
104 WSR_CPENABLE(cpenable);
103 105
104 for (i = 0; i < XCHAL_CP_MAX; i++) { 106 for (i = 0; i < XCHAL_CP_MAX; i++) {
105 if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) 107 if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
106 coprocessor_flush(ti, i); 108 coprocessor_flush(ti, i);
107 cpenable >>= 1; 109 cpenable >>= 1;
108 } 110 }
111 WSR_CPENABLE(old_cpenable);
109 112
110 preempt_enable(); 113 preempt_enable();
111} 114}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index c0845cb1cbb9..d9541be0605a 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_struct *child, void __user *uregs)
127} 127}
128 128
129 129
130#if XTENSA_HAVE_COPROCESSORS
131#define CP_OFFSETS(cp) \
132 { \
133 .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \
134 .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
135 .sz = sizeof(xtregs_ ## cp ## _t), \
136 }
137
138static const struct {
139 size_t elf_xtregs_offset;
140 size_t ti_offset;
141 size_t sz;
142} cp_offsets[] = {
143 CP_OFFSETS(cp0),
144 CP_OFFSETS(cp1),
145 CP_OFFSETS(cp2),
146 CP_OFFSETS(cp3),
147 CP_OFFSETS(cp4),
148 CP_OFFSETS(cp5),
149 CP_OFFSETS(cp6),
150 CP_OFFSETS(cp7),
151};
152#endif
153
130static int ptrace_getxregs(struct task_struct *child, void __user *uregs) 154static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
131{ 155{
132 struct pt_regs *regs = task_pt_regs(child); 156 struct pt_regs *regs = task_pt_regs(child);
133 struct thread_info *ti = task_thread_info(child); 157 struct thread_info *ti = task_thread_info(child);
134 elf_xtregs_t __user *xtregs = uregs; 158 elf_xtregs_t __user *xtregs = uregs;
135 int ret = 0; 159 int ret = 0;
160 int i __maybe_unused;
136 161
137 if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t))) 162 if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
138 return -EIO; 163 return -EIO;
@@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
140#if XTENSA_HAVE_COPROCESSORS 165#if XTENSA_HAVE_COPROCESSORS
141 /* Flush all coprocessor registers to memory. */ 166 /* Flush all coprocessor registers to memory. */
142 coprocessor_flush_all(ti); 167 coprocessor_flush_all(ti);
143 ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp, 168
144 sizeof(xtregs_coprocessor_t)); 169 for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
170 ret |= __copy_to_user((char __user *)xtregs +
171 cp_offsets[i].elf_xtregs_offset,
172 (const char *)ti +
173 cp_offsets[i].ti_offset,
174 cp_offsets[i].sz);
145#endif 175#endif
146 ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt, 176 ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
147 sizeof(xtregs->opt)); 177 sizeof(xtregs->opt));
@@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
157 struct pt_regs *regs = task_pt_regs(child); 187 struct pt_regs *regs = task_pt_regs(child);
158 elf_xtregs_t *xtregs = uregs; 188 elf_xtregs_t *xtregs = uregs;
159 int ret = 0; 189 int ret = 0;
190 int i __maybe_unused;
160 191
161 if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t))) 192 if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
162 return -EFAULT; 193 return -EFAULT;
@@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
166 coprocessor_flush_all(ti); 197 coprocessor_flush_all(ti);
167 coprocessor_release_all(ti); 198 coprocessor_release_all(ti);
168 199
169 ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, 200 for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
170 sizeof(xtregs_coprocessor_t)); 201 ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
202 (const char __user *)xtregs +
203 cp_offsets[i].elf_xtregs_offset,
204 cp_offsets[i].sz);
171#endif 205#endif
172 ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt, 206 ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
173 sizeof(xtregs->opt)); 207 sizeof(xtregs->opt));
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e7696c47489a..7695034f4b87 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -820,7 +820,7 @@ static struct request *attempt_merge(struct request_queue *q,
820 820
821 req->__data_len += blk_rq_bytes(next); 821 req->__data_len += blk_rq_bytes(next);
822 822
823 if (req_op(req) != REQ_OP_DISCARD) 823 if (!blk_discard_mergable(req))
824 elv_merge_requests(q, req, next); 824 elv_merge_requests(q, req, next);
825 825
826 /* 826 /*
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index eaa60c94205a..1f32caa87686 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
30 {"PNP0200", 0}, /* AT DMA Controller */ 30 {"PNP0200", 0}, /* AT DMA Controller */
31 {"ACPI0009", 0}, /* IOxAPIC */ 31 {"ACPI0009", 0}, /* IOxAPIC */
32 {"ACPI000A", 0}, /* IOAPIC */ 32 {"ACPI000A", 0}, /* IOAPIC */
33 {"SMB0001", 0}, /* ACPI SMBUS virtual device */
33 {"", 0}, 34 {"", 0},
34}; 35};
35 36
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 0d42f30e5b25..9920fac6413f 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -244,7 +244,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
244{ 244{
245 acpi_status status; 245 acpi_status status;
246 u32 buffer_length; 246 u32 buffer_length;
247 u32 data_length;
248 void *buffer; 247 void *buffer;
249 union acpi_operand_object *buffer_desc; 248 union acpi_operand_object *buffer_desc;
250 u32 function; 249 u32 function;
@@ -282,14 +281,12 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
282 case ACPI_ADR_SPACE_SMBUS: 281 case ACPI_ADR_SPACE_SMBUS:
283 282
284 buffer_length = ACPI_SMBUS_BUFFER_SIZE; 283 buffer_length = ACPI_SMBUS_BUFFER_SIZE;
285 data_length = ACPI_SMBUS_DATA_SIZE;
286 function = ACPI_WRITE | (obj_desc->field.attribute << 16); 284 function = ACPI_WRITE | (obj_desc->field.attribute << 16);
287 break; 285 break;
288 286
289 case ACPI_ADR_SPACE_IPMI: 287 case ACPI_ADR_SPACE_IPMI:
290 288
291 buffer_length = ACPI_IPMI_BUFFER_SIZE; 289 buffer_length = ACPI_IPMI_BUFFER_SIZE;
292 data_length = ACPI_IPMI_DATA_SIZE;
293 function = ACPI_WRITE; 290 function = ACPI_WRITE;
294 break; 291 break;
295 292
@@ -310,7 +307,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
310 /* Add header length to get the full size of the buffer */ 307 /* Add header length to get the full size of the buffer */
311 308
312 buffer_length += ACPI_SERIAL_HEADER_SIZE; 309 buffer_length += ACPI_SERIAL_HEADER_SIZE;
313 data_length = source_desc->buffer.pointer[1];
314 function = ACPI_WRITE | (accessor_type << 16); 310 function = ACPI_WRITE | (accessor_type << 16);
315 break; 311 break;
316 312
@@ -318,20 +314,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
318 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); 314 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
319 } 315 }
320 316
321#if 0
322 OBSOLETE ?
323 /* Check for possible buffer overflow */
324 if (data_length > source_desc->buffer.length) {
325 ACPI_ERROR((AE_INFO,
326 "Length in buffer header (%u)(%u) is greater than "
327 "the physical buffer length (%u) and will overflow",
328 data_length, buffer_length,
329 source_desc->buffer.length));
330
331 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
332 }
333#endif
334
335 /* Create the transfer/bidirectional/return buffer */ 317 /* Create the transfer/bidirectional/return buffer */
336 318
337 buffer_desc = acpi_ut_create_buffer_object(buffer_length); 319 buffer_desc = acpi_ut_create_buffer_object(buffer_length);
@@ -342,7 +324,8 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
342 /* Copy the input buffer data to the transfer buffer */ 324 /* Copy the input buffer data to the transfer buffer */
343 325
344 buffer = buffer_desc->buffer.pointer; 326 buffer = buffer_desc->buffer.pointer;
345 memcpy(buffer, source_desc->buffer.pointer, data_length); 327 memcpy(buffer, source_desc->buffer.pointer,
328 min(buffer_length, source_desc->buffer.length));
346 329
347 /* Lock entire transaction if requested */ 330 /* Lock entire transaction if requested */
348 331
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 2a361e22d38d..70f4e80b9246 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev,
700 */ 700 */
701static struct irq_domain *iort_get_platform_device_domain(struct device *dev) 701static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
702{ 702{
703 struct acpi_iort_node *node, *msi_parent; 703 struct acpi_iort_node *node, *msi_parent = NULL;
704 struct fwnode_handle *iort_fwnode; 704 struct fwnode_handle *iort_fwnode;
705 struct acpi_iort_its_group *its; 705 struct acpi_iort_its_group *its;
706 int i; 706 int i;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cb30a524d16d..9f1000d2a40c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2974,7 +2974,6 @@ static void binder_transaction(struct binder_proc *proc,
2974 t->buffer = NULL; 2974 t->buffer = NULL;
2975 goto err_binder_alloc_buf_failed; 2975 goto err_binder_alloc_buf_failed;
2976 } 2976 }
2977 t->buffer->allow_user_free = 0;
2978 t->buffer->debug_id = t->debug_id; 2977 t->buffer->debug_id = t->debug_id;
2979 t->buffer->transaction = t; 2978 t->buffer->transaction = t;
2980 t->buffer->target_node = target_node; 2979 t->buffer->target_node = target_node;
@@ -3510,14 +3509,18 @@ static int binder_thread_write(struct binder_proc *proc,
3510 3509
3511 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3510 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3512 data_ptr); 3511 data_ptr);
3513 if (buffer == NULL) { 3512 if (IS_ERR_OR_NULL(buffer)) {
3514 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 3513 if (PTR_ERR(buffer) == -EPERM) {
3515 proc->pid, thread->pid, (u64)data_ptr); 3514 binder_user_error(
3516 break; 3515 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3517 } 3516 proc->pid, thread->pid,
3518 if (!buffer->allow_user_free) { 3517 (u64)data_ptr);
3519 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 3518 } else {
3520 proc->pid, thread->pid, (u64)data_ptr); 3519 binder_user_error(
3520 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3521 proc->pid, thread->pid,
3522 (u64)data_ptr);
3523 }
3521 break; 3524 break;
3522 } 3525 }
3523 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3526 binder_debug(BINDER_DEBUG_FREE_BUFFER,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 64fd96eada31..030c98f35cca 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -151,16 +151,12 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
151 else { 151 else {
152 /* 152 /*
153 * Guard against user threads attempting to 153 * Guard against user threads attempting to
154 * free the buffer twice 154 * free the buffer when in use by kernel or
155 * after it's already been freed.
155 */ 156 */
156 if (buffer->free_in_progress) { 157 if (!buffer->allow_user_free)
157 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 158 return ERR_PTR(-EPERM);
158 "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", 159 buffer->allow_user_free = 0;
159 alloc->pid, current->pid,
160 (u64)user_ptr);
161 return NULL;
162 }
163 buffer->free_in_progress = 1;
164 return buffer; 160 return buffer;
165 } 161 }
166 } 162 }
@@ -500,7 +496,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
500 496
501 rb_erase(best_fit, &alloc->free_buffers); 497 rb_erase(best_fit, &alloc->free_buffers);
502 buffer->free = 0; 498 buffer->free = 0;
503 buffer->free_in_progress = 0; 499 buffer->allow_user_free = 0;
504 binder_insert_allocated_buffer_locked(alloc, buffer); 500 binder_insert_allocated_buffer_locked(alloc, buffer);
505 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 501 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
506 "%d: binder_alloc_buf size %zd got %pK\n", 502 "%d: binder_alloc_buf size %zd got %pK\n",
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 9ef64e563856..fb3238c74c8a 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -50,8 +50,7 @@ struct binder_buffer {
50 unsigned free:1; 50 unsigned free:1;
51 unsigned allow_user_free:1; 51 unsigned allow_user_free:1;
52 unsigned async_transaction:1; 52 unsigned async_transaction:1;
53 unsigned free_in_progress:1; 53 unsigned debug_id:29;
54 unsigned debug_id:28;
55 54
56 struct binder_transaction *transaction; 55 struct binder_transaction *transaction;
57 56
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 4e46dc9e41ad..11e1663bdc4d 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1410,7 +1410,7 @@ static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
1410 1410
1411 func_enter (); 1411 func_enter ();
1412 1412
1413 fs_dprintk (FS_DEBUG_INIT, "Inititing queue at %x: %d entries:\n", 1413 fs_dprintk (FS_DEBUG_INIT, "Initializing queue at %x: %d entries:\n",
1414 queue, nentries); 1414 queue, nentries);
1415 1415
1416 p = aligned_kmalloc (sz, GFP_KERNEL, 0x10); 1416 p = aligned_kmalloc (sz, GFP_KERNEL, 0x10);
@@ -1443,7 +1443,7 @@ static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
1443{ 1443{
1444 func_enter (); 1444 func_enter ();
1445 1445
1446 fs_dprintk (FS_DEBUG_INIT, "Inititing free pool at %x:\n", queue); 1446 fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue);
1447 1447
1448 write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME); 1448 write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME);
1449 write_fs (dev, FP_SA(queue), 0); 1449 write_fs (dev, FP_SA(queue), 0);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 4aaf00d2098b..e038e2b3b7ea 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -26,8 +26,14 @@ struct devres_node {
26 26
27struct devres { 27struct devres {
28 struct devres_node node; 28 struct devres_node node;
29 /* -- 3 pointers */ 29 /*
30 unsigned long long data[]; /* guarantee ull alignment */ 30 * Some archs want to perform DMA into kmalloc caches
31 * and need a guaranteed alignment larger than
32 * the alignment of a 64-bit integer.
33 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
34 * buffer alignment as if it was allocated by plain kmalloc().
35 */
36 u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
31}; 37};
32 38
33struct devres_group { 39struct devres_group {
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 3f0e2a14895a..22b53bf26817 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
201 {}, 201 {},
202}; 202};
203 203
204static const struct of_device_id *ti_cpufreq_match_node(void)
205{
206 struct device_node *np;
207 const struct of_device_id *match;
208
209 np = of_find_node_by_path("/");
210 match = of_match_node(ti_cpufreq_of_match, np);
211 of_node_put(np);
212
213 return match;
214}
215
204static int ti_cpufreq_probe(struct platform_device *pdev) 216static int ti_cpufreq_probe(struct platform_device *pdev)
205{ 217{
206 u32 version[VERSION_COUNT]; 218 u32 version[VERSION_COUNT];
207 struct device_node *np;
208 const struct of_device_id *match; 219 const struct of_device_id *match;
209 struct opp_table *ti_opp_table; 220 struct opp_table *ti_opp_table;
210 struct ti_cpufreq_data *opp_data; 221 struct ti_cpufreq_data *opp_data;
211 const char * const reg_names[] = {"vdd", "vbb"}; 222 const char * const reg_names[] = {"vdd", "vbb"};
212 int ret; 223 int ret;
213 224
214 np = of_find_node_by_path("/"); 225 match = dev_get_platdata(&pdev->dev);
215 match = of_match_node(ti_cpufreq_of_match, np);
216 of_node_put(np);
217 if (!match) 226 if (!match)
218 return -ENODEV; 227 return -ENODEV;
219 228
@@ -290,7 +299,14 @@ fail_put_node:
290 299
291static int ti_cpufreq_init(void) 300static int ti_cpufreq_init(void)
292{ 301{
293 platform_device_register_simple("ti-cpufreq", -1, NULL, 0); 302 const struct of_device_id *match;
303
304 /* Check to ensure we are on a compatible platform */
305 match = ti_cpufreq_match_node();
306 if (match)
307 platform_device_register_data(NULL, "ti-cpufreq", -1, match,
308 sizeof(*match));
309
294 return 0; 310 return 0;
295} 311}
296module_init(ti_cpufreq_init); 312module_init(ti_cpufreq_init);
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 5b44ef226904..fc359ca4503d 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -184,6 +184,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
184 exp_info.ops = &udmabuf_ops; 184 exp_info.ops = &udmabuf_ops;
185 exp_info.size = ubuf->pagecount << PAGE_SHIFT; 185 exp_info.size = ubuf->pagecount << PAGE_SHIFT;
186 exp_info.priv = ubuf; 186 exp_info.priv = ubuf;
187 exp_info.flags = O_RDWR;
187 188
188 buf = dma_buf_export(&exp_info); 189 buf = dma_buf_export(&exp_info);
189 if (IS_ERR(buf)) { 190 if (IS_ERR(buf)) {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7cbac6e8c113..01d936c9fe89 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1641,6 +1641,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
1641 atchan->descs_allocated = 0; 1641 atchan->descs_allocated = 0;
1642 atchan->status = 0; 1642 atchan->status = 0;
1643 1643
1644 /*
1645 * Free atslave allocated in at_dma_xlate()
1646 */
1647 kfree(chan->private);
1648 chan->private = NULL;
1649
1644 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1650 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1645} 1651}
1646 1652
@@ -1675,7 +1681,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1675 dma_cap_zero(mask); 1681 dma_cap_zero(mask);
1676 dma_cap_set(DMA_SLAVE, mask); 1682 dma_cap_set(DMA_SLAVE, mask);
1677 1683
1678 atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); 1684 atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
1679 if (!atslave) 1685 if (!atslave)
1680 return NULL; 1686 return NULL;
1681 1687
@@ -2000,6 +2006,8 @@ static int at_dma_remove(struct platform_device *pdev)
2000 struct resource *io; 2006 struct resource *io;
2001 2007
2002 at_dma_off(atdma); 2008 at_dma_off(atdma);
2009 if (pdev->dev.of_node)
2010 of_dma_controller_free(pdev->dev.of_node);
2003 dma_async_device_unregister(&atdma->dma_common); 2011 dma_async_device_unregister(&atdma->dma_common);
2004 2012
2005 dma_pool_destroy(atdma->memset_pool); 2013 dma_pool_destroy(atdma->memset_pool);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index fad7c62cfc0e..415849bab233 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -969,13 +969,33 @@ bool efi_is_table_address(unsigned long phys_addr)
969static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); 969static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
970static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; 970static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
971 971
972int efi_mem_reserve_persistent(phys_addr_t addr, u64 size) 972static int __init efi_memreserve_map_root(void)
973{
974 if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
975 return -ENODEV;
976
977 efi_memreserve_root = memremap(efi.mem_reserve,
978 sizeof(*efi_memreserve_root),
979 MEMREMAP_WB);
980 if (WARN_ON_ONCE(!efi_memreserve_root))
981 return -ENOMEM;
982 return 0;
983}
984
985int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
973{ 986{
974 struct linux_efi_memreserve *rsv; 987 struct linux_efi_memreserve *rsv;
988 int rc;
975 989
976 if (!efi_memreserve_root) 990 if (efi_memreserve_root == (void *)ULONG_MAX)
977 return -ENODEV; 991 return -ENODEV;
978 992
993 if (!efi_memreserve_root) {
994 rc = efi_memreserve_map_root();
995 if (rc)
996 return rc;
997 }
998
979 rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC); 999 rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
980 if (!rsv) 1000 if (!rsv)
981 return -ENOMEM; 1001 return -ENOMEM;
@@ -993,14 +1013,10 @@ int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
993 1013
994static int __init efi_memreserve_root_init(void) 1014static int __init efi_memreserve_root_init(void)
995{ 1015{
996 if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR) 1016 if (efi_memreserve_root)
997 return -ENODEV; 1017 return 0;
998 1018 if (efi_memreserve_map_root())
999 efi_memreserve_root = memremap(efi.mem_reserve, 1019 efi_memreserve_root = (void *)ULONG_MAX;
1000 sizeof(*efi_memreserve_root),
1001 MEMREMAP_WB);
1002 if (!efi_memreserve_root)
1003 return -ENOMEM;
1004 return 0; 1020 return 0;
1005} 1021}
1006early_initcall(efi_memreserve_root_init); 1022early_initcall(efi_memreserve_root_init);
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index af3a20dd5aa4..99c99a5d57fe 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF
46 tristate "FSI master based on Aspeed ColdFire coprocessor" 46 tristate "FSI master based on Aspeed ColdFire coprocessor"
47 depends on GPIOLIB 47 depends on GPIOLIB
48 depends on GPIO_ASPEED 48 depends on GPIO_ASPEED
49 select GENERIC_ALLOCATOR
49 ---help--- 50 ---help---
50 This option enables a FSI master using the AST2400 and AST2500 GPIO 51 This option enables a FSI master using the AST2400 and AST2500 GPIO
51 lines driven by the internal ColdFire coprocessor. This requires 52 lines driven by the internal ColdFire coprocessor. This requires
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index df94021dd9d1..81dc01ac2351 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -20,7 +20,6 @@
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/cdev.h>
24#include <linux/list.h> 23#include <linux/list.h>
25 24
26#include <uapi/linux/fsi.h> 25#include <uapi/linux/fsi.h>
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
index b01ba4438501..31e891f00175 100644
--- a/drivers/gnss/serial.c
+++ b/drivers/gnss/serial.c
@@ -13,6 +13,7 @@
13#include <linux/of.h> 13#include <linux/of.h>
14#include <linux/pm.h> 14#include <linux/pm.h>
15#include <linux/pm_runtime.h> 15#include <linux/pm_runtime.h>
16#include <linux/sched.h>
16#include <linux/serdev.h> 17#include <linux/serdev.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18 19
@@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev,
63 int ret; 64 int ret;
64 65
65 /* write is only buffered synchronously */ 66 /* write is only buffered synchronously */
66 ret = serdev_device_write(serdev, buf, count, 0); 67 ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
67 if (ret < 0) 68 if (ret < 0)
68 return ret; 69 return ret;
69 70
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 79cb98950013..71d014edd167 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -16,6 +16,7 @@
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <linux/regulator/consumer.h> 18#include <linux/regulator/consumer.h>
19#include <linux/sched.h>
19#include <linux/serdev.h> 20#include <linux/serdev.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <linux/wait.h> 22#include <linux/wait.h>
@@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
83 int ret; 84 int ret;
84 85
85 /* write is only buffered synchronously */ 86 /* write is only buffered synchronously */
86 ret = serdev_device_write(serdev, buf, count, 0); 87 ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
87 if (ret < 0) 88 if (ret < 0)
88 return ret; 89 return ret;
89 90
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 5c1564fcc24e..bdb29e51b417 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -258,7 +258,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
258 chips->chip.set = davinci_gpio_set; 258 chips->chip.set = davinci_gpio_set;
259 259
260 chips->chip.ngpio = ngpio; 260 chips->chip.ngpio = ngpio;
261 chips->chip.base = -1; 261 chips->chip.base = pdata->no_auto_base ? pdata->base : -1;
262 262
263#ifdef CONFIG_OF_GPIO 263#ifdef CONFIG_OF_GPIO
264 chips->chip.of_gpio_n_cells = 2; 264 chips->chip.of_gpio_n_cells = 2;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 8269cffc2967..6a50f9f59c90 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -35,8 +35,8 @@
35#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__) 35#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
36 36
37enum { 37enum {
38 GPIO_MOCKUP_DIR_OUT = 0, 38 GPIO_MOCKUP_DIR_IN = 0,
39 GPIO_MOCKUP_DIR_IN = 1, 39 GPIO_MOCKUP_DIR_OUT = 1,
40}; 40};
41 41
42/* 42/*
@@ -131,7 +131,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
131{ 131{
132 struct gpio_mockup_chip *chip = gpiochip_get_data(gc); 132 struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
133 133
134 return chip->lines[offset].dir; 134 return !chip->lines[offset].dir;
135} 135}
136 136
137static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) 137static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index bfe4c5c9f41c..e9600b556f39 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
268 268
269 if (pxa_gpio_has_pinctrl()) { 269 if (pxa_gpio_has_pinctrl()) {
270 ret = pinctrl_gpio_direction_input(chip->base + offset); 270 ret = pinctrl_gpio_direction_input(chip->base + offset);
271 if (!ret) 271 if (ret)
272 return 0; 272 return ret;
273 } 273 }
274 274
275 spin_lock_irqsave(&gpio_lock, flags); 275 spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 230e41562462..a2cbb474901c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1295,7 +1295,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1295 gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); 1295 gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
1296 if (!gdev->descs) { 1296 if (!gdev->descs) {
1297 status = -ENOMEM; 1297 status = -ENOMEM;
1298 goto err_free_gdev; 1298 goto err_free_ida;
1299 } 1299 }
1300 1300
1301 if (chip->ngpio == 0) { 1301 if (chip->ngpio == 0) {
@@ -1427,8 +1427,9 @@ err_free_label:
1427 kfree_const(gdev->label); 1427 kfree_const(gdev->label);
1428err_free_descs: 1428err_free_descs:
1429 kfree(gdev->descs); 1429 kfree(gdev->descs);
1430err_free_gdev: 1430err_free_ida:
1431 ida_simple_remove(&gpio_ida, gdev->id); 1431 ida_simple_remove(&gpio_ida, gdev->id);
1432err_free_gdev:
1432 /* failures here can mean systems won't boot... */ 1433 /* failures here can mean systems won't boot... */
1433 pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, 1434 pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
1434 gdev->base, gdev->base + gdev->ngpio - 1, 1435 gdev->base, gdev->base + gdev->ngpio - 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c31a8849e9f8..1580ec60b89f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -501,8 +501,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
501{ 501{
502 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; 502 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
503 503
504 amdgpu_dpm_switch_power_profile(adev, 504 if (adev->powerplay.pp_funcs &&
505 PP_SMC_POWER_PROFILE_COMPUTE, !idle); 505 adev->powerplay.pp_funcs->switch_power_profile)
506 amdgpu_dpm_switch_power_profile(adev,
507 PP_SMC_POWER_PROFILE_COMPUTE,
508 !idle);
506} 509}
507 510
508bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) 511bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6748cd7fc129..686a26de50f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
626 "dither", 626 "dither",
627 amdgpu_dither_enum_list, sz); 627 amdgpu_dither_enum_list, sz);
628 628
629 if (amdgpu_device_has_dc_support(adev)) {
630 adev->mode_info.max_bpc_property =
631 drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
632 if (!adev->mode_info.max_bpc_property)
633 return -ENOMEM;
634 }
635
629 return 0; 636 return 0;
630} 637}
631 638
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b9e9e8b02fb7..d1b4d9b6aae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -339,6 +339,8 @@ struct amdgpu_mode_info {
339 struct drm_property *audio_property; 339 struct drm_property *audio_property;
340 /* FMT dithering */ 340 /* FMT dithering */
341 struct drm_property *dither_property; 341 struct drm_property *dither_property;
342 /* maximum number of bits per channel for monitor color */
343 struct drm_property *max_bpc_property;
342 /* hardcoded DFP edid from BIOS */ 344 /* hardcoded DFP edid from BIOS */
343 struct edid *bios_hardcoded_edid; 345 struct edid *bios_hardcoded_edid;
344 int bios_hardcoded_edid_size; 346 int bios_hardcoded_edid_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index dad0e2342df9..0877ff9a9594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -181,7 +181,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
181 181
182 if (level == adev->vm_manager.root_level) 182 if (level == adev->vm_manager.root_level)
183 /* For the root directory */ 183 /* For the root directory */
184 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift; 184 return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
185 else if (level != AMDGPU_VM_PTB) 185 else if (level != AMDGPU_VM_PTB)
186 /* Everything in between */ 186 /* Everything in between */
187 return 512; 187 return 512;
@@ -1656,9 +1656,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1656 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1656 if (!amdgpu_vm_pt_descendant(adev, &cursor))
1657 return -ENOENT; 1657 return -ENOENT;
1658 continue; 1658 continue;
1659 } else if (frag >= parent_shift) { 1659 } else if (frag >= parent_shift &&
1660 cursor.level - 1 != adev->vm_manager.root_level) {
1660 /* If the fragment size is even larger than the parent 1661 /* If the fragment size is even larger than the parent
1661 * shift we should go up one level and check it again. 1662 * shift we should go up one level and check it again
1663 * unless one level up is the root level.
1662 */ 1664 */
1663 if (!amdgpu_vm_pt_ancestor(&cursor)) 1665 if (!amdgpu_vm_pt_ancestor(&cursor))
1664 return -ENOENT; 1666 return -ENOENT;
@@ -1666,10 +1668,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1666 } 1668 }
1667 1669
1668 /* Looks good so far, calculate parameters for the update */ 1670 /* Looks good so far, calculate parameters for the update */
1669 incr = AMDGPU_GPU_PAGE_SIZE << shift; 1671 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1670 mask = amdgpu_vm_entries_mask(adev, cursor.level); 1672 mask = amdgpu_vm_entries_mask(adev, cursor.level);
1671 pe_start = ((cursor.pfn >> shift) & mask) * 8; 1673 pe_start = ((cursor.pfn >> shift) & mask) * 8;
1672 entry_end = (mask + 1) << shift; 1674 entry_end = (uint64_t)(mask + 1) << shift;
1673 entry_end += cursor.pfn & ~(entry_end - 1); 1675 entry_end += cursor.pfn & ~(entry_end - 1);
1674 entry_end = min(entry_end, end); 1676 entry_end = min(entry_end, end);
1675 1677
@@ -1682,7 +1684,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1682 flags | AMDGPU_PTE_FRAG(frag)); 1684 flags | AMDGPU_PTE_FRAG(frag));
1683 1685
1684 pe_start += nptes * 8; 1686 pe_start += nptes * 8;
1685 dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift; 1687 dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
1686 1688
1687 frag_start = upd_end; 1689 frag_start = upd_end;
1688 if (frag_start >= frag_end) { 1690 if (frag_start >= frag_end) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6d7baf59d6e1..21363b2b2ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2440,12 +2440,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2440#endif 2440#endif
2441 2441
2442 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2442 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2443 udelay(50);
2443 2444
2444 /* carrizo do enable cp interrupt after cp inited */ 2445 /* carrizo do enable cp interrupt after cp inited */
2445 if (!(adev->flags & AMD_IS_APU)) 2446 if (!(adev->flags & AMD_IS_APU)) {
2446 gfx_v9_0_enable_gui_idle_interrupt(adev, true); 2447 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2447 2448 udelay(50);
2448 udelay(50); 2449 }
2449 2450
2450#ifdef AMDGPU_RLC_DEBUG_RETRY 2451#ifdef AMDGPU_RLC_DEBUG_RETRY
2451 /* RLC_GPM_GENERAL_6 : RLC Ucode version */ 2452 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e1c2b4e9c7b2..73ad02aea2b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
46MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); 46MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
47MODULE_FIRMWARE("amdgpu/verde_mc.bin"); 47MODULE_FIRMWARE("amdgpu/verde_mc.bin");
48MODULE_FIRMWARE("amdgpu/oland_mc.bin"); 48MODULE_FIRMWARE("amdgpu/oland_mc.bin");
49MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
49MODULE_FIRMWARE("amdgpu/si58_mc.bin"); 50MODULE_FIRMWARE("amdgpu/si58_mc.bin");
50 51
51#define MC_SEQ_MISC0__MT__MASK 0xf0000000 52#define MC_SEQ_MISC0__MT__MASK 0xf0000000
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index bf5e6a413dee..4cc0dcb1a187 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -65,6 +65,13 @@
65#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba 65#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
66#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 66#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
67 67
68/* for Vega20 register name change */
69#define mmHDP_MEM_POWER_CTRL 0x00d4
70#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
71#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
72#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
73#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
74#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
68/* 75/*
69 * Indirect registers accessor 76 * Indirect registers accessor
70 */ 77 */
@@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
870{ 877{
871 uint32_t def, data; 878 uint32_t def, data;
872 879
873 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); 880 if (adev->asic_type == CHIP_VEGA20) {
881 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
874 882
875 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) 883 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
876 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; 884 data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
877 else 885 HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
878 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; 886 HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
887 HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
888 else
889 data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
890 HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
891 HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
892 HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
879 893
880 if (def != data) 894 if (def != data)
881 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); 895 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
896 } else {
897 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
898
899 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
900 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
901 else
902 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
903
904 if (def != data)
905 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
906 }
882} 907}
883 908
884static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) 909static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c1262f62cd9f..ca925200fe09 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2358,8 +2358,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2358static enum dc_color_depth 2358static enum dc_color_depth
2359convert_color_depth_from_display_info(const struct drm_connector *connector) 2359convert_color_depth_from_display_info(const struct drm_connector *connector)
2360{ 2360{
2361 struct dm_connector_state *dm_conn_state =
2362 to_dm_connector_state(connector->state);
2361 uint32_t bpc = connector->display_info.bpc; 2363 uint32_t bpc = connector->display_info.bpc;
2362 2364
2365 /* TODO: Remove this when there's support for max_bpc in drm */
2366 if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2367 /* Round down to nearest even number. */
2368 bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2369
2363 switch (bpc) { 2370 switch (bpc) {
2364 case 0: 2371 case 0:
2365 /* 2372 /*
@@ -2943,6 +2950,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2943 } else if (property == adev->mode_info.underscan_property) { 2950 } else if (property == adev->mode_info.underscan_property) {
2944 dm_new_state->underscan_enable = val; 2951 dm_new_state->underscan_enable = val;
2945 ret = 0; 2952 ret = 0;
2953 } else if (property == adev->mode_info.max_bpc_property) {
2954 dm_new_state->max_bpc = val;
2955 ret = 0;
2946 } 2956 }
2947 2957
2948 return ret; 2958 return ret;
@@ -2985,6 +2995,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2985 } else if (property == adev->mode_info.underscan_property) { 2995 } else if (property == adev->mode_info.underscan_property) {
2986 *val = dm_state->underscan_enable; 2996 *val = dm_state->underscan_enable;
2987 ret = 0; 2997 ret = 0;
2998 } else if (property == adev->mode_info.max_bpc_property) {
2999 *val = dm_state->max_bpc;
3000 ret = 0;
2988 } 3001 }
2989 return ret; 3002 return ret;
2990} 3003}
@@ -3795,6 +3808,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3795 drm_object_attach_property(&aconnector->base.base, 3808 drm_object_attach_property(&aconnector->base.base,
3796 adev->mode_info.underscan_vborder_property, 3809 adev->mode_info.underscan_vborder_property,
3797 0); 3810 0);
3811 drm_object_attach_property(&aconnector->base.base,
3812 adev->mode_info.max_bpc_property,
3813 0);
3798 3814
3799} 3815}
3800 3816
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 924a38a1fc44..6e069d777ab2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -204,6 +204,7 @@ struct dm_connector_state {
204 enum amdgpu_rmx_type scaling; 204 enum amdgpu_rmx_type scaling;
205 uint8_t underscan_vborder; 205 uint8_t underscan_vborder;
206 uint8_t underscan_hborder; 206 uint8_t underscan_hborder;
207 uint8_t max_bpc;
207 bool underscan_enable; 208 bool underscan_enable;
208 bool freesync_enable; 209 bool freesync_enable;
209 bool freesync_capable; 210 bool freesync_capable;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d02c32a1039c..1b0d209d8367 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -342,10 +342,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
342 master->connector_id); 342 master->connector_id);
343 343
344 aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); 344 aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
345 drm_connector_attach_encoder(&aconnector->base,
346 &aconnector->mst_encoder->base);
345 347
346 /*
347 * TODO: understand why this one is needed
348 */
349 drm_object_attach_property( 348 drm_object_attach_property(
350 &connector->base, 349 &connector->base,
351 dev->mode_config.path_property, 350 dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index ed35ec0341e6..88f6b35ea6fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4525,12 +4525,12 @@ static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4525 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4525 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4526 struct smu7_single_dpm_table *golden_sclk_table = 4526 struct smu7_single_dpm_table *golden_sclk_table =
4527 &(data->golden_dpm_table.sclk_table); 4527 &(data->golden_dpm_table.sclk_table);
4528 int value; 4528 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4529 int golden_value = golden_sclk_table->dpm_levels
4530 [golden_sclk_table->count - 1].value;
4529 4531
4530 value = (sclk_table->dpm_levels[sclk_table->count - 1].value - 4532 value -= golden_value;
4531 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 4533 value = DIV_ROUND_UP(value * 100, golden_value);
4532 100 /
4533 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4534 4534
4535 return value; 4535 return value;
4536} 4536}
@@ -4567,12 +4567,12 @@ static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4567 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4567 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4568 struct smu7_single_dpm_table *golden_mclk_table = 4568 struct smu7_single_dpm_table *golden_mclk_table =
4569 &(data->golden_dpm_table.mclk_table); 4569 &(data->golden_dpm_table.mclk_table);
4570 int value; 4570 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4571 int golden_value = golden_mclk_table->dpm_levels
4572 [golden_mclk_table->count - 1].value;
4571 4573
4572 value = (mclk_table->dpm_levels[mclk_table->count - 1].value - 4574 value -= golden_value;
4573 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 4575 value = DIV_ROUND_UP(value * 100, golden_value);
4574 100 /
4575 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4576 4576
4577 return value; 4577 return value;
4578} 4578}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 8c4db86bb4b7..e2bc6e0c229f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4522,15 +4522,13 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4522 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); 4522 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4523 struct vega10_single_dpm_table *golden_sclk_table = 4523 struct vega10_single_dpm_table *golden_sclk_table =
4524 &(data->golden_dpm_table.gfx_table); 4524 &(data->golden_dpm_table.gfx_table);
4525 int value; 4525 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4526 4526 int golden_value = golden_sclk_table->dpm_levels
4527 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4528 golden_sclk_table->dpm_levels
4529 [golden_sclk_table->count - 1].value) *
4530 100 /
4531 golden_sclk_table->dpm_levels
4532 [golden_sclk_table->count - 1].value; 4527 [golden_sclk_table->count - 1].value;
4533 4528
4529 value -= golden_value;
4530 value = DIV_ROUND_UP(value * 100, golden_value);
4531
4534 return value; 4532 return value;
4535} 4533}
4536 4534
@@ -4575,16 +4573,13 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4575 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); 4573 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4576 struct vega10_single_dpm_table *golden_mclk_table = 4574 struct vega10_single_dpm_table *golden_mclk_table =
4577 &(data->golden_dpm_table.mem_table); 4575 &(data->golden_dpm_table.mem_table);
4578 int value; 4576 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4579 4577 int golden_value = golden_mclk_table->dpm_levels
4580 value = (mclk_table->dpm_levels
4581 [mclk_table->count - 1].value -
4582 golden_mclk_table->dpm_levels
4583 [golden_mclk_table->count - 1].value) *
4584 100 /
4585 golden_mclk_table->dpm_levels
4586 [golden_mclk_table->count - 1].value; 4578 [golden_mclk_table->count - 1].value;
4587 4579
4580 value -= golden_value;
4581 value = DIV_ROUND_UP(value * 100, golden_value);
4582
4588 return value; 4583 return value;
4589} 4584}
4590 4585
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 74bc37308dc0..54364444ecd1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2243,12 +2243,12 @@ static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
2243 struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); 2243 struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
2244 struct vega12_single_dpm_table *golden_sclk_table = 2244 struct vega12_single_dpm_table *golden_sclk_table =
2245 &(data->golden_dpm_table.gfx_table); 2245 &(data->golden_dpm_table.gfx_table);
2246 int value; 2246 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
2247 int golden_value = golden_sclk_table->dpm_levels
2248 [golden_sclk_table->count - 1].value;
2247 2249
2248 value = (sclk_table->dpm_levels[sclk_table->count - 1].value - 2250 value -= golden_value;
2249 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 2251 value = DIV_ROUND_UP(value * 100, golden_value);
2250 100 /
2251 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
2252 2252
2253 return value; 2253 return value;
2254} 2254}
@@ -2264,16 +2264,13 @@ static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
2264 struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); 2264 struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
2265 struct vega12_single_dpm_table *golden_mclk_table = 2265 struct vega12_single_dpm_table *golden_mclk_table =
2266 &(data->golden_dpm_table.mem_table); 2266 &(data->golden_dpm_table.mem_table);
2267 int value; 2267 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
2268 2268 int golden_value = golden_mclk_table->dpm_levels
2269 value = (mclk_table->dpm_levels
2270 [mclk_table->count - 1].value -
2271 golden_mclk_table->dpm_levels
2272 [golden_mclk_table->count - 1].value) *
2273 100 /
2274 golden_mclk_table->dpm_levels
2275 [golden_mclk_table->count - 1].value; 2269 [golden_mclk_table->count - 1].value;
2276 2270
2271 value -= golden_value;
2272 value = DIV_ROUND_UP(value * 100, golden_value);
2273
2277 return value; 2274 return value;
2278} 2275}
2279 2276
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 99861f32b1f9..b4eadd47f3a4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -75,7 +75,17 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
75 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 75 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
76 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 76 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
77 77
78 data->registry_data.disallowed_features = 0x0; 78 /*
79 * Disable the following features for now:
80 * GFXCLK DS
81 * SOCLK DS
82 * LCLK DS
83 * DCEFCLK DS
84 * FCLK DS
85 * MP1CLK DS
86 * MP0CLK DS
87 */
88 data->registry_data.disallowed_features = 0xE0041C00;
79 data->registry_data.od_state_in_dc_support = 0; 89 data->registry_data.od_state_in_dc_support = 0;
80 data->registry_data.thermal_support = 1; 90 data->registry_data.thermal_support = 1;
81 data->registry_data.skip_baco_hardware = 0; 91 data->registry_data.skip_baco_hardware = 0;
@@ -1313,12 +1323,13 @@ static int vega20_get_sclk_od(
1313 &(data->dpm_table.gfx_table); 1323 &(data->dpm_table.gfx_table);
1314 struct vega20_single_dpm_table *golden_sclk_table = 1324 struct vega20_single_dpm_table *golden_sclk_table =
1315 &(data->golden_dpm_table.gfx_table); 1325 &(data->golden_dpm_table.gfx_table);
1316 int value; 1326 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
1327 int golden_value = golden_sclk_table->dpm_levels
1328 [golden_sclk_table->count - 1].value;
1317 1329
1318 /* od percentage */ 1330 /* od percentage */
1319 value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value - 1331 value -= golden_value;
1320 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100, 1332 value = DIV_ROUND_UP(value * 100, golden_value);
1321 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
1322 1333
1323 return value; 1334 return value;
1324} 1335}
@@ -1358,12 +1369,13 @@ static int vega20_get_mclk_od(
1358 &(data->dpm_table.mem_table); 1369 &(data->dpm_table.mem_table);
1359 struct vega20_single_dpm_table *golden_mclk_table = 1370 struct vega20_single_dpm_table *golden_mclk_table =
1360 &(data->golden_dpm_table.mem_table); 1371 &(data->golden_dpm_table.mem_table);
1361 int value; 1372 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
1373 int golden_value = golden_mclk_table->dpm_levels
1374 [golden_mclk_table->count - 1].value;
1362 1375
1363 /* od percentage */ 1376 /* od percentage */
1364 value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value - 1377 value -= golden_value;
1365 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100, 1378 value = DIV_ROUND_UP(value * 100, golden_value);
1366 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
1367 1379
1368 return value; 1380 return value;
1369} 1381}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 69dab82a3771..bf589c53b908 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
60 60
61MODULE_DEVICE_TABLE(pci, pciidlist); 61MODULE_DEVICE_TABLE(pci, pciidlist);
62 62
63static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
64{
65 struct apertures_struct *ap;
66 bool primary = false;
67
68 ap = alloc_apertures(1);
69 if (!ap)
70 return;
71
72 ap->ranges[0].base = pci_resource_start(pdev, 0);
73 ap->ranges[0].size = pci_resource_len(pdev, 0);
74
75#ifdef CONFIG_X86
76 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
77#endif
78 drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
79 kfree(ap);
80}
81
63static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 82static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
64{ 83{
84 ast_kick_out_firmware_fb(pdev);
85
65 return drm_get_pci_dev(pdev, ent, &driver); 86 return drm_get_pci_dev(pdev, ent, &driver);
66} 87}
67 88
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index dac355812adc..373700c05a00 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -583,7 +583,8 @@ void ast_driver_unload(struct drm_device *dev)
583 drm_mode_config_cleanup(dev); 583 drm_mode_config_cleanup(dev);
584 584
585 ast_mm_fini(ast); 585 ast_mm_fini(ast);
586 pci_iounmap(dev->pdev, ast->ioregs); 586 if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
587 pci_iounmap(dev->pdev, ast->ioregs);
587 pci_iounmap(dev->pdev, ast->regs); 588 pci_iounmap(dev->pdev, ast->regs);
588 kfree(ast); 589 kfree(ast);
589} 590}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 5e77d456d9bb..8bb355d5d43d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
568 } 568 }
569 ast_bo_unreserve(bo); 569 ast_bo_unreserve(bo);
570 570
571 ast_set_offset_reg(crtc);
571 ast_set_start_address_crt1(crtc, (u32)gpu_addr); 572 ast_set_start_address_crt1(crtc, (u32)gpu_addr);
572 573
573 return 0; 574 return 0;
@@ -972,9 +973,21 @@ static int get_clock(void *i2c_priv)
972{ 973{
973 struct ast_i2c_chan *i2c = i2c_priv; 974 struct ast_i2c_chan *i2c = i2c_priv;
974 struct ast_private *ast = i2c->dev->dev_private; 975 struct ast_private *ast = i2c->dev->dev_private;
975 uint32_t val; 976 uint32_t val, val2, count, pass;
977
978 count = 0;
979 pass = 0;
980 val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
981 do {
982 val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
983 if (val == val2) {
984 pass++;
985 } else {
986 pass = 0;
987 val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
988 }
989 } while ((pass < 5) && (count++ < 0x10000));
976 990
977 val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
978 return val & 1 ? 1 : 0; 991 return val & 1 ? 1 : 0;
979} 992}
980 993
@@ -982,9 +995,21 @@ static int get_data(void *i2c_priv)
982{ 995{
983 struct ast_i2c_chan *i2c = i2c_priv; 996 struct ast_i2c_chan *i2c = i2c_priv;
984 struct ast_private *ast = i2c->dev->dev_private; 997 struct ast_private *ast = i2c->dev->dev_private;
985 uint32_t val; 998 uint32_t val, val2, count, pass;
999
1000 count = 0;
1001 pass = 0;
1002 val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
1003 do {
1004 val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
1005 if (val == val2) {
1006 pass++;
1007 } else {
1008 pass = 0;
1009 val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
1010 }
1011 } while ((pass < 5) && (count++ < 0x10000));
986 1012
987 val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
988 return val & 1 ? 1 : 0; 1013 return val & 1 ? 1 : 0;
989} 1014}
990 1015
@@ -997,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
997 1022
998 for (i = 0; i < 0x10000; i++) { 1023 for (i = 0; i < 0x10000; i++) {
999 ujcrb7 = ((clock & 0x01) ? 0 : 1); 1024 ujcrb7 = ((clock & 0x01) ? 0 : 1);
1000 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7); 1025 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
1001 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); 1026 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
1002 if (ujcrb7 == jtemp) 1027 if (ujcrb7 == jtemp)
1003 break; 1028 break;
@@ -1013,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
1013 1038
1014 for (i = 0; i < 0x10000; i++) { 1039 for (i = 0; i < 0x10000; i++) {
1015 ujcrb7 = ((data & 0x01) ? 0 : 1) << 2; 1040 ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
1016 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7); 1041 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
1017 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); 1042 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
1018 if (ujcrb7 == jtemp) 1043 if (ujcrb7 == jtemp)
1019 break; 1044 break;
@@ -1254,7 +1279,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
1254 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); 1279 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
1255 1280
1256 /* dummy write to fire HWC */ 1281 /* dummy write to fire HWC */
1257 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00); 1282 ast_show_cursor(crtc);
1258 1283
1259 return 0; 1284 return 0;
1260} 1285}
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index d9c0f7573905..1669c42c40ed 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -142,6 +142,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
142 142
143 lockdep_assert_held_once(&dev->master_mutex); 143 lockdep_assert_held_once(&dev->master_mutex);
144 144
145 WARN_ON(fpriv->is_master);
145 old_master = fpriv->master; 146 old_master = fpriv->master;
146 fpriv->master = drm_master_create(dev); 147 fpriv->master = drm_master_create(dev);
147 if (!fpriv->master) { 148 if (!fpriv->master) {
@@ -170,6 +171,7 @@ out_err:
170 /* drop references and restore old master on failure */ 171 /* drop references and restore old master on failure */
171 drm_master_put(&fpriv->master); 172 drm_master_put(&fpriv->master);
172 fpriv->master = old_master; 173 fpriv->master = old_master;
174 fpriv->is_master = 0;
173 175
174 return ret; 176 return ret;
175} 177}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a502f3e519fd..dd852a25d375 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -219,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
219 mutex_lock(&fb_helper->lock); 219 mutex_lock(&fb_helper->lock);
220 drm_connector_list_iter_begin(dev, &conn_iter); 220 drm_connector_list_iter_begin(dev, &conn_iter);
221 drm_for_each_connector_iter(connector, &conn_iter) { 221 drm_for_each_connector_iter(connector, &conn_iter) {
222 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
223 continue;
224
222 ret = __drm_fb_helper_add_one_connector(fb_helper, connector); 225 ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
223 if (ret) 226 if (ret)
224 goto fail; 227 goto fail;
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index fe754022e356..359d37d5c958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,10 +61,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
61 } 61 }
62 62
63 mutex_lock(&dev_priv->drm.struct_mutex); 63 mutex_lock(&dev_priv->drm.struct_mutex);
64 mmio_hw_access_pre(dev_priv);
64 ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, 65 ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
65 size, I915_GTT_PAGE_SIZE, 66 size, I915_GTT_PAGE_SIZE,
66 I915_COLOR_UNEVICTABLE, 67 I915_COLOR_UNEVICTABLE,
67 start, end, flags); 68 start, end, flags);
69 mmio_hw_access_post(dev_priv);
68 mutex_unlock(&dev_priv->drm.struct_mutex); 70 mutex_unlock(&dev_priv->drm.struct_mutex);
69 if (ret) 71 if (ret)
70 gvt_err("fail to alloc %s gm space from host\n", 72 gvt_err("fail to alloc %s gm space from host\n",
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 58e166effa45..c7103dd2d8d5 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2447,10 +2447,11 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2447 2447
2448static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) 2448static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2449{ 2449{
2450 struct intel_gvt_partial_pte *pos; 2450 struct intel_gvt_partial_pte *pos, *next;
2451 2451
2452 list_for_each_entry(pos, 2452 list_for_each_entry_safe(pos, next,
2453 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) { 2453 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2454 list) {
2454 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n", 2455 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2455 pos->offset, pos->data); 2456 pos->offset, pos->data);
2456 kfree(pos); 2457 kfree(pos);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 36a5147cd01e..d6e02c15ef97 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -158,6 +158,8 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
158 int ring_id, i; 158 int ring_id, i;
159 159
160 for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) { 160 for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
161 if (!HAS_ENGINE(dev_priv, ring_id))
162 continue;
161 offset.reg = regs[ring_id]; 163 offset.reg = regs[ring_id];
162 for (i = 0; i < GEN9_MOCS_SIZE; i++) { 164 for (i = 0; i < GEN9_MOCS_SIZE; i++) {
163 gen9_render_mocs.control_table[ring_id][i] = 165 gen9_render_mocs.control_table[ring_id][i] =
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1aaccbe7e1de..d4fac09095f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
1268 else if (gen >= 4) 1268 else if (gen >= 4)
1269 len = 4; 1269 len = 4;
1270 else 1270 else
1271 len = 3; 1271 len = 6;
1272 1272
1273 batch = reloc_gpu(eb, vma, len); 1273 batch = reloc_gpu(eb, vma, len);
1274 if (IS_ERR(batch)) 1274 if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
1309 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; 1309 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1310 *batch++ = addr; 1310 *batch++ = addr;
1311 *batch++ = target_offset; 1311 *batch++ = target_offset;
1312
1313 /* And again for good measure (blb/pnv) */
1314 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1315 *batch++ = addr;
1316 *batch++ = target_offset;
1312 } 1317 }
1313 1318
1314 goto out; 1319 goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 47c302543799..07999fe09ad2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3413,6 +3413,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3413 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; 3413 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3414 if (ggtt->vm.clear_range != nop_clear_range) 3414 if (ggtt->vm.clear_range != nop_clear_range)
3415 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; 3415 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3416
3417 /* Prevent recursively calling stop_machine() and deadlocks. */
3418 dev_info(dev_priv->drm.dev,
3419 "Disabling error capture for VT-d workaround\n");
3420 i915_disable_error_state(dev_priv, -ENODEV);
3416 } 3421 }
3417 3422
3418 ggtt->invalidate = gen6_ggtt_invalidate; 3423 ggtt->invalidate = gen6_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8762d17b6659..3eb33e000d6f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -648,6 +648,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
648 return 0; 648 return 0;
649 } 649 }
650 650
651 if (IS_ERR(error))
652 return PTR_ERR(error);
653
651 if (*error->error_msg) 654 if (*error->error_msg)
652 err_printf(m, "%s\n", error->error_msg); 655 err_printf(m, "%s\n", error->error_msg);
653 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 656 err_printf(m, "Kernel: " UTS_RELEASE "\n");
@@ -1859,6 +1862,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1859 error = i915_capture_gpu_state(i915); 1862 error = i915_capture_gpu_state(i915);
1860 if (!error) { 1863 if (!error) {
1861 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1864 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1865 i915_disable_error_state(i915, -ENOMEM);
1862 return; 1866 return;
1863 } 1867 }
1864 1868
@@ -1914,5 +1918,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
1914 i915->gpu_error.first_error = NULL; 1918 i915->gpu_error.first_error = NULL;
1915 spin_unlock_irq(&i915->gpu_error.lock); 1919 spin_unlock_irq(&i915->gpu_error.lock);
1916 1920
1917 i915_gpu_state_put(error); 1921 if (!IS_ERR(error))
1922 i915_gpu_state_put(error);
1923}
1924
1925void i915_disable_error_state(struct drm_i915_private *i915, int err)
1926{
1927 spin_lock_irq(&i915->gpu_error.lock);
1928 if (!i915->gpu_error.first_error)
1929 i915->gpu_error.first_error = ERR_PTR(err);
1930 spin_unlock_irq(&i915->gpu_error.lock);
1918} 1931}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 8710fb18ed74..3ec89a504de5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
343 343
344struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); 344struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
345void i915_reset_error_state(struct drm_i915_private *i915); 345void i915_reset_error_state(struct drm_i915_private *i915);
346void i915_disable_error_state(struct drm_i915_private *i915, int err);
346 347
347#else 348#else
348 349
@@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
355static inline struct i915_gpu_state * 356static inline struct i915_gpu_state *
356i915_first_error_state(struct drm_i915_private *i915) 357i915_first_error_state(struct drm_i915_private *i915)
357{ 358{
358 return NULL; 359 return ERR_PTR(-ENODEV);
359} 360}
360 361
361static inline void i915_reset_error_state(struct drm_i915_private *i915) 362static inline void i915_reset_error_state(struct drm_i915_private *i915)
362{ 363{
363} 364}
364 365
366static inline void i915_disable_error_state(struct drm_i915_private *i915,
367 int err)
368{
369}
370
365#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ 371#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
366 372
367#endif /* _I915_GPU_ERROR_H_ */ 373#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a54843fdeb2f..c9878dd1f7cd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2890,6 +2890,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2890 return; 2890 return;
2891 2891
2892valid_fb: 2892valid_fb:
2893 intel_state->base.rotation = plane_config->rotation;
2893 intel_fill_fb_ggtt_view(&intel_state->view, fb, 2894 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2894 intel_state->base.rotation); 2895 intel_state->base.rotation);
2895 intel_state->color_plane[0].stride = 2896 intel_state->color_plane[0].stride =
@@ -7882,8 +7883,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
7882 plane_config->tiling = I915_TILING_X; 7883 plane_config->tiling = I915_TILING_X;
7883 fb->modifier = I915_FORMAT_MOD_X_TILED; 7884 fb->modifier = I915_FORMAT_MOD_X_TILED;
7884 } 7885 }
7886
7887 if (val & DISPPLANE_ROTATE_180)
7888 plane_config->rotation = DRM_MODE_ROTATE_180;
7885 } 7889 }
7886 7890
7891 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
7892 val & DISPPLANE_MIRROR)
7893 plane_config->rotation |= DRM_MODE_REFLECT_X;
7894
7887 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7895 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7888 fourcc = i9xx_format_to_fourcc(pixel_format); 7896 fourcc = i9xx_format_to_fourcc(pixel_format);
7889 fb->format = drm_format_info(fourcc); 7897 fb->format = drm_format_info(fourcc);
@@ -8952,6 +8960,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
8952 goto error; 8960 goto error;
8953 } 8961 }
8954 8962
8963 /*
8964 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
8965 * while i915 HW rotation is clockwise, thats why this swapping.
8966 */
8967 switch (val & PLANE_CTL_ROTATE_MASK) {
8968 case PLANE_CTL_ROTATE_0:
8969 plane_config->rotation = DRM_MODE_ROTATE_0;
8970 break;
8971 case PLANE_CTL_ROTATE_90:
8972 plane_config->rotation = DRM_MODE_ROTATE_270;
8973 break;
8974 case PLANE_CTL_ROTATE_180:
8975 plane_config->rotation = DRM_MODE_ROTATE_180;
8976 break;
8977 case PLANE_CTL_ROTATE_270:
8978 plane_config->rotation = DRM_MODE_ROTATE_90;
8979 break;
8980 }
8981
8982 if (INTEL_GEN(dev_priv) >= 10 &&
8983 val & PLANE_CTL_FLIP_HORIZONTAL)
8984 plane_config->rotation |= DRM_MODE_REFLECT_X;
8985
8955 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; 8986 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
8956 plane_config->base = base; 8987 plane_config->base = base;
8957 8988
@@ -15267,6 +15298,14 @@ retry:
15267 ret = drm_atomic_add_affected_planes(state, crtc); 15298 ret = drm_atomic_add_affected_planes(state, crtc);
15268 if (ret) 15299 if (ret)
15269 goto out; 15300 goto out;
15301
15302 /*
15303 * FIXME hack to force a LUT update to avoid the
15304 * plane update forcing the pipe gamma on without
15305 * having a proper LUT loaded. Remove once we
15306 * have readout for pipe gamma enable.
15307 */
15308 crtc_state->color_mgmt_changed = true;
15270 } 15309 }
15271 } 15310 }
15272 15311
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8b298e5f012d..db6fa1d0cbda 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -547,6 +547,7 @@ struct intel_initial_plane_config {
547 unsigned int tiling; 547 unsigned int tiling;
548 int size; 548 int size;
549 u32 base; 549 u32 base;
550 u8 rotation;
550}; 551};
551 552
552#define SKL_MIN_SRC_W 8 553#define SKL_MIN_SRC_W 8
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 245f0022bcfd..3fe358db1276 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2493 uint32_t method1, method2; 2493 uint32_t method1, method2;
2494 int cpp; 2494 int cpp;
2495 2495
2496 if (mem_value == 0)
2497 return U32_MAX;
2498
2496 if (!intel_wm_plane_visible(cstate, pstate)) 2499 if (!intel_wm_plane_visible(cstate, pstate))
2497 return 0; 2500 return 0;
2498 2501
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2522 uint32_t method1, method2; 2525 uint32_t method1, method2;
2523 int cpp; 2526 int cpp;
2524 2527
2528 if (mem_value == 0)
2529 return U32_MAX;
2530
2525 if (!intel_wm_plane_visible(cstate, pstate)) 2531 if (!intel_wm_plane_visible(cstate, pstate))
2526 return 0; 2532 return 0;
2527 2533
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2545{ 2551{
2546 int cpp; 2552 int cpp;
2547 2553
2554 if (mem_value == 0)
2555 return U32_MAX;
2556
2548 if (!intel_wm_plane_visible(cstate, pstate)) 2557 if (!intel_wm_plane_visible(cstate, pstate))
2549 return 0; 2558 return 0;
2550 2559
@@ -3008,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3008 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 3017 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3009} 3018}
3010 3019
3020static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3021{
3022 /*
3023 * On some SNB machines (Thinkpad X220 Tablet at least)
3024 * LP3 usage can cause vblank interrupts to be lost.
3025 * The DEIIR bit will go high but it looks like the CPU
3026 * never gets interrupted.
3027 *
3028 * It's not clear whether other interrupt source could
3029 * be affected or if this is somehow limited to vblank
3030 * interrupts only. To play it safe we disable LP3
3031 * watermarks entirely.
3032 */
3033 if (dev_priv->wm.pri_latency[3] == 0 &&
3034 dev_priv->wm.spr_latency[3] == 0 &&
3035 dev_priv->wm.cur_latency[3] == 0)
3036 return;
3037
3038 dev_priv->wm.pri_latency[3] = 0;
3039 dev_priv->wm.spr_latency[3] = 0;
3040 dev_priv->wm.cur_latency[3] = 0;
3041
3042 DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
3043 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3044 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3045 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3046}
3047
3011static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) 3048static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3012{ 3049{
3013 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); 3050 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3024,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3024 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); 3061 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3025 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 3062 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3026 3063
3027 if (IS_GEN6(dev_priv)) 3064 if (IS_GEN6(dev_priv)) {
3028 snb_wm_latency_quirk(dev_priv); 3065 snb_wm_latency_quirk(dev_priv);
3066 snb_wm_lp3_irq_quirk(dev_priv);
3067 }
3029} 3068}
3030 3069
3031static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) 3070static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 05520202c967..191b314f9e9e 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -45,6 +45,7 @@ struct meson_crtc {
45 struct drm_crtc base; 45 struct drm_crtc base;
46 struct drm_pending_vblank_event *event; 46 struct drm_pending_vblank_event *event;
47 struct meson_drm *priv; 47 struct meson_drm *priv;
48 bool enabled;
48}; 49};
49#define to_meson_crtc(x) container_of(x, struct meson_crtc, base) 50#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
50 51
@@ -80,8 +81,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
80 81
81}; 82};
82 83
83static void meson_crtc_atomic_enable(struct drm_crtc *crtc, 84static void meson_crtc_enable(struct drm_crtc *crtc)
84 struct drm_crtc_state *old_state)
85{ 85{
86 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 86 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
87 struct drm_crtc_state *crtc_state = crtc->state; 87 struct drm_crtc_state *crtc_state = crtc->state;
@@ -101,6 +101,22 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
101 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE, 101 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
102 priv->io_base + _REG(VPP_MISC)); 102 priv->io_base + _REG(VPP_MISC));
103 103
104 drm_crtc_vblank_on(crtc);
105
106 meson_crtc->enabled = true;
107}
108
109static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
110 struct drm_crtc_state *old_state)
111{
112 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
113 struct meson_drm *priv = meson_crtc->priv;
114
115 DRM_DEBUG_DRIVER("\n");
116
117 if (!meson_crtc->enabled)
118 meson_crtc_enable(crtc);
119
104 priv->viu.osd1_enabled = true; 120 priv->viu.osd1_enabled = true;
105} 121}
106 122
@@ -110,6 +126,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
110 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 126 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
111 struct meson_drm *priv = meson_crtc->priv; 127 struct meson_drm *priv = meson_crtc->priv;
112 128
129 drm_crtc_vblank_off(crtc);
130
113 priv->viu.osd1_enabled = false; 131 priv->viu.osd1_enabled = false;
114 priv->viu.osd1_commit = false; 132 priv->viu.osd1_commit = false;
115 133
@@ -124,6 +142,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
124 142
125 crtc->state->event = NULL; 143 crtc->state->event = NULL;
126 } 144 }
145
146 meson_crtc->enabled = false;
127} 147}
128 148
129static void meson_crtc_atomic_begin(struct drm_crtc *crtc, 149static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -132,6 +152,9 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
132 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 152 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
133 unsigned long flags; 153 unsigned long flags;
134 154
155 if (crtc->state->enable && !meson_crtc->enabled)
156 meson_crtc_enable(crtc);
157
135 if (crtc->state->event) { 158 if (crtc->state->event) {
136 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 159 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
137 160
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index df7247cd93f9..2cb2ad26d716 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -706,6 +706,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
706 .reg_read = meson_dw_hdmi_reg_read, 706 .reg_read = meson_dw_hdmi_reg_read,
707 .reg_write = meson_dw_hdmi_reg_write, 707 .reg_write = meson_dw_hdmi_reg_write,
708 .max_register = 0x10000, 708 .max_register = 0x10000,
709 .fast_io = true,
709}; 710};
710 711
711static bool meson_hdmi_connector_is_available(struct device *dev) 712static bool meson_hdmi_connector_is_available(struct device *dev)
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index acbbad3e322c..be76f3d64bf2 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -71,6 +71,7 @@
71 */ 71 */
72 72
73/* HHI Registers */ 73/* HHI Registers */
74#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
74#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ 75#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
75#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ 76#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
76#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */ 77#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
@@ -714,6 +715,7 @@ struct meson_hdmi_venc_vic_mode {
714 { 5, &meson_hdmi_encp_mode_1080i60 }, 715 { 5, &meson_hdmi_encp_mode_1080i60 },
715 { 20, &meson_hdmi_encp_mode_1080i50 }, 716 { 20, &meson_hdmi_encp_mode_1080i50 },
716 { 32, &meson_hdmi_encp_mode_1080p24 }, 717 { 32, &meson_hdmi_encp_mode_1080p24 },
718 { 33, &meson_hdmi_encp_mode_1080p50 },
717 { 34, &meson_hdmi_encp_mode_1080p30 }, 719 { 34, &meson_hdmi_encp_mode_1080p30 },
718 { 31, &meson_hdmi_encp_mode_1080p50 }, 720 { 31, &meson_hdmi_encp_mode_1080p50 },
719 { 16, &meson_hdmi_encp_mode_1080p60 }, 721 { 16, &meson_hdmi_encp_mode_1080p60 },
@@ -1530,10 +1532,12 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
1530void meson_venc_enable_vsync(struct meson_drm *priv) 1532void meson_venc_enable_vsync(struct meson_drm *priv)
1531{ 1533{
1532 writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL)); 1534 writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
1535 regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
1533} 1536}
1534 1537
1535void meson_venc_disable_vsync(struct meson_drm *priv) 1538void meson_venc_disable_vsync(struct meson_drm *priv)
1536{ 1539{
1540 regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0);
1537 writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL)); 1541 writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
1538} 1542}
1539 1543
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 6bcfa527c180..26a0857878bf 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
184 if (lut_sel == VIU_LUT_OSD_OETF) { 184 if (lut_sel == VIU_LUT_OSD_OETF) {
185 writel(0, priv->io_base + _REG(addr_port)); 185 writel(0, priv->io_base + _REG(addr_port));
186 186
187 for (i = 0; i < 20; i++) 187 for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
188 writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16), 188 writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
189 priv->io_base + _REG(data_port)); 189 priv->io_base + _REG(data_port));
190 190
191 writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16), 191 writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
192 priv->io_base + _REG(data_port)); 192 priv->io_base + _REG(data_port));
193 193
194 for (i = 0; i < 20; i++) 194 for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
195 writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16), 195 writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
196 priv->io_base + _REG(data_port)); 196 priv->io_base + _REG(data_port));
197 197
198 for (i = 0; i < 20; i++) 198 for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
199 writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16), 199 writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
200 priv->io_base + _REG(data_port)); 200 priv->io_base + _REG(data_port));
201 201
@@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
211 } else if (lut_sel == VIU_LUT_OSD_EOTF) { 211 } else if (lut_sel == VIU_LUT_OSD_EOTF) {
212 writel(0, priv->io_base + _REG(addr_port)); 212 writel(0, priv->io_base + _REG(addr_port));
213 213
214 for (i = 0; i < 20; i++) 214 for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
215 writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16), 215 writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
216 priv->io_base + _REG(data_port)); 216 priv->io_base + _REG(data_port));
217 217
218 writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16), 218 writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
219 priv->io_base + _REG(data_port)); 219 priv->io_base + _REG(data_port));
220 220
221 for (i = 0; i < 20; i++) 221 for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
222 writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16), 222 writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
223 priv->io_base + _REG(data_port)); 223 priv->io_base + _REG(data_port));
224 224
225 for (i = 0; i < 20; i++) 225 for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
226 writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16), 226 writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
227 priv->io_base + _REG(data_port)); 227 priv->io_base + _REG(data_port));
228 228
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index d85f0a1c1581..cebf313c6e1f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -202,10 +202,25 @@ void rcar_du_group_put(struct rcar_du_group *rgrp)
202 202
203static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) 203static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
204{ 204{
205 struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2]; 205 struct rcar_du_device *rcdu = rgrp->dev;
206
207 /*
208 * Group start/stop is controlled by the DRES and DEN bits of DSYSR0
209 * for the first group and DSYSR2 for the second group. On most DU
210 * instances, this maps to the first CRTC of the group, and we can just
211 * use rcar_du_crtc_dsysr_clr_set() to access the correct DSYSR. On
212 * M3-N, however, DU2 doesn't exist, but DSYSR2 does. We thus need to
213 * access the register directly using group read/write.
214 */
215 if (rcdu->info->channels_mask & BIT(rgrp->index * 2)) {
216 struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
206 217
207 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN, 218 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
208 start ? DSYSR_DEN : DSYSR_DRES); 219 start ? DSYSR_DEN : DSYSR_DRES);
220 } else {
221 rcar_du_group_write(rgrp, DSYSR,
222 start ? DSYSR_DEN : DSYSR_DRES);
223 }
209} 224}
210 225
211void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) 226void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 127468785f74..1f94b9affe4b 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -214,6 +214,12 @@ static int vc4_atomic_commit(struct drm_device *dev,
214 return 0; 214 return 0;
215 } 215 }
216 216
217 /* We know for sure we don't want an async update here. Set
218 * state->legacy_cursor_update to false to prevent
219 * drm_atomic_helper_setup_commit() from auto-completing
220 * commit->flip_done.
221 */
222 state->legacy_cursor_update = false;
217 ret = drm_atomic_helper_setup_commit(state, nonblock); 223 ret = drm_atomic_helper_setup_commit(state, nonblock);
218 if (ret) 224 if (ret)
219 return ret; 225 return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 9dc3fcbd290b..c6635f23918a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -807,7 +807,7 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
807static void vc4_plane_atomic_async_update(struct drm_plane *plane, 807static void vc4_plane_atomic_async_update(struct drm_plane *plane,
808 struct drm_plane_state *state) 808 struct drm_plane_state *state)
809{ 809{
810 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state); 810 struct vc4_plane_state *vc4_state, *new_vc4_state;
811 811
812 if (plane->state->fb != state->fb) { 812 if (plane->state->fb != state->fb) {
813 vc4_plane_async_set_fb(plane, state->fb); 813 vc4_plane_async_set_fb(plane, state->fb);
@@ -828,7 +828,18 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
828 plane->state->src_y = state->src_y; 828 plane->state->src_y = state->src_y;
829 829
830 /* Update the display list based on the new crtc_x/y. */ 830 /* Update the display list based on the new crtc_x/y. */
831 vc4_plane_atomic_check(plane, plane->state); 831 vc4_plane_atomic_check(plane, state);
832
833 new_vc4_state = to_vc4_plane_state(state);
834 vc4_state = to_vc4_plane_state(plane->state);
835
836 /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
837 vc4_state->dlist[vc4_state->pos0_offset] =
838 new_vc4_state->dlist[vc4_state->pos0_offset];
839 vc4_state->dlist[vc4_state->pos2_offset] =
840 new_vc4_state->dlist[vc4_state->pos2_offset];
841 vc4_state->dlist[vc4_state->ptr0_offset] =
842 new_vc4_state->dlist[vc4_state->ptr0_offset];
832 843
833 /* Note that we can't just call vc4_plane_write_dlist() 844 /* Note that we can't just call vc4_plane_write_dlist()
834 * because that would smash the context data that the HVS is 845 * because that would smash the context data that the HVS is
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c0d668944dbe..ed35c9a9a110 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -275,6 +275,9 @@
275 275
276#define USB_VENDOR_ID_CIDC 0x1677 276#define USB_VENDOR_ID_CIDC 0x1677
277 277
278#define I2C_VENDOR_ID_CIRQUE 0x0488
279#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F
280
278#define USB_VENDOR_ID_CJTOUCH 0x24b8 281#define USB_VENDOR_ID_CJTOUCH 0x24b8
279#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020 282#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
280#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040 283#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
@@ -707,6 +710,7 @@
707#define USB_VENDOR_ID_LG 0x1fd2 710#define USB_VENDOR_ID_LG 0x1fd2
708#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 711#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
709#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007 712#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
713#define I2C_DEVICE_ID_LG_8001 0x8001
710 714
711#define USB_VENDOR_ID_LOGITECH 0x046d 715#define USB_VENDOR_ID_LOGITECH 0x046d
712#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e 716#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -805,6 +809,7 @@
805#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 809#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
806#define USB_DEVICE_ID_MS_POWER_COVER 0x07da 810#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
807#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd 811#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
812#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
808 813
809#define USB_VENDOR_ID_MOJO 0x8282 814#define USB_VENDOR_ID_MOJO 0x8282
810#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 815#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -1043,6 +1048,7 @@
1043#define USB_VENDOR_ID_SYMBOL 0x05e0 1048#define USB_VENDOR_ID_SYMBOL 0x05e0
1044#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800 1049#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
1045#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 1050#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
1051#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
1046 1052
1047#define USB_VENDOR_ID_SYNAPTICS 0x06cb 1053#define USB_VENDOR_ID_SYNAPTICS 0x06cb
1048#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 1054#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
@@ -1204,6 +1210,8 @@
1204#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22 1210#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
1205#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 1211#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
1206#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72 1212#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
1213#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f
1214#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22 0x4e22
1207 1215
1208 1216
1209#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */ 1217#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a2f74e6adc70..d6fab5798487 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -325,6 +325,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
325 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, 325 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
326 USB_DEVICE_ID_ELECOM_BM084), 326 USB_DEVICE_ID_ELECOM_BM084),
327 HID_BATTERY_QUIRK_IGNORE }, 327 HID_BATTERY_QUIRK_IGNORE },
328 { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
329 USB_DEVICE_ID_SYMBOL_SCANNER_3),
330 HID_BATTERY_QUIRK_IGNORE },
328 {} 331 {}
329}; 332};
330 333
@@ -1838,47 +1841,3 @@ void hidinput_disconnect(struct hid_device *hid)
1838} 1841}
1839EXPORT_SYMBOL_GPL(hidinput_disconnect); 1842EXPORT_SYMBOL_GPL(hidinput_disconnect);
1840 1843
1841/**
1842 * hid_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
1843 * events given a high-resolution wheel
1844 * movement.
1845 * @counter: a hid_scroll_counter struct describing the wheel.
1846 * @hi_res_value: the movement of the wheel, in the mouse's high-resolution
1847 * units.
1848 *
1849 * Given a high-resolution movement, this function converts the movement into
1850 * microns and emits high-resolution scroll events for the input device. It also
1851 * uses the multiplier from &struct hid_scroll_counter to emit low-resolution
1852 * scroll events when appropriate for backwards-compatibility with userspace
1853 * input libraries.
1854 */
1855void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
1856 int hi_res_value)
1857{
1858 int low_res_value, remainder, multiplier;
1859
1860 input_report_rel(counter->dev, REL_WHEEL_HI_RES,
1861 hi_res_value * counter->microns_per_hi_res_unit);
1862
1863 /*
1864 * Update the low-res remainder with the high-res value,
1865 * but reset if the direction has changed.
1866 */
1867 remainder = counter->remainder;
1868 if ((remainder ^ hi_res_value) < 0)
1869 remainder = 0;
1870 remainder += hi_res_value;
1871
1872 /*
1873 * Then just use the resolution multiplier to see if
1874 * we should send a low-res (aka regular wheel) event.
1875 */
1876 multiplier = counter->resolution_multiplier;
1877 low_res_value = remainder / multiplier;
1878 remainder -= low_res_value * multiplier;
1879 counter->remainder = remainder;
1880
1881 if (low_res_value)
1882 input_report_rel(counter->dev, REL_WHEEL, low_res_value);
1883}
1884EXPORT_SYMBOL_GPL(hid_scroll_counter_handle_scroll);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index f01280898b24..19cc980eebce 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -64,14 +64,6 @@ MODULE_PARM_DESC(disable_tap_to_click,
64#define HIDPP_QUIRK_NO_HIDINPUT BIT(23) 64#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
65#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24) 65#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
66#define HIDPP_QUIRK_UNIFYING BIT(25) 66#define HIDPP_QUIRK_UNIFYING BIT(25)
67#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
68#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
69#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
70
71/* Convenience constant to check for any high-res support. */
72#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
73 HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
74 HIDPP_QUIRK_HI_RES_SCROLL_X2121)
75 67
76#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT 68#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
77 69
@@ -157,7 +149,6 @@ struct hidpp_device {
157 unsigned long capabilities; 149 unsigned long capabilities;
158 150
159 struct hidpp_battery battery; 151 struct hidpp_battery battery;
160 struct hid_scroll_counter vertical_wheel_counter;
161}; 152};
162 153
163/* HID++ 1.0 error codes */ 154/* HID++ 1.0 error codes */
@@ -409,53 +400,32 @@ static void hidpp_prefix_name(char **name, int name_length)
409#define HIDPP_SET_LONG_REGISTER 0x82 400#define HIDPP_SET_LONG_REGISTER 0x82
410#define HIDPP_GET_LONG_REGISTER 0x83 401#define HIDPP_GET_LONG_REGISTER 0x83
411 402
412/** 403#define HIDPP_REG_GENERAL 0x00
413 * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register. 404
414 * @hidpp_dev: the device to set the register on. 405static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
415 * @register_address: the address of the register to modify.
416 * @byte: the byte of the register to modify. Should be less than 3.
417 * Return: 0 if successful, otherwise a negative error code.
418 */
419static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
420 u8 register_address, u8 byte, u8 bit)
421{ 406{
422 struct hidpp_report response; 407 struct hidpp_report response;
423 int ret; 408 int ret;
424 u8 params[3] = { 0 }; 409 u8 params[3] = { 0 };
425 410
426 ret = hidpp_send_rap_command_sync(hidpp_dev, 411 ret = hidpp_send_rap_command_sync(hidpp_dev,
427 REPORT_ID_HIDPP_SHORT, 412 REPORT_ID_HIDPP_SHORT,
428 HIDPP_GET_REGISTER, 413 HIDPP_GET_REGISTER,
429 register_address, 414 HIDPP_REG_GENERAL,
430 NULL, 0, &response); 415 NULL, 0, &response);
431 if (ret) 416 if (ret)
432 return ret; 417 return ret;
433 418
434 memcpy(params, response.rap.params, 3); 419 memcpy(params, response.rap.params, 3);
435 420
436 params[byte] |= BIT(bit); 421 /* Set the battery bit */
422 params[0] |= BIT(4);
437 423
438 return hidpp_send_rap_command_sync(hidpp_dev, 424 return hidpp_send_rap_command_sync(hidpp_dev,
439 REPORT_ID_HIDPP_SHORT, 425 REPORT_ID_HIDPP_SHORT,
440 HIDPP_SET_REGISTER, 426 HIDPP_SET_REGISTER,
441 register_address, 427 HIDPP_REG_GENERAL,
442 params, 3, &response); 428 params, 3, &response);
443}
444
445
446#define HIDPP_REG_GENERAL 0x00
447
448static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
449{
450 return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
451}
452
453#define HIDPP_REG_FEATURES 0x01
454
455/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
456static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
457{
458 return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
459} 429}
460 430
461#define HIDPP_REG_BATTERY_STATUS 0x07 431#define HIDPP_REG_BATTERY_STATUS 0x07
@@ -1167,100 +1137,6 @@ static int hidpp_battery_get_property(struct power_supply *psy,
1167} 1137}
1168 1138
1169/* -------------------------------------------------------------------------- */ 1139/* -------------------------------------------------------------------------- */
1170/* 0x2120: Hi-resolution scrolling */
1171/* -------------------------------------------------------------------------- */
1172
1173#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120
1174
1175#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10
1176
1177static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
1178 bool enabled, u8 *multiplier)
1179{
1180 u8 feature_index;
1181 u8 feature_type;
1182 int ret;
1183 u8 params[1];
1184 struct hidpp_report response;
1185
1186 ret = hidpp_root_get_feature(hidpp,
1187 HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
1188 &feature_index,
1189 &feature_type);
1190 if (ret)
1191 return ret;
1192
1193 params[0] = enabled ? BIT(0) : 0;
1194 ret = hidpp_send_fap_command_sync(hidpp, feature_index,
1195 CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE,
1196 params, sizeof(params), &response);
1197 if (ret)
1198 return ret;
1199 *multiplier = response.fap.params[1];
1200 return 0;
1201}
1202
1203/* -------------------------------------------------------------------------- */
1204/* 0x2121: HiRes Wheel */
1205/* -------------------------------------------------------------------------- */
1206
1207#define HIDPP_PAGE_HIRES_WHEEL 0x2121
1208
1209#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00
1210#define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20
1211
1212static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
1213 u8 *multiplier)
1214{
1215 u8 feature_index;
1216 u8 feature_type;
1217 int ret;
1218 struct hidpp_report response;
1219
1220 ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
1221 &feature_index, &feature_type);
1222 if (ret)
1223 goto return_default;
1224
1225 ret = hidpp_send_fap_command_sync(hidpp, feature_index,
1226 CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY,
1227 NULL, 0, &response);
1228 if (ret)
1229 goto return_default;
1230
1231 *multiplier = response.fap.params[0];
1232 return 0;
1233return_default:
1234 hid_warn(hidpp->hid_dev,
1235 "Couldn't get wheel multiplier (error %d), assuming %d.\n",
1236 ret, *multiplier);
1237 return ret;
1238}
1239
1240static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
1241 bool high_resolution, bool use_hidpp)
1242{
1243 u8 feature_index;
1244 u8 feature_type;
1245 int ret;
1246 u8 params[1];
1247 struct hidpp_report response;
1248
1249 ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
1250 &feature_index, &feature_type);
1251 if (ret)
1252 return ret;
1253
1254 params[0] = (invert ? BIT(2) : 0) |
1255 (high_resolution ? BIT(1) : 0) |
1256 (use_hidpp ? BIT(0) : 0);
1257
1258 return hidpp_send_fap_command_sync(hidpp, feature_index,
1259 CMD_HIRES_WHEEL_SET_WHEEL_MODE,
1260 params, sizeof(params), &response);
1261}
1262
1263/* -------------------------------------------------------------------------- */
1264/* 0x4301: Solar Keyboard */ 1140/* 0x4301: Solar Keyboard */
1265/* -------------------------------------------------------------------------- */ 1141/* -------------------------------------------------------------------------- */
1266 1142
@@ -2523,8 +2399,7 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
2523 input_report_rel(mydata->input, REL_Y, v); 2399 input_report_rel(mydata->input, REL_Y, v);
2524 2400
2525 v = hid_snto32(data[6], 8); 2401 v = hid_snto32(data[6], 8);
2526 hid_scroll_counter_handle_scroll( 2402 input_report_rel(mydata->input, REL_WHEEL, v);
2527 &hidpp->vertical_wheel_counter, v);
2528 2403
2529 input_sync(mydata->input); 2404 input_sync(mydata->input);
2530 } 2405 }
@@ -2653,72 +2528,6 @@ static int g920_get_config(struct hidpp_device *hidpp)
2653} 2528}
2654 2529
2655/* -------------------------------------------------------------------------- */ 2530/* -------------------------------------------------------------------------- */
2656/* High-resolution scroll wheels */
2657/* -------------------------------------------------------------------------- */
2658
2659/**
2660 * struct hi_res_scroll_info - Stores info on a device's high-res scroll wheel.
2661 * @product_id: the HID product ID of the device being described.
2662 * @microns_per_hi_res_unit: the distance moved by the user's finger for each
2663 * high-resolution unit reported by the device, in
2664 * 256ths of a millimetre.
2665 */
2666struct hi_res_scroll_info {
2667 __u32 product_id;
2668 int microns_per_hi_res_unit;
2669};
2670
2671static struct hi_res_scroll_info hi_res_scroll_devices[] = {
2672 { /* Anywhere MX */
2673 .product_id = 0x1017, .microns_per_hi_res_unit = 445 },
2674 { /* Performance MX */
2675 .product_id = 0x101a, .microns_per_hi_res_unit = 406 },
2676 { /* M560 */
2677 .product_id = 0x402d, .microns_per_hi_res_unit = 435 },
2678 { /* MX Master 2S */
2679 .product_id = 0x4069, .microns_per_hi_res_unit = 406 },
2680};
2681
2682static int hi_res_scroll_look_up_microns(__u32 product_id)
2683{
2684 int i;
2685 int num_devices = sizeof(hi_res_scroll_devices)
2686 / sizeof(hi_res_scroll_devices[0]);
2687 for (i = 0; i < num_devices; i++) {
2688 if (hi_res_scroll_devices[i].product_id == product_id)
2689 return hi_res_scroll_devices[i].microns_per_hi_res_unit;
2690 }
2691 /* We don't have a value for this device, so use a sensible default. */
2692 return 406;
2693}
2694
2695static int hi_res_scroll_enable(struct hidpp_device *hidpp)
2696{
2697 int ret;
2698 u8 multiplier = 8;
2699
2700 if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
2701 ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
2702 hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
2703 } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
2704 ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
2705 &multiplier);
2706 } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */
2707 ret = hidpp10_enable_scrolling_acceleration(hidpp);
2708
2709 if (ret)
2710 return ret;
2711
2712 hidpp->vertical_wheel_counter.resolution_multiplier = multiplier;
2713 hidpp->vertical_wheel_counter.microns_per_hi_res_unit =
2714 hi_res_scroll_look_up_microns(hidpp->hid_dev->product);
2715 hid_info(hidpp->hid_dev, "multiplier = %d, microns = %d\n",
2716 multiplier,
2717 hidpp->vertical_wheel_counter.microns_per_hi_res_unit);
2718 return 0;
2719}
2720
2721/* -------------------------------------------------------------------------- */
2722/* Generic HID++ devices */ 2531/* Generic HID++ devices */
2723/* -------------------------------------------------------------------------- */ 2532/* -------------------------------------------------------------------------- */
2724 2533
@@ -2763,11 +2572,6 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
2763 wtp_populate_input(hidpp, input, origin_is_hid_core); 2572 wtp_populate_input(hidpp, input, origin_is_hid_core);
2764 else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) 2573 else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
2765 m560_populate_input(hidpp, input, origin_is_hid_core); 2574 m560_populate_input(hidpp, input, origin_is_hid_core);
2766
2767 if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) {
2768 input_set_capability(input, EV_REL, REL_WHEEL_HI_RES);
2769 hidpp->vertical_wheel_counter.dev = input;
2770 }
2771} 2575}
2772 2576
2773static int hidpp_input_configured(struct hid_device *hdev, 2577static int hidpp_input_configured(struct hid_device *hdev,
@@ -2886,27 +2690,6 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
2886 return 0; 2690 return 0;
2887} 2691}
2888 2692
2889static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
2890 struct hid_usage *usage, __s32 value)
2891{
2892 /* This function will only be called for scroll events, due to the
2893 * restriction imposed in hidpp_usages.
2894 */
2895 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
2896 struct hid_scroll_counter *counter = &hidpp->vertical_wheel_counter;
2897 /* A scroll event may occur before the multiplier has been retrieved or
2898 * the input device set, or high-res scroll enabling may fail. In such
2899 * cases we must return early (falling back to default behaviour) to
2900 * avoid a crash in hid_scroll_counter_handle_scroll.
2901 */
2902 if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
2903 || counter->dev == NULL || counter->resolution_multiplier == 0)
2904 return 0;
2905
2906 hid_scroll_counter_handle_scroll(counter, value);
2907 return 1;
2908}
2909
2910static int hidpp_initialize_battery(struct hidpp_device *hidpp) 2693static int hidpp_initialize_battery(struct hidpp_device *hidpp)
2911{ 2694{
2912 static atomic_t battery_no = ATOMIC_INIT(0); 2695 static atomic_t battery_no = ATOMIC_INIT(0);
@@ -3118,9 +2901,6 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
3118 if (hidpp->battery.ps) 2901 if (hidpp->battery.ps)
3119 power_supply_changed(hidpp->battery.ps); 2902 power_supply_changed(hidpp->battery.ps);
3120 2903
3121 if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
3122 hi_res_scroll_enable(hidpp);
3123
3124 if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input) 2904 if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
3125 /* if the input nodes are already created, we can stop now */ 2905 /* if the input nodes are already created, we can stop now */
3126 return; 2906 return;
@@ -3306,63 +3086,35 @@ static void hidpp_remove(struct hid_device *hdev)
3306 mutex_destroy(&hidpp->send_mutex); 3086 mutex_destroy(&hidpp->send_mutex);
3307} 3087}
3308 3088
3309#define LDJ_DEVICE(product) \
3310 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
3311 USB_VENDOR_ID_LOGITECH, (product))
3312
3313static const struct hid_device_id hidpp_devices[] = { 3089static const struct hid_device_id hidpp_devices[] = {
3314 { /* wireless touchpad */ 3090 { /* wireless touchpad */
3315 LDJ_DEVICE(0x4011), 3091 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3092 USB_VENDOR_ID_LOGITECH, 0x4011),
3316 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT | 3093 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
3317 HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS }, 3094 HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
3318 { /* wireless touchpad T650 */ 3095 { /* wireless touchpad T650 */
3319 LDJ_DEVICE(0x4101), 3096 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3097 USB_VENDOR_ID_LOGITECH, 0x4101),
3320 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, 3098 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
3321 { /* wireless touchpad T651 */ 3099 { /* wireless touchpad T651 */
3322 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 3100 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
3323 USB_DEVICE_ID_LOGITECH_T651), 3101 USB_DEVICE_ID_LOGITECH_T651),
3324 .driver_data = HIDPP_QUIRK_CLASS_WTP }, 3102 .driver_data = HIDPP_QUIRK_CLASS_WTP },
3325 { /* Mouse Logitech Anywhere MX */
3326 LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
3327 { /* Mouse Logitech Cube */
3328 LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
3329 { /* Mouse Logitech M335 */
3330 LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3331 { /* Mouse Logitech M515 */
3332 LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
3333 { /* Mouse logitech M560 */ 3103 { /* Mouse logitech M560 */
3334 LDJ_DEVICE(0x402d), 3104 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3335 .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 3105 USB_VENDOR_ID_LOGITECH, 0x402d),
3336 | HIDPP_QUIRK_HI_RES_SCROLL_X2120 }, 3106 .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
3337 { /* Mouse Logitech M705 (firmware RQM17) */
3338 LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
3339 { /* Mouse Logitech M705 (firmware RQM67) */
3340 LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3341 { /* Mouse Logitech M720 */
3342 LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3343 { /* Mouse Logitech MX Anywhere 2 */
3344 LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3345 { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3346 { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3347 { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3348 { /* Mouse Logitech MX Anywhere 2S */
3349 LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3350 { /* Mouse Logitech MX Master */
3351 LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3352 { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3353 { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3354 { /* Mouse Logitech MX Master 2S */
3355 LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3356 { /* Mouse Logitech Performance MX */
3357 LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
3358 { /* Keyboard logitech K400 */ 3107 { /* Keyboard logitech K400 */
3359 LDJ_DEVICE(0x4024), 3108 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3109 USB_VENDOR_ID_LOGITECH, 0x4024),
3360 .driver_data = HIDPP_QUIRK_CLASS_K400 }, 3110 .driver_data = HIDPP_QUIRK_CLASS_K400 },
3361 { /* Solar Keyboard Logitech K750 */ 3111 { /* Solar Keyboard Logitech K750 */
3362 LDJ_DEVICE(0x4002), 3112 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3113 USB_VENDOR_ID_LOGITECH, 0x4002),
3363 .driver_data = HIDPP_QUIRK_CLASS_K750 }, 3114 .driver_data = HIDPP_QUIRK_CLASS_K750 },
3364 3115
3365 { LDJ_DEVICE(HID_ANY_ID) }, 3116 { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3117 USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
3366 3118
3367 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), 3119 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
3368 .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, 3120 .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
@@ -3371,19 +3123,12 @@ static const struct hid_device_id hidpp_devices[] = {
3371 3123
3372MODULE_DEVICE_TABLE(hid, hidpp_devices); 3124MODULE_DEVICE_TABLE(hid, hidpp_devices);
3373 3125
3374static const struct hid_usage_id hidpp_usages[] = {
3375 { HID_GD_WHEEL, EV_REL, REL_WHEEL },
3376 { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
3377};
3378
3379static struct hid_driver hidpp_driver = { 3126static struct hid_driver hidpp_driver = {
3380 .name = "logitech-hidpp-device", 3127 .name = "logitech-hidpp-device",
3381 .id_table = hidpp_devices, 3128 .id_table = hidpp_devices,
3382 .probe = hidpp_probe, 3129 .probe = hidpp_probe,
3383 .remove = hidpp_remove, 3130 .remove = hidpp_remove,
3384 .raw_event = hidpp_raw_event, 3131 .raw_event = hidpp_raw_event,
3385 .usage_table = hidpp_usages,
3386 .event = hidpp_event,
3387 .input_configured = hidpp_input_configured, 3132 .input_configured = hidpp_input_configured,
3388 .input_mapping = hidpp_input_mapping, 3133 .input_mapping = hidpp_input_mapping,
3389 .input_mapped = hidpp_input_mapped, 3134 .input_mapped = hidpp_input_mapped,
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f7c6de2b6730..dca0a3a90fb8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1814,6 +1814,12 @@ static const struct hid_device_id mt_devices[] = {
1814 MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, 1814 MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
1815 USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, 1815 USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
1816 1816
1817 /* Cirque devices */
1818 { .driver_data = MT_CLS_WIN_8_DUAL,
1819 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
1820 I2C_VENDOR_ID_CIRQUE,
1821 I2C_PRODUCT_ID_CIRQUE_121F) },
1822
1817 /* CJTouch panels */ 1823 /* CJTouch panels */
1818 { .driver_data = MT_CLS_NSMU, 1824 { .driver_data = MT_CLS_NSMU,
1819 MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH, 1825 MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 8237dd86fb17..c85a79986b6a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -107,6 +107,7 @@ static const struct hid_device_id hid_quirks[] = {
107 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL }, 107 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
108 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL }, 108 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
109 { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, 109 { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
110 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
110 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, 111 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
111 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS }, 112 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
112 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, 113 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
@@ -129,6 +130,8 @@ static const struct hid_device_id hid_quirks[] = {
129 { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS }, 130 { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
130 { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, 131 { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
131 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL }, 132 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
133 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
134 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
132 { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET }, 135 { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
133 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET }, 136 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
134 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET }, 137 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index e8a114157f87..bb012bc032e0 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -358,7 +358,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
358 sensor_inst->hsdev, 358 sensor_inst->hsdev,
359 sensor_inst->hsdev->usage, 359 sensor_inst->hsdev->usage,
360 usage, report_id, 360 usage, report_id,
361 SENSOR_HUB_SYNC); 361 SENSOR_HUB_SYNC, false);
362 } else if (!strncmp(name, "units", strlen("units"))) 362 } else if (!strncmp(name, "units", strlen("units")))
363 value = sensor_inst->fields[field_index].attribute.units; 363 value = sensor_inst->fields[field_index].attribute.units;
364 else if (!strncmp(name, "unit-expo", strlen("unit-expo"))) 364 else if (!strncmp(name, "unit-expo", strlen("unit-expo")))
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 2b63487057c2..4256fdc5cd6d 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(sensor_hub_get_feature);
299int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, 299int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
300 u32 usage_id, 300 u32 usage_id,
301 u32 attr_usage_id, u32 report_id, 301 u32 attr_usage_id, u32 report_id,
302 enum sensor_hub_read_flags flag) 302 enum sensor_hub_read_flags flag,
303 bool is_signed)
303{ 304{
304 struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev); 305 struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
305 unsigned long flags; 306 unsigned long flags;
@@ -331,10 +332,16 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
331 &hsdev->pending.ready, HZ*5); 332 &hsdev->pending.ready, HZ*5);
332 switch (hsdev->pending.raw_size) { 333 switch (hsdev->pending.raw_size) {
333 case 1: 334 case 1:
334 ret_val = *(u8 *)hsdev->pending.raw_data; 335 if (is_signed)
336 ret_val = *(s8 *)hsdev->pending.raw_data;
337 else
338 ret_val = *(u8 *)hsdev->pending.raw_data;
335 break; 339 break;
336 case 2: 340 case 2:
337 ret_val = *(u16 *)hsdev->pending.raw_data; 341 if (is_signed)
342 ret_val = *(s16 *)hsdev->pending.raw_data;
343 else
344 ret_val = *(u16 *)hsdev->pending.raw_data;
338 break; 345 break;
339 case 4: 346 case 4:
340 ret_val = *(u32 *)hsdev->pending.raw_data; 347 ret_val = *(u32 *)hsdev->pending.raw_data;
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 0422ec2b13d2..dc4128bfe2ca 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -23,8 +23,9 @@
23 * In order to avoid breaking them this driver creates a layered hidraw device, 23 * In order to avoid breaking them this driver creates a layered hidraw device,
24 * so it can detect when the client is running and then: 24 * so it can detect when the client is running and then:
25 * - it will not send any command to the controller. 25 * - it will not send any command to the controller.
26 * - this input device will be disabled, to avoid double input of the same 26 * - this input device will be removed, to avoid double input of the same
27 * user action. 27 * user action.
28 * When the client is closed, this input device will be created again.
28 * 29 *
29 * For additional functions, such as changing the right-pad margin or switching 30 * For additional functions, such as changing the right-pad margin or switching
30 * the led, you can use the user-space tool at: 31 * the led, you can use the user-space tool at:
@@ -113,7 +114,7 @@ struct steam_device {
113 spinlock_t lock; 114 spinlock_t lock;
114 struct hid_device *hdev, *client_hdev; 115 struct hid_device *hdev, *client_hdev;
115 struct mutex mutex; 116 struct mutex mutex;
116 bool client_opened, input_opened; 117 bool client_opened;
117 struct input_dev __rcu *input; 118 struct input_dev __rcu *input;
118 unsigned long quirks; 119 unsigned long quirks;
119 struct work_struct work_connect; 120 struct work_struct work_connect;
@@ -279,18 +280,6 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
279 } 280 }
280} 281}
281 282
282static void steam_update_lizard_mode(struct steam_device *steam)
283{
284 mutex_lock(&steam->mutex);
285 if (!steam->client_opened) {
286 if (steam->input_opened)
287 steam_set_lizard_mode(steam, false);
288 else
289 steam_set_lizard_mode(steam, lizard_mode);
290 }
291 mutex_unlock(&steam->mutex);
292}
293
294static int steam_input_open(struct input_dev *dev) 283static int steam_input_open(struct input_dev *dev)
295{ 284{
296 struct steam_device *steam = input_get_drvdata(dev); 285 struct steam_device *steam = input_get_drvdata(dev);
@@ -301,7 +290,6 @@ static int steam_input_open(struct input_dev *dev)
301 return ret; 290 return ret;
302 291
303 mutex_lock(&steam->mutex); 292 mutex_lock(&steam->mutex);
304 steam->input_opened = true;
305 if (!steam->client_opened && lizard_mode) 293 if (!steam->client_opened && lizard_mode)
306 steam_set_lizard_mode(steam, false); 294 steam_set_lizard_mode(steam, false);
307 mutex_unlock(&steam->mutex); 295 mutex_unlock(&steam->mutex);
@@ -313,7 +301,6 @@ static void steam_input_close(struct input_dev *dev)
313 struct steam_device *steam = input_get_drvdata(dev); 301 struct steam_device *steam = input_get_drvdata(dev);
314 302
315 mutex_lock(&steam->mutex); 303 mutex_lock(&steam->mutex);
316 steam->input_opened = false;
317 if (!steam->client_opened && lizard_mode) 304 if (!steam->client_opened && lizard_mode)
318 steam_set_lizard_mode(steam, true); 305 steam_set_lizard_mode(steam, true);
319 mutex_unlock(&steam->mutex); 306 mutex_unlock(&steam->mutex);
@@ -400,7 +387,7 @@ static int steam_battery_register(struct steam_device *steam)
400 return 0; 387 return 0;
401} 388}
402 389
403static int steam_register(struct steam_device *steam) 390static int steam_input_register(struct steam_device *steam)
404{ 391{
405 struct hid_device *hdev = steam->hdev; 392 struct hid_device *hdev = steam->hdev;
406 struct input_dev *input; 393 struct input_dev *input;
@@ -414,17 +401,6 @@ static int steam_register(struct steam_device *steam)
414 return 0; 401 return 0;
415 } 402 }
416 403
417 /*
418 * Unlikely, but getting the serial could fail, and it is not so
419 * important, so make up a serial number and go on.
420 */
421 if (steam_get_serial(steam) < 0)
422 strlcpy(steam->serial_no, "XXXXXXXXXX",
423 sizeof(steam->serial_no));
424
425 hid_info(hdev, "Steam Controller '%s' connected",
426 steam->serial_no);
427
428 input = input_allocate_device(); 404 input = input_allocate_device();
429 if (!input) 405 if (!input)
430 return -ENOMEM; 406 return -ENOMEM;
@@ -492,11 +468,6 @@ static int steam_register(struct steam_device *steam)
492 goto input_register_fail; 468 goto input_register_fail;
493 469
494 rcu_assign_pointer(steam->input, input); 470 rcu_assign_pointer(steam->input, input);
495
496 /* ignore battery errors, we can live without it */
497 if (steam->quirks & STEAM_QUIRK_WIRELESS)
498 steam_battery_register(steam);
499
500 return 0; 471 return 0;
501 472
502input_register_fail: 473input_register_fail:
@@ -504,27 +475,88 @@ input_register_fail:
504 return ret; 475 return ret;
505} 476}
506 477
507static void steam_unregister(struct steam_device *steam) 478static void steam_input_unregister(struct steam_device *steam)
508{ 479{
509 struct input_dev *input; 480 struct input_dev *input;
481 rcu_read_lock();
482 input = rcu_dereference(steam->input);
483 rcu_read_unlock();
484 if (!input)
485 return;
486 RCU_INIT_POINTER(steam->input, NULL);
487 synchronize_rcu();
488 input_unregister_device(input);
489}
490
491static void steam_battery_unregister(struct steam_device *steam)
492{
510 struct power_supply *battery; 493 struct power_supply *battery;
511 494
512 rcu_read_lock(); 495 rcu_read_lock();
513 input = rcu_dereference(steam->input);
514 battery = rcu_dereference(steam->battery); 496 battery = rcu_dereference(steam->battery);
515 rcu_read_unlock(); 497 rcu_read_unlock();
516 498
517 if (battery) { 499 if (!battery)
518 RCU_INIT_POINTER(steam->battery, NULL); 500 return;
519 synchronize_rcu(); 501 RCU_INIT_POINTER(steam->battery, NULL);
520 power_supply_unregister(battery); 502 synchronize_rcu();
503 power_supply_unregister(battery);
504}
505
506static int steam_register(struct steam_device *steam)
507{
508 int ret;
509
510 /*
511 * This function can be called several times in a row with the
512 * wireless adaptor, without steam_unregister() between them, because
513 * another client send a get_connection_status command, for example.
514 * The battery and serial number are set just once per device.
515 */
516 if (!steam->serial_no[0]) {
517 /*
518 * Unlikely, but getting the serial could fail, and it is not so
519 * important, so make up a serial number and go on.
520 */
521 if (steam_get_serial(steam) < 0)
522 strlcpy(steam->serial_no, "XXXXXXXXXX",
523 sizeof(steam->serial_no));
524
525 hid_info(steam->hdev, "Steam Controller '%s' connected",
526 steam->serial_no);
527
528 /* ignore battery errors, we can live without it */
529 if (steam->quirks & STEAM_QUIRK_WIRELESS)
530 steam_battery_register(steam);
531
532 mutex_lock(&steam_devices_lock);
533 list_add(&steam->list, &steam_devices);
534 mutex_unlock(&steam_devices_lock);
521 } 535 }
522 if (input) { 536
523 RCU_INIT_POINTER(steam->input, NULL); 537 mutex_lock(&steam->mutex);
524 synchronize_rcu(); 538 if (!steam->client_opened) {
539 steam_set_lizard_mode(steam, lizard_mode);
540 ret = steam_input_register(steam);
541 } else {
542 ret = 0;
543 }
544 mutex_unlock(&steam->mutex);
545
546 return ret;
547}
548
549static void steam_unregister(struct steam_device *steam)
550{
551 steam_battery_unregister(steam);
552 steam_input_unregister(steam);
553 if (steam->serial_no[0]) {
525 hid_info(steam->hdev, "Steam Controller '%s' disconnected", 554 hid_info(steam->hdev, "Steam Controller '%s' disconnected",
526 steam->serial_no); 555 steam->serial_no);
527 input_unregister_device(input); 556 mutex_lock(&steam_devices_lock);
557 list_del(&steam->list);
558 mutex_unlock(&steam_devices_lock);
559 steam->serial_no[0] = 0;
528 } 560 }
529} 561}
530 562
@@ -600,6 +632,9 @@ static int steam_client_ll_open(struct hid_device *hdev)
600 mutex_lock(&steam->mutex); 632 mutex_lock(&steam->mutex);
601 steam->client_opened = true; 633 steam->client_opened = true;
602 mutex_unlock(&steam->mutex); 634 mutex_unlock(&steam->mutex);
635
636 steam_input_unregister(steam);
637
603 return ret; 638 return ret;
604} 639}
605 640
@@ -609,13 +644,13 @@ static void steam_client_ll_close(struct hid_device *hdev)
609 644
610 mutex_lock(&steam->mutex); 645 mutex_lock(&steam->mutex);
611 steam->client_opened = false; 646 steam->client_opened = false;
612 if (steam->input_opened)
613 steam_set_lizard_mode(steam, false);
614 else
615 steam_set_lizard_mode(steam, lizard_mode);
616 mutex_unlock(&steam->mutex); 647 mutex_unlock(&steam->mutex);
617 648
618 hid_hw_close(steam->hdev); 649 hid_hw_close(steam->hdev);
650 if (steam->connected) {
651 steam_set_lizard_mode(steam, lizard_mode);
652 steam_input_register(steam);
653 }
619} 654}
620 655
621static int steam_client_ll_raw_request(struct hid_device *hdev, 656static int steam_client_ll_raw_request(struct hid_device *hdev,
@@ -744,11 +779,6 @@ static int steam_probe(struct hid_device *hdev,
744 } 779 }
745 } 780 }
746 781
747 mutex_lock(&steam_devices_lock);
748 steam_update_lizard_mode(steam);
749 list_add(&steam->list, &steam_devices);
750 mutex_unlock(&steam_devices_lock);
751
752 return 0; 782 return 0;
753 783
754hid_hw_open_fail: 784hid_hw_open_fail:
@@ -774,10 +804,6 @@ static void steam_remove(struct hid_device *hdev)
774 return; 804 return;
775 } 805 }
776 806
777 mutex_lock(&steam_devices_lock);
778 list_del(&steam->list);
779 mutex_unlock(&steam_devices_lock);
780
781 hid_destroy_device(steam->client_hdev); 807 hid_destroy_device(steam->client_hdev);
782 steam->client_opened = false; 808 steam->client_opened = false;
783 cancel_work_sync(&steam->work_connect); 809 cancel_work_sync(&steam->work_connect);
@@ -792,12 +818,14 @@ static void steam_remove(struct hid_device *hdev)
792static void steam_do_connect_event(struct steam_device *steam, bool connected) 818static void steam_do_connect_event(struct steam_device *steam, bool connected)
793{ 819{
794 unsigned long flags; 820 unsigned long flags;
821 bool changed;
795 822
796 spin_lock_irqsave(&steam->lock, flags); 823 spin_lock_irqsave(&steam->lock, flags);
824 changed = steam->connected != connected;
797 steam->connected = connected; 825 steam->connected = connected;
798 spin_unlock_irqrestore(&steam->lock, flags); 826 spin_unlock_irqrestore(&steam->lock, flags);
799 827
800 if (schedule_work(&steam->work_connect) == 0) 828 if (changed && schedule_work(&steam->work_connect) == 0)
801 dbg_hid("%s: connected=%d event already queued\n", 829 dbg_hid("%s: connected=%d event already queued\n",
802 __func__, connected); 830 __func__, connected);
803} 831}
@@ -1019,13 +1047,8 @@ static int steam_raw_event(struct hid_device *hdev,
1019 return 0; 1047 return 0;
1020 rcu_read_lock(); 1048 rcu_read_lock();
1021 input = rcu_dereference(steam->input); 1049 input = rcu_dereference(steam->input);
1022 if (likely(input)) { 1050 if (likely(input))
1023 steam_do_input_event(steam, input, data); 1051 steam_do_input_event(steam, input, data);
1024 } else {
1025 dbg_hid("%s: input data without connect event\n",
1026 __func__);
1027 steam_do_connect_event(steam, true);
1028 }
1029 rcu_read_unlock(); 1052 rcu_read_unlock();
1030 break; 1053 break;
1031 case STEAM_EV_CONNECT: 1054 case STEAM_EV_CONNECT:
@@ -1074,7 +1097,10 @@ static int steam_param_set_lizard_mode(const char *val,
1074 1097
1075 mutex_lock(&steam_devices_lock); 1098 mutex_lock(&steam_devices_lock);
1076 list_for_each_entry(steam, &steam_devices, list) { 1099 list_for_each_entry(steam, &steam_devices, list) {
1077 steam_update_lizard_mode(steam); 1100 mutex_lock(&steam->mutex);
1101 if (!steam->client_opened)
1102 steam_set_lizard_mode(steam, lizard_mode);
1103 mutex_unlock(&steam->mutex);
1078 } 1104 }
1079 mutex_unlock(&steam_devices_lock); 1105 mutex_unlock(&steam_devices_lock);
1080 return 0; 1106 return 0;
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 3cde7c1b9c33..8555ce7e737b 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -177,6 +177,8 @@ static const struct i2c_hid_quirks {
177 I2C_HID_QUIRK_NO_RUNTIME_PM }, 177 I2C_HID_QUIRK_NO_RUNTIME_PM },
178 { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33, 178 { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, 179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
180 { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
181 I2C_HID_QUIRK_NO_RUNTIME_PM },
180 { 0, 0 } 182 { 0, 0 }
181}; 183};
182 184
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 3c5507313606..840634e0f1e3 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14#include <linux/compat.h> 14#include <linux/compat.h>
15#include <linux/cred.h>
15#include <linux/device.h> 16#include <linux/device.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
17#include <linux/hid.h> 18#include <linux/hid.h>
@@ -496,12 +497,13 @@ static int uhid_dev_create2(struct uhid_device *uhid,
496 goto err_free; 497 goto err_free;
497 } 498 }
498 499
499 len = min(sizeof(hid->name), sizeof(ev->u.create2.name)); 500 /* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
500 strlcpy(hid->name, ev->u.create2.name, len); 501 len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
501 len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)); 502 strncpy(hid->name, ev->u.create2.name, len);
502 strlcpy(hid->phys, ev->u.create2.phys, len); 503 len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
503 len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)); 504 strncpy(hid->phys, ev->u.create2.phys, len);
504 strlcpy(hid->uniq, ev->u.create2.uniq, len); 505 len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
506 strncpy(hid->uniq, ev->u.create2.uniq, len);
505 507
506 hid->ll_driver = &uhid_hid_driver; 508 hid->ll_driver = &uhid_hid_driver;
507 hid->bus = ev->u.create2.bus; 509 hid->bus = ev->u.create2.bus;
@@ -722,6 +724,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
722 724
723 switch (uhid->input_buf.type) { 725 switch (uhid->input_buf.type) {
724 case UHID_CREATE: 726 case UHID_CREATE:
727 /*
728 * 'struct uhid_create_req' contains a __user pointer which is
729 * copied from, so it's unsafe to allow this with elevated
730 * privileges (e.g. from a setuid binary) or via kernel_write().
731 */
732 if (file->f_cred != current_cred() || uaccess_kernel()) {
733 pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
734 task_tgid_vnr(current), current->comm);
735 ret = -EACCES;
736 goto unlock;
737 }
725 ret = uhid_dev_create(uhid, &uhid->input_buf); 738 ret = uhid_dev_create(uhid, &uhid->input_buf);
726 break; 739 break;
727 case UHID_CREATE2: 740 case UHID_CREATE2:
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index de8193f3b838..fe00b12e4417 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -516,6 +516,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
516 } 516 }
517 wait_for_completion(&msginfo->waitevent); 517 wait_for_completion(&msginfo->waitevent);
518 518
519 if (msginfo->response.gpadl_created.creation_status != 0) {
520 pr_err("Failed to establish GPADL: err = 0x%x\n",
521 msginfo->response.gpadl_created.creation_status);
522
523 ret = -EDQUOT;
524 goto cleanup;
525 }
526
519 if (channel->rescind) { 527 if (channel->rescind) {
520 ret = -ENODEV; 528 ret = -ENODEV;
521 goto cleanup; 529 goto cleanup;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index a7513a8a8e37..d6106e1a0d4a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,6 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
353 353
354 out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled; 354 out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
355 355
356 /* fallthrough */
357
358 case KVP_OP_GET_IP_INFO:
356 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id, 359 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
357 MAX_ADAPTER_ID_SIZE, 360 MAX_ADAPTER_ID_SIZE,
358 UTF16_LITTLE_ENDIAN, 361 UTF16_LITTLE_ENDIAN,
@@ -405,7 +408,11 @@ kvp_send_key(struct work_struct *dummy)
405 process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO); 408 process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
406 break; 409 break;
407 case KVP_OP_GET_IP_INFO: 410 case KVP_OP_GET_IP_INFO:
408 /* We only need to pass on message->kvp_hdr.operation. */ 411 /*
412 * We only need to pass on the info of operation, adapter_id
413 * and addr_family to the userland kvp daemon.
414 */
415 process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
409 break; 416 break;
410 case KVP_OP_SET: 417 case KVP_OP_SET:
411 switch (in_msg->body.kvp_set.data.value_type) { 418 switch (in_msg->body.kvp_set.data.value_type) {
@@ -446,9 +453,9 @@ kvp_send_key(struct work_struct *dummy)
446 453
447 } 454 }
448 455
449 break; 456 /*
450 457 * The key is always a string - utf16 encoding.
451 case KVP_OP_GET: 458 */
452 message->body.kvp_set.data.key_size = 459 message->body.kvp_set.data.key_size =
453 utf16s_to_utf8s( 460 utf16s_to_utf8s(
454 (wchar_t *)in_msg->body.kvp_set.data.key, 461 (wchar_t *)in_msg->body.kvp_set.data.key,
@@ -456,6 +463,17 @@ kvp_send_key(struct work_struct *dummy)
456 UTF16_LITTLE_ENDIAN, 463 UTF16_LITTLE_ENDIAN,
457 message->body.kvp_set.data.key, 464 message->body.kvp_set.data.key,
458 HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1; 465 HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
466
467 break;
468
469 case KVP_OP_GET:
470 message->body.kvp_get.data.key_size =
471 utf16s_to_utf8s(
472 (wchar_t *)in_msg->body.kvp_get.data.key,
473 in_msg->body.kvp_get.data.key_size,
474 UTF16_LITTLE_ENDIAN,
475 message->body.kvp_get.data.key,
476 HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
459 break; 477 break;
460 478
461 case KVP_OP_DELETE: 479 case KVP_OP_DELETE:
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 71d3445ba869..07ee19573b3f 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
274 break; 274 break;
275 case INA2XX_CURRENT: 275 case INA2XX_CURRENT:
276 /* signed register, result in mA */ 276 /* signed register, result in mA */
277 val = regval * data->current_lsb_uA; 277 val = (s16)regval * data->current_lsb_uA;
278 val = DIV_ROUND_CLOSEST(val, 1000); 278 val = DIV_ROUND_CLOSEST(val, 1000);
279 break; 279 break;
280 case INA2XX_CALIBRATION: 280 case INA2XX_CALIBRATION:
@@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client,
491 } 491 }
492 492
493 data->groups[group++] = &ina2xx_group; 493 data->groups[group++] = &ina2xx_group;
494 if (id->driver_data == ina226) 494 if (chip == ina226)
495 data->groups[group++] = &ina226_group; 495 data->groups[group++] = &ina226_group;
496 496
497 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, 497 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
@@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client,
500 return PTR_ERR(hwmon_dev); 500 return PTR_ERR(hwmon_dev);
501 501
502 dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", 502 dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
503 id->name, data->rshunt); 503 client->name, data->rshunt);
504 504
505 return 0; 505 return 0;
506} 506}
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index de46577c7d5a..d8fa4bea4bc8 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -51,7 +51,7 @@
51 */ 51 */
52#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \ 52#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
53 ((rval) + (s)) * (d))) 53 ((rval) + (s)) * (d)))
54#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask))) 54#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask)))
55#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \ 55#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \
56 MLXREG_FAN_MAX_STATE, \ 56 MLXREG_FAN_MAX_STATE, \
57 MLXREG_FAN_MAX_DUTY)) 57 MLXREG_FAN_MAX_DUTY))
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index be5ba4690895..0d0457245e7d 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
115{ 115{
116 struct device *dev = &pdev->dev; 116 struct device *dev = &pdev->dev;
117 struct rpi_hwmon_data *data; 117 struct rpi_hwmon_data *data;
118 int ret;
119 118
120 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 119 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
121 if (!data) 120 if (!data)
@@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
124 /* Parent driver assure that firmware is correct */ 123 /* Parent driver assure that firmware is correct */
125 data->fw = dev_get_drvdata(dev->parent); 124 data->fw = dev_get_drvdata(dev->parent);
126 125
127 /* Init throttled */
128 ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
129 &data->last_throttled,
130 sizeof(data->last_throttled));
131
132 data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt", 126 data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt",
133 data, 127 data,
134 &rpi_chip_info, 128 &rpi_chip_info,
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 49276bbdac3d..1bb80f992aa8 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
1691 * somewhere else in the code 1691 * somewhere else in the code
1692 */ 1692 */
1693#define SENSOR_ATTR_TEMP(index) { \ 1693#define SENSOR_ATTR_TEMP(index) { \
1694 SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ 1694 SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
1695 show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ 1695 show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
1696 SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \ 1696 SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
1697 NULL, TEMP_READ, index - 1), \ 1697 NULL, TEMP_READ, index - 1), \
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 41d97faf5013..38ff374a3ca4 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -149,6 +149,7 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
149 int report_id = -1; 149 int report_id = -1;
150 u32 address; 150 u32 address;
151 int ret_type; 151 int ret_type;
152 s32 min;
152 struct hid_sensor_hub_device *hsdev = 153 struct hid_sensor_hub_device *hsdev =
153 accel_state->common_attributes.hsdev; 154 accel_state->common_attributes.hsdev;
154 155
@@ -158,12 +159,14 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
158 case IIO_CHAN_INFO_RAW: 159 case IIO_CHAN_INFO_RAW:
159 hid_sensor_power_state(&accel_state->common_attributes, true); 160 hid_sensor_power_state(&accel_state->common_attributes, true);
160 report_id = accel_state->accel[chan->scan_index].report_id; 161 report_id = accel_state->accel[chan->scan_index].report_id;
162 min = accel_state->accel[chan->scan_index].logical_minimum;
161 address = accel_3d_addresses[chan->scan_index]; 163 address = accel_3d_addresses[chan->scan_index];
162 if (report_id >= 0) 164 if (report_id >= 0)
163 *val = sensor_hub_input_attr_get_raw_value( 165 *val = sensor_hub_input_attr_get_raw_value(
164 accel_state->common_attributes.hsdev, 166 accel_state->common_attributes.hsdev,
165 hsdev->usage, address, report_id, 167 hsdev->usage, address, report_id,
166 SENSOR_HUB_SYNC); 168 SENSOR_HUB_SYNC,
169 min < 0);
167 else { 170 else {
168 *val = 0; 171 *val = 0;
169 hid_sensor_power_state(&accel_state->common_attributes, 172 hid_sensor_power_state(&accel_state->common_attributes,
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 36941e69f959..88e857c4baf4 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -111,6 +111,7 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret_type; 113 int ret_type;
114 s32 min;
114 115
115 *val = 0; 116 *val = 0;
116 *val2 = 0; 117 *val2 = 0;
@@ -118,13 +119,15 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
118 case IIO_CHAN_INFO_RAW: 119 case IIO_CHAN_INFO_RAW:
119 hid_sensor_power_state(&gyro_state->common_attributes, true); 120 hid_sensor_power_state(&gyro_state->common_attributes, true);
120 report_id = gyro_state->gyro[chan->scan_index].report_id; 121 report_id = gyro_state->gyro[chan->scan_index].report_id;
122 min = gyro_state->gyro[chan->scan_index].logical_minimum;
121 address = gyro_3d_addresses[chan->scan_index]; 123 address = gyro_3d_addresses[chan->scan_index];
122 if (report_id >= 0) 124 if (report_id >= 0)
123 *val = sensor_hub_input_attr_get_raw_value( 125 *val = sensor_hub_input_attr_get_raw_value(
124 gyro_state->common_attributes.hsdev, 126 gyro_state->common_attributes.hsdev,
125 HID_USAGE_SENSOR_GYRO_3D, address, 127 HID_USAGE_SENSOR_GYRO_3D, address,
126 report_id, 128 report_id,
127 SENSOR_HUB_SYNC); 129 SENSOR_HUB_SYNC,
130 min < 0);
128 else { 131 else {
129 *val = 0; 132 *val = 0;
130 hid_sensor_power_state(&gyro_state->common_attributes, 133 hid_sensor_power_state(&gyro_state->common_attributes,
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index beab6d6fd6e1..4bc95f31c730 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -75,7 +75,8 @@ static int humidity_read_raw(struct iio_dev *indio_dev,
75 HID_USAGE_SENSOR_HUMIDITY, 75 HID_USAGE_SENSOR_HUMIDITY,
76 HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY, 76 HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY,
77 humid_st->humidity_attr.report_id, 77 humid_st->humidity_attr.report_id,
78 SENSOR_HUB_SYNC); 78 SENSOR_HUB_SYNC,
79 humid_st->humidity_attr.logical_minimum < 0);
79 hid_sensor_power_state(&humid_st->common_attributes, false); 80 hid_sensor_power_state(&humid_st->common_attributes, false);
80 81
81 return IIO_VAL_INT; 82 return IIO_VAL_INT;
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 406caaee9a3c..94f33250ba5a 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -93,6 +93,7 @@ static int als_read_raw(struct iio_dev *indio_dev,
93 int report_id = -1; 93 int report_id = -1;
94 u32 address; 94 u32 address;
95 int ret_type; 95 int ret_type;
96 s32 min;
96 97
97 *val = 0; 98 *val = 0;
98 *val2 = 0; 99 *val2 = 0;
@@ -102,8 +103,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
102 case CHANNEL_SCAN_INDEX_INTENSITY: 103 case CHANNEL_SCAN_INDEX_INTENSITY:
103 case CHANNEL_SCAN_INDEX_ILLUM: 104 case CHANNEL_SCAN_INDEX_ILLUM:
104 report_id = als_state->als_illum.report_id; 105 report_id = als_state->als_illum.report_id;
105 address = 106 min = als_state->als_illum.logical_minimum;
106 HID_USAGE_SENSOR_LIGHT_ILLUM; 107 address = HID_USAGE_SENSOR_LIGHT_ILLUM;
107 break; 108 break;
108 default: 109 default:
109 report_id = -1; 110 report_id = -1;
@@ -116,7 +117,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
116 als_state->common_attributes.hsdev, 117 als_state->common_attributes.hsdev,
117 HID_USAGE_SENSOR_ALS, address, 118 HID_USAGE_SENSOR_ALS, address,
118 report_id, 119 report_id,
119 SENSOR_HUB_SYNC); 120 SENSOR_HUB_SYNC,
121 min < 0);
120 hid_sensor_power_state(&als_state->common_attributes, 122 hid_sensor_power_state(&als_state->common_attributes,
121 false); 123 false);
122 } else { 124 } else {
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 45107f7537b5..cf5a0c242609 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -73,6 +73,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
73 int report_id = -1; 73 int report_id = -1;
74 u32 address; 74 u32 address;
75 int ret_type; 75 int ret_type;
76 s32 min;
76 77
77 *val = 0; 78 *val = 0;
78 *val2 = 0; 79 *val2 = 0;
@@ -81,8 +82,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
81 switch (chan->scan_index) { 82 switch (chan->scan_index) {
82 case CHANNEL_SCAN_INDEX_PRESENCE: 83 case CHANNEL_SCAN_INDEX_PRESENCE:
83 report_id = prox_state->prox_attr.report_id; 84 report_id = prox_state->prox_attr.report_id;
84 address = 85 min = prox_state->prox_attr.logical_minimum;
85 HID_USAGE_SENSOR_HUMAN_PRESENCE; 86 address = HID_USAGE_SENSOR_HUMAN_PRESENCE;
86 break; 87 break;
87 default: 88 default:
88 report_id = -1; 89 report_id = -1;
@@ -95,7 +96,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
95 prox_state->common_attributes.hsdev, 96 prox_state->common_attributes.hsdev,
96 HID_USAGE_SENSOR_PROX, address, 97 HID_USAGE_SENSOR_PROX, address,
97 report_id, 98 report_id,
98 SENSOR_HUB_SYNC); 99 SENSOR_HUB_SYNC,
100 min < 0);
99 hid_sensor_power_state(&prox_state->common_attributes, 101 hid_sensor_power_state(&prox_state->common_attributes,
100 false); 102 false);
101 } else { 103 } else {
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index d55c4885211a..f3c0d41e5a8c 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -163,21 +163,23 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
163 int report_id = -1; 163 int report_id = -1;
164 u32 address; 164 u32 address;
165 int ret_type; 165 int ret_type;
166 s32 min;
166 167
167 *val = 0; 168 *val = 0;
168 *val2 = 0; 169 *val2 = 0;
169 switch (mask) { 170 switch (mask) {
170 case IIO_CHAN_INFO_RAW: 171 case IIO_CHAN_INFO_RAW:
171 hid_sensor_power_state(&magn_state->magn_flux_attributes, true); 172 hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
172 report_id = 173 report_id = magn_state->magn[chan->address].report_id;
173 magn_state->magn[chan->address].report_id; 174 min = magn_state->magn[chan->address].logical_minimum;
174 address = magn_3d_addresses[chan->address]; 175 address = magn_3d_addresses[chan->address];
175 if (report_id >= 0) 176 if (report_id >= 0)
176 *val = sensor_hub_input_attr_get_raw_value( 177 *val = sensor_hub_input_attr_get_raw_value(
177 magn_state->magn_flux_attributes.hsdev, 178 magn_state->magn_flux_attributes.hsdev,
178 HID_USAGE_SENSOR_COMPASS_3D, address, 179 HID_USAGE_SENSOR_COMPASS_3D, address,
179 report_id, 180 report_id,
180 SENSOR_HUB_SYNC); 181 SENSOR_HUB_SYNC,
182 min < 0);
181 else { 183 else {
182 *val = 0; 184 *val = 0;
183 hid_sensor_power_state( 185 hid_sensor_power_state(
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index 0a9e8fadfa9d..37ab30566464 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
30 return st_sensors_set_dataready_irq(indio_dev, state); 30 return st_sensors_set_dataready_irq(indio_dev, state);
31} 31}
32 32
33static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
34{
35 return st_sensors_set_enable(indio_dev, true);
36}
37
38static int st_magn_buffer_postenable(struct iio_dev *indio_dev) 33static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
39{ 34{
40 int err; 35 int err;
@@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
50 if (err < 0) 45 if (err < 0)
51 goto st_magn_buffer_postenable_error; 46 goto st_magn_buffer_postenable_error;
52 47
53 return err; 48 return st_sensors_set_enable(indio_dev, true);
54 49
55st_magn_buffer_postenable_error: 50st_magn_buffer_postenable_error:
56 kfree(mdata->buffer_data); 51 kfree(mdata->buffer_data);
@@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
63 int err; 58 int err;
64 struct st_sensor_data *mdata = iio_priv(indio_dev); 59 struct st_sensor_data *mdata = iio_priv(indio_dev);
65 60
66 err = iio_triggered_buffer_predisable(indio_dev); 61 err = st_sensors_set_enable(indio_dev, false);
67 if (err < 0) 62 if (err < 0)
68 goto st_magn_buffer_predisable_error; 63 goto st_magn_buffer_predisable_error;
69 64
70 err = st_sensors_set_enable(indio_dev, false); 65 err = iio_triggered_buffer_predisable(indio_dev);
71 66
72st_magn_buffer_predisable_error: 67st_magn_buffer_predisable_error:
73 kfree(mdata->buffer_data); 68 kfree(mdata->buffer_data);
@@ -75,7 +70,6 @@ st_magn_buffer_predisable_error:
75} 70}
76 71
77static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { 72static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
78 .preenable = &st_magn_buffer_preenable,
79 .postenable = &st_magn_buffer_postenable, 73 .postenable = &st_magn_buffer_postenable,
80 .predisable = &st_magn_buffer_predisable, 74 .predisable = &st_magn_buffer_predisable,
81}; 75};
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 1e5451d1ff88..bdc5e4554ee4 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -111,21 +111,23 @@ static int incl_3d_read_raw(struct iio_dev *indio_dev,
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret_type; 113 int ret_type;
114 s32 min;
114 115
115 *val = 0; 116 *val = 0;
116 *val2 = 0; 117 *val2 = 0;
117 switch (mask) { 118 switch (mask) {
118 case IIO_CHAN_INFO_RAW: 119 case IIO_CHAN_INFO_RAW:
119 hid_sensor_power_state(&incl_state->common_attributes, true); 120 hid_sensor_power_state(&incl_state->common_attributes, true);
120 report_id = 121 report_id = incl_state->incl[chan->scan_index].report_id;
121 incl_state->incl[chan->scan_index].report_id; 122 min = incl_state->incl[chan->scan_index].logical_minimum;
122 address = incl_3d_addresses[chan->scan_index]; 123 address = incl_3d_addresses[chan->scan_index];
123 if (report_id >= 0) 124 if (report_id >= 0)
124 *val = sensor_hub_input_attr_get_raw_value( 125 *val = sensor_hub_input_attr_get_raw_value(
125 incl_state->common_attributes.hsdev, 126 incl_state->common_attributes.hsdev,
126 HID_USAGE_SENSOR_INCLINOMETER_3D, address, 127 HID_USAGE_SENSOR_INCLINOMETER_3D, address,
127 report_id, 128 report_id,
128 SENSOR_HUB_SYNC); 129 SENSOR_HUB_SYNC,
130 min < 0);
129 else { 131 else {
130 hid_sensor_power_state(&incl_state->common_attributes, 132 hid_sensor_power_state(&incl_state->common_attributes,
131 false); 133 false);
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 4c437918f1d2..d7b1c00ceb4d 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -77,6 +77,7 @@ static int press_read_raw(struct iio_dev *indio_dev,
77 int report_id = -1; 77 int report_id = -1;
78 u32 address; 78 u32 address;
79 int ret_type; 79 int ret_type;
80 s32 min;
80 81
81 *val = 0; 82 *val = 0;
82 *val2 = 0; 83 *val2 = 0;
@@ -85,8 +86,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
85 switch (chan->scan_index) { 86 switch (chan->scan_index) {
86 case CHANNEL_SCAN_INDEX_PRESSURE: 87 case CHANNEL_SCAN_INDEX_PRESSURE:
87 report_id = press_state->press_attr.report_id; 88 report_id = press_state->press_attr.report_id;
88 address = 89 min = press_state->press_attr.logical_minimum;
89 HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE; 90 address = HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
90 break; 91 break;
91 default: 92 default:
92 report_id = -1; 93 report_id = -1;
@@ -99,7 +100,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
99 press_state->common_attributes.hsdev, 100 press_state->common_attributes.hsdev,
100 HID_USAGE_SENSOR_PRESSURE, address, 101 HID_USAGE_SENSOR_PRESSURE, address,
101 report_id, 102 report_id,
102 SENSOR_HUB_SYNC); 103 SENSOR_HUB_SYNC,
104 min < 0);
103 hid_sensor_power_state(&press_state->common_attributes, 105 hid_sensor_power_state(&press_state->common_attributes,
104 false); 106 false);
105 } else { 107 } else {
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index beaf6fd3e337..b592fc4f007e 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -76,7 +76,8 @@ static int temperature_read_raw(struct iio_dev *indio_dev,
76 HID_USAGE_SENSOR_TEMPERATURE, 76 HID_USAGE_SENSOR_TEMPERATURE,
77 HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE, 77 HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE,
78 temp_st->temperature_attr.report_id, 78 temp_st->temperature_attr.report_id,
79 SENSOR_HUB_SYNC); 79 SENSOR_HUB_SYNC,
80 temp_st->temperature_attr.logical_minimum < 0);
80 hid_sensor_power_state( 81 hid_sensor_power_state(
81 &temp_st->common_attributes, 82 &temp_st->common_attributes,
82 false); 83 false);
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index ee366199b169..25d43c8f1c2a 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -767,8 +767,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
767 767
768 case NETDEV_CHANGEADDR: 768 case NETDEV_CHANGEADDR:
769 cmds[0] = netdev_del_cmd; 769 cmds[0] = netdev_del_cmd;
770 cmds[1] = add_default_gid_cmd; 770 if (ndev->reg_state == NETREG_REGISTERED) {
771 cmds[2] = add_cmd; 771 cmds[1] = add_default_gid_cmd;
772 cmds[2] = add_cmd;
773 }
772 break; 774 break;
773 775
774 case NETDEV_CHANGEUPPER: 776 case NETDEV_CHANGEUPPER:
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2b4c5e7dd5a1..676c1fd1119d 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -137,15 +137,6 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
137 up_read(&per_mm->umem_rwsem); 137 up_read(&per_mm->umem_rwsem);
138} 138}
139 139
140static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
141 u64 end, void *cookie)
142{
143 ib_umem_notifier_start_account(item);
144 item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
145 ib_umem_notifier_end_account(item);
146 return 0;
147}
148
149static int invalidate_range_start_trampoline(struct ib_umem_odp *item, 140static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
150 u64 start, u64 end, void *cookie) 141 u64 start, u64 end, void *cookie)
151{ 142{
@@ -553,12 +544,13 @@ out:
553 put_page(page); 544 put_page(page);
554 545
555 if (remove_existing_mapping && umem->context->invalidate_range) { 546 if (remove_existing_mapping && umem->context->invalidate_range) {
556 invalidate_page_trampoline( 547 ib_umem_notifier_start_account(umem_odp);
548 umem->context->invalidate_range(
557 umem_odp, 549 umem_odp,
558 ib_umem_start(umem) + (page_index >> umem->page_shift), 550 ib_umem_start(umem) + (page_index << umem->page_shift),
559 ib_umem_start(umem) + ((page_index + 1) >> 551 ib_umem_start(umem) +
560 umem->page_shift), 552 ((page_index + 1) << umem->page_shift));
561 NULL); 553 ib_umem_notifier_end_account(umem_odp);
562 ret = -EAGAIN; 554 ret = -EAGAIN;
563 } 555 }
564 556
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index cf2282654210..77f095e5fbe3 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1268,6 +1268,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1268 /* Registered a new RoCE device instance to netdev */ 1268 /* Registered a new RoCE device instance to netdev */
1269 rc = bnxt_re_register_netdev(rdev); 1269 rc = bnxt_re_register_netdev(rdev);
1270 if (rc) { 1270 if (rc) {
1271 rtnl_unlock();
1271 pr_err("Failed to register with netedev: %#x\n", rc); 1272 pr_err("Failed to register with netedev: %#x\n", rc);
1272 return -EINVAL; 1273 return -EINVAL;
1273 } 1274 }
@@ -1466,6 +1467,7 @@ static void bnxt_re_task(struct work_struct *work)
1466 "Failed to register with IB: %#x", rc); 1467 "Failed to register with IB: %#x", rc);
1467 bnxt_re_remove_one(rdev); 1468 bnxt_re_remove_one(rdev);
1468 bnxt_re_dev_unreg(rdev); 1469 bnxt_re_dev_unreg(rdev);
1470 goto exit;
1469 } 1471 }
1470 break; 1472 break;
1471 case NETDEV_UP: 1473 case NETDEV_UP:
@@ -1489,6 +1491,7 @@ static void bnxt_re_task(struct work_struct *work)
1489 } 1491 }
1490 smp_mb__before_atomic(); 1492 smp_mb__before_atomic();
1491 atomic_dec(&rdev->sched_count); 1493 atomic_dec(&rdev->sched_count);
1494exit:
1492 kfree(re_work); 1495 kfree(re_work);
1493} 1496}
1494 1497
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a4c62ae23a9a..3beb1523e17c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1756,10 +1756,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1756 return hns_roce_cmq_send(hr_dev, &desc, 1); 1756 return hns_roce_cmq_send(hr_dev, &desc, 1);
1757} 1757}
1758 1758
1759static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, 1759static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
1760 unsigned long mtpt_idx) 1760 struct hns_roce_mr *mr)
1761{ 1761{
1762 struct hns_roce_v2_mpt_entry *mpt_entry;
1763 struct scatterlist *sg; 1762 struct scatterlist *sg;
1764 u64 page_addr; 1763 u64 page_addr;
1765 u64 *pages; 1764 u64 *pages;
@@ -1767,6 +1766,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1767 int len; 1766 int len;
1768 int entry; 1767 int entry;
1769 1768
1769 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1770 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1771 roce_set_field(mpt_entry->byte_48_mode_ba,
1772 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
1773 upper_32_bits(mr->pbl_ba >> 3));
1774
1775 pages = (u64 *)__get_free_page(GFP_KERNEL);
1776 if (!pages)
1777 return -ENOMEM;
1778
1779 i = 0;
1780 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1781 len = sg_dma_len(sg) >> PAGE_SHIFT;
1782 for (j = 0; j < len; ++j) {
1783 page_addr = sg_dma_address(sg) +
1784 (j << mr->umem->page_shift);
1785 pages[i] = page_addr >> 6;
1786 /* Record the first 2 entry directly to MTPT table */
1787 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1788 goto found;
1789 i++;
1790 }
1791 }
1792found:
1793 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1794 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1795 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
1796
1797 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1798 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1799 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1800 roce_set_field(mpt_entry->byte_64_buf_pa1,
1801 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1802 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1803 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1804
1805 free_page((unsigned long)pages);
1806
1807 return 0;
1808}
1809
1810static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1811 unsigned long mtpt_idx)
1812{
1813 struct hns_roce_v2_mpt_entry *mpt_entry;
1814 int ret;
1815
1770 mpt_entry = mb_buf; 1816 mpt_entry = mb_buf;
1771 memset(mpt_entry, 0, sizeof(*mpt_entry)); 1817 memset(mpt_entry, 0, sizeof(*mpt_entry));
1772 1818
@@ -1781,7 +1827,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1781 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET); 1827 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
1782 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, 1828 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1783 V2_MPT_BYTE_4_PD_S, mr->pd); 1829 V2_MPT_BYTE_4_PD_S, mr->pd);
1784 mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
1785 1830
1786 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0); 1831 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
1787 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); 1832 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
@@ -1796,13 +1841,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1796 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); 1841 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1797 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1842 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1798 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); 1843 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1799 mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
1800 1844
1801 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 1845 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
1802 mr->type == MR_TYPE_MR ? 0 : 1); 1846 mr->type == MR_TYPE_MR ? 0 : 1);
1803 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S, 1847 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1804 1); 1848 1);
1805 mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
1806 1849
1807 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); 1850 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
1808 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); 1851 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -1813,53 +1856,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1813 if (mr->type == MR_TYPE_DMA) 1856 if (mr->type == MR_TYPE_DMA)
1814 return 0; 1857 return 0;
1815 1858
1816 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); 1859 ret = set_mtpt_pbl(mpt_entry, mr);
1817
1818 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1819 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
1820 V2_MPT_BYTE_48_PBL_BA_H_S,
1821 upper_32_bits(mr->pbl_ba >> 3));
1822 mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
1823
1824 pages = (u64 *)__get_free_page(GFP_KERNEL);
1825 if (!pages)
1826 return -ENOMEM;
1827
1828 i = 0;
1829 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1830 len = sg_dma_len(sg) >> PAGE_SHIFT;
1831 for (j = 0; j < len; ++j) {
1832 page_addr = sg_dma_address(sg) +
1833 (j << mr->umem->page_shift);
1834 pages[i] = page_addr >> 6;
1835
1836 /* Record the first 2 entry directly to MTPT table */
1837 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1838 goto found;
1839 i++;
1840 }
1841 }
1842
1843found:
1844 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1845 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1846 V2_MPT_BYTE_56_PA0_H_S,
1847 upper_32_bits(pages[0]));
1848 mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
1849
1850 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1851 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1852 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1853 1860
1854 free_page((unsigned long)pages); 1861 return ret;
1855
1856 roce_set_field(mpt_entry->byte_64_buf_pa1,
1857 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1858 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1859 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1860 mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
1861
1862 return 0;
1863} 1862}
1864 1863
1865static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, 1864static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
@@ -1868,6 +1867,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1868 u64 size, void *mb_buf) 1867 u64 size, void *mb_buf)
1869{ 1868{
1870 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf; 1869 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
1870 int ret = 0;
1871 1871
1872 if (flags & IB_MR_REREG_PD) { 1872 if (flags & IB_MR_REREG_PD) {
1873 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, 1873 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
@@ -1880,14 +1880,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1880 V2_MPT_BYTE_8_BIND_EN_S, 1880 V2_MPT_BYTE_8_BIND_EN_S,
1881 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0)); 1881 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
1882 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, 1882 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1883 V2_MPT_BYTE_8_ATOMIC_EN_S, 1883 V2_MPT_BYTE_8_ATOMIC_EN_S,
1884 (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0)); 1884 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
1885 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, 1885 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1886 (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0)); 1886 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
1887 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, 1887 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1888 (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); 1888 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
1889 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1889 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1890 (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); 1890 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
1891 } 1891 }
1892 1892
1893 if (flags & IB_MR_REREG_TRANS) { 1893 if (flags & IB_MR_REREG_TRANS) {
@@ -1896,21 +1896,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1896 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size)); 1896 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
1897 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size)); 1897 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
1898 1898
1899 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1900 mpt_entry->pbl_ba_l =
1901 cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1902 roce_set_field(mpt_entry->byte_48_mode_ba,
1903 V2_MPT_BYTE_48_PBL_BA_H_M,
1904 V2_MPT_BYTE_48_PBL_BA_H_S,
1905 upper_32_bits(mr->pbl_ba >> 3));
1906 mpt_entry->byte_48_mode_ba =
1907 cpu_to_le32(mpt_entry->byte_48_mode_ba);
1908
1909 mr->iova = iova; 1899 mr->iova = iova;
1910 mr->size = size; 1900 mr->size = size;
1901
1902 ret = set_mtpt_pbl(mpt_entry, mr);
1911 } 1903 }
1912 1904
1913 return 0; 1905 return ret;
1914} 1906}
1915 1907
1916static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) 1908static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e9c428071df3..3569fda07e07 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1094,31 +1094,26 @@ enum mlx5_ib_width {
1094 MLX5_IB_WIDTH_12X = 1 << 4 1094 MLX5_IB_WIDTH_12X = 1 << 4
1095}; 1095};
1096 1096
1097static int translate_active_width(struct ib_device *ibdev, u8 active_width, 1097static void translate_active_width(struct ib_device *ibdev, u8 active_width,
1098 u8 *ib_width) 1098 u8 *ib_width)
1099{ 1099{
1100 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1100 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1101 int err = 0;
1102 1101
1103 if (active_width & MLX5_IB_WIDTH_1X) { 1102 if (active_width & MLX5_IB_WIDTH_1X)
1104 *ib_width = IB_WIDTH_1X; 1103 *ib_width = IB_WIDTH_1X;
1105 } else if (active_width & MLX5_IB_WIDTH_2X) { 1104 else if (active_width & MLX5_IB_WIDTH_4X)
1106 mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
1107 (int)active_width);
1108 err = -EINVAL;
1109 } else if (active_width & MLX5_IB_WIDTH_4X) {
1110 *ib_width = IB_WIDTH_4X; 1105 *ib_width = IB_WIDTH_4X;
1111 } else if (active_width & MLX5_IB_WIDTH_8X) { 1106 else if (active_width & MLX5_IB_WIDTH_8X)
1112 *ib_width = IB_WIDTH_8X; 1107 *ib_width = IB_WIDTH_8X;
1113 } else if (active_width & MLX5_IB_WIDTH_12X) { 1108 else if (active_width & MLX5_IB_WIDTH_12X)
1114 *ib_width = IB_WIDTH_12X; 1109 *ib_width = IB_WIDTH_12X;
1115 } else { 1110 else {
1116 mlx5_ib_dbg(dev, "Invalid active_width %d\n", 1111 mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1117 (int)active_width); 1112 (int)active_width);
1118 err = -EINVAL; 1113 *ib_width = IB_WIDTH_4X;
1119 } 1114 }
1120 1115
1121 return err; 1116 return;
1122} 1117}
1123 1118
1124static int mlx5_mtu_to_ib_mtu(int mtu) 1119static int mlx5_mtu_to_ib_mtu(int mtu)
@@ -1225,10 +1220,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
1225 if (err) 1220 if (err)
1226 goto out; 1221 goto out;
1227 1222
1228 err = translate_active_width(ibdev, ib_link_width_oper, 1223 translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1229 &props->active_width); 1224
1230 if (err)
1231 goto out;
1232 err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); 1225 err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
1233 if (err) 1226 if (err)
1234 goto out; 1227 goto out;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index b04eb6775326..2cc3d69ab6f6 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -674,6 +674,15 @@ next_mr:
674 goto srcu_unlock; 674 goto srcu_unlock;
675 } 675 }
676 676
677 if (!mr->umem->is_odp) {
678 mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
679 key);
680 if (bytes_mapped)
681 *bytes_mapped += bcnt;
682 ret = 0;
683 goto srcu_unlock;
684 }
685
677 ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped); 686 ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped);
678 if (ret < 0) 687 if (ret < 0)
679 goto srcu_unlock; 688 goto srcu_unlock;
@@ -735,6 +744,7 @@ next_mr:
735 head = frame; 744 head = frame;
736 745
737 bcnt -= frame->bcnt; 746 bcnt -= frame->bcnt;
747 offset = 0;
738 } 748 }
739 break; 749 break;
740 750
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6841c0f9237f..3747cc681b18 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2633,8 +2633,7 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
2633 2633
2634 if (access_flags & IB_ACCESS_REMOTE_READ) 2634 if (access_flags & IB_ACCESS_REMOTE_READ)
2635 *hw_access_flags |= MLX5_QP_BIT_RRE; 2635 *hw_access_flags |= MLX5_QP_BIT_RRE;
2636 if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) && 2636 if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
2637 qp->ibqp.qp_type == IB_QPT_RC) {
2638 int atomic_mode; 2637 int atomic_mode;
2639 2638
2640 atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type); 2639 atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
@@ -4678,17 +4677,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
4678 goto out; 4677 goto out;
4679 } 4678 }
4680 4679
4681 if (wr->opcode == IB_WR_LOCAL_INV || 4680 if (wr->opcode == IB_WR_REG_MR) {
4682 wr->opcode == IB_WR_REG_MR) {
4683 fence = dev->umr_fence; 4681 fence = dev->umr_fence;
4684 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 4682 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
4685 } else if (wr->send_flags & IB_SEND_FENCE) { 4683 } else {
4686 if (qp->next_fence) 4684 if (wr->send_flags & IB_SEND_FENCE) {
4687 fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; 4685 if (qp->next_fence)
4688 else 4686 fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
4689 fence = MLX5_FENCE_MODE_FENCE; 4687 else
4690 } else { 4688 fence = MLX5_FENCE_MODE_FENCE;
4691 fence = qp->next_fence; 4689 } else {
4690 fence = qp->next_fence;
4691 }
4692 } 4692 }
4693 4693
4694 switch (ibqp->qp_type) { 4694 switch (ibqp->qp_type) {
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index 89ec0f64abfc..084bb4baebb5 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
91 * rvt_create_ah - create an address handle 91 * rvt_create_ah - create an address handle
92 * @pd: the protection domain 92 * @pd: the protection domain
93 * @ah_attr: the attributes of the AH 93 * @ah_attr: the attributes of the AH
94 * @udata: pointer to user's input output buffer information.
94 * 95 *
95 * This may be called from interrupt context. 96 * This may be called from interrupt context.
96 * 97 *
97 * Return: newly allocated ah 98 * Return: newly allocated ah
98 */ 99 */
99struct ib_ah *rvt_create_ah(struct ib_pd *pd, 100struct ib_ah *rvt_create_ah(struct ib_pd *pd,
100 struct rdma_ah_attr *ah_attr) 101 struct rdma_ah_attr *ah_attr,
102 struct ib_udata *udata)
101{ 103{
102 struct rvt_ah *ah; 104 struct rvt_ah *ah;
103 struct rvt_dev_info *dev = ib_to_rvt(pd->device); 105 struct rvt_dev_info *dev = ib_to_rvt(pd->device);
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index 16105af99189..25271b48a683 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -51,7 +51,8 @@
51#include <rdma/rdma_vt.h> 51#include <rdma/rdma_vt.h>
52 52
53struct ib_ah *rvt_create_ah(struct ib_pd *pd, 53struct ib_ah *rvt_create_ah(struct ib_pd *pd,
54 struct rdma_ah_attr *ah_attr); 54 struct rdma_ah_attr *ah_attr,
55 struct ib_udata *udata);
55int rvt_destroy_ah(struct ib_ah *ibah); 56int rvt_destroy_ah(struct ib_ah *ibah);
56int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); 57int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
57int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); 58int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 946b623ba5eb..4ff3d98fa6a4 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1124,7 +1124,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
1124 IB_MR_CHECK_SIG_STATUS, &mr_status); 1124 IB_MR_CHECK_SIG_STATUS, &mr_status);
1125 if (ret) { 1125 if (ret) {
1126 pr_err("ib_check_mr_status failed, ret %d\n", ret); 1126 pr_err("ib_check_mr_status failed, ret %d\n", ret);
1127 goto err; 1127 /* Not a lot we can do, return ambiguous guard error */
1128 *sector = 0;
1129 return 0x1;
1128 } 1130 }
1129 1131
1130 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1132 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
@@ -1152,9 +1154,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
1152 } 1154 }
1153 1155
1154 return 0; 1156 return 0;
1155err:
1156 /* Not alot we can do here, return ambiguous guard error */
1157 return 0x1;
1158} 1157}
1159 1158
1160void iser_err_comp(struct ib_wc *wc, const char *type) 1159void iser_err_comp(struct ib_wc *wc, const char *type)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bb2cd29e1658..d8f7000a466a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
797 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; 797 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
798 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, 798 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
799 &entry, sizeof(entry)); 799 &entry, sizeof(entry));
800 entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL; 800 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
801 (BIT_ULL(52)-1)) & ~7ULL;
801 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, 802 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
802 &entry, sizeof(entry)); 803 &entry, sizeof(entry));
803 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); 804 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f3ccf025108b..41a4b8808802 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3075,7 +3075,7 @@ static int copy_context_table(struct intel_iommu *iommu,
3075 } 3075 }
3076 3076
3077 if (old_ce) 3077 if (old_ce)
3078 iounmap(old_ce); 3078 memunmap(old_ce);
3079 3079
3080 ret = 0; 3080 ret = 0;
3081 if (devfn < 0x80) 3081 if (devfn < 0x80)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index db301efe126d..887150907526 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -595,7 +595,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
595 pr_err("%s: Page request without PASID: %08llx %08llx\n", 595 pr_err("%s: Page request without PASID: %08llx %08llx\n",
596 iommu->name, ((unsigned long long *)req)[0], 596 iommu->name, ((unsigned long long *)req)[0],
597 ((unsigned long long *)req)[1]); 597 ((unsigned long long *)req)[1]);
598 goto bad_req; 598 goto no_pasid;
599 } 599 }
600 600
601 if (!svm || svm->pasid != req->pasid) { 601 if (!svm || svm->pasid != req->pasid) {
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index b98a03189580..ddf3a492e1d5 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -498,6 +498,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
498 498
499static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) 499static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
500{ 500{
501 if (!domain->mmu)
502 return;
503
501 /* 504 /*
502 * Disable the context. Flush the TLB as required when modifying the 505 * Disable the context. Flush the TLB as required when modifying the
503 * context registers. 506 * context registers.
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 31d1f4ab915e..65a933a21e68 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -807,7 +807,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
807 } 807 }
808 808
809 if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) { 809 if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
810 dprintk(1, "%s: transmit queue full\n", __func__); 810 dprintk(2, "%s: transmit queue full\n", __func__);
811 return -EBUSY; 811 return -EBUSY;
812 } 812 }
813 813
@@ -1180,6 +1180,8 @@ static int cec_config_log_addr(struct cec_adapter *adap,
1180{ 1180{
1181 struct cec_log_addrs *las = &adap->log_addrs; 1181 struct cec_log_addrs *las = &adap->log_addrs;
1182 struct cec_msg msg = { }; 1182 struct cec_msg msg = { };
1183 const unsigned int max_retries = 2;
1184 unsigned int i;
1183 int err; 1185 int err;
1184 1186
1185 if (cec_has_log_addr(adap, log_addr)) 1187 if (cec_has_log_addr(adap, log_addr))
@@ -1188,19 +1190,44 @@ static int cec_config_log_addr(struct cec_adapter *adap,
1188 /* Send poll message */ 1190 /* Send poll message */
1189 msg.len = 1; 1191 msg.len = 1;
1190 msg.msg[0] = (log_addr << 4) | log_addr; 1192 msg.msg[0] = (log_addr << 4) | log_addr;
1191 err = cec_transmit_msg_fh(adap, &msg, NULL, true);
1192 1193
1193 /* 1194 for (i = 0; i < max_retries; i++) {
1194 * While trying to poll the physical address was reset 1195 err = cec_transmit_msg_fh(adap, &msg, NULL, true);
1195 * and the adapter was unconfigured, so bail out.
1196 */
1197 if (!adap->is_configuring)
1198 return -EINTR;
1199 1196
1200 if (err) 1197 /*
1201 return err; 1198 * While trying to poll the physical address was reset
1199 * and the adapter was unconfigured, so bail out.
1200 */
1201 if (!adap->is_configuring)
1202 return -EINTR;
1203
1204 if (err)
1205 return err;
1202 1206
1203 if (msg.tx_status & CEC_TX_STATUS_OK) 1207 /*
1208 * The message was aborted due to a disconnect or
1209 * unconfigure, just bail out.
1210 */
1211 if (msg.tx_status & CEC_TX_STATUS_ABORTED)
1212 return -EINTR;
1213 if (msg.tx_status & CEC_TX_STATUS_OK)
1214 return 0;
1215 if (msg.tx_status & CEC_TX_STATUS_NACK)
1216 break;
1217 /*
1218 * Retry up to max_retries times if the message was neither
1219 * OKed or NACKed. This can happen due to e.g. a Lost
1220 * Arbitration condition.
1221 */
1222 }
1223
1224 /*
1225 * If we are unable to get an OK or a NACK after max_retries attempts
1226 * (and note that each attempt already consists of four polls), then
1227 * then we assume that something is really weird and that it is not a
1228 * good idea to try and claim this logical address.
1229 */
1230 if (i == max_retries)
1204 return 0; 1231 return 0;
1205 1232
1206 /* 1233 /*
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index ca5d92942820..41d470d9ca94 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1918,7 +1918,6 @@ static int tc358743_probe_of(struct tc358743_state *state)
1918 ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint); 1918 ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint);
1919 if (ret) { 1919 if (ret) {
1920 dev_err(dev, "failed to parse endpoint\n"); 1920 dev_err(dev, "failed to parse endpoint\n");
1921 ret = ret;
1922 goto put_node; 1921 goto put_node;
1923 } 1922 }
1924 1923
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 452eb9b42140..447baaebca44 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1844,14 +1844,12 @@ fail_mutex_destroy:
1844static void cio2_pci_remove(struct pci_dev *pci_dev) 1844static void cio2_pci_remove(struct pci_dev *pci_dev)
1845{ 1845{
1846 struct cio2_device *cio2 = pci_get_drvdata(pci_dev); 1846 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1847 unsigned int i;
1848 1847
1848 media_device_unregister(&cio2->media_dev);
1849 cio2_notifier_exit(cio2); 1849 cio2_notifier_exit(cio2);
1850 cio2_queues_exit(cio2);
1850 cio2_fbpt_exit_dummy(cio2); 1851 cio2_fbpt_exit_dummy(cio2);
1851 for (i = 0; i < CIO2_QUEUES; i++)
1852 cio2_queue_exit(cio2, &cio2->queue[i]);
1853 v4l2_device_unregister(&cio2->v4l2_dev); 1852 v4l2_device_unregister(&cio2->v4l2_dev);
1854 media_device_unregister(&cio2->media_dev);
1855 media_device_cleanup(&cio2->media_dev); 1853 media_device_cleanup(&cio2->media_dev);
1856 mutex_destroy(&cio2->lock); 1854 mutex_destroy(&cio2->lock);
1857} 1855}
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 77fb7987b42f..13f2828d880d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev)
1587 1587
1588static void isp_unregister_entities(struct isp_device *isp) 1588static void isp_unregister_entities(struct isp_device *isp)
1589{ 1589{
1590 media_device_unregister(&isp->media_dev);
1591
1590 omap3isp_csi2_unregister_entities(&isp->isp_csi2a); 1592 omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
1591 omap3isp_ccp2_unregister_entities(&isp->isp_ccp2); 1593 omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
1592 omap3isp_ccdc_unregister_entities(&isp->isp_ccdc); 1594 omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
@@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp)
1597 omap3isp_stat_unregister_entities(&isp->isp_hist); 1599 omap3isp_stat_unregister_entities(&isp->isp_hist);
1598 1600
1599 v4l2_device_unregister(&isp->v4l2_dev); 1601 v4l2_device_unregister(&isp->v4l2_dev);
1600 media_device_unregister(&isp->media_dev);
1601 media_device_cleanup(&isp->media_dev); 1602 media_device_cleanup(&isp->media_dev);
1602} 1603}
1603 1604
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 1eb9132bfc85..b292cff26c86 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info");
42#define MAX_WIDTH 4096U 42#define MAX_WIDTH 4096U
43#define MIN_WIDTH 640U 43#define MIN_WIDTH 640U
44#define MAX_HEIGHT 2160U 44#define MAX_HEIGHT 2160U
45#define MIN_HEIGHT 480U 45#define MIN_HEIGHT 360U
46 46
47#define dprintk(dev, fmt, arg...) \ 47#define dprintk(dev, fmt, arg...) \
48 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) 48 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index af150a0395df..d82db738f174 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1009,7 +1009,7 @@ static const struct v4l2_m2m_ops m2m_ops = {
1009 1009
1010static const struct media_device_ops m2m_media_ops = { 1010static const struct media_device_ops m2m_media_ops = {
1011 .req_validate = vb2_request_validate, 1011 .req_validate = vb2_request_validate,
1012 .req_queue = vb2_m2m_request_queue, 1012 .req_queue = v4l2_m2m_request_queue,
1013}; 1013};
1014 1014
1015static int vim2m_probe(struct platform_device *pdev) 1015static int vim2m_probe(struct platform_device *pdev)
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 6e37950292cd..5f2b033a7a42 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1664,6 +1664,11 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
1664 p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME) 1664 p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
1665 return -EINVAL; 1665 return -EINVAL;
1666 1666
1667 if (p_mpeg2_slice_params->pad ||
1668 p_mpeg2_slice_params->picture.pad ||
1669 p_mpeg2_slice_params->sequence.pad)
1670 return -EINVAL;
1671
1667 return 0; 1672 return 0;
1668 1673
1669 case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: 1674 case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index a3ef1f50a4b3..481e3c65cf97 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
193} 193}
194EXPORT_SYMBOL_GPL(v4l2_event_pending); 194EXPORT_SYMBOL_GPL(v4l2_event_pending);
195 195
196static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
197{
198 struct v4l2_fh *fh = sev->fh;
199 unsigned int i;
200
201 lockdep_assert_held(&fh->subscribe_lock);
202 assert_spin_locked(&fh->vdev->fh_lock);
203
204 /* Remove any pending events for this subscription */
205 for (i = 0; i < sev->in_use; i++) {
206 list_del(&sev->events[sev_pos(sev, i)].list);
207 fh->navailable--;
208 }
209 list_del(&sev->list);
210}
211
196int v4l2_event_subscribe(struct v4l2_fh *fh, 212int v4l2_event_subscribe(struct v4l2_fh *fh,
197 const struct v4l2_event_subscription *sub, unsigned elems, 213 const struct v4l2_event_subscription *sub, unsigned elems,
198 const struct v4l2_subscribed_event_ops *ops) 214 const struct v4l2_subscribed_event_ops *ops)
@@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
224 240
225 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 241 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
226 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); 242 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
243 if (!found_ev)
244 list_add(&sev->list, &fh->subscribed);
227 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 245 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
228 246
229 if (found_ev) { 247 if (found_ev) {
230 /* Already listening */ 248 /* Already listening */
231 kvfree(sev); 249 kvfree(sev);
232 goto out_unlock; 250 } else if (sev->ops && sev->ops->add) {
233 }
234
235 if (sev->ops && sev->ops->add) {
236 ret = sev->ops->add(sev, elems); 251 ret = sev->ops->add(sev, elems);
237 if (ret) { 252 if (ret) {
253 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
254 __v4l2_event_unsubscribe(sev);
255 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
238 kvfree(sev); 256 kvfree(sev);
239 goto out_unlock;
240 } 257 }
241 } 258 }
242 259
243 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
244 list_add(&sev->list, &fh->subscribed);
245 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
246
247out_unlock:
248 mutex_unlock(&fh->subscribe_lock); 260 mutex_unlock(&fh->subscribe_lock);
249 261
250 return ret; 262 return ret;
@@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
279{ 291{
280 struct v4l2_subscribed_event *sev; 292 struct v4l2_subscribed_event *sev;
281 unsigned long flags; 293 unsigned long flags;
282 int i;
283 294
284 if (sub->type == V4L2_EVENT_ALL) { 295 if (sub->type == V4L2_EVENT_ALL) {
285 v4l2_event_unsubscribe_all(fh); 296 v4l2_event_unsubscribe_all(fh);
@@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
291 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 302 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
292 303
293 sev = v4l2_event_subscribed(fh, sub->type, sub->id); 304 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
294 if (sev != NULL) { 305 if (sev != NULL)
295 /* Remove any pending events for this subscription */ 306 __v4l2_event_unsubscribe(sev);
296 for (i = 0; i < sev->in_use; i++) {
297 list_del(&sev->events[sev_pos(sev, i)].list);
298 fh->navailable--;
299 }
300 list_del(&sev->list);
301 }
302 307
303 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 308 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
304 309
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index d7806db222d8..1ed2465972ac 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -953,7 +953,7 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
953} 953}
954EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); 954EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
955 955
956void vb2_m2m_request_queue(struct media_request *req) 956void v4l2_m2m_request_queue(struct media_request *req)
957{ 957{
958 struct media_request_object *obj, *obj_safe; 958 struct media_request_object *obj, *obj_safe;
959 struct v4l2_m2m_ctx *m2m_ctx = NULL; 959 struct v4l2_m2m_ctx *m2m_ctx = NULL;
@@ -997,7 +997,7 @@ void vb2_m2m_request_queue(struct media_request *req)
997 if (m2m_ctx) 997 if (m2m_ctx)
998 v4l2_m2m_try_schedule(m2m_ctx); 998 v4l2_m2m_try_schedule(m2m_ctx);
999} 999}
1000EXPORT_SYMBOL_GPL(vb2_m2m_request_queue); 1000EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
1001 1001
1002/* Videobuf2 ioctl helpers */ 1002/* Videobuf2 ioctl helpers */
1003 1003
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index b2a0340f277e..d8e3cc2dc747 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
132MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids); 132MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
133#endif 133#endif
134 134
135static inline const struct atmel_ssc_platform_data * __init 135static inline const struct atmel_ssc_platform_data *
136 atmel_ssc_get_driver_data(struct platform_device *pdev) 136 atmel_ssc_get_driver_data(struct platform_device *pdev)
137{ 137{
138 if (pdev->dev.of_node) { 138 if (pdev->dev.of_node) {
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index c824329f7012..0e4193cb08cf 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -416,7 +416,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
416 if (err) 416 if (err)
417 goto error_window; 417 goto error_window;
418 err = scif_map_page(&window->num_pages_lookup.lookup[j], 418 err = scif_map_page(&window->num_pages_lookup.lookup[j],
419 vmalloc_dma_phys ? 419 vmalloc_num_pages ?
420 vmalloc_to_page(&window->num_pages[i]) : 420 vmalloc_to_page(&window->num_pages[i]) :
421 virt_to_page(&window->num_pages[i]), 421 virt_to_page(&window->num_pages[i]),
422 remote_dev); 422 remote_dev);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 313da3150262..1540a7785e14 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -27,6 +27,9 @@
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <asm/uv/uv_hub.h> 29#include <asm/uv/uv_hub.h>
30
31#include <linux/nospec.h>
32
30#include "gru.h" 33#include "gru.h"
31#include "grutables.h" 34#include "grutables.h"
32#include "gruhandles.h" 35#include "gruhandles.h"
@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
196 /* Currently, only dump by gid is implemented */ 199 /* Currently, only dump by gid is implemented */
197 if (req.gid >= gru_max_gids) 200 if (req.gid >= gru_max_gids)
198 return -EINVAL; 201 return -EINVAL;
202 req.gid = array_index_nospec(req.gid, gru_max_gids);
199 203
200 gru = GID_TO_GRU(req.gid); 204 gru = GID_TO_GRU(req.gid);
201 ubuf = req.buf; 205 ubuf = req.buf;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 7bfd366d970d..c4115bae5db1 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -12,6 +12,7 @@
12 * - JMicron (hardware and technical support) 12 * - JMicron (hardware and technical support)
13 */ 13 */
14 14
15#include <linux/bitfield.h>
15#include <linux/string.h> 16#include <linux/string.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/highmem.h> 18#include <linux/highmem.h>
@@ -462,6 +463,9 @@ struct intel_host {
462 u32 dsm_fns; 463 u32 dsm_fns;
463 int drv_strength; 464 int drv_strength;
464 bool d3_retune; 465 bool d3_retune;
466 bool rpm_retune_ok;
467 u32 glk_rx_ctrl1;
468 u32 glk_tun_val;
465}; 469};
466 470
467static const guid_t intel_dsm_guid = 471static const guid_t intel_dsm_guid =
@@ -791,6 +795,77 @@ cleanup:
791 return ret; 795 return ret;
792} 796}
793 797
798#ifdef CONFIG_PM
799#define GLK_RX_CTRL1 0x834
800#define GLK_TUN_VAL 0x840
801#define GLK_PATH_PLL GENMASK(13, 8)
802#define GLK_DLY GENMASK(6, 0)
803/* Workaround firmware failing to restore the tuning value */
804static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
805{
806 struct sdhci_pci_slot *slot = chip->slots[0];
807 struct intel_host *intel_host = sdhci_pci_priv(slot);
808 struct sdhci_host *host = slot->host;
809 u32 glk_rx_ctrl1;
810 u32 glk_tun_val;
811 u32 dly;
812
813 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
814 return;
815
816 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
817 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
818
819 if (susp) {
820 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
821 intel_host->glk_tun_val = glk_tun_val;
822 return;
823 }
824
825 if (!intel_host->glk_tun_val)
826 return;
827
828 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
829 intel_host->rpm_retune_ok = true;
830 return;
831 }
832
833 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
834 (intel_host->glk_tun_val << 1));
835 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
836 return;
837
838 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
839 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
840
841 intel_host->rpm_retune_ok = true;
842 chip->rpm_retune = true;
843 mmc_retune_needed(host->mmc);
844 pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
845}
846
847static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
848{
849 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
850 !chip->rpm_retune)
851 glk_rpm_retune_wa(chip, susp);
852}
853
854static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
855{
856 glk_rpm_retune_chk(chip, true);
857
858 return sdhci_cqhci_runtime_suspend(chip);
859}
860
861static int glk_runtime_resume(struct sdhci_pci_chip *chip)
862{
863 glk_rpm_retune_chk(chip, false);
864
865 return sdhci_cqhci_runtime_resume(chip);
866}
867#endif
868
794#ifdef CONFIG_ACPI 869#ifdef CONFIG_ACPI
795static int ni_set_max_freq(struct sdhci_pci_slot *slot) 870static int ni_set_max_freq(struct sdhci_pci_slot *slot)
796{ 871{
@@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
879 .resume = sdhci_cqhci_resume, 954 .resume = sdhci_cqhci_resume,
880#endif 955#endif
881#ifdef CONFIG_PM 956#ifdef CONFIG_PM
882 .runtime_suspend = sdhci_cqhci_runtime_suspend, 957 .runtime_suspend = glk_runtime_suspend,
883 .runtime_resume = sdhci_cqhci_runtime_resume, 958 .runtime_resume = glk_runtime_resume,
884#endif 959#endif
885 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 960 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
886 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 961 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
@@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1762 device_init_wakeup(&pdev->dev, true); 1837 device_init_wakeup(&pdev->dev, true);
1763 1838
1764 if (slot->cd_idx >= 0) { 1839 if (slot->cd_idx >= 0) {
1765 ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, 1840 ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
1766 slot->cd_override_level, 0, NULL); 1841 slot->cd_override_level, 0, NULL);
1842 if (ret && ret != -EPROBE_DEFER)
1843 ret = mmc_gpiod_request_cd(host->mmc, NULL,
1844 slot->cd_idx,
1845 slot->cd_override_level,
1846 0, NULL);
1767 if (ret == -EPROBE_DEFER) 1847 if (ret == -EPROBE_DEFER)
1768 goto remove; 1848 goto remove;
1769 1849
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
index 56cde38b92c0..044adf913854 100644
--- a/drivers/mtd/nand/bbt.c
+++ b/drivers/mtd/nand/bbt.c
@@ -27,7 +27,8 @@ int nanddev_bbt_init(struct nand_device *nand)
27 unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block, 27 unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
28 BITS_PER_LONG); 28 BITS_PER_LONG);
29 29
30 nand->bbt.cache = kzalloc(nwords, GFP_KERNEL); 30 nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
31 GFP_KERNEL);
31 if (!nand->bbt.cache) 32 if (!nand->bbt.cache)
32 return -ENOMEM; 33 return -ENOMEM;
33 34
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index fb33f6be7c4f..ad720494e8f7 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -2032,8 +2032,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2032 int ret; 2032 int ret;
2033 2033
2034 nand_np = dev->of_node; 2034 nand_np = dev->of_node;
2035 nfc_np = of_find_compatible_node(dev->of_node, NULL, 2035 nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
2036 "atmel,sama5d3-nfc");
2037 if (!nfc_np) { 2036 if (!nfc_np) {
2038 dev_err(dev, "Could not find device node for sama5d3-nfc\n"); 2037 dev_err(dev, "Could not find device node for sama5d3-nfc\n");
2039 return -ENODEV; 2038 return -ENODEV;
@@ -2447,15 +2446,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
2447 } 2446 }
2448 2447
2449 if (caps->legacy_of_bindings) { 2448 if (caps->legacy_of_bindings) {
2449 struct device_node *nfc_node;
2450 u32 ale_offs = 21; 2450 u32 ale_offs = 21;
2451 2451
2452 /* 2452 /*
2453 * If we are parsing legacy DT props and the DT contains a 2453 * If we are parsing legacy DT props and the DT contains a
2454 * valid NFC node, forward the request to the sama5 logic. 2454 * valid NFC node, forward the request to the sama5 logic.
2455 */ 2455 */
2456 if (of_find_compatible_node(pdev->dev.of_node, NULL, 2456 nfc_node = of_get_compatible_child(pdev->dev.of_node,
2457 "atmel,sama5d3-nfc")) 2457 "atmel,sama5d3-nfc");
2458 if (nfc_node) {
2458 caps = &atmel_sama5_nand_caps; 2459 caps = &atmel_sama5_nand_caps;
2460 of_node_put(nfc_node);
2461 }
2459 2462
2460 /* 2463 /*
2461 * Even if the compatible says we are dealing with an 2464 * Even if the compatible says we are dealing with an
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index ef75dfa62a4f..699d3cf49c6d 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -150,15 +150,15 @@
150#define NAND_VERSION_MINOR_SHIFT 16 150#define NAND_VERSION_MINOR_SHIFT 16
151 151
152/* NAND OP_CMDs */ 152/* NAND OP_CMDs */
153#define PAGE_READ 0x2 153#define OP_PAGE_READ 0x2
154#define PAGE_READ_WITH_ECC 0x3 154#define OP_PAGE_READ_WITH_ECC 0x3
155#define PAGE_READ_WITH_ECC_SPARE 0x4 155#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
156#define PROGRAM_PAGE 0x6 156#define OP_PROGRAM_PAGE 0x6
157#define PAGE_PROGRAM_WITH_ECC 0x7 157#define OP_PAGE_PROGRAM_WITH_ECC 0x7
158#define PROGRAM_PAGE_SPARE 0x9 158#define OP_PROGRAM_PAGE_SPARE 0x9
159#define BLOCK_ERASE 0xa 159#define OP_BLOCK_ERASE 0xa
160#define FETCH_ID 0xb 160#define OP_FETCH_ID 0xb
161#define RESET_DEVICE 0xd 161#define OP_RESET_DEVICE 0xd
162 162
163/* Default Value for NAND_DEV_CMD_VLD */ 163/* Default Value for NAND_DEV_CMD_VLD */
164#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ 164#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
@@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
692 692
693 if (read) { 693 if (read) {
694 if (host->use_ecc) 694 if (host->use_ecc)
695 cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; 695 cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
696 else 696 else
697 cmd = PAGE_READ | PAGE_ACC | LAST_PAGE; 697 cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
698 } else { 698 } else {
699 cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; 699 cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
700 } 700 }
701 701
702 if (host->use_ecc) { 702 if (host->use_ecc) {
@@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host)
1170 * in use. we configure the controller to perform a raw read of 512 1170 * in use. we configure the controller to perform a raw read of 512
1171 * bytes to read onfi params 1171 * bytes to read onfi params
1172 */ 1172 */
1173 nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE); 1173 nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
1174 nandc_set_reg(nandc, NAND_ADDR0, 0); 1174 nandc_set_reg(nandc, NAND_ADDR0, 0);
1175 nandc_set_reg(nandc, NAND_ADDR1, 0); 1175 nandc_set_reg(nandc, NAND_ADDR1, 0);
1176 nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE 1176 nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
@@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
1224 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1224 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1225 1225
1226 nandc_set_reg(nandc, NAND_FLASH_CMD, 1226 nandc_set_reg(nandc, NAND_FLASH_CMD,
1227 BLOCK_ERASE | PAGE_ACC | LAST_PAGE); 1227 OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
1228 nandc_set_reg(nandc, NAND_ADDR0, page_addr); 1228 nandc_set_reg(nandc, NAND_ADDR0, page_addr);
1229 nandc_set_reg(nandc, NAND_ADDR1, 0); 1229 nandc_set_reg(nandc, NAND_ADDR1, 0);
1230 nandc_set_reg(nandc, NAND_DEV0_CFG0, 1230 nandc_set_reg(nandc, NAND_DEV0_CFG0,
@@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column)
1255 if (column == -1) 1255 if (column == -1)
1256 return 0; 1256 return 0;
1257 1257
1258 nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); 1258 nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
1259 nandc_set_reg(nandc, NAND_ADDR0, column); 1259 nandc_set_reg(nandc, NAND_ADDR0, column);
1260 nandc_set_reg(nandc, NAND_ADDR1, 0); 1260 nandc_set_reg(nandc, NAND_ADDR1, 0);
1261 nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, 1261 nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
@@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host)
1276 struct nand_chip *chip = &host->chip; 1276 struct nand_chip *chip = &host->chip;
1277 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1277 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1278 1278
1279 nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); 1279 nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
1280 nandc_set_reg(nandc, NAND_EXEC_CMD, 1); 1280 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
1281 1281
1282 write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); 1282 write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index d846428ef038..04cedd3a2bf6 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
644 ndelay(cqspi->wr_delay); 644 ndelay(cqspi->wr_delay);
645 645
646 while (remaining > 0) { 646 while (remaining > 0) {
647 size_t write_words, mod_bytes;
648
647 write_bytes = remaining > page_size ? page_size : remaining; 649 write_bytes = remaining > page_size ? page_size : remaining;
648 iowrite32_rep(cqspi->ahb_base, txbuf, 650 write_words = write_bytes / 4;
649 DIV_ROUND_UP(write_bytes, 4)); 651 mod_bytes = write_bytes % 4;
652 /* Write 4 bytes at a time then single bytes. */
653 if (write_words) {
654 iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
655 txbuf += (write_words * 4);
656 }
657 if (mod_bytes) {
658 unsigned int temp = 0xFFFFFFFF;
659
660 memcpy(&temp, txbuf, mod_bytes);
661 iowrite32(temp, cqspi->ahb_base);
662 txbuf += mod_bytes;
663 }
650 664
651 if (!wait_for_completion_timeout(&cqspi->transfer_complete, 665 if (!wait_for_completion_timeout(&cqspi->transfer_complete,
652 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { 666 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
@@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
655 goto failwr; 669 goto failwr;
656 } 670 }
657 671
658 txbuf += write_bytes;
659 remaining -= write_bytes; 672 remaining -= write_bytes;
660 673
661 if (remaining > 0) 674 if (remaining > 0)
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 3e54e31889c7..1fdd2834fbcb 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2156 * @nor: pointer to a 'struct spi_nor' 2156 * @nor: pointer to a 'struct spi_nor'
2157 * @addr: offset in the serial flash memory 2157 * @addr: offset in the serial flash memory
2158 * @len: number of bytes to read 2158 * @len: number of bytes to read
2159 * @buf: buffer where the data is copied into 2159 * @buf: buffer where the data is copied into (dma-safe memory)
2160 * 2160 *
2161 * Return: 0 on success, -errno otherwise. 2161 * Return: 0 on success, -errno otherwise.
2162 */ 2162 */
@@ -2522,6 +2522,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
2522} 2522}
2523 2523
2524/** 2524/**
2525 * spi_nor_sort_erase_mask() - sort erase mask
2526 * @map: the erase map of the SPI NOR
2527 * @erase_mask: the erase type mask to be sorted
2528 *
2529 * Replicate the sort done for the map's erase types in BFPT: sort the erase
2530 * mask in ascending order with the smallest erase type size starting from
2531 * BIT(0) in the sorted erase mask.
2532 *
2533 * Return: sorted erase mask.
2534 */
2535static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
2536{
2537 struct spi_nor_erase_type *erase_type = map->erase_type;
2538 int i;
2539 u8 sorted_erase_mask = 0;
2540
2541 if (!erase_mask)
2542 return 0;
2543
2544 /* Replicate the sort done for the map's erase types. */
2545 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
2546 if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
2547 sorted_erase_mask |= BIT(i);
2548
2549 return sorted_erase_mask;
2550}
2551
2552/**
2525 * spi_nor_regions_sort_erase_types() - sort erase types in each region 2553 * spi_nor_regions_sort_erase_types() - sort erase types in each region
2526 * @map: the erase map of the SPI NOR 2554 * @map: the erase map of the SPI NOR
2527 * 2555 *
@@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
2536static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map) 2564static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
2537{ 2565{
2538 struct spi_nor_erase_region *region = map->regions; 2566 struct spi_nor_erase_region *region = map->regions;
2539 struct spi_nor_erase_type *erase_type = map->erase_type;
2540 int i;
2541 u8 region_erase_mask, sorted_erase_mask; 2567 u8 region_erase_mask, sorted_erase_mask;
2542 2568
2543 while (region) { 2569 while (region) {
2544 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; 2570 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
2545 2571
2546 /* Replicate the sort done for the map's erase types. */ 2572 sorted_erase_mask = spi_nor_sort_erase_mask(map,
2547 sorted_erase_mask = 0; 2573 region_erase_mask);
2548 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
2549 if (erase_type[i].size &&
2550 region_erase_mask & BIT(erase_type[i].idx))
2551 sorted_erase_mask |= BIT(i);
2552 2574
2553 /* Overwrite erase mask. */ 2575 /* Overwrite erase mask. */
2554 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) | 2576 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
@@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
2855 * spi_nor_get_map_in_use() - get the configuration map in use 2877 * spi_nor_get_map_in_use() - get the configuration map in use
2856 * @nor: pointer to a 'struct spi_nor' 2878 * @nor: pointer to a 'struct spi_nor'
2857 * @smpt: pointer to the sector map parameter table 2879 * @smpt: pointer to the sector map parameter table
2880 * @smpt_len: sector map parameter table length
2881 *
2882 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
2858 */ 2883 */
2859static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt) 2884static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
2885 u8 smpt_len)
2860{ 2886{
2861 const u32 *ret = NULL; 2887 const u32 *ret;
2862 u32 i, addr; 2888 u8 *buf;
2889 u32 addr;
2863 int err; 2890 int err;
2891 u8 i;
2864 u8 addr_width, read_opcode, read_dummy; 2892 u8 addr_width, read_opcode, read_dummy;
2865 u8 read_data_mask, data_byte, map_id; 2893 u8 read_data_mask, map_id;
2894
2895 /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
2896 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
2897 if (!buf)
2898 return ERR_PTR(-ENOMEM);
2866 2899
2867 addr_width = nor->addr_width; 2900 addr_width = nor->addr_width;
2868 read_dummy = nor->read_dummy; 2901 read_dummy = nor->read_dummy;
2869 read_opcode = nor->read_opcode; 2902 read_opcode = nor->read_opcode;
2870 2903
2871 map_id = 0; 2904 map_id = 0;
2872 i = 0;
2873 /* Determine if there are any optional Detection Command Descriptors */ 2905 /* Determine if there are any optional Detection Command Descriptors */
2874 while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) { 2906 for (i = 0; i < smpt_len; i += 2) {
2907 if (smpt[i] & SMPT_DESC_TYPE_MAP)
2908 break;
2909
2875 read_data_mask = SMPT_CMD_READ_DATA(smpt[i]); 2910 read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
2876 nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]); 2911 nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
2877 nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]); 2912 nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
2878 nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]); 2913 nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
2879 addr = smpt[i + 1]; 2914 addr = smpt[i + 1];
2880 2915
2881 err = spi_nor_read_raw(nor, addr, 1, &data_byte); 2916 err = spi_nor_read_raw(nor, addr, 1, buf);
2882 if (err) 2917 if (err) {
2918 ret = ERR_PTR(err);
2883 goto out; 2919 goto out;
2920 }
2884 2921
2885 /* 2922 /*
2886 * Build an index value that is used to select the Sector Map 2923 * Build an index value that is used to select the Sector Map
2887 * Configuration that is currently in use. 2924 * Configuration that is currently in use.
2888 */ 2925 */
2889 map_id = map_id << 1 | !!(data_byte & read_data_mask); 2926 map_id = map_id << 1 | !!(*buf & read_data_mask);
2890 i = i + 2;
2891 } 2927 }
2892 2928
2893 /* Find the matching configuration map */ 2929 /*
2894 while (SMPT_MAP_ID(smpt[i]) != map_id) { 2930 * If command descriptors are provided, they always precede map
2931 * descriptors in the table. There is no need to start the iteration
2932 * over smpt array all over again.
2933 *
2934 * Find the matching configuration map.
2935 */
2936 ret = ERR_PTR(-EINVAL);
2937 while (i < smpt_len) {
2938 if (SMPT_MAP_ID(smpt[i]) == map_id) {
2939 ret = smpt + i;
2940 break;
2941 }
2942
2943 /*
2944 * If there are no more configuration map descriptors and no
2945 * configuration ID matched the configuration identifier, the
2946 * sector address map is unknown.
2947 */
2895 if (smpt[i] & SMPT_DESC_END) 2948 if (smpt[i] & SMPT_DESC_END)
2896 goto out; 2949 break;
2950
2897 /* increment the table index to the next map */ 2951 /* increment the table index to the next map */
2898 i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1; 2952 i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
2899 } 2953 }
2900 2954
2901 ret = smpt + i;
2902 /* fall through */ 2955 /* fall through */
2903out: 2956out:
2957 kfree(buf);
2904 nor->addr_width = addr_width; 2958 nor->addr_width = addr_width;
2905 nor->read_dummy = read_dummy; 2959 nor->read_dummy = read_dummy;
2906 nor->read_opcode = read_opcode; 2960 nor->read_opcode = read_opcode;
@@ -2941,12 +2995,13 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
2941 const u32 *smpt) 2995 const u32 *smpt)
2942{ 2996{
2943 struct spi_nor_erase_map *map = &nor->erase_map; 2997 struct spi_nor_erase_map *map = &nor->erase_map;
2944 const struct spi_nor_erase_type *erase = map->erase_type; 2998 struct spi_nor_erase_type *erase = map->erase_type;
2945 struct spi_nor_erase_region *region; 2999 struct spi_nor_erase_region *region;
2946 u64 offset; 3000 u64 offset;
2947 u32 region_count; 3001 u32 region_count;
2948 int i, j; 3002 int i, j;
2949 u8 erase_type; 3003 u8 uniform_erase_type, save_uniform_erase_type;
3004 u8 erase_type, regions_erase_type;
2950 3005
2951 region_count = SMPT_MAP_REGION_COUNT(*smpt); 3006 region_count = SMPT_MAP_REGION_COUNT(*smpt);
2952 /* 3007 /*
@@ -2959,7 +3014,8 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
2959 return -ENOMEM; 3014 return -ENOMEM;
2960 map->regions = region; 3015 map->regions = region;
2961 3016
2962 map->uniform_erase_type = 0xff; 3017 uniform_erase_type = 0xff;
3018 regions_erase_type = 0;
2963 offset = 0; 3019 offset = 0;
2964 /* Populate regions. */ 3020 /* Populate regions. */
2965 for (i = 0; i < region_count; i++) { 3021 for (i = 0; i < region_count; i++) {
@@ -2974,12 +3030,40 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
2974 * Save the erase types that are supported in all regions and 3030 * Save the erase types that are supported in all regions and
2975 * can erase the entire flash memory. 3031 * can erase the entire flash memory.
2976 */ 3032 */
2977 map->uniform_erase_type &= erase_type; 3033 uniform_erase_type &= erase_type;
3034
3035 /*
3036 * regions_erase_type mask will indicate all the erase types
3037 * supported in this configuration map.
3038 */
3039 regions_erase_type |= erase_type;
2978 3040
2979 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) + 3041 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
2980 region[i].size; 3042 region[i].size;
2981 } 3043 }
2982 3044
3045 save_uniform_erase_type = map->uniform_erase_type;
3046 map->uniform_erase_type = spi_nor_sort_erase_mask(map,
3047 uniform_erase_type);
3048
3049 if (!regions_erase_type) {
3050 /*
3051 * Roll back to the previous uniform_erase_type mask, SMPT is
3052 * broken.
3053 */
3054 map->uniform_erase_type = save_uniform_erase_type;
3055 return -EINVAL;
3056 }
3057
3058 /*
3059 * BFPT advertises all the erase types supported by all the possible
3060 * map configurations. Mask out the erase types that are not supported
3061 * by the current map configuration.
3062 */
3063 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3064 if (!(regions_erase_type & BIT(erase[i].idx)))
3065 spi_nor_set_erase_type(&erase[i], 0, 0xFF);
3066
2983 spi_nor_region_mark_end(&region[i - 1]); 3067 spi_nor_region_mark_end(&region[i - 1]);
2984 3068
2985 return 0; 3069 return 0;
@@ -3020,9 +3104,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
3020 for (i = 0; i < smpt_header->length; i++) 3104 for (i = 0; i < smpt_header->length; i++)
3021 smpt[i] = le32_to_cpu(smpt[i]); 3105 smpt[i] = le32_to_cpu(smpt[i]);
3022 3106
3023 sector_map = spi_nor_get_map_in_use(nor, smpt); 3107 sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
3024 if (!sector_map) { 3108 if (IS_ERR(sector_map)) {
3025 ret = -EINVAL; 3109 ret = PTR_ERR(sector_map);
3026 goto out; 3110 goto out;
3027 } 3111 }
3028 3112
@@ -3125,7 +3209,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
3125 if (err) 3209 if (err)
3126 goto exit; 3210 goto exit;
3127 3211
3128 /* Parse other parameter headers. */ 3212 /* Parse optional parameter tables. */
3129 for (i = 0; i < header.nph; i++) { 3213 for (i = 0; i < header.nph; i++) {
3130 param_header = &param_headers[i]; 3214 param_header = &param_headers[i];
3131 3215
@@ -3138,8 +3222,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
3138 break; 3222 break;
3139 } 3223 }
3140 3224
3141 if (err) 3225 if (err) {
3142 goto exit; 3226 dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
3227 SFDP_PARAM_HEADER_ID(param_header));
3228 /*
3229 * Let's not drop all information we extracted so far
3230 * if optional table parsers fail. In case of failing,
3231 * each optional parser is responsible to roll back to
3232 * the previously known spi_nor data.
3233 */
3234 err = 0;
3235 }
3143 } 3236 }
3144 3237
3145exit: 3238exit:
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 49163570a63a..3b3f88ffab53 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -477,6 +477,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
477} 477}
478EXPORT_SYMBOL_GPL(can_put_echo_skb); 478EXPORT_SYMBOL_GPL(can_put_echo_skb);
479 479
480struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
481{
482 struct can_priv *priv = netdev_priv(dev);
483 struct sk_buff *skb = priv->echo_skb[idx];
484 struct canfd_frame *cf;
485
486 if (idx >= priv->echo_skb_max) {
487 netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
488 __func__, idx, priv->echo_skb_max);
489 return NULL;
490 }
491
492 if (!skb) {
493 netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
494 __func__, idx);
495 return NULL;
496 }
497
498 /* Using "struct canfd_frame::len" for the frame
499 * length is supported on both CAN and CANFD frames.
500 */
501 cf = (struct canfd_frame *)skb->data;
502 *len_ptr = cf->len;
503 priv->echo_skb[idx] = NULL;
504
505 return skb;
506}
507
480/* 508/*
481 * Get the skb from the stack and loop it back locally 509 * Get the skb from the stack and loop it back locally
482 * 510 *
@@ -486,22 +514,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
486 */ 514 */
487unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) 515unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
488{ 516{
489 struct can_priv *priv = netdev_priv(dev); 517 struct sk_buff *skb;
490 518 u8 len;
491 BUG_ON(idx >= priv->echo_skb_max);
492
493 if (priv->echo_skb[idx]) {
494 struct sk_buff *skb = priv->echo_skb[idx];
495 struct can_frame *cf = (struct can_frame *)skb->data;
496 u8 dlc = cf->can_dlc;
497 519
498 netif_rx(priv->echo_skb[idx]); 520 skb = __can_get_echo_skb(dev, idx, &len);
499 priv->echo_skb[idx] = NULL; 521 if (!skb)
522 return 0;
500 523
501 return dlc; 524 netif_rx(skb);
502 }
503 525
504 return 0; 526 return len;
505} 527}
506EXPORT_SYMBOL_GPL(can_get_echo_skb); 528EXPORT_SYMBOL_GPL(can_get_echo_skb);
507 529
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 8e972ef08637..75ce11395ee8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -135,13 +135,12 @@
135 135
136/* FLEXCAN interrupt flag register (IFLAG) bits */ 136/* FLEXCAN interrupt flag register (IFLAG) bits */
137/* Errata ERR005829 step7: Reserve first valid MB */ 137/* Errata ERR005829 step7: Reserve first valid MB */
138#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 138#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
139#define FLEXCAN_TX_MB_OFF_FIFO 9
140#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 139#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
141#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 140#define FLEXCAN_TX_MB 63
142#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) 141#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
143#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 142#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1)
144#define FLEXCAN_IFLAG_MB(x) BIT(x) 143#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f)
145#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) 144#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
146#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) 145#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
147#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) 146#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
@@ -259,9 +258,7 @@ struct flexcan_priv {
259 struct can_rx_offload offload; 258 struct can_rx_offload offload;
260 259
261 struct flexcan_regs __iomem *regs; 260 struct flexcan_regs __iomem *regs;
262 struct flexcan_mb __iomem *tx_mb;
263 struct flexcan_mb __iomem *tx_mb_reserved; 261 struct flexcan_mb __iomem *tx_mb_reserved;
264 u8 tx_mb_idx;
265 u32 reg_ctrl_default; 262 u32 reg_ctrl_default;
266 u32 reg_imask1_default; 263 u32 reg_imask1_default;
267 u32 reg_imask2_default; 264 u32 reg_imask2_default;
@@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
515static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) 512static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
516{ 513{
517 const struct flexcan_priv *priv = netdev_priv(dev); 514 const struct flexcan_priv *priv = netdev_priv(dev);
515 struct flexcan_regs __iomem *regs = priv->regs;
518 struct can_frame *cf = (struct can_frame *)skb->data; 516 struct can_frame *cf = (struct can_frame *)skb->data;
519 u32 can_id; 517 u32 can_id;
520 u32 data; 518 u32 data;
@@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
537 535
538 if (cf->can_dlc > 0) { 536 if (cf->can_dlc > 0) {
539 data = be32_to_cpup((__be32 *)&cf->data[0]); 537 data = be32_to_cpup((__be32 *)&cf->data[0]);
540 priv->write(data, &priv->tx_mb->data[0]); 538 priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
541 } 539 }
542 if (cf->can_dlc > 4) { 540 if (cf->can_dlc > 4) {
543 data = be32_to_cpup((__be32 *)&cf->data[4]); 541 data = be32_to_cpup((__be32 *)&cf->data[4]);
544 priv->write(data, &priv->tx_mb->data[1]); 542 priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
545 } 543 }
546 544
547 can_put_echo_skb(skb, dev, 0); 545 can_put_echo_skb(skb, dev, 0);
548 546
549 priv->write(can_id, &priv->tx_mb->can_id); 547 priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
550 priv->write(ctrl, &priv->tx_mb->can_ctrl); 548 priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
551 549
552 /* Errata ERR005829 step8: 550 /* Errata ERR005829 step8:
553 * Write twice INACTIVE(0x8) code to first MB. 551 * Write twice INACTIVE(0x8) code to first MB.
@@ -563,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
563static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) 561static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
564{ 562{
565 struct flexcan_priv *priv = netdev_priv(dev); 563 struct flexcan_priv *priv = netdev_priv(dev);
564 struct flexcan_regs __iomem *regs = priv->regs;
566 struct sk_buff *skb; 565 struct sk_buff *skb;
567 struct can_frame *cf; 566 struct can_frame *cf;
568 bool rx_errors = false, tx_errors = false; 567 bool rx_errors = false, tx_errors = false;
568 u32 timestamp;
569
570 timestamp = priv->read(&regs->timer) << 16;
569 571
570 skb = alloc_can_err_skb(dev, &cf); 572 skb = alloc_can_err_skb(dev, &cf);
571 if (unlikely(!skb)) 573 if (unlikely(!skb))
@@ -612,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
612 if (tx_errors) 614 if (tx_errors)
613 dev->stats.tx_errors++; 615 dev->stats.tx_errors++;
614 616
615 can_rx_offload_irq_queue_err_skb(&priv->offload, skb); 617 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
616} 618}
617 619
618static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) 620static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
619{ 621{
620 struct flexcan_priv *priv = netdev_priv(dev); 622 struct flexcan_priv *priv = netdev_priv(dev);
623 struct flexcan_regs __iomem *regs = priv->regs;
621 struct sk_buff *skb; 624 struct sk_buff *skb;
622 struct can_frame *cf; 625 struct can_frame *cf;
623 enum can_state new_state, rx_state, tx_state; 626 enum can_state new_state, rx_state, tx_state;
624 int flt; 627 int flt;
625 struct can_berr_counter bec; 628 struct can_berr_counter bec;
629 u32 timestamp;
630
631 timestamp = priv->read(&regs->timer) << 16;
626 632
627 flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; 633 flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
628 if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { 634 if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
@@ -652,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
652 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 658 if (unlikely(new_state == CAN_STATE_BUS_OFF))
653 can_bus_off(dev); 659 can_bus_off(dev);
654 660
655 can_rx_offload_irq_queue_err_skb(&priv->offload, skb); 661 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
656} 662}
657 663
658static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) 664static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -720,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
720 priv->write(BIT(n - 32), &regs->iflag2); 726 priv->write(BIT(n - 32), &regs->iflag2);
721 } else { 727 } else {
722 priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); 728 priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
723 priv->read(&regs->timer);
724 } 729 }
725 730
731 /* Read the Free Running Timer. It is optional but recommended
732 * to unlock Mailbox as soon as possible and make it available
733 * for reception.
734 */
735 priv->read(&regs->timer);
736
726 return 1; 737 return 1;
727} 738}
728 739
@@ -732,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
732 struct flexcan_regs __iomem *regs = priv->regs; 743 struct flexcan_regs __iomem *regs = priv->regs;
733 u32 iflag1, iflag2; 744 u32 iflag1, iflag2;
734 745
735 iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default; 746 iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
736 iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default & 747 ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
737 ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 748 iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
738 749
739 return (u64)iflag2 << 32 | iflag1; 750 return (u64)iflag2 << 32 | iflag1;
740} 751}
@@ -746,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
746 struct flexcan_priv *priv = netdev_priv(dev); 757 struct flexcan_priv *priv = netdev_priv(dev);
747 struct flexcan_regs __iomem *regs = priv->regs; 758 struct flexcan_regs __iomem *regs = priv->regs;
748 irqreturn_t handled = IRQ_NONE; 759 irqreturn_t handled = IRQ_NONE;
749 u32 reg_iflag1, reg_esr; 760 u32 reg_iflag2, reg_esr;
750 enum can_state last_state = priv->can.state; 761 enum can_state last_state = priv->can.state;
751 762
752 reg_iflag1 = priv->read(&regs->iflag1);
753
754 /* reception interrupt */ 763 /* reception interrupt */
755 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 764 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
756 u64 reg_iflag; 765 u64 reg_iflag;
@@ -764,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
764 break; 773 break;
765 } 774 }
766 } else { 775 } else {
776 u32 reg_iflag1;
777
778 reg_iflag1 = priv->read(&regs->iflag1);
767 if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { 779 if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
768 handled = IRQ_HANDLED; 780 handled = IRQ_HANDLED;
769 can_rx_offload_irq_offload_fifo(&priv->offload); 781 can_rx_offload_irq_offload_fifo(&priv->offload);
@@ -779,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
779 } 791 }
780 } 792 }
781 793
794 reg_iflag2 = priv->read(&regs->iflag2);
795
782 /* transmission complete interrupt */ 796 /* transmission complete interrupt */
783 if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { 797 if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
798 u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
799
784 handled = IRQ_HANDLED; 800 handled = IRQ_HANDLED;
785 stats->tx_bytes += can_get_echo_skb(dev, 0); 801 stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
802 0, reg_ctrl << 16);
786 stats->tx_packets++; 803 stats->tx_packets++;
787 can_led_event(dev, CAN_LED_EVENT_TX); 804 can_led_event(dev, CAN_LED_EVENT_TX);
788 805
789 /* after sending a RTR frame MB is in RX mode */ 806 /* after sending a RTR frame MB is in RX mode */
790 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, 807 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
791 &priv->tx_mb->can_ctrl); 808 &regs->mb[FLEXCAN_TX_MB].can_ctrl);
792 priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1); 809 priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
793 netif_wake_queue(dev); 810 netif_wake_queue(dev);
794 } 811 }
795 812
@@ -931,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev)
931 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); 948 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
932 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | 949 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
933 FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | 950 FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
934 FLEXCAN_MCR_IDAM_C; 951 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
935 952
936 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 953 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
937 reg_mcr &= ~FLEXCAN_MCR_FEN; 954 reg_mcr &= ~FLEXCAN_MCR_FEN;
938 reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); 955 else
939 } else { 956 reg_mcr |= FLEXCAN_MCR_FEN;
940 reg_mcr |= FLEXCAN_MCR_FEN | 957
941 FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
942 }
943 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); 958 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
944 priv->write(reg_mcr, &regs->mcr); 959 priv->write(reg_mcr, &regs->mcr);
945 960
@@ -982,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev)
982 priv->write(reg_ctrl2, &regs->ctrl2); 997 priv->write(reg_ctrl2, &regs->ctrl2);
983 } 998 }
984 999
985 /* clear and invalidate all mailboxes first */
986 for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
987 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
988 &regs->mb[i].can_ctrl);
989 }
990
991 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 1000 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
992 for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) 1001 for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
993 priv->write(FLEXCAN_MB_CODE_RX_EMPTY, 1002 priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
994 &regs->mb[i].can_ctrl); 1003 &regs->mb[i].can_ctrl);
1004 }
1005 } else {
1006 /* clear and invalidate unused mailboxes first */
1007 for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
1008 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
1009 &regs->mb[i].can_ctrl);
1010 }
995 } 1011 }
996 1012
997 /* Errata ERR005829: mark first TX mailbox as INACTIVE */ 1013 /* Errata ERR005829: mark first TX mailbox as INACTIVE */
@@ -1000,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev)
1000 1016
1001 /* mark TX mailbox as INACTIVE */ 1017 /* mark TX mailbox as INACTIVE */
1002 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, 1018 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
1003 &priv->tx_mb->can_ctrl); 1019 &regs->mb[FLEXCAN_TX_MB].can_ctrl);
1004 1020
1005 /* acceptance mask/acceptance code (accept everything) */ 1021 /* acceptance mask/acceptance code (accept everything) */
1006 priv->write(0x0, &regs->rxgmask); 1022 priv->write(0x0, &regs->rxgmask);
@@ -1355,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev)
1355 priv->devtype_data = devtype_data; 1371 priv->devtype_data = devtype_data;
1356 priv->reg_xceiver = reg_xceiver; 1372 priv->reg_xceiver = reg_xceiver;
1357 1373
1358 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 1374 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
1359 priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
1360 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; 1375 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
1361 } else { 1376 else
1362 priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
1363 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; 1377 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
1364 }
1365 priv->tx_mb = &regs->mb[priv->tx_mb_idx];
1366 1378
1367 priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 1379 priv->reg_imask1_default = 0;
1368 priv->reg_imask2_default = 0; 1380 priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
1369 1381
1370 priv->offload.mailbox_read = flexcan_mailbox_read; 1382 priv->offload.mailbox_read = flexcan_mailbox_read;
1371 1383
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 11662f479e76..771a46083739 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -24,6 +24,9 @@
24 24
25#define RCAR_CAN_DRV_NAME "rcar_can" 25#define RCAR_CAN_DRV_NAME "rcar_can"
26 26
27#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
28 BIT(CLKR_CLKEXT))
29
27/* Mailbox configuration: 30/* Mailbox configuration:
28 * mailbox 60 - 63 - Rx FIFO mailboxes 31 * mailbox 60 - 63 - Rx FIFO mailboxes
29 * mailbox 56 - 59 - Tx FIFO mailboxes 32 * mailbox 56 - 59 - Tx FIFO mailboxes
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
789 goto fail_clk; 792 goto fail_clk;
790 } 793 }
791 794
792 if (clock_select >= ARRAY_SIZE(clock_names)) { 795 if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
793 err = -EINVAL; 796 err = -EINVAL;
794 dev_err(&pdev->dev, "invalid CAN clock selected\n"); 797 dev_err(&pdev->dev, "invalid CAN clock selected\n");
795 goto fail_clk; 798 goto fail_clk;
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index c7d05027a7a0..2ce4fa8698c7 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -211,7 +211,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
211} 211}
212EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); 212EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
213 213
214int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) 214int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
215 struct sk_buff *skb, u32 timestamp)
216{
217 struct can_rx_offload_cb *cb;
218 unsigned long flags;
219
220 if (skb_queue_len(&offload->skb_queue) >
221 offload->skb_queue_len_max)
222 return -ENOMEM;
223
224 cb = can_rx_offload_get_cb(skb);
225 cb->timestamp = timestamp;
226
227 spin_lock_irqsave(&offload->skb_queue.lock, flags);
228 __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
229 spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
230
231 can_rx_offload_schedule(offload);
232
233 return 0;
234}
235EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
236
237unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
238 unsigned int idx, u32 timestamp)
239{
240 struct net_device *dev = offload->dev;
241 struct net_device_stats *stats = &dev->stats;
242 struct sk_buff *skb;
243 u8 len;
244 int err;
245
246 skb = __can_get_echo_skb(dev, idx, &len);
247 if (!skb)
248 return 0;
249
250 err = can_rx_offload_queue_sorted(offload, skb, timestamp);
251 if (err) {
252 stats->rx_errors++;
253 stats->tx_fifo_errors++;
254 }
255
256 return len;
257}
258EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
259
260int can_rx_offload_queue_tail(struct can_rx_offload *offload,
261 struct sk_buff *skb)
215{ 262{
216 if (skb_queue_len(&offload->skb_queue) > 263 if (skb_queue_len(&offload->skb_queue) >
217 offload->skb_queue_len_max) 264 offload->skb_queue_len_max)
@@ -222,7 +269,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b
222 269
223 return 0; 270 return 0;
224} 271}
225EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); 272EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
226 273
227static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) 274static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
228{ 275{
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 53e320c92a8b..ddaf46239e39 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net)
760{ 760{
761 struct hi3110_priv *priv = netdev_priv(net); 761 struct hi3110_priv *priv = netdev_priv(net);
762 struct spi_device *spi = priv->spi; 762 struct spi_device *spi = priv->spi;
763 unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING; 763 unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;
764 int ret; 764 int ret;
765 765
766 ret = open_candev(net); 766 ret = open_candev(net);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index b939a4c10b84..c89c7d4900d7 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
528 context = &priv->tx_contexts[i]; 528 context = &priv->tx_contexts[i];
529 529
530 context->echo_index = i; 530 context->echo_index = i;
531 can_put_echo_skb(skb, netdev, context->echo_index);
532 ++priv->active_tx_contexts; 531 ++priv->active_tx_contexts;
533 if (priv->active_tx_contexts >= (int)dev->max_tx_urbs) 532 if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
534 netif_stop_queue(netdev); 533 netif_stop_queue(netdev);
@@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
553 dev_kfree_skb(skb); 552 dev_kfree_skb(skb);
554 spin_lock_irqsave(&priv->tx_contexts_lock, flags); 553 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
555 554
556 can_free_echo_skb(netdev, context->echo_index);
557 context->echo_index = dev->max_tx_urbs; 555 context->echo_index = dev->max_tx_urbs;
558 --priv->active_tx_contexts; 556 --priv->active_tx_contexts;
559 netif_wake_queue(netdev); 557 netif_wake_queue(netdev);
@@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
564 562
565 context->priv = priv; 563 context->priv = priv;
566 564
565 can_put_echo_skb(skb, netdev, context->echo_index);
566
567 usb_fill_bulk_urb(urb, dev->udev, 567 usb_fill_bulk_urb(urb, dev->udev,
568 usb_sndbulkpipe(dev->udev, 568 usb_sndbulkpipe(dev->udev,
569 dev->bulk_out->bEndpointAddress), 569 dev->bulk_out->bEndpointAddress),
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index c084bae5ec0a..5fc0be564274 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
1019 new_state : CAN_STATE_ERROR_ACTIVE; 1019 new_state : CAN_STATE_ERROR_ACTIVE;
1020 1020
1021 can_change_state(netdev, cf, tx_state, rx_state); 1021 can_change_state(netdev, cf, tx_state, rx_state);
1022
1023 if (priv->can.restart_ms &&
1024 old_state >= CAN_STATE_BUS_OFF &&
1025 new_state < CAN_STATE_BUS_OFF)
1026 cf->can_id |= CAN_ERR_RESTARTED;
1022 } 1027 }
1023 1028
1024 if (new_state == CAN_STATE_BUS_OFF) { 1029 if (new_state == CAN_STATE_BUS_OFF) {
@@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
1028 1033
1029 can_bus_off(netdev); 1034 can_bus_off(netdev);
1030 } 1035 }
1031
1032 if (priv->can.restart_ms &&
1033 old_state >= CAN_STATE_BUS_OFF &&
1034 new_state < CAN_STATE_BUS_OFF)
1035 cf->can_id |= CAN_ERR_RESTARTED;
1036 } 1036 }
1037 1037
1038 if (!skb) { 1038 if (!skb) {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 0678a38b1af4..f3d5bda012a1 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -35,10 +35,6 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/usb.h> 36#include <linux/usb.h>
37 37
38#include <linux/can.h>
39#include <linux/can/dev.h>
40#include <linux/can/error.h>
41
42#define UCAN_DRIVER_NAME "ucan" 38#define UCAN_DRIVER_NAME "ucan"
43#define UCAN_MAX_RX_URBS 8 39#define UCAN_MAX_RX_URBS 8
44/* the CAN controller needs a while to enable/disable the bus */ 40/* the CAN controller needs a while to enable/disable the bus */
@@ -1575,11 +1571,8 @@ err_firmware_needs_update:
1575/* disconnect the device */ 1571/* disconnect the device */
1576static void ucan_disconnect(struct usb_interface *intf) 1572static void ucan_disconnect(struct usb_interface *intf)
1577{ 1573{
1578 struct usb_device *udev;
1579 struct ucan_priv *up = usb_get_intfdata(intf); 1574 struct ucan_priv *up = usb_get_intfdata(intf);
1580 1575
1581 udev = interface_to_usbdev(intf);
1582
1583 usb_set_intfdata(intf, NULL); 1576 usb_set_intfdata(intf, NULL);
1584 1577
1585 if (up) { 1578 if (up) {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 18956e7604a3..a70bb1bb90e7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
1848 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 1848 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
1849 if (rc) 1849 if (rc)
1850 dev_err(&adapter->pdev->dev, "Device reset failed\n"); 1850 dev_err(&adapter->pdev->dev, "Device reset failed\n");
1851 /* stop submitting admin commands on a device that was reset */
1852 ena_com_set_admin_running_state(adapter->ena_dev, false);
1851 } 1853 }
1852 1854
1853 ena_destroy_all_io_queues(adapter); 1855 ena_destroy_all_io_queues(adapter);
@@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
1914 1916
1915 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); 1917 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
1916 1918
1919 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
1920 return 0;
1921
1917 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 1922 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1918 ena_down(adapter); 1923 ena_down(adapter);
1919 1924
@@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2613 ena_down(adapter); 2618 ena_down(adapter);
2614 2619
2615 /* Stop the device from sending AENQ events (in case reset flag is set 2620 /* Stop the device from sending AENQ events (in case reset flag is set
2616 * and device is up, ena_close already reset the device 2621 * and device is up, ena_down() already reset the device.
2617 * In case the reset flag is set and the device is up, ena_down()
2618 * already perform the reset, so it can be skipped.
2619 */ 2622 */
2620 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2623 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2621 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 2624 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@@ -2694,8 +2697,8 @@ err_device_destroy:
2694 ena_com_abort_admin_commands(ena_dev); 2697 ena_com_abort_admin_commands(ena_dev);
2695 ena_com_wait_for_abort_completion(ena_dev); 2698 ena_com_wait_for_abort_completion(ena_dev);
2696 ena_com_admin_destroy(ena_dev); 2699 ena_com_admin_destroy(ena_dev);
2697 ena_com_mmio_reg_read_request_destroy(ena_dev);
2698 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 2700 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
2701 ena_com_mmio_reg_read_request_destroy(ena_dev);
2699err: 2702err:
2700 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2703 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2701 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2704 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3452,6 +3455,8 @@ err_rss:
3452 ena_com_rss_destroy(ena_dev); 3455 ena_com_rss_destroy(ena_dev);
3453err_free_msix: 3456err_free_msix:
3454 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); 3457 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3458 /* stop submitting admin commands on a device that was reset */
3459 ena_com_set_admin_running_state(ena_dev, false);
3455 ena_free_mgmnt_irq(adapter); 3460 ena_free_mgmnt_irq(adapter);
3456 ena_disable_msix(adapter); 3461 ena_disable_msix(adapter);
3457err_worker_destroy: 3462err_worker_destroy:
@@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev)
3498 3503
3499 cancel_work_sync(&adapter->reset_task); 3504 cancel_work_sync(&adapter->reset_task);
3500 3505
3501 unregister_netdev(netdev);
3502
3503 /* If the device is running then we want to make sure the device will be
3504 * reset to make sure no more events will be issued by the device.
3505 */
3506 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3507 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3508
3509 rtnl_lock(); 3506 rtnl_lock();
3510 ena_destroy_device(adapter, true); 3507 ena_destroy_device(adapter, true);
3511 rtnl_unlock(); 3508 rtnl_unlock();
3512 3509
3510 unregister_netdev(netdev);
3511
3513 free_netdev(netdev); 3512 free_netdev(netdev);
3514 3513
3515 ena_com_rss_destroy(ena_dev); 3514 ena_com_rss_destroy(ena_dev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 521873642339..dc8b6173d8d8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
45 45
46#define DRV_MODULE_VER_MAJOR 2 46#define DRV_MODULE_VER_MAJOR 2
47#define DRV_MODULE_VER_MINOR 0 47#define DRV_MODULE_VER_MINOR 0
48#define DRV_MODULE_VER_SUBMINOR 1 48#define DRV_MODULE_VER_SUBMINOR 2
49 49
50#define DRV_MODULE_NAME "ena" 50#define DRV_MODULE_NAME "ena"
51#ifndef DRV_MODULE_VERSION 51#ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index b4fc0ed5bce8..9d4899826823 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
1419 1419
1420 prop = of_get_property(nd, "tpe-link-test?", NULL); 1420 prop = of_get_property(nd, "tpe-link-test?", NULL);
1421 if (!prop) 1421 if (!prop)
1422 goto no_link_test; 1422 goto node_put;
1423 1423
1424 if (strcmp(prop, "true")) { 1424 if (strcmp(prop, "true")) {
1425 printk(KERN_NOTICE "SunLance: warning: overriding option " 1425 printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
1428 "to ecd@skynet.be\n"); 1428 "to ecd@skynet.be\n");
1429 auxio_set_lte(AUXIO_LTE_ON); 1429 auxio_set_lte(AUXIO_LTE_ON);
1430 } 1430 }
1431node_put:
1432 of_node_put(nd);
1431no_link_test: 1433no_link_test:
1432 lp->auto_select = 1; 1434 lp->auto_select = 1;
1433 lp->tpe = 0; 1435 lp->tpe = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index be1506169076..0de487a8f0eb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2191#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 2191#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
2192 E1HVN_MAX) 2192 E1HVN_MAX)
2193 2193
2194/* Following is the DMAE channel number allocation for the clients.
2195 * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
2196 * Driver: 0-3 and 8-11 (for PF dmae operations)
2197 * 4 and 12 (for stats requests)
2198 */
2199#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */
2200
2194/* PCIE link and speed */ 2201/* PCIE link and speed */
2195#define PCICFG_LINK_WIDTH 0x1f00000 2202#define PCICFG_LINK_WIDTH 0x1f00000
2196#define PCICFG_LINK_WIDTH_SHIFT 20 2203#define PCICFG_LINK_WIDTH_SHIFT 20
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 3f4d2c8da21a..a9eaaf3e73a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
6149 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 6149 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
6150 rdata->path_id = BP_PATH(bp); 6150 rdata->path_id = BP_PATH(bp);
6151 rdata->network_cos_mode = start_params->network_cos_mode; 6151 rdata->network_cos_mode = start_params->network_cos_mode;
6152 rdata->dmae_cmd_id = BNX2X_FW_DMAE_C;
6152 6153
6153 rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); 6154 rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
6154 rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); 6155 rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dd85d790f638..d4c300117529 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1675,7 +1675,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1675 } else { 1675 } else {
1676 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1676 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1677 if (dev->features & NETIF_F_RXCSUM) 1677 if (dev->features & NETIF_F_RXCSUM)
1678 cpr->rx_l4_csum_errors++; 1678 bnapi->cp_ring.rx_l4_csum_errors++;
1679 } 1679 }
1680 } 1680 }
1681 1681
@@ -8714,6 +8714,26 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
8714 return rc; 8714 return rc;
8715} 8715}
8716 8716
8717static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
8718 u32 ring_id, u32 *prod, u32 *cons)
8719{
8720 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
8721 struct hwrm_dbg_ring_info_get_input req = {0};
8722 int rc;
8723
8724 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
8725 req.ring_type = ring_type;
8726 req.fw_ring_id = cpu_to_le32(ring_id);
8727 mutex_lock(&bp->hwrm_cmd_lock);
8728 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8729 if (!rc) {
8730 *prod = le32_to_cpu(resp->producer_index);
8731 *cons = le32_to_cpu(resp->consumer_index);
8732 }
8733 mutex_unlock(&bp->hwrm_cmd_lock);
8734 return rc;
8735}
8736
8717static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 8737static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
8718{ 8738{
8719 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 8739 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -8821,6 +8841,11 @@ static void bnxt_timer(struct timer_list *t)
8821 bnxt_queue_sp_work(bp); 8841 bnxt_queue_sp_work(bp);
8822 } 8842 }
8823 } 8843 }
8844
8845 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
8846 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
8847 bnxt_queue_sp_work(bp);
8848 }
8824bnxt_restart_timer: 8849bnxt_restart_timer:
8825 mod_timer(&bp->timer, jiffies + bp->current_interval); 8850 mod_timer(&bp->timer, jiffies + bp->current_interval);
8826} 8851}
@@ -8851,6 +8876,44 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
8851 bnxt_rtnl_unlock_sp(bp); 8876 bnxt_rtnl_unlock_sp(bp);
8852} 8877}
8853 8878
8879static void bnxt_chk_missed_irq(struct bnxt *bp)
8880{
8881 int i;
8882
8883 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8884 return;
8885
8886 for (i = 0; i < bp->cp_nr_rings; i++) {
8887 struct bnxt_napi *bnapi = bp->bnapi[i];
8888 struct bnxt_cp_ring_info *cpr;
8889 u32 fw_ring_id;
8890 int j;
8891
8892 if (!bnapi)
8893 continue;
8894
8895 cpr = &bnapi->cp_ring;
8896 for (j = 0; j < 2; j++) {
8897 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
8898 u32 val[2];
8899
8900 if (!cpr2 || cpr2->has_more_work ||
8901 !bnxt_has_work(bp, cpr2))
8902 continue;
8903
8904 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
8905 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
8906 continue;
8907 }
8908 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
8909 bnxt_dbg_hwrm_ring_info_get(bp,
8910 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
8911 fw_ring_id, &val[0], &val[1]);
8912 cpr->missed_irqs++;
8913 }
8914 }
8915}
8916
8854static void bnxt_cfg_ntp_filters(struct bnxt *); 8917static void bnxt_cfg_ntp_filters(struct bnxt *);
8855 8918
8856static void bnxt_sp_task(struct work_struct *work) 8919static void bnxt_sp_task(struct work_struct *work)
@@ -8930,6 +8993,9 @@ static void bnxt_sp_task(struct work_struct *work)
8930 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 8993 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
8931 bnxt_tc_flow_stats_work(bp); 8994 bnxt_tc_flow_stats_work(bp);
8932 8995
8996 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
8997 bnxt_chk_missed_irq(bp);
8998
8933 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 8999 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
8934 * must be the last functions to be called before exiting. 9000 * must be the last functions to be called before exiting.
8935 */ 9001 */
@@ -10087,6 +10153,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10087 } 10153 }
10088 10154
10089 bnxt_hwrm_func_qcfg(bp); 10155 bnxt_hwrm_func_qcfg(bp);
10156 bnxt_hwrm_vnic_qcaps(bp);
10090 bnxt_hwrm_port_led_qcaps(bp); 10157 bnxt_hwrm_port_led_qcaps(bp);
10091 bnxt_ethtool_init(bp); 10158 bnxt_ethtool_init(bp);
10092 bnxt_dcb_init(bp); 10159 bnxt_dcb_init(bp);
@@ -10120,7 +10187,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10120 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 10187 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10121 } 10188 }
10122 10189
10123 bnxt_hwrm_vnic_qcaps(bp);
10124 if (bnxt_rfs_supported(bp)) { 10190 if (bnxt_rfs_supported(bp)) {
10125 dev->hw_features |= NETIF_F_NTUPLE; 10191 dev->hw_features |= NETIF_F_NTUPLE;
10126 if (bnxt_rfs_capable(bp)) { 10192 if (bnxt_rfs_capable(bp)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 498b373c992d..9e99d4ab3e06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -798,6 +798,8 @@ struct bnxt_cp_ring_info {
798 u8 had_work_done:1; 798 u8 had_work_done:1;
799 u8 has_more_work:1; 799 u8 has_more_work:1;
800 800
801 u32 last_cp_raw_cons;
802
801 struct bnxt_coal rx_ring_coal; 803 struct bnxt_coal rx_ring_coal;
802 u64 rx_packets; 804 u64 rx_packets;
803 u64 rx_bytes; 805 u64 rx_bytes;
@@ -816,6 +818,7 @@ struct bnxt_cp_ring_info {
816 dma_addr_t hw_stats_map; 818 dma_addr_t hw_stats_map;
817 u32 hw_stats_ctx_id; 819 u32 hw_stats_ctx_id;
818 u64 rx_l4_csum_errors; 820 u64 rx_l4_csum_errors;
821 u64 missed_irqs;
819 822
820 struct bnxt_ring_struct cp_ring_struct; 823 struct bnxt_ring_struct cp_ring_struct;
821 824
@@ -1527,6 +1530,7 @@ struct bnxt {
1527#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 1530#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
1528#define BNXT_FLOW_STATS_SP_EVENT 15 1531#define BNXT_FLOW_STATS_SP_EVENT 15
1529#define BNXT_UPDATE_PHY_SP_EVENT 16 1532#define BNXT_UPDATE_PHY_SP_EVENT 16
1533#define BNXT_RING_COAL_NOW_SP_EVENT 17
1530 1534
1531 struct bnxt_hw_resc hw_resc; 1535 struct bnxt_hw_resc hw_resc;
1532 struct bnxt_pf_info pf; 1536 struct bnxt_pf_info pf;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 48078564f025..6cc69a58478a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,7 @@ reset_coalesce:
137 return rc; 137 return rc;
138} 138}
139 139
140#define BNXT_NUM_STATS 21 140#define BNXT_NUM_STATS 22
141 141
142#define BNXT_RX_STATS_ENTRY(counter) \ 142#define BNXT_RX_STATS_ENTRY(counter) \
143 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } 143 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -384,6 +384,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
384 for (k = 0; k < stat_fields; j++, k++) 384 for (k = 0; k < stat_fields; j++, k++)
385 buf[j] = le64_to_cpu(hw_stats[k]); 385 buf[j] = le64_to_cpu(hw_stats[k]);
386 buf[j++] = cpr->rx_l4_csum_errors; 386 buf[j++] = cpr->rx_l4_csum_errors;
387 buf[j++] = cpr->missed_irqs;
387 388
388 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += 389 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
389 le64_to_cpu(cpr->hw_stats->rx_discard_pkts); 390 le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -468,6 +469,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
468 buf += ETH_GSTRING_LEN; 469 buf += ETH_GSTRING_LEN;
469 sprintf(buf, "[%d]: rx_l4_csum_errors", i); 470 sprintf(buf, "[%d]: rx_l4_csum_errors", i);
470 buf += ETH_GSTRING_LEN; 471 buf += ETH_GSTRING_LEN;
472 sprintf(buf, "[%d]: missed_irqs", i);
473 buf += ETH_GSTRING_LEN;
471 } 474 }
472 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { 475 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
473 strcpy(buf, bnxt_sw_func_stats[i].string); 476 strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -2942,8 +2945,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
2942 record->asic_state = 0; 2945 record->asic_state = 0;
2943 strlcpy(record->system_name, utsname()->nodename, 2946 strlcpy(record->system_name, utsname()->nodename,
2944 sizeof(record->system_name)); 2947 sizeof(record->system_name));
2945 record->year = cpu_to_le16(tm.tm_year); 2948 record->year = cpu_to_le16(tm.tm_year + 1900);
2946 record->month = cpu_to_le16(tm.tm_mon); 2949 record->month = cpu_to_le16(tm.tm_mon + 1);
2947 record->day = cpu_to_le16(tm.tm_mday); 2950 record->day = cpu_to_le16(tm.tm_mday);
2948 record->hour = cpu_to_le16(tm.tm_hour); 2951 record->hour = cpu_to_le16(tm.tm_hour);
2949 record->minute = cpu_to_le16(tm.tm_min); 2952 record->minute = cpu_to_le16(tm.tm_min);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index beee61292d5e..b59b382d34f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -43,6 +43,9 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
43 if (ulp_id == BNXT_ROCE_ULP) { 43 if (ulp_id == BNXT_ROCE_ULP) {
44 unsigned int max_stat_ctxs; 44 unsigned int max_stat_ctxs;
45 45
46 if (bp->flags & BNXT_FLAG_CHIP_P5)
47 return -EOPNOTSUPP;
48
46 max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); 49 max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
47 if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS || 50 if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
48 bp->num_stat_ctxs == max_stat_ctxs) 51 bp->num_stat_ctxs == max_stat_ctxs)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 89295306f161..432c3b867084 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12422,6 +12422,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
12422{ 12422{
12423 struct tg3 *tp = netdev_priv(dev); 12423 struct tg3 *tp = netdev_priv(dev);
12424 int i, irq_sync = 0, err = 0; 12424 int i, irq_sync = 0, err = 0;
12425 bool reset_phy = false;
12425 12426
12426 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12427 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12427 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12428 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@@ -12453,7 +12454,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
12453 12454
12454 if (netif_running(dev)) { 12455 if (netif_running(dev)) {
12455 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12456 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12456 err = tg3_restart_hw(tp, false); 12457 /* Reset PHY to avoid PHY lock up */
12458 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12459 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12460 tg3_asic_rev(tp) == ASIC_REV_5720)
12461 reset_phy = true;
12462
12463 err = tg3_restart_hw(tp, reset_phy);
12457 if (!err) 12464 if (!err)
12458 tg3_netif_start(tp); 12465 tg3_netif_start(tp);
12459 } 12466 }
@@ -12487,6 +12494,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
12487{ 12494{
12488 struct tg3 *tp = netdev_priv(dev); 12495 struct tg3 *tp = netdev_priv(dev);
12489 int err = 0; 12496 int err = 0;
12497 bool reset_phy = false;
12490 12498
12491 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12499 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12492 tg3_warn_mgmt_link_flap(tp); 12500 tg3_warn_mgmt_link_flap(tp);
@@ -12556,7 +12564,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
12556 12564
12557 if (netif_running(dev)) { 12565 if (netif_running(dev)) {
12558 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12566 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12559 err = tg3_restart_hw(tp, false); 12567 /* Reset PHY to avoid PHY lock up */
12568 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12569 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12570 tg3_asic_rev(tp) == ASIC_REV_5720)
12571 reset_phy = true;
12572
12573 err = tg3_restart_hw(tp, reset_phy);
12560 if (!err) 12574 if (!err)
12561 tg3_netif_start(tp); 12575 tg3_netif_start(tp);
12562 } 12576 }
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 55af04fa03a7..6c8dcb65ff03 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev)
1441{ 1441{
1442 struct nicpf *nic = pci_get_drvdata(pdev); 1442 struct nicpf *nic = pci_get_drvdata(pdev);
1443 1443
1444 if (!nic)
1445 return;
1446
1444 if (nic->flags & NIC_SRIOV_ENABLED) 1447 if (nic->flags & NIC_SRIOV_ENABLED)
1445 pci_disable_sriov(pdev); 1448 pci_disable_sriov(pdev);
1446 1449
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 768f584f8392..88f8a8fa93cd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1784 bool if_up = netif_running(nic->netdev); 1784 bool if_up = netif_running(nic->netdev);
1785 struct bpf_prog *old_prog; 1785 struct bpf_prog *old_prog;
1786 bool bpf_attached = false; 1786 bool bpf_attached = false;
1787 int ret = 0;
1787 1788
1788 /* For now just support only the usual MTU sized frames */ 1789 /* For now just support only the usual MTU sized frames */
1789 if (prog && (dev->mtu > 1500)) { 1790 if (prog && (dev->mtu > 1500)) {
@@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1817 if (nic->xdp_prog) { 1818 if (nic->xdp_prog) {
1818 /* Attach BPF program */ 1819 /* Attach BPF program */
1819 nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); 1820 nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
1820 if (!IS_ERR(nic->xdp_prog)) 1821 if (!IS_ERR(nic->xdp_prog)) {
1821 bpf_attached = true; 1822 bpf_attached = true;
1823 } else {
1824 ret = PTR_ERR(nic->xdp_prog);
1825 nic->xdp_prog = NULL;
1826 }
1822 } 1827 }
1823 1828
1824 /* Calculate Tx queues needed for XDP and network stack */ 1829 /* Calculate Tx queues needed for XDP and network stack */
@@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1830 netif_trans_update(nic->netdev); 1835 netif_trans_update(nic->netdev);
1831 } 1836 }
1832 1837
1833 return 0; 1838 return ret;
1834} 1839}
1835 1840
1836static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) 1841static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 187a249ff2d1..fcaf18fa3904 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
585 if (!sq->dmem.base) 585 if (!sq->dmem.base)
586 return; 586 return;
587 587
588 if (sq->tso_hdrs) 588 if (sq->tso_hdrs) {
589 dma_free_coherent(&nic->pdev->dev, 589 dma_free_coherent(&nic->pdev->dev,
590 sq->dmem.q_len * TSO_HEADER_SIZE, 590 sq->dmem.q_len * TSO_HEADER_SIZE,
591 sq->tso_hdrs, sq->tso_hdrs_phys); 591 sq->tso_hdrs, sq->tso_hdrs_phys);
592 sq->tso_hdrs = NULL;
593 }
592 594
593 /* Free pending skbs in the queue */ 595 /* Free pending skbs in the queue */
594 smp_rmb(); 596 smp_rmb();
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 75c1c5ed2387..e2cdfa75673f 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,7 +67,6 @@ config CHELSIO_T3
67config CHELSIO_T4 67config CHELSIO_T4
68 tristate "Chelsio Communications T4/T5/T6 Ethernet support" 68 tristate "Chelsio Communications T4/T5/T6 Ethernet support"
69 depends on PCI && (IPV6 || IPV6=n) 69 depends on PCI && (IPV6 || IPV6=n)
70 depends on THERMAL || !THERMAL
71 select FW_LOADER 70 select FW_LOADER
72 select MDIO 71 select MDIO
73 select ZLIB_DEFLATE 72 select ZLIB_DEFLATE
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 78e5d17a1d5f..91d8a885deba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -12,6 +12,4 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
12cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o 12cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
13cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o 13cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
14cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o 14cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
15ifdef CONFIG_THERMAL 15cxgb4-$(CONFIG_THERMAL) += cxgb4_thermal.o
16cxgb4-objs += cxgb4_thermal.o
17endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05a46926016a..d49db46254cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5863,7 +5863,7 @@ fw_attach_fail:
5863 if (!is_t4(adapter->params.chip)) 5863 if (!is_t4(adapter->params.chip))
5864 cxgb4_ptp_init(adapter); 5864 cxgb4_ptp_init(adapter);
5865 5865
5866 if (IS_ENABLED(CONFIG_THERMAL) && 5866 if (IS_REACHABLE(CONFIG_THERMAL) &&
5867 !is_t4(adapter->params.chip) && (adapter->flags & FW_OK)) 5867 !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
5868 cxgb4_thermal_init(adapter); 5868 cxgb4_thermal_init(adapter);
5869 5869
@@ -5932,7 +5932,7 @@ static void remove_one(struct pci_dev *pdev)
5932 5932
5933 if (!is_t4(adapter->params.chip)) 5933 if (!is_t4(adapter->params.chip))
5934 cxgb4_ptp_stop(adapter); 5934 cxgb4_ptp_stop(adapter);
5935 if (IS_ENABLED(CONFIG_THERMAL)) 5935 if (IS_REACHABLE(CONFIG_THERMAL))
5936 cxgb4_thermal_remove(adapter); 5936 cxgb4_thermal_remove(adapter);
5937 5937
5938 /* If we allocated filters, free up state associated with any 5938 /* If we allocated filters, free up state associated with any
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index ceec467f590d..949103db8a8a 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
660 660
661 u64_stats_update_begin(&port->tx_stats_syncp); 661 u64_stats_update_begin(&port->tx_stats_syncp);
662 port->tx_frag_stats[nfrags]++; 662 port->tx_frag_stats[nfrags]++;
663 u64_stats_update_end(&port->ir_stats_syncp); 663 u64_stats_update_end(&port->tx_stats_syncp);
664 } 664 }
665 } 665 }
666 666
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 570caeb8ee9e..084f24daf2b5 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
872 struct net_device *netdev = dev_id; 872 struct net_device *netdev = dev_id;
873 struct ftmac100 *priv = netdev_priv(netdev); 873 struct ftmac100 *priv = netdev_priv(netdev);
874 874
875 if (likely(netif_running(netdev))) { 875 /* Disable interrupts for polling */
876 /* Disable interrupts for polling */ 876 ftmac100_disable_all_int(priv);
877 ftmac100_disable_all_int(priv); 877 if (likely(netif_running(netdev)))
878 napi_schedule(&priv->napi); 878 napi_schedule(&priv->napi);
879 }
880 879
881 return IRQ_HANDLED; 880 return IRQ_HANDLED;
882} 881}
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index be268dcde8fa..f9a4e76c5a8b 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -915,10 +915,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
915 } 915 }
916 916
917 ret = register_netdev(ndev); 917 ret = register_netdev(ndev);
918 if (ret) { 918 if (ret)
919 free_netdev(ndev);
920 goto alloc_fail; 919 goto alloc_fail;
921 }
922 920
923 return 0; 921 return 0;
924 922
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c9d5d0a7fbf1..c0203a0d5e3b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
485 485
486 for (j = 0; j < rx_pool->size; j++) { 486 for (j = 0; j < rx_pool->size; j++) {
487 if (rx_pool->rx_buff[j].skb) { 487 if (rx_pool->rx_buff[j].skb) {
488 dev_kfree_skb_any(rx_pool->rx_buff[i].skb); 488 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
489 rx_pool->rx_buff[i].skb = NULL; 489 rx_pool->rx_buff[j].skb = NULL;
490 } 490 }
491 } 491 }
492 492
@@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
1103 return 0; 1103 return 0;
1104 } 1104 }
1105 1105
1106 mutex_lock(&adapter->reset_lock);
1107
1108 if (adapter->state != VNIC_CLOSED) { 1106 if (adapter->state != VNIC_CLOSED) {
1109 rc = ibmvnic_login(netdev); 1107 rc = ibmvnic_login(netdev);
1110 if (rc) { 1108 if (rc)
1111 mutex_unlock(&adapter->reset_lock);
1112 return rc; 1109 return rc;
1113 }
1114 1110
1115 rc = init_resources(adapter); 1111 rc = init_resources(adapter);
1116 if (rc) { 1112 if (rc) {
1117 netdev_err(netdev, "failed to initialize resources\n"); 1113 netdev_err(netdev, "failed to initialize resources\n");
1118 release_resources(adapter); 1114 release_resources(adapter);
1119 mutex_unlock(&adapter->reset_lock);
1120 return rc; 1115 return rc;
1121 } 1116 }
1122 } 1117 }
@@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
1124 rc = __ibmvnic_open(netdev); 1119 rc = __ibmvnic_open(netdev);
1125 netif_carrier_on(netdev); 1120 netif_carrier_on(netdev);
1126 1121
1127 mutex_unlock(&adapter->reset_lock);
1128
1129 return rc; 1122 return rc;
1130} 1123}
1131 1124
@@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
1269 return 0; 1262 return 0;
1270 } 1263 }
1271 1264
1272 mutex_lock(&adapter->reset_lock);
1273 rc = __ibmvnic_close(netdev); 1265 rc = __ibmvnic_close(netdev);
1274 ibmvnic_cleanup(netdev); 1266 ibmvnic_cleanup(netdev);
1275 mutex_unlock(&adapter->reset_lock);
1276 1267
1277 return rc; 1268 return rc;
1278} 1269}
@@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1746 struct ibmvnic_rwi *rwi, u32 reset_state) 1737 struct ibmvnic_rwi *rwi, u32 reset_state)
1747{ 1738{
1748 u64 old_num_rx_queues, old_num_tx_queues; 1739 u64 old_num_rx_queues, old_num_tx_queues;
1740 u64 old_num_rx_slots, old_num_tx_slots;
1749 struct net_device *netdev = adapter->netdev; 1741 struct net_device *netdev = adapter->netdev;
1750 int i, rc; 1742 int i, rc;
1751 1743
@@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1757 1749
1758 old_num_rx_queues = adapter->req_rx_queues; 1750 old_num_rx_queues = adapter->req_rx_queues;
1759 old_num_tx_queues = adapter->req_tx_queues; 1751 old_num_tx_queues = adapter->req_tx_queues;
1752 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1753 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1760 1754
1761 ibmvnic_cleanup(netdev); 1755 ibmvnic_cleanup(netdev);
1762 1756
@@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1819 if (rc) 1813 if (rc)
1820 return rc; 1814 return rc;
1821 } else if (adapter->req_rx_queues != old_num_rx_queues || 1815 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1822 adapter->req_tx_queues != old_num_tx_queues) { 1816 adapter->req_tx_queues != old_num_tx_queues ||
1823 adapter->map_id = 1; 1817 adapter->req_rx_add_entries_per_subcrq !=
1818 old_num_rx_slots ||
1819 adapter->req_tx_entries_per_subcrq !=
1820 old_num_tx_slots) {
1824 release_rx_pools(adapter); 1821 release_rx_pools(adapter);
1825 release_tx_pools(adapter); 1822 release_tx_pools(adapter);
1826 rc = init_rx_pools(netdev);
1827 if (rc)
1828 return rc;
1829 rc = init_tx_pools(netdev);
1830 if (rc)
1831 return rc;
1832
1833 release_napi(adapter); 1823 release_napi(adapter);
1834 rc = init_napi(adapter); 1824 release_vpd_data(adapter);
1825
1826 rc = init_resources(adapter);
1835 if (rc) 1827 if (rc)
1836 return rc; 1828 return rc;
1829
1837 } else { 1830 } else {
1838 rc = reset_tx_pools(adapter); 1831 rc = reset_tx_pools(adapter);
1839 if (rc) 1832 if (rc)
@@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
1917 adapter->state = VNIC_PROBED; 1910 adapter->state = VNIC_PROBED;
1918 return 0; 1911 return 0;
1919 } 1912 }
1920 /* netif_set_real_num_xx_queues needs to take rtnl lock here 1913
1921 * unless wait_for_reset is set, in which case the rtnl lock 1914 rc = init_resources(adapter);
1922 * has already been taken before initializing the reset
1923 */
1924 if (!adapter->wait_for_reset) {
1925 rtnl_lock();
1926 rc = init_resources(adapter);
1927 rtnl_unlock();
1928 } else {
1929 rc = init_resources(adapter);
1930 }
1931 if (rc) 1915 if (rc)
1932 return rc; 1916 return rc;
1933 1917
@@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
1986 struct ibmvnic_rwi *rwi; 1970 struct ibmvnic_rwi *rwi;
1987 struct ibmvnic_adapter *adapter; 1971 struct ibmvnic_adapter *adapter;
1988 struct net_device *netdev; 1972 struct net_device *netdev;
1973 bool we_lock_rtnl = false;
1989 u32 reset_state; 1974 u32 reset_state;
1990 int rc = 0; 1975 int rc = 0;
1991 1976
1992 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 1977 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1993 netdev = adapter->netdev; 1978 netdev = adapter->netdev;
1994 1979
1995 mutex_lock(&adapter->reset_lock); 1980 /* netif_set_real_num_xx_queues needs to take rtnl lock here
1981 * unless wait_for_reset is set, in which case the rtnl lock
1982 * has already been taken before initializing the reset
1983 */
1984 if (!adapter->wait_for_reset) {
1985 rtnl_lock();
1986 we_lock_rtnl = true;
1987 }
1996 reset_state = adapter->state; 1988 reset_state = adapter->state;
1997 1989
1998 rwi = get_next_rwi(adapter); 1990 rwi = get_next_rwi(adapter);
@@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
2020 if (rc) { 2012 if (rc) {
2021 netdev_dbg(adapter->netdev, "Reset failed\n"); 2013 netdev_dbg(adapter->netdev, "Reset failed\n");
2022 free_all_rwi(adapter); 2014 free_all_rwi(adapter);
2023 mutex_unlock(&adapter->reset_lock);
2024 return;
2025 } 2015 }
2026 2016
2027 adapter->resetting = false; 2017 adapter->resetting = false;
2028 mutex_unlock(&adapter->reset_lock); 2018 if (we_lock_rtnl)
2019 rtnl_unlock();
2029} 2020}
2030 2021
2031static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 2022static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4768 4759
4769 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4760 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4770 INIT_LIST_HEAD(&adapter->rwi_list); 4761 INIT_LIST_HEAD(&adapter->rwi_list);
4771 mutex_init(&adapter->reset_lock);
4772 mutex_init(&adapter->rwi_lock); 4762 mutex_init(&adapter->rwi_lock);
4773 adapter->resetting = false; 4763 adapter->resetting = false;
4774 4764
@@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
4840 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4830 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4841 4831
4842 adapter->state = VNIC_REMOVING; 4832 adapter->state = VNIC_REMOVING;
4843 unregister_netdev(netdev); 4833 rtnl_lock();
4844 mutex_lock(&adapter->reset_lock); 4834 unregister_netdevice(netdev);
4845 4835
4846 release_resources(adapter); 4836 release_resources(adapter);
4847 release_sub_crqs(adapter, 1); 4837 release_sub_crqs(adapter, 1);
@@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
4852 4842
4853 adapter->state = VNIC_REMOVED; 4843 adapter->state = VNIC_REMOVED;
4854 4844
4855 mutex_unlock(&adapter->reset_lock); 4845 rtnl_unlock();
4856 device_remove_file(&dev->dev, &dev_attr_failover); 4846 device_remove_file(&dev->dev, &dev_attr_failover);
4857 free_netdev(netdev); 4847 free_netdev(netdev);
4858 dev_set_drvdata(&dev->dev, NULL); 4848 dev_set_drvdata(&dev->dev, NULL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 18103b811d4d..99c4f8d331ce 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
1075 struct tasklet_struct tasklet; 1075 struct tasklet_struct tasklet;
1076 enum vnic_state state; 1076 enum vnic_state state;
1077 enum ibmvnic_reset_reason reset_reason; 1077 enum ibmvnic_reset_reason reset_reason;
1078 struct mutex reset_lock, rwi_lock; 1078 struct mutex rwi_lock;
1079 struct list_head rwi_list; 1079 struct list_head rwi_list;
1080 struct work_struct ibmvnic_reset; 1080 struct work_struct ibmvnic_reset;
1081 bool resetting; 1081 bool resetting;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 21c2688d6308..a3f45335437c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1413,7 +1413,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1413 } 1413 }
1414 1414
1415 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 1415 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1416 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state); 1416 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1417} 1417}
1418 1418
1419/** 1419/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index add1e457886d..433c8e688c78 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -33,7 +33,7 @@ static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
33} 33}
34 34
35/** 35/**
36 * i40e_add_xsk_umem - Store an UMEM for a certain ring/qid 36 * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
37 * @vsi: Current VSI 37 * @vsi: Current VSI
38 * @umem: UMEM to store 38 * @umem: UMEM to store
39 * @qid: Ring/qid to associate with the UMEM 39 * @qid: Ring/qid to associate with the UMEM
@@ -56,7 +56,7 @@ static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
56} 56}
57 57
58/** 58/**
59 * i40e_remove_xsk_umem - Remove an UMEM for a certain ring/qid 59 * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
60 * @vsi: Current VSI 60 * @vsi: Current VSI
61 * @qid: Ring/qid associated with the UMEM 61 * @qid: Ring/qid associated with the UMEM
62 **/ 62 **/
@@ -130,7 +130,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
130} 130}
131 131
132/** 132/**
133 * i40e_xsk_umem_enable - Enable/associate an UMEM to a certain ring/qid 133 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
134 * @vsi: Current VSI 134 * @vsi: Current VSI
135 * @umem: UMEM 135 * @umem: UMEM
136 * @qid: Rx ring to associate UMEM to 136 * @qid: Rx ring to associate UMEM to
@@ -189,7 +189,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
189} 189}
190 190
191/** 191/**
192 * i40e_xsk_umem_disable - Diassociate an UMEM from a certain ring/qid 192 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
193 * @vsi: Current VSI 193 * @vsi: Current VSI
194 * @qid: Rx ring to associate UMEM to 194 * @qid: Rx ring to associate UMEM to
195 * 195 *
@@ -255,12 +255,12 @@ int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
255} 255}
256 256
257/** 257/**
258 * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM 258 * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
259 * @vsi: Current VSI 259 * @vsi: Current VSI
260 * @umem: UMEM to enable/associate to a ring, or NULL to disable 260 * @umem: UMEM to enable/associate to a ring, or NULL to disable
261 * @qid: Rx ring to (dis)associate UMEM (from)to 261 * @qid: Rx ring to (dis)associate UMEM (from)to
262 * 262 *
263 * This function enables or disables an UMEM to a certain ring. 263 * This function enables or disables a UMEM to a certain ring.
264 * 264 *
265 * Returns 0 on success, <0 on failure 265 * Returns 0 on success, <0 on failure
266 **/ 266 **/
@@ -276,7 +276,7 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
276 * @rx_ring: Rx ring 276 * @rx_ring: Rx ring
277 * @xdp: xdp_buff used as input to the XDP program 277 * @xdp: xdp_buff used as input to the XDP program
278 * 278 *
279 * This function enables or disables an UMEM to a certain ring. 279 * This function enables or disables a UMEM to a certain ring.
280 * 280 *
281 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR} 281 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
282 **/ 282 **/
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index c54ebedca6da..c393cb2c0f16 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
842 nvm_word = E1000_INVM_DEFAULT_AL; 842 nvm_word = E1000_INVM_DEFAULT_AL;
843 tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; 843 tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
844 igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); 844 igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
845 phy_word = E1000_PHY_PLL_UNCONF;
845 for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { 846 for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
846 /* check current state directly from internal PHY */ 847 /* check current state directly from internal PHY */
847 igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); 848 igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 10dbaf4f6e80..9c42f741ed5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -2262,7 +2262,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2262 *autoneg = false; 2262 *autoneg = false;
2263 2263
2264 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || 2264 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2265 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { 2265 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
2266 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2267 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2266 *speed = IXGBE_LINK_SPEED_1GB_FULL; 2268 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2267 return 0; 2269 return 0;
2268 } 2270 }
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8c5ba4b81fb7..2d4d10a017e5 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -512,7 +512,8 @@ static int xrx200_probe(struct platform_device *pdev)
512 err = register_netdev(net_dev); 512 err = register_netdev(net_dev);
513 if (err) 513 if (err)
514 goto err_unprepare_clk; 514 goto err_unprepare_clk;
515 return err; 515
516 return 0;
516 517
517err_unprepare_clk: 518err_unprepare_clk:
518 clk_disable_unprepare(priv->clk); 519 clk_disable_unprepare(priv->clk);
@@ -520,7 +521,7 @@ err_unprepare_clk:
520err_uninit_dma: 521err_uninit_dma:
521 xrx200_hw_cleanup(priv); 522 xrx200_hw_cleanup(priv);
522 523
523 return 0; 524 return err;
524} 525}
525 526
526static int xrx200_remove(struct platform_device *pdev) 527static int xrx200_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 3ba672e9e353..e5397c8197b9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3343,7 +3343,6 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
3343 if (state->interface != PHY_INTERFACE_MODE_NA && 3343 if (state->interface != PHY_INTERFACE_MODE_NA &&
3344 state->interface != PHY_INTERFACE_MODE_QSGMII && 3344 state->interface != PHY_INTERFACE_MODE_QSGMII &&
3345 state->interface != PHY_INTERFACE_MODE_SGMII && 3345 state->interface != PHY_INTERFACE_MODE_SGMII &&
3346 state->interface != PHY_INTERFACE_MODE_2500BASEX &&
3347 !phy_interface_mode_is_8023z(state->interface) && 3346 !phy_interface_mode_is_8023z(state->interface) &&
3348 !phy_interface_mode_is_rgmii(state->interface)) { 3347 !phy_interface_mode_is_rgmii(state->interface)) {
3349 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 3348 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -3357,14 +3356,9 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
3357 /* Asymmetric pause is unsupported */ 3356 /* Asymmetric pause is unsupported */
3358 phylink_set(mask, Pause); 3357 phylink_set(mask, Pause);
3359 3358
3360 /* We cannot use 1Gbps when using the 2.5G interface. */ 3359 /* Half-duplex at speeds higher than 100Mbit is unsupported */
3361 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { 3360 phylink_set(mask, 1000baseT_Full);
3362 phylink_set(mask, 2500baseT_Full); 3361 phylink_set(mask, 1000baseX_Full);
3363 phylink_set(mask, 2500baseX_Full);
3364 } else {
3365 phylink_set(mask, 1000baseT_Full);
3366 phylink_set(mask, 1000baseX_Full);
3367 }
3368 3362
3369 if (!phy_interface_mode_is_8023z(state->interface)) { 3363 if (!phy_interface_mode_is_8023z(state->interface)) {
3370 /* 10M and 100M are only supported in non-802.3z mode */ 3364 /* 10M and 100M are only supported in non-802.3z mode */
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index deef5a998985..9af34e03892c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
337static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, 337static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
338 int align, u32 skip_mask, u32 *puid) 338 int align, u32 skip_mask, u32 *puid)
339{ 339{
340 u32 uid; 340 u32 uid = 0;
341 u32 res; 341 u32 res;
342 struct mlx4_zone_allocator *zone_alloc = zone->allocator; 342 struct mlx4_zone_allocator *zone_alloc = zone->allocator;
343 struct mlx4_zone_entry *curr_node; 343 struct mlx4_zone_entry *curr_node;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ebcd2778eeb3..23f1b5b512c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -540,8 +540,8 @@ struct slave_list {
540struct resource_allocator { 540struct resource_allocator {
541 spinlock_t alloc_lock; /* protect quotas */ 541 spinlock_t alloc_lock; /* protect quotas */
542 union { 542 union {
543 int res_reserved; 543 unsigned int res_reserved;
544 int res_port_rsvd[MLX4_MAX_PORTS]; 544 unsigned int res_port_rsvd[MLX4_MAX_PORTS];
545 }; 545 };
546 union { 546 union {
547 int res_free; 547 int res_free;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 2e84f10f59ba..1a11bc0e1612 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
363 container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, 363 container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
364 buf); 364 buf);
365 365
366 (*mpt_entry)->lkey = 0;
366 err = mlx4_SW2HW_MPT(dev, mailbox, key); 367 err = mlx4_SW2HW_MPT(dev, mailbox, key);
367 } 368 }
368 369
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d7fbd5b6ac95..118324802926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -569,6 +569,7 @@ struct mlx5e_rq {
569 569
570 unsigned long state; 570 unsigned long state;
571 int ix; 571 int ix;
572 unsigned int hw_mtu;
572 573
573 struct net_dim dim; /* Dynamic Interrupt Moderation */ 574 struct net_dim dim; /* Dynamic Interrupt Moderation */
574 575
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 023dc4bccd28..4a37713023be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
88 88
89 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); 89 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
90 *speed = mlx5e_port_ptys2speed(eth_proto_oper); 90 *speed = mlx5e_port_ptys2speed(eth_proto_oper);
91 if (!(*speed)) { 91 if (!(*speed))
92 mlx5_core_warn(mdev, "cannot get port speed\n");
93 err = -EINVAL; 92 err = -EINVAL;
94 }
95 93
96 return err; 94 return err;
97} 95}
@@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
258 case 40000: 256 case 40000:
259 if (!write) 257 if (!write)
260 *fec_policy = MLX5_GET(pplm_reg, pplm, 258 *fec_policy = MLX5_GET(pplm_reg, pplm,
261 fec_override_cap_10g_40g); 259 fec_override_admin_10g_40g);
262 else 260 else
263 MLX5_SET(pplm_reg, pplm, 261 MLX5_SET(pplm_reg, pplm,
264 fec_override_admin_10g_40g, *fec_policy); 262 fec_override_admin_10g_40g, *fec_policy);
@@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
310 case 10000: 308 case 10000:
311 case 40000: 309 case 40000:
312 *fec_cap = MLX5_GET(pplm_reg, pplm, 310 *fec_cap = MLX5_GET(pplm_reg, pplm,
313 fec_override_admin_10g_40g); 311 fec_override_cap_10g_40g);
314 break; 312 break;
315 case 25000: 313 case 25000:
316 *fec_cap = MLX5_GET(pplm_reg, pplm, 314 *fec_cap = MLX5_GET(pplm_reg, pplm,
@@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
394 392
395int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy) 393int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
396{ 394{
395 u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
397 bool fec_mode_not_supp_in_speed = false; 396 bool fec_mode_not_supp_in_speed = false;
398 u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
399 u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; 397 u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
400 u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; 398 u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
401 int sz = MLX5_ST_SZ_BYTES(pplm_reg); 399 int sz = MLX5_ST_SZ_BYTES(pplm_reg);
402 u32 current_fec_speed; 400 u8 fec_policy_auto = 0;
403 u8 fec_caps = 0; 401 u8 fec_caps = 0;
404 int err; 402 int err;
405 int i; 403 int i;
@@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
415 if (err) 413 if (err)
416 return err; 414 return err;
417 415
418 err = mlx5e_port_linkspeed(dev, &current_fec_speed); 416 MLX5_SET(pplm_reg, out, local_port, 1);
419 if (err)
420 return err;
421 417
422 memset(in, 0, sz); 418 for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
423 MLX5_SET(pplm_reg, in, local_port, 1);
424 for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
425 mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]); 419 mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
426 /* policy supported for link speed */ 420 /* policy supported for link speed, or policy is auto */
427 if (!!(fec_caps & fec_policy)) { 421 if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
428 mlx5e_fec_admin_field(in, &fec_policy, 1, 422 mlx5e_fec_admin_field(out, &fec_policy, 1,
429 fec_supported_speeds[i]); 423 fec_supported_speeds[i]);
430 } else { 424 } else {
431 if (fec_supported_speeds[i] == current_fec_speed) 425 /* turn off FEC if supported. Else, leave it the same */
432 return -EOPNOTSUPP; 426 if (fec_caps & fec_policy_nofec)
433 mlx5e_fec_admin_field(in, &no_fec_policy, 1, 427 mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
434 fec_supported_speeds[i]); 428 fec_supported_speeds[i]);
435 fec_mode_not_supp_in_speed = true; 429 fec_mode_not_supp_in_speed = true;
436 } 430 }
437 } 431 }
@@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
441 "FEC policy 0x%x is not supported for some speeds", 435 "FEC policy 0x%x is not supported for some speeds",
442 fec_policy); 436 fec_policy);
443 437
444 return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1); 438 return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
445} 439}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index c047da8752da..eac245a93f91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
130 int err; 130 int err;
131 131
132 err = mlx5e_port_linkspeed(priv->mdev, &speed); 132 err = mlx5e_port_linkspeed(priv->mdev, &speed);
133 if (err) 133 if (err) {
134 mlx5_core_warn(priv->mdev, "cannot get port speed\n");
134 return 0; 135 return 0;
136 }
135 137
136 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; 138 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
137 139
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3e770abfd802..25c1c4f96841 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
843 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, 843 ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
844 Autoneg); 844 Autoneg);
845 845
846 err = get_fec_supported_advertised(mdev, link_ksettings); 846 if (get_fec_supported_advertised(mdev, link_ksettings))
847 if (err)
848 netdev_dbg(netdev, "%s: FEC caps query failed: %d\n", 847 netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
849 __func__, err); 848 __func__, err);
850 849
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1243edbedc9e..871313d6b34d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
502 rq->channel = c; 502 rq->channel = c;
503 rq->ix = c->ix; 503 rq->ix = c->ix;
504 rq->mdev = mdev; 504 rq->mdev = mdev;
505 rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
505 rq->stats = &c->priv->channel_stats[c->ix].rq; 506 rq->stats = &c->priv->channel_stats[c->ix].rq;
506 507
507 rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; 508 rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1623 int err; 1624 int err;
1624 u32 i; 1625 u32 i;
1625 1626
1627 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1628 if (err)
1629 return err;
1630
1626 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, 1631 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1627 &cq->wq_ctrl); 1632 &cq->wq_ctrl);
1628 if (err) 1633 if (err)
1629 return err; 1634 return err;
1630 1635
1631 mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1632
1633 mcq->cqe_sz = 64; 1636 mcq->cqe_sz = 64;
1634 mcq->set_ci_db = cq->wq_ctrl.db.db; 1637 mcq->set_ci_db = cq->wq_ctrl.db.db;
1635 mcq->arm_db = cq->wq_ctrl.db.db + 1; 1638 mcq->arm_db = cq->wq_ctrl.db.db + 1;
@@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1687 int eqn; 1690 int eqn;
1688 int err; 1691 int err;
1689 1692
1693 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1694 if (err)
1695 return err;
1696
1690 inlen = MLX5_ST_SZ_BYTES(create_cq_in) + 1697 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1691 sizeof(u64) * cq->wq_ctrl.buf.npages; 1698 sizeof(u64) * cq->wq_ctrl.buf.npages;
1692 in = kvzalloc(inlen, GFP_KERNEL); 1699 in = kvzalloc(inlen, GFP_KERNEL);
@@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1700 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, 1707 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1701 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); 1708 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1702 1709
1703 mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1704
1705 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); 1710 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
1706 MLX5_SET(cqc, cqc, c_eqn, eqn); 1711 MLX5_SET(cqc, cqc, c_eqn, eqn);
1707 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); 1712 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
@@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1921 int err; 1926 int err;
1922 int eqn; 1927 int eqn;
1923 1928
1929 err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1930 if (err)
1931 return err;
1932
1924 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); 1933 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1925 if (!c) 1934 if (!c)
1926 return -ENOMEM; 1935 return -ENOMEM;
@@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1937 c->xdp = !!params->xdp_prog; 1946 c->xdp = !!params->xdp_prog;
1938 c->stats = &priv->channel_stats[ix].ch; 1947 c->stats = &priv->channel_stats[ix].ch;
1939 1948
1940 mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1941 c->irq_desc = irq_to_desc(irq); 1949 c->irq_desc = irq_to_desc(irq);
1942 1950
1943 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); 1951 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3574 return 0; 3582 return 0;
3575} 3583}
3576 3584
3585#ifdef CONFIG_MLX5_ESWITCH
3577static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) 3586static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3578{ 3587{
3579 struct mlx5e_priv *priv = netdev_priv(netdev); 3588 struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3586 3595
3587 return 0; 3596 return 0;
3588} 3597}
3598#endif
3589 3599
3590static int set_feature_rx_all(struct net_device *netdev, bool enable) 3600static int set_feature_rx_all(struct net_device *netdev, bool enable)
3591{ 3601{
@@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
3684 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); 3694 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3685 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, 3695 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
3686 set_feature_cvlan_filter); 3696 set_feature_cvlan_filter);
3697#ifdef CONFIG_MLX5_ESWITCH
3687 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); 3698 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
3699#endif
3688 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); 3700 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3689 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); 3701 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3690 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); 3702 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
3755 } 3767 }
3756 3768
3757 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { 3769 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
3770 bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
3758 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); 3771 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
3759 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); 3772 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
3760 3773
3761 reset = reset && (ppw_old != ppw_new); 3774 reset = reset && (is_linear || (ppw_old != ppw_new));
3762 } 3775 }
3763 3776
3764 if (!reset) { 3777 if (!reset) {
@@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
4678 FT_CAP(modify_root) && 4691 FT_CAP(modify_root) &&
4679 FT_CAP(identified_miss_table_mode) && 4692 FT_CAP(identified_miss_table_mode) &&
4680 FT_CAP(flow_table_modify)) { 4693 FT_CAP(flow_table_modify)) {
4694#ifdef CONFIG_MLX5_ESWITCH
4681 netdev->hw_features |= NETIF_F_HW_TC; 4695 netdev->hw_features |= NETIF_F_HW_TC;
4696#endif
4682#ifdef CONFIG_MLX5_EN_ARFS 4697#ifdef CONFIG_MLX5_EN_ARFS
4683 netdev->hw_features |= NETIF_F_NTUPLE; 4698 netdev->hw_features |= NETIF_F_NTUPLE;
4684#endif 4699#endif
@@ -5004,11 +5019,21 @@ err_free_netdev:
5004int mlx5e_attach_netdev(struct mlx5e_priv *priv) 5019int mlx5e_attach_netdev(struct mlx5e_priv *priv)
5005{ 5020{
5006 const struct mlx5e_profile *profile; 5021 const struct mlx5e_profile *profile;
5022 int max_nch;
5007 int err; 5023 int err;
5008 5024
5009 profile = priv->profile; 5025 profile = priv->profile;
5010 clear_bit(MLX5E_STATE_DESTROYING, &priv->state); 5026 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
5011 5027
5028 /* max number of channels may have changed */
5029 max_nch = mlx5e_get_max_num_channels(priv->mdev);
5030 if (priv->channels.params.num_channels > max_nch) {
5031 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
5032 priv->channels.params.num_channels = max_nch;
5033 mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
5034 MLX5E_INDIR_RQT_SIZE, max_nch);
5035 }
5036
5012 err = profile->init_tx(priv); 5037 err = profile->init_tx(priv);
5013 if (err) 5038 if (err)
5014 goto out; 5039 goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 79638dcbae78..16985ca3248d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1104 u32 frag_size; 1104 u32 frag_size;
1105 bool consumed; 1105 bool consumed;
1106 1106
1107 /* Check packet size. Note LRO doesn't use linear SKB */
1108 if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1109 rq->stats->oversize_pkts_sw_drop++;
1110 return NULL;
1111 }
1112
1107 va = page_address(di->page) + head_offset; 1113 va = page_address(di->page) + head_offset;
1108 data = va + rx_headroom; 1114 data = va + rx_headroom;
1109 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); 1115 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 35ded91203f5..4382ef85488c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
98 return 1; 98 return 1;
99} 99}
100 100
101#ifdef CONFIG_INET
102/* loopback test */
103#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
104static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
105#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
106
107struct mlx5ehdr { 101struct mlx5ehdr {
108 __be32 version; 102 __be32 version;
109 __be64 magic; 103 __be64 magic;
110 char text[ETH_GSTRING_LEN];
111}; 104};
112 105
106#ifdef CONFIG_INET
107/* loopback test */
108#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
109 sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
110#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
111
113static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) 112static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
114{ 113{
115 struct sk_buff *skb = NULL; 114 struct sk_buff *skb = NULL;
@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
117 struct ethhdr *ethh; 116 struct ethhdr *ethh;
118 struct udphdr *udph; 117 struct udphdr *udph;
119 struct iphdr *iph; 118 struct iphdr *iph;
120 int datalen, iplen; 119 int iplen;
121
122 datalen = MLX5E_TEST_PKT_SIZE -
123 (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
124 120
125 skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE); 121 skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
126 if (!skb) { 122 if (!skb) {
@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
149 /* Fill UDP header */ 145 /* Fill UDP header */
150 udph->source = htons(9); 146 udph->source = htons(9);
151 udph->dest = htons(9); /* Discard Protocol */ 147 udph->dest = htons(9); /* Discard Protocol */
152 udph->len = htons(datalen + sizeof(struct udphdr)); 148 udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
153 udph->check = 0; 149 udph->check = 0;
154 150
155 /* Fill IP header */ 151 /* Fill IP header */
@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
157 iph->ttl = 32; 153 iph->ttl = 32;
158 iph->version = 4; 154 iph->version = 4;
159 iph->protocol = IPPROTO_UDP; 155 iph->protocol = IPPROTO_UDP;
160 iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen; 156 iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
157 sizeof(struct mlx5ehdr);
161 iph->tot_len = htons(iplen); 158 iph->tot_len = htons(iplen);
162 iph->frag_off = 0; 159 iph->frag_off = 0;
163 iph->saddr = 0; 160 iph->saddr = 0;
@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
170 mlxh = skb_put(skb, sizeof(*mlxh)); 167 mlxh = skb_put(skb, sizeof(*mlxh));
171 mlxh->version = 0; 168 mlxh->version = 0;
172 mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC); 169 mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
173 strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
174 datalen -= sizeof(*mlxh);
175 skb_put_zero(skb, datalen);
176 170
177 skb->csum = 0; 171 skb->csum = 0;
178 skb->ip_summed = CHECKSUM_PARTIAL; 172 skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1e55b9c27ffc..3e99d0728b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
83 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, 83 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
84 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, 84 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
85 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, 85 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
86 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
86 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, 87 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
87 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, 88 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
88 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, 89 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
161 s->rx_wqe_err += rq_stats->wqe_err; 162 s->rx_wqe_err += rq_stats->wqe_err;
162 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; 163 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
163 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; 164 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
165 s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
164 s->rx_buff_alloc_err += rq_stats->buff_alloc_err; 166 s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
165 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; 167 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
166 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; 168 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
1189 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, 1191 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
1190 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, 1192 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1191 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, 1193 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1194 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1192 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, 1195 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1193 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, 1196 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1194 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, 1197 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 77f74ce11280..3f8e870ef4c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
96 u64 rx_wqe_err; 96 u64 rx_wqe_err;
97 u64 rx_mpwqe_filler_cqes; 97 u64 rx_mpwqe_filler_cqes;
98 u64 rx_mpwqe_filler_strides; 98 u64 rx_mpwqe_filler_strides;
99 u64 rx_oversize_pkts_sw_drop;
99 u64 rx_buff_alloc_err; 100 u64 rx_buff_alloc_err;
100 u64 rx_cqe_compress_blks; 101 u64 rx_cqe_compress_blks;
101 u64 rx_cqe_compress_pkts; 102 u64 rx_cqe_compress_pkts;
@@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
193 u64 wqe_err; 194 u64 wqe_err;
194 u64 mpwqe_filler_cqes; 195 u64 mpwqe_filler_cqes;
195 u64 mpwqe_filler_strides; 196 u64 mpwqe_filler_strides;
197 u64 oversize_pkts_sw_drop;
196 u64 buff_alloc_err; 198 u64 buff_alloc_err;
197 u64 cqe_compress_blks; 199 u64 cqe_compress_blks;
198 u64 cqe_compress_pkts; 200 u64 cqe_compress_pkts;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 608025ca5c04..fca6f4132c91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1447 inner_headers); 1447 inner_headers);
1448 } 1448 }
1449 1449
1450 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 1450 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1451 struct flow_dissector_key_eth_addrs *key = 1451 struct flow_dissector_key_basic *key =
1452 skb_flow_dissector_target(f->dissector, 1452 skb_flow_dissector_target(f->dissector,
1453 FLOW_DISSECTOR_KEY_ETH_ADDRS, 1453 FLOW_DISSECTOR_KEY_BASIC,
1454 f->key); 1454 f->key);
1455 struct flow_dissector_key_eth_addrs *mask = 1455 struct flow_dissector_key_basic *mask =
1456 skb_flow_dissector_target(f->dissector, 1456 skb_flow_dissector_target(f->dissector,
1457 FLOW_DISSECTOR_KEY_ETH_ADDRS, 1457 FLOW_DISSECTOR_KEY_BASIC,
1458 f->mask); 1458 f->mask);
1459 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1460 ntohs(mask->n_proto));
1461 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1462 ntohs(key->n_proto));
1459 1463
1460 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1464 if (mask->n_proto)
1461 dmac_47_16),
1462 mask->dst);
1463 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1464 dmac_47_16),
1465 key->dst);
1466
1467 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1468 smac_47_16),
1469 mask->src);
1470 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1471 smac_47_16),
1472 key->src);
1473
1474 if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
1475 *match_level = MLX5_MATCH_L2; 1465 *match_level = MLX5_MATCH_L2;
1476 } 1466 }
1477 1467
@@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1505 1495
1506 *match_level = MLX5_MATCH_L2; 1496 *match_level = MLX5_MATCH_L2;
1507 } 1497 }
1508 } else { 1498 } else if (*match_level != MLX5_MATCH_NONE) {
1509 MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); 1499 MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
1510 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); 1500 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1501 *match_level = MLX5_MATCH_L2;
1511 } 1502 }
1512 1503
1513 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { 1504 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1545 } 1536 }
1546 } 1537 }
1547 1538
1548 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { 1539 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1549 struct flow_dissector_key_basic *key = 1540 struct flow_dissector_key_eth_addrs *key =
1550 skb_flow_dissector_target(f->dissector, 1541 skb_flow_dissector_target(f->dissector,
1551 FLOW_DISSECTOR_KEY_BASIC, 1542 FLOW_DISSECTOR_KEY_ETH_ADDRS,
1552 f->key); 1543 f->key);
1553 struct flow_dissector_key_basic *mask = 1544 struct flow_dissector_key_eth_addrs *mask =
1554 skb_flow_dissector_target(f->dissector, 1545 skb_flow_dissector_target(f->dissector,
1555 FLOW_DISSECTOR_KEY_BASIC, 1546 FLOW_DISSECTOR_KEY_ETH_ADDRS,
1556 f->mask); 1547 f->mask);
1557 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1558 ntohs(mask->n_proto));
1559 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1560 ntohs(key->n_proto));
1561 1548
1562 if (mask->n_proto) 1549 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1550 dmac_47_16),
1551 mask->dst);
1552 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1553 dmac_47_16),
1554 key->dst);
1555
1556 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1557 smac_47_16),
1558 mask->src);
1559 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1560 smac_47_16),
1561 key->src);
1562
1563 if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
1563 *match_level = MLX5_MATCH_L2; 1564 *match_level = MLX5_MATCH_L2;
1564 } 1565 }
1565 1566
@@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1586 1587
1587 /* the HW doesn't need L3 inline to match on frag=no */ 1588 /* the HW doesn't need L3 inline to match on frag=no */
1588 if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) 1589 if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
1589 *match_level = MLX5_INLINE_MODE_L2; 1590 *match_level = MLX5_MATCH_L2;
1590 /* *** L2 attributes parsing up to here *** */ 1591 /* *** L2 attributes parsing up to here *** */
1591 else 1592 else
1592 *match_level = MLX5_INLINE_MODE_IP; 1593 *match_level = MLX5_MATCH_L3;
1593 } 1594 }
1594 } 1595 }
1595 1596
@@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2979 if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) 2980 if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
2980 return -EOPNOTSUPP; 2981 return -EOPNOTSUPP;
2981 2982
2982 if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { 2983 if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
2983 NL_SET_ERR_MSG_MOD(extack, 2984 NL_SET_ERR_MSG_MOD(extack,
2984 "current firmware doesn't support split rule for port mirroring"); 2985 "current firmware doesn't support split rule for port mirroring");
2985 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); 2986 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 515e3d6de051..5a22c5874f3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
83}; 83};
84 84
85static const struct rhashtable_params rhash_sa = { 85static const struct rhashtable_params rhash_sa = {
86 .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), 86 /* Keep out "cmd" field from the key as it's
87 .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), 87 * value is not constant during the lifetime
88 * of the key object.
89 */
90 .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
91 FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
92 .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
93 FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
88 .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), 94 .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
89 .automatic_shrinking = true, 95 .automatic_shrinking = true,
90 .min_size = 1, 96 .min_size = 1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index b59953daf8b4..11dabd62e2c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev)
560 560
561 netif_carrier_off(epriv->netdev); 561 netif_carrier_off(epriv->netdev);
562 mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); 562 mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
563 mlx5i_uninit_underlay_qp(epriv);
564 mlx5e_deactivate_priv_channels(epriv); 563 mlx5e_deactivate_priv_channels(epriv);
565 mlx5e_close_channels(&epriv->channels); 564 mlx5e_close_channels(&epriv->channels);
565 mlx5i_uninit_underlay_qp(epriv);
566unlock: 566unlock:
567 mutex_unlock(&epriv->state_lock); 567 mutex_unlock(&epriv->state_lock);
568 return 0; 568 return 0;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 867cddba840f..e8ca98c070f6 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1672,7 +1672,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
1672 netif_wake_queue(adapter->netdev); 1672 netif_wake_queue(adapter->netdev);
1673 } 1673 }
1674 1674
1675 if (!napi_complete_done(napi, weight)) 1675 if (!napi_complete(napi))
1676 goto done; 1676 goto done;
1677 1677
1678 /* enable isr */ 1678 /* enable isr */
@@ -1681,7 +1681,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
1681 lan743x_csr_read(adapter, INT_STS); 1681 lan743x_csr_read(adapter, INT_STS);
1682 1682
1683done: 1683done:
1684 return weight; 1684 return 0;
1685} 1685}
1686 1686
1687static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) 1687static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
@@ -1870,9 +1870,9 @@ static int lan743x_tx_open(struct lan743x_tx *tx)
1870 tx->vector_flags = lan743x_intr_get_vector_flags(adapter, 1870 tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
1871 INT_BIT_DMA_TX_ 1871 INT_BIT_DMA_TX_
1872 (tx->channel_number)); 1872 (tx->channel_number));
1873 netif_napi_add(adapter->netdev, 1873 netif_tx_napi_add(adapter->netdev,
1874 &tx->napi, lan743x_tx_napi_poll, 1874 &tx->napi, lan743x_tx_napi_poll,
1875 tx->ring_size - 1); 1875 tx->ring_size - 1);
1876 napi_enable(&tx->napi); 1876 napi_enable(&tx->napi);
1877 1877
1878 data = 0; 1878 data = 0;
@@ -3017,6 +3017,7 @@ static const struct dev_pm_ops lan743x_pm_ops = {
3017 3017
3018static const struct pci_device_id lan743x_pcidev_tbl[] = { 3018static const struct pci_device_id lan743x_pcidev_tbl[] = {
3019 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, 3019 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
3020 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
3020 { 0, } 3021 { 0, }
3021}; 3022};
3022 3023
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 0e82b6368798..2d6eea18973e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -548,6 +548,7 @@ struct lan743x_adapter;
548/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */ 548/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */
549#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR 549#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR
550#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430) 550#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430)
551#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431)
551 552
552#define PCI_CONFIG_LENGTH (0x1000) 553#define PCI_CONFIG_LENGTH (0x1000)
553 554
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8e8fa823d611..69966dfc6e3d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
191static void 191static void
192qed_dcbx_set_params(struct qed_dcbx_results *p_data, 192qed_dcbx_set_params(struct qed_dcbx_results *p_data,
193 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 193 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
194 bool enable, u8 prio, u8 tc, 194 bool app_tlv, bool enable, u8 prio, u8 tc,
195 enum dcbx_protocol_type type, 195 enum dcbx_protocol_type type,
196 enum qed_pci_personality personality) 196 enum qed_pci_personality personality)
197{ 197{
@@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
210 p_data->arr[type].dont_add_vlan0 = true; 210 p_data->arr[type].dont_add_vlan0 = true;
211 211
212 /* QM reconf data */ 212 /* QM reconf data */
213 if (p_hwfn->hw_info.personality == personality) 213 if (app_tlv && p_hwfn->hw_info.personality == personality)
214 qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); 214 qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
215 215
216 /* Configure dcbx vlan priority in doorbell block for roce EDPM */ 216 /* Configure dcbx vlan priority in doorbell block for roce EDPM */
@@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
225static void 225static void
226qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, 226qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
227 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 227 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
228 bool enable, u8 prio, u8 tc, 228 bool app_tlv, bool enable, u8 prio, u8 tc,
229 enum dcbx_protocol_type type) 229 enum dcbx_protocol_type type)
230{ 230{
231 enum qed_pci_personality personality; 231 enum qed_pci_personality personality;
@@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
240 240
241 personality = qed_dcbx_app_update[i].personality; 241 personality = qed_dcbx_app_update[i].personality;
242 242
243 qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, 243 qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
244 prio, tc, type, personality); 244 prio, tc, type, personality);
245 } 245 }
246} 246}
@@ -319,8 +319,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
319 enable = true; 319 enable = true;
320 } 320 }
321 321
322 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, 322 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
323 priority, tc, type); 323 enable, priority, tc, type);
324 } 324 }
325 } 325 }
326 326
@@ -341,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
341 continue; 341 continue;
342 342
343 enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; 343 enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
344 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, 344 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable,
345 priority, tc, type); 345 priority, tc, type);
346 } 346 }
347 347
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 78a638ec7c0a..979f1e4bc18b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -6071,7 +6071,7 @@ static const char * const s_igu_fifo_error_strs[] = {
6071 "no error", 6071 "no error",
6072 "length error", 6072 "length error",
6073 "function disabled", 6073 "function disabled",
6074 "VF sent command to attnetion address", 6074 "VF sent command to attention address",
6075 "host sent prod update command", 6075 "host sent prod update command",
6076 "read of during interrupt register while in MIMD mode", 6076 "read of during interrupt register while in MIMD mode",
6077 "access to PXP BAR reserved address", 6077 "access to PXP BAR reserved address",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 7ceb2b97538d..88a8576ca9ce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev)
185 qed_iscsi_free(p_hwfn); 185 qed_iscsi_free(p_hwfn);
186 qed_ooo_free(p_hwfn); 186 qed_ooo_free(p_hwfn);
187 } 187 }
188
189 if (QED_IS_RDMA_PERSONALITY(p_hwfn))
190 qed_rdma_info_free(p_hwfn);
191
188 qed_iov_free(p_hwfn); 192 qed_iov_free(p_hwfn);
189 qed_l2_free(p_hwfn); 193 qed_l2_free(p_hwfn);
190 qed_dmae_info_free(p_hwfn); 194 qed_dmae_info_free(p_hwfn);
@@ -481,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
481 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 485 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
482 486
483 /* Can't have multiple flags set here */ 487 /* Can't have multiple flags set here */
484 if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 488 if (bitmap_weight((unsigned long *)&pq_flags,
489 sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
490 DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
491 goto err;
492 }
493
494 if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
495 DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
485 goto err; 496 goto err;
497 }
486 498
487 switch (pq_flags) { 499 switch (pq_flags) {
488 case PQ_FLAGS_RLS: 500 case PQ_FLAGS_RLS:
@@ -506,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
506 } 518 }
507 519
508err: 520err:
509 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 521 return &qm_info->start_pq;
510 return NULL;
511} 522}
512 523
513/* save pq index in qm info */ 524/* save pq index in qm info */
@@ -531,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
531{ 542{
532 u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); 543 u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
533 544
545 if (max_tc == 0) {
546 DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
547 PQ_FLAGS_MCOS);
548 return p_hwfn->qm_info.start_pq;
549 }
550
534 if (tc > max_tc) 551 if (tc > max_tc)
535 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 552 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
536 553
537 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 554 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
538} 555}
539 556
540u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) 557u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
541{ 558{
542 u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); 559 u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
543 560
561 if (max_vf == 0) {
562 DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
563 PQ_FLAGS_VFS);
564 return p_hwfn->qm_info.start_pq;
565 }
566
544 if (vf > max_vf) 567 if (vf > max_vf)
545 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 568 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
546 569
547 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 570 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
548} 571}
549 572
550u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) 573u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
@@ -1081,6 +1104,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
1081 goto alloc_err; 1104 goto alloc_err;
1082 } 1105 }
1083 1106
1107 if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
1108 rc = qed_rdma_info_alloc(p_hwfn);
1109 if (rc)
1110 goto alloc_err;
1111 }
1112
1084 /* DMA info initialization */ 1113 /* DMA info initialization */
1085 rc = qed_dmae_info_alloc(p_hwfn); 1114 rc = qed_dmae_info_alloc(p_hwfn);
1086 if (rc) 1115 if (rc)
@@ -2102,11 +2131,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
2102 if (!p_ptt) 2131 if (!p_ptt)
2103 return -EAGAIN; 2132 return -EAGAIN;
2104 2133
2105 /* If roce info is allocated it means roce is initialized and should
2106 * be enabled in searcher.
2107 */
2108 if (p_hwfn->p_rdma_info && 2134 if (p_hwfn->p_rdma_info &&
2109 p_hwfn->b_rdma_enabled_in_prs) 2135 p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
2110 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); 2136 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
2111 2137
2112 /* Re-open incoming traffic */ 2138 /* Re-open incoming traffic */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 0f0aba793352..b22f464ea3fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
992 */ 992 */
993 do { 993 do {
994 index = p_sb_attn->sb_index; 994 index = p_sb_attn->sb_index;
995 /* finish reading index before the loop condition */
996 dma_rmb();
995 attn_bits = le32_to_cpu(p_sb_attn->atten_bits); 997 attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
996 attn_acks = le32_to_cpu(p_sb_attn->atten_ack); 998 attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
997 } while (index != p_sb_attn->sb_index); 999 } while (index != p_sb_attn->sb_index);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 35fd0db6a677..fff7f04d4525 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1782,9 +1782,9 @@ static int qed_drain(struct qed_dev *cdev)
1782 return -EBUSY; 1782 return -EBUSY;
1783 } 1783 }
1784 rc = qed_mcp_drain(hwfn, ptt); 1784 rc = qed_mcp_drain(hwfn, ptt);
1785 qed_ptt_release(hwfn, ptt);
1785 if (rc) 1786 if (rc)
1786 return rc; 1787 return rc;
1787 qed_ptt_release(hwfn, ptt);
1788 } 1788 }
1789 1789
1790 return 0; 1790 return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 62113438c880..7873d6dfd91f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
140 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; 140 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
141} 141}
142 142
143static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, 143int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
144 struct qed_ptt *p_ptt,
145 struct qed_rdma_start_in_params *params)
146{ 144{
147 struct qed_rdma_info *p_rdma_info; 145 struct qed_rdma_info *p_rdma_info;
148 u32 num_cons, num_tasks;
149 int rc = -ENOMEM;
150 146
151 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
152
153 /* Allocate a struct with current pf rdma info */
154 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); 147 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
155 if (!p_rdma_info) 148 if (!p_rdma_info)
156 return rc; 149 return -ENOMEM;
150
151 spin_lock_init(&p_rdma_info->lock);
157 152
158 p_hwfn->p_rdma_info = p_rdma_info; 153 p_hwfn->p_rdma_info = p_rdma_info;
154 return 0;
155}
156
157void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
158{
159 kfree(p_hwfn->p_rdma_info);
160 p_hwfn->p_rdma_info = NULL;
161}
162
163static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
164{
165 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
166 u32 num_cons, num_tasks;
167 int rc = -ENOMEM;
168
169 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
170
159 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 171 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
160 p_rdma_info->proto = PROTOCOLID_IWARP; 172 p_rdma_info->proto = PROTOCOLID_IWARP;
161 else 173 else
@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
183 /* Allocate a struct with device params and fill it */ 195 /* Allocate a struct with device params and fill it */
184 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); 196 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
185 if (!p_rdma_info->dev) 197 if (!p_rdma_info->dev)
186 goto free_rdma_info; 198 return rc;
187 199
188 /* Allocate a struct with port params and fill it */ 200 /* Allocate a struct with port params and fill it */
189 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); 201 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
@@ -298,8 +310,6 @@ free_rdma_port:
298 kfree(p_rdma_info->port); 310 kfree(p_rdma_info->port);
299free_rdma_dev: 311free_rdma_dev:
300 kfree(p_rdma_info->dev); 312 kfree(p_rdma_info->dev);
301free_rdma_info:
302 kfree(p_rdma_info);
303 313
304 return rc; 314 return rc;
305} 315}
@@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
370 380
371 kfree(p_rdma_info->port); 381 kfree(p_rdma_info->port);
372 kfree(p_rdma_info->dev); 382 kfree(p_rdma_info->dev);
373
374 kfree(p_rdma_info);
375} 383}
376 384
377static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) 385static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
@@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
679 687
680 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); 688 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
681 689
682 spin_lock_init(&p_hwfn->p_rdma_info->lock);
683
684 qed_rdma_init_devinfo(p_hwfn, params); 690 qed_rdma_init_devinfo(p_hwfn, params);
685 qed_rdma_init_port(p_hwfn); 691 qed_rdma_init_port(p_hwfn);
686 qed_rdma_init_events(p_hwfn, params); 692 qed_rdma_init_events(p_hwfn, params);
@@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt)
727 /* Disable RoCE search */ 733 /* Disable RoCE search */
728 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); 734 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
729 p_hwfn->b_rdma_enabled_in_prs = false; 735 p_hwfn->b_rdma_enabled_in_prs = false;
730 736 p_hwfn->p_rdma_info->active = 0;
731 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); 737 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
732 738
733 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); 739 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
@@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
1236 u8 max_stats_queues; 1242 u8 max_stats_queues;
1237 int rc; 1243 int rc;
1238 1244
1239 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { 1245 if (!rdma_cxt || !in_params || !out_params ||
1246 !p_hwfn->p_rdma_info->active) {
1240 DP_ERR(p_hwfn->cdev, 1247 DP_ERR(p_hwfn->cdev,
1241 "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", 1248 "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
1242 rdma_cxt, in_params, out_params); 1249 rdma_cxt, in_params, out_params);
@@ -1802,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
1802{ 1809{
1803 bool result; 1810 bool result;
1804 1811
1805 /* if rdma info has not been allocated, naturally there are no qps */ 1812 /* if rdma wasn't activated yet, naturally there are no qps */
1806 if (!p_hwfn->p_rdma_info) 1813 if (!p_hwfn->p_rdma_info->active)
1807 return false; 1814 return false;
1808 1815
1809 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1816 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
@@ -1849,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
1849 if (!p_ptt) 1856 if (!p_ptt)
1850 goto err; 1857 goto err;
1851 1858
1852 rc = qed_rdma_alloc(p_hwfn, p_ptt, params); 1859 rc = qed_rdma_alloc(p_hwfn);
1853 if (rc) 1860 if (rc)
1854 goto err1; 1861 goto err1;
1855 1862
@@ -1858,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
1858 goto err2; 1865 goto err2;
1859 1866
1860 qed_ptt_release(p_hwfn, p_ptt); 1867 qed_ptt_release(p_hwfn, p_ptt);
1868 p_hwfn->p_rdma_info->active = 1;
1861 1869
1862 return rc; 1870 return rc;
1863 1871
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 6f722ee8ee94..3689fe3e5935 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -102,6 +102,7 @@ struct qed_rdma_info {
102 u16 max_queue_zones; 102 u16 max_queue_zones;
103 enum protocol_type proto; 103 enum protocol_type proto;
104 struct qed_iwarp_info iwarp; 104 struct qed_iwarp_info iwarp;
105 u8 active:1;
105}; 106};
106 107
107struct qed_rdma_qp { 108struct qed_rdma_qp {
@@ -176,10 +177,14 @@ struct qed_rdma_qp {
176#if IS_ENABLED(CONFIG_QED_RDMA) 177#if IS_ENABLED(CONFIG_QED_RDMA)
177void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 178void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
178void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 179void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
180int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
181void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
179#else 182#else
180static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} 183static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
181static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, 184static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
182 struct qed_ptt *p_ptt) {} 185 struct qed_ptt *p_ptt) {}
186static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
187static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
183#endif 188#endif
184 189
185int 190int
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ef9538ee53d0..82412691ee66 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3605,7 +3605,7 @@ static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3605 "tx_jumbo", 3605 "tx_jumbo",
3606 "rx_mac_control_frames", 3606 "rx_mac_control_frames",
3607 "tx_mac_control_frames", 3607 "tx_mac_control_frames",
3608 "rx_frame_alignement_errors", 3608 "rx_frame_alignment_errors",
3609 "rx_long_ok", 3609 "rx_long_ok",
3610 "rx_long_err", 3610 "rx_long_err",
3611 "tx_sqe_errors", 3611 "tx_sqe_errors",
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 33265747bf39..0fbcedcdf6e2 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
63 * assume the pin serves as pull-up. If direction is 63 * assume the pin serves as pull-up. If direction is
64 * output, the default value is high. 64 * output, the default value is high.
65 */ 65 */
66 gpiod_set_value(bitbang->mdo, 1); 66 gpiod_set_value_cansleep(bitbang->mdo, 1);
67 return; 67 return;
68 } 68 }
69 69
@@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
78 struct mdio_gpio_info *bitbang = 78 struct mdio_gpio_info *bitbang =
79 container_of(ctrl, struct mdio_gpio_info, ctrl); 79 container_of(ctrl, struct mdio_gpio_info, ctrl);
80 80
81 return gpiod_get_value(bitbang->mdio); 81 return gpiod_get_value_cansleep(bitbang->mdio);
82} 82}
83 83
84static void mdio_set(struct mdiobb_ctrl *ctrl, int what) 84static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
87 container_of(ctrl, struct mdio_gpio_info, ctrl); 87 container_of(ctrl, struct mdio_gpio_info, ctrl);
88 88
89 if (bitbang->mdo) 89 if (bitbang->mdo)
90 gpiod_set_value(bitbang->mdo, what); 90 gpiod_set_value_cansleep(bitbang->mdo, what);
91 else 91 else
92 gpiod_set_value(bitbang->mdio, what); 92 gpiod_set_value_cansleep(bitbang->mdio, what);
93} 93}
94 94
95static void mdc_set(struct mdiobb_ctrl *ctrl, int what) 95static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
97 struct mdio_gpio_info *bitbang = 97 struct mdio_gpio_info *bitbang =
98 container_of(ctrl, struct mdio_gpio_info, ctrl); 98 container_of(ctrl, struct mdio_gpio_info, ctrl);
99 99
100 gpiod_set_value(bitbang->mdc, what); 100 gpiod_set_value_cansleep(bitbang->mdc, what);
101} 101}
102 102
103static const struct mdiobb_ops mdio_gpio_ops = { 103static const struct mdiobb_ops mdio_gpio_ops = {
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index a2e59f4f6f01..7cae17517744 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -810,17 +810,13 @@ static int vsc85xx_default_config(struct phy_device *phydev)
810 810
811 phydev->mdix_ctrl = ETH_TP_MDI_AUTO; 811 phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
812 mutex_lock(&phydev->lock); 812 mutex_lock(&phydev->lock);
813 rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
814 if (rc < 0)
815 goto out_unlock;
816 813
817 reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL); 814 reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
818 reg_val &= ~(RGMII_RX_CLK_DELAY_MASK); 815
819 reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS); 816 rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
820 phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val); 817 MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK,
818 reg_val);
821 819
822out_unlock:
823 rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
824 mutex_unlock(&phydev->lock); 820 mutex_unlock(&phydev->lock);
825 821
826 return rc; 822 return rc;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ab33d1777132..23ee3967c166 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -2197,6 +2197,14 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
2197 new_driver->mdiodrv.driver.remove = phy_remove; 2197 new_driver->mdiodrv.driver.remove = phy_remove;
2198 new_driver->mdiodrv.driver.owner = owner; 2198 new_driver->mdiodrv.driver.owner = owner;
2199 2199
2200 /* The following works around an issue where the PHY driver doesn't bind
2201 * to the device, resulting in the genphy driver being used instead of
2202 * the dedicated driver. The root cause of the issue isn't known yet
2203 * and seems to be in the base driver core. Once this is fixed we may
2204 * remove this workaround.
2205 */
2206 new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
2207
2200 retval = driver_register(&new_driver->mdiodrv.driver); 2208 retval = driver_register(&new_driver->mdiodrv.driver);
2201 if (retval) { 2209 if (retval) {
2202 pr_err("%s: Error %d in registering driver\n", 2210 pr_err("%s: Error %d in registering driver\n",
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index e9f101c9bae2..bfbb39f93554 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
216 * it just report sending a packet to the target 216 * it just report sending a packet to the target
217 * (without actual packet transfer). 217 * (without actual packet transfer).
218 */ 218 */
219 dev_kfree_skb_any(skb);
220 ndev->stats.tx_packets++; 219 ndev->stats.tx_packets++;
221 ndev->stats.tx_bytes += skb->len; 220 ndev->stats.tx_bytes += skb->len;
221 dev_kfree_skb_any(skb);
222 } 222 }
223 } 223 }
224 224
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index db633ae9f784..364f514d56d8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -985,8 +985,6 @@ static void team_port_disable(struct team *team,
985 team->en_port_count--; 985 team->en_port_count--;
986 team_queue_override_port_del(team, port); 986 team_queue_override_port_del(team, port);
987 team_adjust_ops(team); 987 team_adjust_ops(team);
988 team_notify_peers(team);
989 team_mcast_rejoin(team);
990 team_lower_state_changed(port); 988 team_lower_state_changed(port);
991} 989}
992 990
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 060135ceaf0e..e244f5d7512a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1536,6 +1536,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1536 1536
1537 if (!rx_batched || (!more && skb_queue_empty(queue))) { 1537 if (!rx_batched || (!more && skb_queue_empty(queue))) {
1538 local_bh_disable(); 1538 local_bh_disable();
1539 skb_record_rx_queue(skb, tfile->queue_index);
1539 netif_receive_skb(skb); 1540 netif_receive_skb(skb);
1540 local_bh_enable(); 1541 local_bh_enable();
1541 return; 1542 return;
@@ -1555,8 +1556,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1555 struct sk_buff *nskb; 1556 struct sk_buff *nskb;
1556 1557
1557 local_bh_disable(); 1558 local_bh_disable();
1558 while ((nskb = __skb_dequeue(&process_queue))) 1559 while ((nskb = __skb_dequeue(&process_queue))) {
1560 skb_record_rx_queue(nskb, tfile->queue_index);
1559 netif_receive_skb(nskb); 1561 netif_receive_skb(nskb);
1562 }
1563 skb_record_rx_queue(skb, tfile->queue_index);
1560 netif_receive_skb(skb); 1564 netif_receive_skb(skb);
1561 local_bh_enable(); 1565 local_bh_enable();
1562 } 1566 }
@@ -2451,6 +2455,7 @@ build:
2451 if (!rcu_dereference(tun->steering_prog)) 2455 if (!rcu_dereference(tun->steering_prog))
2452 rxhash = __skb_get_hash_symmetric(skb); 2456 rxhash = __skb_get_hash_symmetric(skb);
2453 2457
2458 skb_record_rx_queue(skb, tfile->queue_index);
2454 netif_receive_skb(skb); 2459 netif_receive_skb(skb);
2455 2460
2456 stats = get_cpu_ptr(tun->pcpu_stats); 2461 stats = get_cpu_ptr(tun->pcpu_stats);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 7275761a1177..3d8a70d3ea9b 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -140,7 +140,6 @@ struct ipheth_device {
140 struct usb_device *udev; 140 struct usb_device *udev;
141 struct usb_interface *intf; 141 struct usb_interface *intf;
142 struct net_device *net; 142 struct net_device *net;
143 struct sk_buff *tx_skb;
144 struct urb *tx_urb; 143 struct urb *tx_urb;
145 struct urb *rx_urb; 144 struct urb *rx_urb;
146 unsigned char *tx_buf; 145 unsigned char *tx_buf;
@@ -230,6 +229,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
230 case -ENOENT: 229 case -ENOENT:
231 case -ECONNRESET: 230 case -ECONNRESET:
232 case -ESHUTDOWN: 231 case -ESHUTDOWN:
232 case -EPROTO:
233 return; 233 return;
234 case 0: 234 case 0:
235 break; 235 break;
@@ -281,7 +281,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
281 dev_err(&dev->intf->dev, "%s: urb status: %d\n", 281 dev_err(&dev->intf->dev, "%s: urb status: %d\n",
282 __func__, status); 282 __func__, status);
283 283
284 dev_kfree_skb_irq(dev->tx_skb);
285 if (status == 0) 284 if (status == 0)
286 netif_wake_queue(dev->net); 285 netif_wake_queue(dev->net);
287 else 286 else
@@ -423,7 +422,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
423 if (skb->len > IPHETH_BUF_SIZE) { 422 if (skb->len > IPHETH_BUF_SIZE) {
424 WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len); 423 WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
425 dev->net->stats.tx_dropped++; 424 dev->net->stats.tx_dropped++;
426 dev_kfree_skb_irq(skb); 425 dev_kfree_skb_any(skb);
427 return NETDEV_TX_OK; 426 return NETDEV_TX_OK;
428 } 427 }
429 428
@@ -443,12 +442,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
443 dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n", 442 dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
444 __func__, retval); 443 __func__, retval);
445 dev->net->stats.tx_errors++; 444 dev->net->stats.tx_errors++;
446 dev_kfree_skb_irq(skb); 445 dev_kfree_skb_any(skb);
447 } else { 446 } else {
448 dev->tx_skb = skb;
449
450 dev->net->stats.tx_packets++; 447 dev->net->stats.tx_packets++;
451 dev->net->stats.tx_bytes += skb->len; 448 dev->net->stats.tx_bytes += skb->len;
449 dev_consume_skb_any(skb);
452 netif_stop_queue(net); 450 netif_stop_queue(net);
453 } 451 }
454 452
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3e2c041d76ac..cecfd77c9f3c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,7 +70,8 @@ static const unsigned long guest_offloads[] = {
70 VIRTIO_NET_F_GUEST_TSO4, 70 VIRTIO_NET_F_GUEST_TSO4,
71 VIRTIO_NET_F_GUEST_TSO6, 71 VIRTIO_NET_F_GUEST_TSO6,
72 VIRTIO_NET_F_GUEST_ECN, 72 VIRTIO_NET_F_GUEST_ECN,
73 VIRTIO_NET_F_GUEST_UFO 73 VIRTIO_NET_F_GUEST_UFO,
74 VIRTIO_NET_F_GUEST_CSUM
74}; 75};
75 76
76struct virtnet_stat_desc { 77struct virtnet_stat_desc {
@@ -2334,9 +2335,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
2334 if (!vi->guest_offloads) 2335 if (!vi->guest_offloads)
2335 return 0; 2336 return 0;
2336 2337
2337 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
2338 offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
2339
2340 return virtnet_set_guest_offloads(vi, offloads); 2338 return virtnet_set_guest_offloads(vi, offloads);
2341} 2339}
2342 2340
@@ -2346,8 +2344,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
2346 2344
2347 if (!vi->guest_offloads) 2345 if (!vi->guest_offloads)
2348 return 0; 2346 return 0;
2349 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
2350 offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
2351 2347
2352 return virtnet_set_guest_offloads(vi, offloads); 2348 return virtnet_set_guest_offloads(vi, offloads);
2353} 2349}
@@ -2365,8 +2361,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2365 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 2361 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2366 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 2362 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2367 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 2363 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2368 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { 2364 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
2369 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); 2365 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
2366 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
2370 return -EOPNOTSUPP; 2367 return -EOPNOTSUPP;
2371 } 2368 }
2372 2369
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a1c2801ded10..7e49342bae38 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6867,7 +6867,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6867 u32 bitmap; 6867 u32 bitmap;
6868 6868
6869 if (drop) { 6869 if (drop) {
6870 if (vif->type == NL80211_IFTYPE_STATION) { 6870 if (vif && vif->type == NL80211_IFTYPE_STATION) {
6871 bitmap = ~(1 << WMI_MGMT_TID); 6871 bitmap = ~(1 << WMI_MGMT_TID);
6872 list_for_each_entry(arvif, &ar->arvifs, list) { 6872 list_for_each_entry(arvif, &ar->arvifs, list) {
6873 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 6873 if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1e3b5f4a4cf9..f23cb2f3d296 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1251,6 +1251,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1251 struct ath_vif *avp = (void *)vif->drv_priv; 1251 struct ath_vif *avp = (void *)vif->drv_priv;
1252 struct ath_node *an = &avp->mcast_node; 1252 struct ath_node *an = &avp->mcast_node;
1253 1253
1254 mutex_lock(&sc->mutex);
1254 if (IS_ENABLED(CONFIG_ATH9K_TX99)) { 1255 if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
1255 if (sc->cur_chan->nvifs >= 1) { 1256 if (sc->cur_chan->nvifs >= 1) {
1256 mutex_unlock(&sc->mutex); 1257 mutex_unlock(&sc->mutex);
@@ -1259,8 +1260,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1259 sc->tx99_vif = vif; 1260 sc->tx99_vif = vif;
1260 } 1261 }
1261 1262
1262 mutex_lock(&sc->mutex);
1263
1264 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 1263 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
1265 sc->cur_chan->nvifs++; 1264 sc->cur_chan->nvifs++;
1266 1265
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 230a378c26fc..7f0a5bade70a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -6005,7 +6005,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
6005 * for subsequent chanspecs. 6005 * for subsequent chanspecs.
6006 */ 6006 */
6007 channel->flags = IEEE80211_CHAN_NO_HT40 | 6007 channel->flags = IEEE80211_CHAN_NO_HT40 |
6008 IEEE80211_CHAN_NO_80MHZ; 6008 IEEE80211_CHAN_NO_80MHZ |
6009 IEEE80211_CHAN_NO_160MHZ;
6009 ch.bw = BRCMU_CHAN_BW_20; 6010 ch.bw = BRCMU_CHAN_BW_20;
6010 cfg->d11inf.encchspec(&ch); 6011 cfg->d11inf.encchspec(&ch);
6011 chaninfo = ch.chspec; 6012 chaninfo = ch.chspec;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index e7584b842dce..eb5db94f5745 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
193 } 193 }
194 break; 194 break;
195 case BRCMU_CHSPEC_D11AC_BW_160: 195 case BRCMU_CHSPEC_D11AC_BW_160:
196 ch->bw = BRCMU_CHAN_BW_160;
197 ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
198 BRCMU_CHSPEC_D11AC_SB_SHIFT);
196 switch (ch->sb) { 199 switch (ch->sb) {
197 case BRCMU_CHAN_SB_LLL: 200 case BRCMU_CHAN_SB_LLL:
198 ch->control_ch_num -= CH_70MHZ_APART; 201 ch->control_ch_num -= CH_70MHZ_APART;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 2439e98431ee..7492dfb6729b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2017 Intel Deutschland GmbH 8 * Copyright(c) 2017 Intel Deutschland GmbH
9 * Copyright(c) 2018 Intel Corporation
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
26 * BSD LICENSE 27 * BSD LICENSE
27 * 28 *
28 * Copyright(c) 2017 Intel Deutschland GmbH 29 * Copyright(c) 2017 Intel Deutschland GmbH
30 * Copyright(c) 2018 Intel Corporation
29 * All rights reserved. 31 * All rights reserved.
30 * 32 *
31 * Redistribution and use in source and binary forms, with or without 33 * Redistribution and use in source and binary forms, with or without
@@ -81,7 +83,7 @@
81#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) 83#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2)
82#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ 84#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \
83 ACPI_SAR_TABLE_SIZE + 3) 85 ACPI_SAR_TABLE_SIZE + 3)
84#define ACPI_WGDS_WIFI_DATA_SIZE 18 86#define ACPI_WGDS_WIFI_DATA_SIZE 19
85#define ACPI_WRDD_WIFI_DATA_SIZE 2 87#define ACPI_WRDD_WIFI_DATA_SIZE 2
86#define ACPI_SPLC_WIFI_DATA_SIZE 2 88#define ACPI_SPLC_WIFI_DATA_SIZE 2
87 89
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 6b95d0e75889..2b8b50a77990 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -154,7 +154,11 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
154 const struct iwl_fw_runtime_ops *ops, void *ops_ctx, 154 const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
155 struct dentry *dbgfs_dir); 155 struct dentry *dbgfs_dir);
156 156
157void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); 157static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
158{
159 kfree(fwrt->dump.d3_debug_data);
160 fwrt->dump.d3_debug_data = NULL;
161}
158 162
159void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); 163void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
160 164
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index dade206d5511..2ba890445c35 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -893,7 +893,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
893 IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); 893 IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
894 894
895 BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * 895 BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
896 ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); 896 ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
897 897
898 BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); 898 BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
899 899
@@ -928,6 +928,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
928 return -ENOENT; 928 return -ENOENT;
929} 929}
930 930
931static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
932{
933 return -ENOENT;
934}
935
931static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) 936static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
932{ 937{
933 return 0; 938 return 0;
@@ -954,8 +959,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
954 IWL_DEBUG_RADIO(mvm, 959 IWL_DEBUG_RADIO(mvm,
955 "WRDS SAR BIOS table invalid or unavailable. (%d)\n", 960 "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
956 ret); 961 ret);
957 /* if not available, don't fail and don't bother with EWRD */ 962 /*
958 return 0; 963 * If not available, don't fail and don't bother with EWRD.
964 * Return 1 to tell that we can't use WGDS either.
965 */
966 return 1;
959 } 967 }
960 968
961 ret = iwl_mvm_sar_get_ewrd_table(mvm); 969 ret = iwl_mvm_sar_get_ewrd_table(mvm);
@@ -968,9 +976,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
968 /* choose profile 1 (WRDS) as default for both chains */ 976 /* choose profile 1 (WRDS) as default for both chains */
969 ret = iwl_mvm_sar_select_profile(mvm, 1, 1); 977 ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
970 978
971 /* if we don't have profile 0 from BIOS, just skip it */ 979 /*
980 * If we don't have profile 0 from BIOS, just skip it. This
981 * means that SAR Geo will not be enabled either, even if we
982 * have other valid profiles.
983 */
972 if (ret == -ENOENT) 984 if (ret == -ENOENT)
973 return 0; 985 return 1;
974 986
975 return ret; 987 return ret;
976} 988}
@@ -1168,11 +1180,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
1168 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); 1180 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1169 1181
1170 ret = iwl_mvm_sar_init(mvm); 1182 ret = iwl_mvm_sar_init(mvm);
1171 if (ret) 1183 if (ret == 0) {
1172 goto error; 1184 ret = iwl_mvm_sar_geo_init(mvm);
1185 } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
1186 /*
1187 * If basic SAR is not available, we check for WGDS,
1188 * which should *not* be available either. If it is
1189 * available, issue an error, because we can't use SAR
1190 * Geo without basic SAR.
1191 */
1192 IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
1193 }
1173 1194
1174 ret = iwl_mvm_sar_geo_init(mvm); 1195 if (ret < 0)
1175 if (ret)
1176 goto error; 1196 goto error;
1177 1197
1178 iwl_mvm_leds_sync(mvm); 1198 iwl_mvm_leds_sync(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 505b0385d800..00f831d88366 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -301,8 +301,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
301 goto out; 301 goto out;
302 } 302 }
303 303
304 if (changed) 304 if (changed) {
305 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); 305 u32 status = le32_to_cpu(resp->status);
306
307 *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
308 status == MCC_RESP_ILLEGAL);
309 }
306 310
307 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 311 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
308 __le32_to_cpu(resp->n_channels), 312 __le32_to_cpu(resp->n_channels),
@@ -4444,10 +4448,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4444 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 4448 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
4445 } 4449 }
4446 4450
4447 if (!fw_has_capa(&mvm->fw->ucode_capa,
4448 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4449 return;
4450
4451 /* if beacon filtering isn't on mac80211 does it anyway */ 4451 /* if beacon filtering isn't on mac80211 does it anyway */
4452 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) 4452 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4453 return; 4453 return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 3633f27d048a..6fc5cc1f2b5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -539,9 +539,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
539 } 539 }
540 540
541 IWL_DEBUG_LAR(mvm, 541 IWL_DEBUG_LAR(mvm,
542 "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", 542 "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
543 status, mcc, mcc >> 8, mcc & 0xff, 543 status, mcc, mcc >> 8, mcc & 0xff, n_channels);
544 !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
545 544
546exit: 545exit:
547 iwl_free_resp(&cmd); 546 iwl_free_resp(&cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 0e2092526fae..af3fba10abc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -858,6 +858,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
858 iwl_mvm_thermal_exit(mvm); 858 iwl_mvm_thermal_exit(mvm);
859 out_free: 859 out_free:
860 iwl_fw_flush_dump(&mvm->fwrt); 860 iwl_fw_flush_dump(&mvm->fwrt);
861 iwl_fw_runtime_free(&mvm->fwrt);
861 862
862 if (iwlmvm_mod_params.init_dbg) 863 if (iwlmvm_mod_params.init_dbg)
863 return op_mode; 864 return op_mode;
@@ -910,6 +911,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
910 911
911 iwl_mvm_tof_clean(mvm); 912 iwl_mvm_tof_clean(mvm);
912 913
914 iwl_fw_runtime_free(&mvm->fwrt);
913 mutex_destroy(&mvm->mutex); 915 mutex_destroy(&mvm->mutex);
914 mutex_destroy(&mvm->d0i3_suspend_mutex); 916 mutex_destroy(&mvm->d0i3_suspend_mutex);
915 917
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index 0ccbcd7e887d..c30d8f5bbf2a 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,6 +1,12 @@
1config MT76_CORE 1config MT76_CORE
2 tristate 2 tristate
3 3
4config MT76_LEDS
5 bool
6 depends on MT76_CORE
7 depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
8 default y
9
4config MT76_USB 10config MT76_USB
5 tristate 11 tristate
6 depends on MT76_CORE 12 depends on MT76_CORE
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 2a699e8b79bf..7d219ff2d480 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -345,9 +345,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
345 mt76_check_sband(dev, NL80211_BAND_2GHZ); 345 mt76_check_sband(dev, NL80211_BAND_2GHZ);
346 mt76_check_sband(dev, NL80211_BAND_5GHZ); 346 mt76_check_sband(dev, NL80211_BAND_5GHZ);
347 347
348 ret = mt76_led_init(dev); 348 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
349 if (ret) 349 ret = mt76_led_init(dev);
350 return ret; 350 if (ret)
351 return ret;
352 }
351 353
352 return ieee80211_register_hw(hw); 354 return ieee80211_register_hw(hw);
353} 355}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 47c42c607964..7806963b1905 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -71,7 +71,6 @@ struct mt76x02_dev {
71 struct mac_address macaddr_list[8]; 71 struct mac_address macaddr_list[8];
72 72
73 struct mutex phy_mutex; 73 struct mutex phy_mutex;
74 struct mutex mutex;
75 74
76 u8 txdone_seq; 75 u8 txdone_seq;
77 DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); 76 DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 3824290b219d..fd125722d1fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -507,8 +507,10 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
507 mt76x2_dfs_init_detector(dev); 507 mt76x2_dfs_init_detector(dev);
508 508
509 /* init led callbacks */ 509 /* init led callbacks */
510 dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; 510 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
511 dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; 511 dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
512 dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
513 }
512 514
513 ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, 515 ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
514 ARRAY_SIZE(mt76x02_rates)); 516 ARRAY_SIZE(mt76x02_rates));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 034a06295668..3f001bd6806c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -272,9 +272,9 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
272 if (val != ~0 && val > 0xffff) 272 if (val != ~0 && val > 0xffff)
273 return -EINVAL; 273 return -EINVAL;
274 274
275 mutex_lock(&dev->mutex); 275 mutex_lock(&dev->mt76.mutex);
276 mt76x2_mac_set_tx_protection(dev, val); 276 mt76x2_mac_set_tx_protection(dev, val);
277 mutex_unlock(&dev->mutex); 277 mutex_unlock(&dev->mt76.mutex);
278 278
279 return 0; 279 return 0;
280} 280}
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 4c2154b9e6a3..bd10165d7eec 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -285,7 +285,7 @@ static int wl1271_probe(struct sdio_func *func,
285 struct resource res[2]; 285 struct resource res[2];
286 mmc_pm_flag_t mmcflags; 286 mmc_pm_flag_t mmcflags;
287 int ret = -ENOMEM; 287 int ret = -ENOMEM;
288 int irq, wakeirq; 288 int irq, wakeirq, num_irqs;
289 const char *chip_family; 289 const char *chip_family;
290 290
291 /* We are only able to handle the wlan function */ 291 /* We are only able to handle the wlan function */
@@ -353,12 +353,17 @@ static int wl1271_probe(struct sdio_func *func,
353 irqd_get_trigger_type(irq_get_irq_data(irq)); 353 irqd_get_trigger_type(irq_get_irq_data(irq));
354 res[0].name = "irq"; 354 res[0].name = "irq";
355 355
356 res[1].start = wakeirq;
357 res[1].flags = IORESOURCE_IRQ |
358 irqd_get_trigger_type(irq_get_irq_data(wakeirq));
359 res[1].name = "wakeirq";
360 356
361 ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); 357 if (wakeirq > 0) {
358 res[1].start = wakeirq;
359 res[1].flags = IORESOURCE_IRQ |
360 irqd_get_trigger_type(irq_get_irq_data(wakeirq));
361 res[1].name = "wakeirq";
362 num_irqs = 2;
363 } else {
364 num_irqs = 1;
365 }
366 ret = platform_device_add_resources(glue->core, res, num_irqs);
362 if (ret) { 367 if (ret) {
363 dev_err(glue->dev, "can't add resources\n"); 368 dev_err(glue->dev, "can't add resources\n");
364 goto out_dev_put; 369 goto out_dev_put;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 559d567693b8..3cf1b773158e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3314,6 +3314,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3314 struct nvme_ns *ns, *next; 3314 struct nvme_ns *ns, *next;
3315 LIST_HEAD(ns_list); 3315 LIST_HEAD(ns_list);
3316 3316
3317 /* prevent racing with ns scanning */
3318 flush_work(&ctrl->scan_work);
3319
3317 /* 3320 /*
3318 * The dead states indicates the controller was not gracefully 3321 * The dead states indicates the controller was not gracefully
3319 * disconnected. In that case, we won't be able to flush any data while 3322 * disconnected. In that case, we won't be able to flush any data while
@@ -3476,7 +3479,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
3476 nvme_mpath_stop(ctrl); 3479 nvme_mpath_stop(ctrl);
3477 nvme_stop_keep_alive(ctrl); 3480 nvme_stop_keep_alive(ctrl);
3478 flush_work(&ctrl->async_event_work); 3481 flush_work(&ctrl->async_event_work);
3479 flush_work(&ctrl->scan_work);
3480 cancel_work_sync(&ctrl->fw_act_work); 3482 cancel_work_sync(&ctrl->fw_act_work);
3481 if (ctrl->ops->stop_ctrl) 3483 if (ctrl->ops->stop_ctrl)
3482 ctrl->ops->stop_ctrl(ctrl); 3484 ctrl->ops->stop_ctrl(ctrl);
@@ -3585,7 +3587,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3585 3587
3586 return 0; 3588 return 0;
3587out_free_name: 3589out_free_name:
3588 kfree_const(dev->kobj.name); 3590 kfree_const(ctrl->device->kobj.name);
3589out_release_instance: 3591out_release_instance:
3590 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3592 ida_simple_remove(&nvme_instance_ida, ctrl->instance);
3591out: 3593out:
@@ -3607,7 +3609,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
3607 down_read(&ctrl->namespaces_rwsem); 3609 down_read(&ctrl->namespaces_rwsem);
3608 3610
3609 /* Forcibly unquiesce queues to avoid blocking dispatch */ 3611 /* Forcibly unquiesce queues to avoid blocking dispatch */
3610 if (ctrl->admin_q) 3612 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
3611 blk_mq_unquiesce_queue(ctrl->admin_q); 3613 blk_mq_unquiesce_queue(ctrl->admin_q);
3612 3614
3613 list_for_each_entry(ns, &ctrl->namespaces, list) 3615 list_for_each_entry(ns, &ctrl->namespaces, list)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0b70c8bab045..feb86b59170e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -152,6 +152,7 @@ struct nvme_fc_ctrl {
152 152
153 bool ioq_live; 153 bool ioq_live;
154 bool assoc_active; 154 bool assoc_active;
155 atomic_t err_work_active;
155 u64 association_id; 156 u64 association_id;
156 157
157 struct list_head ctrl_list; /* rport->ctrl_list */ 158 struct list_head ctrl_list; /* rport->ctrl_list */
@@ -160,6 +161,7 @@ struct nvme_fc_ctrl {
160 struct blk_mq_tag_set tag_set; 161 struct blk_mq_tag_set tag_set;
161 162
162 struct delayed_work connect_work; 163 struct delayed_work connect_work;
164 struct work_struct err_work;
163 165
164 struct kref ref; 166 struct kref ref;
165 u32 flags; 167 u32 flags;
@@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1531 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; 1533 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1532 int i; 1534 int i;
1533 1535
1536 /* ensure we've initialized the ops once */
1537 if (!(aen_op->flags & FCOP_FLAGS_AEN))
1538 return;
1539
1534 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) 1540 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1535 __nvme_fc_abort_op(ctrl, aen_op); 1541 __nvme_fc_abort_op(ctrl, aen_op);
1536} 1542}
@@ -1746,12 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1746 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; 1752 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1747 int res; 1753 int res;
1748 1754
1749 nvme_req(rq)->ctrl = &ctrl->ctrl;
1750 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); 1755 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
1751 if (res) 1756 if (res)
1752 return res; 1757 return res;
1753 op->op.fcp_req.first_sgl = &op->sgl[0]; 1758 op->op.fcp_req.first_sgl = &op->sgl[0];
1754 op->op.fcp_req.private = &op->priv[0]; 1759 op->op.fcp_req.private = &op->priv[0];
1760 nvme_req(rq)->ctrl = &ctrl->ctrl;
1755 return res; 1761 return res;
1756} 1762}
1757 1763
@@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2049static void 2055static void
2050nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) 2056nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2051{ 2057{
2052 /* only proceed if in LIVE state - e.g. on first error */ 2058 int active;
2059
2060 /*
2061 * if an error (io timeout, etc) while (re)connecting,
2062 * it's an error on creating the new association.
2063 * Start the error recovery thread if it hasn't already
2064 * been started. It is expected there could be multiple
2065 * ios hitting this path before things are cleaned up.
2066 */
2067 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2068 active = atomic_xchg(&ctrl->err_work_active, 1);
2069 if (!active && !schedule_work(&ctrl->err_work)) {
2070 atomic_set(&ctrl->err_work_active, 0);
2071 WARN_ON(1);
2072 }
2073 return;
2074 }
2075
2076 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
2053 if (ctrl->ctrl.state != NVME_CTRL_LIVE) 2077 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2054 return; 2078 return;
2055 2079
@@ -2814,6 +2838,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2814{ 2838{
2815 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2839 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2816 2840
2841 cancel_work_sync(&ctrl->err_work);
2817 cancel_delayed_work_sync(&ctrl->connect_work); 2842 cancel_delayed_work_sync(&ctrl->connect_work);
2818 /* 2843 /*
2819 * kill the association on the link side. this will block 2844 * kill the association on the link side. this will block
@@ -2866,23 +2891,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2866} 2891}
2867 2892
2868static void 2893static void
2869nvme_fc_reset_ctrl_work(struct work_struct *work) 2894__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
2870{ 2895{
2871 struct nvme_fc_ctrl *ctrl = 2896 nvme_stop_keep_alive(&ctrl->ctrl);
2872 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2873 int ret;
2874
2875 nvme_stop_ctrl(&ctrl->ctrl);
2876 2897
2877 /* will block will waiting for io to terminate */ 2898 /* will block will waiting for io to terminate */
2878 nvme_fc_delete_association(ctrl); 2899 nvme_fc_delete_association(ctrl);
2879 2900
2880 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 2901 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
2902 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
2881 dev_err(ctrl->ctrl.device, 2903 dev_err(ctrl->ctrl.device,
2882 "NVME-FC{%d}: error_recovery: Couldn't change state " 2904 "NVME-FC{%d}: error_recovery: Couldn't change state "
2883 "to CONNECTING\n", ctrl->cnum); 2905 "to CONNECTING\n", ctrl->cnum);
2884 return; 2906}
2885 } 2907
2908static void
2909nvme_fc_reset_ctrl_work(struct work_struct *work)
2910{
2911 struct nvme_fc_ctrl *ctrl =
2912 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2913 int ret;
2914
2915 __nvme_fc_terminate_io(ctrl);
2916
2917 nvme_stop_ctrl(&ctrl->ctrl);
2886 2918
2887 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) 2919 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
2888 ret = nvme_fc_create_association(ctrl); 2920 ret = nvme_fc_create_association(ctrl);
@@ -2897,6 +2929,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
2897 ctrl->cnum); 2929 ctrl->cnum);
2898} 2930}
2899 2931
2932static void
2933nvme_fc_connect_err_work(struct work_struct *work)
2934{
2935 struct nvme_fc_ctrl *ctrl =
2936 container_of(work, struct nvme_fc_ctrl, err_work);
2937
2938 __nvme_fc_terminate_io(ctrl);
2939
2940 atomic_set(&ctrl->err_work_active, 0);
2941
2942 /*
2943 * Rescheduling the connection after recovering
2944 * from the io error is left to the reconnect work
2945 * item, which is what should have stalled waiting on
2946 * the io that had the error that scheduled this work.
2947 */
2948}
2949
2900static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 2950static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2901 .name = "fc", 2951 .name = "fc",
2902 .module = THIS_MODULE, 2952 .module = THIS_MODULE,
@@ -3007,6 +3057,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3007 ctrl->cnum = idx; 3057 ctrl->cnum = idx;
3008 ctrl->ioq_live = false; 3058 ctrl->ioq_live = false;
3009 ctrl->assoc_active = false; 3059 ctrl->assoc_active = false;
3060 atomic_set(&ctrl->err_work_active, 0);
3010 init_waitqueue_head(&ctrl->ioabort_wait); 3061 init_waitqueue_head(&ctrl->ioabort_wait);
3011 3062
3012 get_device(ctrl->dev); 3063 get_device(ctrl->dev);
@@ -3014,6 +3065,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3014 3065
3015 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); 3066 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3016 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 3067 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3068 INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
3017 spin_lock_init(&ctrl->lock); 3069 spin_lock_init(&ctrl->lock);
3018 3070
3019 /* io queue count */ 3071 /* io queue count */
@@ -3103,6 +3155,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3103fail_ctrl: 3155fail_ctrl:
3104 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); 3156 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3105 cancel_work_sync(&ctrl->ctrl.reset_work); 3157 cancel_work_sync(&ctrl->ctrl.reset_work);
3158 cancel_work_sync(&ctrl->err_work);
3106 cancel_delayed_work_sync(&ctrl->connect_work); 3159 cancel_delayed_work_sync(&ctrl->connect_work);
3107 3160
3108 ctrl->ctrl.opts = NULL; 3161 ctrl->ctrl.opts = NULL;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cee79cb388af..081cbdcce880 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -531,6 +531,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
531static inline int nvme_mpath_init(struct nvme_ctrl *ctrl, 531static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
532 struct nvme_id_ctrl *id) 532 struct nvme_id_ctrl *id)
533{ 533{
534 if (ctrl->subsys->cmic & (1 << 3))
535 dev_warn(ctrl->device,
536"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
534 return 0; 537 return 0;
535} 538}
536static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) 539static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d181cafedc58..ab6ec7295bf9 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
184 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); 184 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
185 if (ib_dma_mapping_error(ibdev, qe->dma)) { 185 if (ib_dma_mapping_error(ibdev, qe->dma)) {
186 kfree(qe->data); 186 kfree(qe->data);
187 qe->data = NULL;
187 return -ENOMEM; 188 return -ENOMEM;
188 } 189 }
189 190
@@ -823,6 +824,7 @@ out_free_tagset:
823out_free_async_qe: 824out_free_async_qe:
824 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, 825 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
825 sizeof(struct nvme_command), DMA_TO_DEVICE); 826 sizeof(struct nvme_command), DMA_TO_DEVICE);
827 ctrl->async_event_sqe.data = NULL;
826out_free_queue: 828out_free_queue:
827 nvme_rdma_free_queue(&ctrl->queues[0]); 829 nvme_rdma_free_queue(&ctrl->queues[0]);
828 return error; 830 return error;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9b18ce90f907..27f67dfa649d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -44,6 +44,7 @@ struct nvmem_cell {
44 int bytes; 44 int bytes;
45 int bit_offset; 45 int bit_offset;
46 int nbits; 46 int nbits;
47 struct device_node *np;
47 struct nvmem_device *nvmem; 48 struct nvmem_device *nvmem;
48 struct list_head node; 49 struct list_head node;
49}; 50};
@@ -298,6 +299,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell)
298 mutex_lock(&nvmem_mutex); 299 mutex_lock(&nvmem_mutex);
299 list_del(&cell->node); 300 list_del(&cell->node);
300 mutex_unlock(&nvmem_mutex); 301 mutex_unlock(&nvmem_mutex);
302 of_node_put(cell->np);
301 kfree(cell->name); 303 kfree(cell->name);
302 kfree(cell); 304 kfree(cell);
303} 305}
@@ -530,6 +532,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
530 return -ENOMEM; 532 return -ENOMEM;
531 533
532 cell->nvmem = nvmem; 534 cell->nvmem = nvmem;
535 cell->np = of_node_get(child);
533 cell->offset = be32_to_cpup(addr++); 536 cell->offset = be32_to_cpup(addr++);
534 cell->bytes = be32_to_cpup(addr); 537 cell->bytes = be32_to_cpup(addr);
535 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child); 538 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -960,14 +963,13 @@ out:
960 963
961#if IS_ENABLED(CONFIG_OF) 964#if IS_ENABLED(CONFIG_OF)
962static struct nvmem_cell * 965static struct nvmem_cell *
963nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index) 966nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
964{ 967{
965 struct nvmem_cell *cell = NULL; 968 struct nvmem_cell *cell = NULL;
966 int i = 0;
967 969
968 mutex_lock(&nvmem_mutex); 970 mutex_lock(&nvmem_mutex);
969 list_for_each_entry(cell, &nvmem->cells, node) { 971 list_for_each_entry(cell, &nvmem->cells, node) {
970 if (index == i++) 972 if (np == cell->np)
971 break; 973 break;
972 } 974 }
973 mutex_unlock(&nvmem_mutex); 975 mutex_unlock(&nvmem_mutex);
@@ -1011,7 +1013,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1011 if (IS_ERR(nvmem)) 1013 if (IS_ERR(nvmem))
1012 return ERR_CAST(nvmem); 1014 return ERR_CAST(nvmem);
1013 1015
1014 cell = nvmem_find_cell_by_index(nvmem, index); 1016 cell = nvmem_find_cell_by_node(nvmem, cell_np);
1015 if (!cell) { 1017 if (!cell) {
1016 __nvmem_device_put(nvmem); 1018 __nvmem_device_put(nvmem);
1017 return ERR_PTR(-ENOENT); 1019 return ERR_PTR(-ENOENT);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 5a4b47958073..38a08805a30c 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -579,10 +579,8 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
579 */ 579 */
580 count = of_count_phandle_with_args(dev->of_node, 580 count = of_count_phandle_with_args(dev->of_node,
581 "operating-points-v2", NULL); 581 "operating-points-v2", NULL);
582 if (count != 1) 582 if (count == 1)
583 return -ENODEV; 583 index = 0;
584
585 index = 0;
586 } 584 }
587 585
588 opp_table = dev_pm_opp_get_opp_table_indexed(dev, index); 586 opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 9e5a9a3112c9..1c69c404df11 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
288 int ret; 288 int ret;
289 289
290 vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data, 290 vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
291 new_supply_vbb->u_volt); 291 new_supply_vdd->u_volt);
292
293 if (new_supply_vdd->u_volt_min < vdd_uv)
294 new_supply_vdd->u_volt_min = vdd_uv;
292 295
293 /* Scaling up? Scale voltage before frequency */ 296 /* Scaling up? Scale voltage before frequency */
294 if (freq > old_freq) { 297 if (freq > old_freq) {
@@ -414,7 +417,6 @@ static struct platform_driver ti_opp_supply_driver = {
414 .probe = ti_opp_supply_probe, 417 .probe = ti_opp_supply_probe,
415 .driver = { 418 .driver = {
416 .name = "ti_opp_supply", 419 .name = "ti_opp_supply",
417 .owner = THIS_MODULE,
418 .of_match_table = of_match_ptr(ti_opp_supply_of_match), 420 .of_match_table = of_match_ptr(ti_opp_supply_of_match),
419 }, 421 },
420}; 422};
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 2cbef2d7c207..88af6bff945f 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -81,8 +81,6 @@ struct imx6_pcie {
81#define PCIE_PL_PFLR_FORCE_LINK (1 << 15) 81#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
82#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) 82#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
83#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) 83#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
84#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
85#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
86 84
87#define PCIE_PHY_CTRL (PL_OFFSET + 0x114) 85#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
88#define PCIE_PHY_CTRL_DATA_LOC 0 86#define PCIE_PHY_CTRL_DATA_LOC 0
@@ -711,12 +709,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
711 return 0; 709 return 0;
712} 710}
713 711
714static int imx6_pcie_link_up(struct dw_pcie *pci)
715{
716 return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
717 PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
718}
719
720static const struct dw_pcie_host_ops imx6_pcie_host_ops = { 712static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
721 .host_init = imx6_pcie_host_init, 713 .host_init = imx6_pcie_host_init,
722}; 714};
@@ -749,7 +741,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
749} 741}
750 742
751static const struct dw_pcie_ops dw_pcie_ops = { 743static const struct dw_pcie_ops dw_pcie_ops = {
752 .link_up = imx6_pcie_link_up, 744 /* No special ops needed, but pcie-designware still expects this struct */
753}; 745};
754 746
755#ifdef CONFIG_PM_SLEEP 747#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 3724d3ef7008..7aa9a82b7ebd 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -88,7 +88,7 @@ static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
88 int i; 88 int i;
89 89
90 for (i = 0; i < PCIE_IATU_NUM; i++) 90 for (i = 0; i < PCIE_IATU_NUM; i++)
91 dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i); 91 dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
92} 92}
93 93
94static int ls1021_pcie_link_up(struct dw_pcie *pci) 94static int ls1021_pcie_link_up(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1e7b02221eac..de8635af4cde 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -440,7 +440,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
440 tbl_offset = dw_pcie_readl_dbi(pci, reg); 440 tbl_offset = dw_pcie_readl_dbi(pci, reg);
441 bir = (tbl_offset & PCI_MSIX_TABLE_BIR); 441 bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
442 tbl_offset &= PCI_MSIX_TABLE_OFFSET; 442 tbl_offset &= PCI_MSIX_TABLE_OFFSET;
443 tbl_offset >>= 3;
444 443
445 reg = PCI_BASE_ADDRESS_0 + (4 * bir); 444 reg = PCI_BASE_ADDRESS_0 + (4 * bir);
446 bar_addr_upper = 0; 445 bar_addr_upper = 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d068f11d08a7..c9d8e3c837de 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5556,9 +5556,13 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5556 u32 lnkcap2, lnkcap; 5556 u32 lnkcap2, lnkcap;
5557 5557
5558 /* 5558 /*
5559 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link 5559 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
5560 * Speeds Vector in Link Capabilities 2 when supported, falling 5560 * implementation note there recommends using the Supported Link
5561 * back to Max Link Speed in Link Capabilities otherwise. 5561 * Speeds Vector in Link Capabilities 2 when supported.
5562 *
5563 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5564 * should use the Supported Link Speeds field in Link Capabilities,
5565 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5562 */ 5566 */
5563 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); 5567 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5564 if (lnkcap2) { /* PCIe r3.0-compliant */ 5568 if (lnkcap2) { /* PCIe r3.0-compliant */
@@ -5574,16 +5578,10 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5574 } 5578 }
5575 5579
5576 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); 5580 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5577 if (lnkcap) { 5581 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5578 if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) 5582 return PCIE_SPEED_5_0GT;
5579 return PCIE_SPEED_16_0GT; 5583 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5580 else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) 5584 return PCIE_SPEED_2_5GT;
5581 return PCIE_SPEED_8_0GT;
5582 else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
5583 return PCIE_SPEED_5_0GT;
5584 else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
5585 return PCIE_SPEED_2_5GT;
5586 }
5587 5585
5588 return PCI_SPEED_UNKNOWN; 5586 return PCI_SPEED_UNKNOWN;
5589} 5587}
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 9ce531194f8a..6d4b44b569bc 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = {
231 .mask_core_ready = CORE_READY_STATUS, 231 .mask_core_ready = CORE_READY_STATUS,
232 .has_pll_override = true, 232 .has_pll_override = true,
233 .autoresume_en = BIT(0), 233 .autoresume_en = BIT(0),
234 .update_tune1_with_efuse = true,
234}; 235};
235 236
236static const char * const qusb2_phy_vreg_names[] = { 237static const char * const qusb2_phy_vreg_names[] = {
@@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
402 403
403 /* 404 /*
404 * Read efuse register having TUNE2/1 parameter's high nibble. 405 * Read efuse register having TUNE2/1 parameter's high nibble.
405 * If efuse register shows value as 0x0, or if we fail to find 406 * If efuse register shows value as 0x0 (indicating value is not
406 * a valid efuse register settings, then use default value 407 * fused), or if we fail to find a valid efuse register setting,
407 * as 0xB for high nibble that we have already set while 408 * then use default value for high nibble that we have already
408 * configuring phy. 409 * set while configuring the phy.
409 */ 410 */
410 val = nvmem_cell_read(qphy->cell, NULL); 411 val = nvmem_cell_read(qphy->cell, NULL);
411 if (IS_ERR(val) || !val[0]) { 412 if (IS_ERR(val) || !val[0]) {
@@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
415 416
416 /* Fused TUNE1/2 value is the higher nibble only */ 417 /* Fused TUNE1/2 value is the higher nibble only */
417 if (cfg->update_tune1_with_efuse) 418 if (cfg->update_tune1_with_efuse)
418 qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], 419 qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
419 val[0] << 0x4); 420 val[0] << HSTX_TRIM_SHIFT,
421 HSTX_TRIM_MASK);
420 else 422 else
421 qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2], 423 qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
422 val[0] << 0x4); 424 val[0] << HSTX_TRIM_SHIFT,
423 425 HSTX_TRIM_MASK);
424} 426}
425 427
426static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode) 428static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode)
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
index 467e8147972b..9c85231a6dbc 100644
--- a/drivers/phy/socionext/Kconfig
+++ b/drivers/phy/socionext/Kconfig
@@ -26,7 +26,8 @@ config PHY_UNIPHIER_USB3
26 26
27config PHY_UNIPHIER_PCIE 27config PHY_UNIPHIER_PCIE
28 tristate "Uniphier PHY driver for PCIe controller" 28 tristate "Uniphier PHY driver for PCIe controller"
29 depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF 29 depends on ARCH_UNIPHIER || COMPILE_TEST
30 depends on OF && HAS_IOMEM
30 default PCIE_UNIPHIER 31 default PCIE_UNIPHIER
31 select GENERIC_PHY 32 select GENERIC_PHY
32 help 33 help
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 2751dba850c6..3e1abb455472 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -213,7 +213,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
213 /* get a report with all values through requesting one value */ 213 /* get a report with all values through requesting one value */
214 sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev, 214 sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
215 HID_USAGE_SENSOR_TIME, hid_time_addresses[0], 215 HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
216 time_state->info[0].report_id, SENSOR_HUB_SYNC); 216 time_state->info[0].report_id, SENSOR_HUB_SYNC, false);
217 /* wait for all values (event) */ 217 /* wait for all values (event) */
218 ret = wait_for_completion_killable_timeout( 218 ret = wait_for_completion_killable_timeout(
219 &time_state->comp_last_time, HZ*6); 219 &time_state->comp_last_time, HZ*6);
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index fd77e46eb3b2..70a006ba4d05 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
387 * orb specified one of the unsupported formats, we defer 387 * orb specified one of the unsupported formats, we defer
388 * checking for IDAWs in unsupported formats to here. 388 * checking for IDAWs in unsupported formats to here.
389 */ 389 */
390 if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) 390 if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
391 kfree(p);
391 return -EOPNOTSUPP; 392 return -EOPNOTSUPP;
393 }
392 394
393 if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) 395 if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
394 break; 396 break;
@@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
528 530
529 ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count); 531 ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
530 if (ret < 0) 532 if (ret < 0)
531 goto out_init; 533 goto out_unpin;
532 534
533 /* Translate this direct ccw to a idal ccw. */ 535 /* Translate this direct ccw to a idal ccw. */
534 idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL); 536 idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index f47d16b5810b..a10cec0e86eb 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -22,7 +22,7 @@
22#include "vfio_ccw_private.h" 22#include "vfio_ccw_private.h"
23 23
24struct workqueue_struct *vfio_ccw_work_q; 24struct workqueue_struct *vfio_ccw_work_q;
25struct kmem_cache *vfio_ccw_io_region; 25static struct kmem_cache *vfio_ccw_io_region;
26 26
27/* 27/*
28 * Helpers 28 * Helpers
@@ -134,14 +134,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
134 if (ret) 134 if (ret)
135 goto out_free; 135 goto out_free;
136 136
137 ret = vfio_ccw_mdev_reg(sch);
138 if (ret)
139 goto out_disable;
140
141 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); 137 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
142 atomic_set(&private->avail, 1); 138 atomic_set(&private->avail, 1);
143 private->state = VFIO_CCW_STATE_STANDBY; 139 private->state = VFIO_CCW_STATE_STANDBY;
144 140
141 ret = vfio_ccw_mdev_reg(sch);
142 if (ret)
143 goto out_disable;
144
145 return 0; 145 return 0;
146 146
147out_disable: 147out_disable:
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 048665e4f13d..9f5a201c4c87 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -775,6 +775,8 @@ static int ap_device_probe(struct device *dev)
775 drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; 775 drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
776 if (!!devres != !!drvres) 776 if (!!devres != !!drvres)
777 return -ENODEV; 777 return -ENODEV;
778 /* (re-)init queue's state machine */
779 ap_queue_reinit_state(to_ap_queue(dev));
778 } 780 }
779 781
780 /* Add queue/card to list of active queues/cards */ 782 /* Add queue/card to list of active queues/cards */
@@ -807,6 +809,8 @@ static int ap_device_remove(struct device *dev)
807 struct ap_device *ap_dev = to_ap_dev(dev); 809 struct ap_device *ap_dev = to_ap_dev(dev);
808 struct ap_driver *ap_drv = ap_dev->drv; 810 struct ap_driver *ap_drv = ap_dev->drv;
809 811
812 if (is_queue_dev(dev))
813 ap_queue_remove(to_ap_queue(dev));
810 if (ap_drv->remove) 814 if (ap_drv->remove)
811 ap_drv->remove(ap_dev); 815 ap_drv->remove(ap_dev);
812 816
@@ -1444,10 +1448,6 @@ static void ap_scan_bus(struct work_struct *unused)
1444 aq->ap_dev.device.parent = &ac->ap_dev.device; 1448 aq->ap_dev.device.parent = &ac->ap_dev.device;
1445 dev_set_name(&aq->ap_dev.device, 1449 dev_set_name(&aq->ap_dev.device,
1446 "%02x.%04x", id, dom); 1450 "%02x.%04x", id, dom);
1447 /* Start with a device reset */
1448 spin_lock_bh(&aq->lock);
1449 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
1450 spin_unlock_bh(&aq->lock);
1451 /* Register device */ 1451 /* Register device */
1452 rc = device_register(&aq->ap_dev.device); 1452 rc = device_register(&aq->ap_dev.device);
1453 if (rc) { 1453 if (rc) {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 3eed1b36c876..bfc66e4a9de1 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -254,6 +254,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
254void ap_queue_remove(struct ap_queue *aq); 254void ap_queue_remove(struct ap_queue *aq);
255void ap_queue_suspend(struct ap_device *ap_dev); 255void ap_queue_suspend(struct ap_device *ap_dev);
256void ap_queue_resume(struct ap_device *ap_dev); 256void ap_queue_resume(struct ap_device *ap_dev);
257void ap_queue_reinit_state(struct ap_queue *aq);
257 258
258struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, 259struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
259 int comp_device_type, unsigned int functions); 260 int comp_device_type, unsigned int functions);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 66f7334bcb03..0aa4b3ccc948 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -718,5 +718,20 @@ void ap_queue_remove(struct ap_queue *aq)
718{ 718{
719 ap_flush_queue(aq); 719 ap_flush_queue(aq);
720 del_timer_sync(&aq->timeout); 720 del_timer_sync(&aq->timeout);
721
722 /* reset with zero, also clears irq registration */
723 spin_lock_bh(&aq->lock);
724 ap_zapq(aq->qid);
725 aq->state = AP_STATE_BORKED;
726 spin_unlock_bh(&aq->lock);
721} 727}
722EXPORT_SYMBOL(ap_queue_remove); 728EXPORT_SYMBOL(ap_queue_remove);
729
730void ap_queue_reinit_state(struct ap_queue *aq)
731{
732 spin_lock_bh(&aq->lock);
733 aq->state = AP_STATE_RESET_START;
734 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
735 spin_unlock_bh(&aq->lock);
736}
737EXPORT_SYMBOL(ap_queue_reinit_state);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 146f54f5cbb8..c50f3e86cc74 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -196,7 +196,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
196 struct ap_queue *aq = to_ap_queue(&ap_dev->device); 196 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
197 struct zcrypt_queue *zq = aq->private; 197 struct zcrypt_queue *zq = aq->private;
198 198
199 ap_queue_remove(aq);
200 if (zq) 199 if (zq)
201 zcrypt_queue_unregister(zq); 200 zcrypt_queue_unregister(zq);
202} 201}
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 546f67676734..35c7c6672713 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -251,7 +251,6 @@ static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
251 struct ap_queue *aq = to_ap_queue(&ap_dev->device); 251 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
252 struct zcrypt_queue *zq = aq->private; 252 struct zcrypt_queue *zq = aq->private;
253 253
254 ap_queue_remove(aq);
255 if (zq) 254 if (zq)
256 zcrypt_queue_unregister(zq); 255 zcrypt_queue_unregister(zq);
257} 256}
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f9d4c6c7521d..582ffa7e0f18 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -275,7 +275,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
275 struct ap_queue *aq = to_ap_queue(&ap_dev->device); 275 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
276 struct zcrypt_queue *zq = aq->private; 276 struct zcrypt_queue *zq = aq->private;
277 277
278 ap_queue_remove(aq);
279 if (zq) 278 if (zq)
280 zcrypt_queue_unregister(zq); 279 zcrypt_queue_unregister(zq);
281} 280}
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index f96ec68af2e5..dcbf5c857743 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -415,9 +415,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
415 break; 415 break;
416 416
417 clear_bit_inv(bit, bv); 417 clear_bit_inv(bit, bv);
418 ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
418 barrier(); 419 barrier();
419 smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); 420 smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
420 ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
421 } 421 }
422 422
423 if (ism->sba->e) { 423 if (ism->sba->e) {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4bce5ae65a55..254065271867 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4518,8 +4518,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
4518{ 4518{
4519 struct qeth_ipa_cmd *cmd; 4519 struct qeth_ipa_cmd *cmd;
4520 struct qeth_arp_query_info *qinfo; 4520 struct qeth_arp_query_info *qinfo;
4521 struct qeth_snmp_cmd *snmp;
4522 unsigned char *data; 4521 unsigned char *data;
4522 void *snmp_data;
4523 __u16 data_len; 4523 __u16 data_len;
4524 4524
4525 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4525 QETH_CARD_TEXT(card, 3, "snpcmdcb");
@@ -4527,7 +4527,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
4527 cmd = (struct qeth_ipa_cmd *) sdata; 4527 cmd = (struct qeth_ipa_cmd *) sdata;
4528 data = (unsigned char *)((char *)cmd - reply->offset); 4528 data = (unsigned char *)((char *)cmd - reply->offset);
4529 qinfo = (struct qeth_arp_query_info *) reply->param; 4529 qinfo = (struct qeth_arp_query_info *) reply->param;
4530 snmp = &cmd->data.setadapterparms.data.snmp;
4531 4530
4532 if (cmd->hdr.return_code) { 4531 if (cmd->hdr.return_code) {
4533 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4532 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
@@ -4540,10 +4539,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
4540 return 0; 4539 return 0;
4541 } 4540 }
4542 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); 4541 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
4543 if (cmd->data.setadapterparms.hdr.seq_no == 1) 4542 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4544 data_len -= (__u16)((char *)&snmp->data - (char *)cmd); 4543 snmp_data = &cmd->data.setadapterparms.data.snmp;
4545 else 4544 data_len -= offsetof(struct qeth_ipa_cmd,
4546 data_len -= (__u16)((char *)&snmp->request - (char *)cmd); 4545 data.setadapterparms.data.snmp);
4546 } else {
4547 snmp_data = &cmd->data.setadapterparms.data.snmp.request;
4548 data_len -= offsetof(struct qeth_ipa_cmd,
4549 data.setadapterparms.data.snmp.request);
4550 }
4547 4551
4548 /* check if there is enough room in userspace */ 4552 /* check if there is enough room in userspace */
4549 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4553 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
@@ -4556,16 +4560,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
4556 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4560 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4557 cmd->data.setadapterparms.hdr.seq_no); 4561 cmd->data.setadapterparms.hdr.seq_no);
4558 /*copy entries to user buffer*/ 4562 /*copy entries to user buffer*/
4559 if (cmd->data.setadapterparms.hdr.seq_no == 1) { 4563 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4560 memcpy(qinfo->udata + qinfo->udata_offset,
4561 (char *)snmp,
4562 data_len + offsetof(struct qeth_snmp_cmd, data));
4563 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4564 } else {
4565 memcpy(qinfo->udata + qinfo->udata_offset,
4566 (char *)&snmp->request, data_len);
4567 }
4568 qinfo->udata_offset += data_len; 4564 qinfo->udata_offset += data_len;
4565
4569 /* check if all replies received ... */ 4566 /* check if all replies received ... */
4570 QETH_CARD_TEXT_(card, 4, "srtot%i", 4567 QETH_CARD_TEXT_(card, 4, "srtot%i",
4571 cmd->data.setadapterparms.hdr.used_total); 4568 cmd->data.setadapterparms.hdr.used_total);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 20c85eed1a75..b658b9a5eb1e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1749,7 +1749,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1749static void 1749static void
1750__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) 1750__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1751{ 1751{
1752 int cnt; 1752 int cnt, status;
1753 unsigned long flags; 1753 unsigned long flags;
1754 srb_t *sp; 1754 srb_t *sp;
1755 scsi_qla_host_t *vha = qp->vha; 1755 scsi_qla_host_t *vha = qp->vha;
@@ -1799,10 +1799,16 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1799 if (!sp_get(sp)) { 1799 if (!sp_get(sp)) {
1800 spin_unlock_irqrestore 1800 spin_unlock_irqrestore
1801 (qp->qp_lock_ptr, flags); 1801 (qp->qp_lock_ptr, flags);
1802 qla2xxx_eh_abort( 1802 status = qla2xxx_eh_abort(
1803 GET_CMD_SP(sp)); 1803 GET_CMD_SP(sp));
1804 spin_lock_irqsave 1804 spin_lock_irqsave
1805 (qp->qp_lock_ptr, flags); 1805 (qp->qp_lock_ptr, flags);
1806 /*
1807 * Get rid of extra reference caused
1808 * by early exit from qla2xxx_eh_abort
1809 */
1810 if (status == FAST_IO_FAIL)
1811 atomic_dec(&sp->ref_count);
1806 } 1812 }
1807 } 1813 }
1808 sp->done(sp, res); 1814 sp->done(sp, res);
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 46df707e6f2c..452e19f8fb47 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -20,6 +20,7 @@
20#include "unipro.h" 20#include "unipro.h"
21#include "ufs-hisi.h" 21#include "ufs-hisi.h"
22#include "ufshci.h" 22#include "ufshci.h"
23#include "ufs_quirks.h"
23 24
24static int ufs_hisi_check_hibern8(struct ufs_hba *hba) 25static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
25{ 26{
@@ -390,6 +391,14 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
390 391
391static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) 392static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
392{ 393{
394 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
395 pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
396 /* VS_DebugSaveConfigTime */
397 ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
398 /* sync length */
399 ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
400 }
401
393 /* update */ 402 /* update */
394 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1); 403 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
395 /* PA_TxSkip */ 404 /* PA_TxSkip */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 71f73d1d1ad1..5d2dfdb41a6f 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -131,4 +131,10 @@ struct ufs_dev_fix {
131 */ 131 */
132#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8) 132#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
133 133
134/*
135 * Some UFS devices require VS_DebugSaveConfigTime is 0x10,
136 * enabling this quirk ensure this.
137 */
138#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
139
134#endif /* UFS_QUIRKS_H_ */ 140#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 27db55b0ca7f..f1c57cd33b5b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -231,6 +231,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
231 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), 231 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
232 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, 232 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
233 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), 233 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
234 UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
235 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
234 236
235 END_FIX 237 END_FIX
236}; 238};
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 7218fb963d0a..1382a8df6c75 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -777,9 +777,6 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
777 u8 la = txn->la; 777 u8 la = txn->la;
778 bool usr_msg = false; 778 bool usr_msg = false;
779 779
780 if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
781 return -EPROTONOSUPPORT;
782
783 if (txn->mt == SLIM_MSG_MT_CORE && 780 if (txn->mt == SLIM_MSG_MT_CORE &&
784 (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION && 781 (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
785 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW)) 782 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index 4399d1873e2d..9be41089edde 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -61,12 +61,6 @@
61#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58 61#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58
62#define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F 62#define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F
63 63
64/*
65 * Clock pause flag to indicate that the reconfig message
66 * corresponds to clock pause sequence
67 */
68#define SLIM_MSG_CLK_PAUSE_SEQ_FLG (1U << 8)
69
70/* Clock pause values per SLIMbus spec */ 64/* Clock pause values per SLIMbus spec */
71#define SLIM_CLK_FAST 0 65#define SLIM_CLK_FAST 0
72#define SLIM_CLK_CONST_PHASE 1 66#define SLIM_CLK_CONST_PHASE 1
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 3dc31627c655..0c2867deb36f 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -522,11 +522,11 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
522 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); 522 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
523 mtk_spi_setup_packet(master); 523 mtk_spi_setup_packet(master);
524 524
525 cnt = len / 4; 525 cnt = mdata->xfer_len / 4;
526 iowrite32_rep(mdata->base + SPI_TX_DATA_REG, 526 iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
527 trans->tx_buf + mdata->num_xfered, cnt); 527 trans->tx_buf + mdata->num_xfered, cnt);
528 528
529 remainder = len % 4; 529 remainder = mdata->xfer_len % 4;
530 if (remainder > 0) { 530 if (remainder > 0) {
531 reg_val = 0; 531 reg_val = 0;
532 memcpy(&reg_val, 532 memcpy(&reg_val,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index f024c3fc3679..2fd8881fcd65 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1540,13 +1540,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
1540/* work with hotplug and coldplug */ 1540/* work with hotplug and coldplug */
1541MODULE_ALIAS("platform:omap2_mcspi"); 1541MODULE_ALIAS("platform:omap2_mcspi");
1542 1542
1543#ifdef CONFIG_SUSPEND 1543static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
1544static int omap2_mcspi_suspend_noirq(struct device *dev)
1545{ 1544{
1546 return pinctrl_pm_select_sleep_state(dev); 1545 struct spi_master *master = dev_get_drvdata(dev);
1546 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1547 int error;
1548
1549 error = pinctrl_pm_select_sleep_state(dev);
1550 if (error)
1551 dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1552 __func__, error);
1553
1554 error = spi_master_suspend(master);
1555 if (error)
1556 dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
1557 __func__, error);
1558
1559 return pm_runtime_force_suspend(dev);
1547} 1560}
1548 1561
1549static int omap2_mcspi_resume_noirq(struct device *dev) 1562static int __maybe_unused omap2_mcspi_resume(struct device *dev)
1550{ 1563{
1551 struct spi_master *master = dev_get_drvdata(dev); 1564 struct spi_master *master = dev_get_drvdata(dev);
1552 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1565 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
@@ -1557,17 +1570,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev)
1557 dev_warn(mcspi->dev, "%s: failed to set pins: %i\n", 1570 dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1558 __func__, error); 1571 __func__, error);
1559 1572
1560 return 0; 1573 error = spi_master_resume(master);
1561} 1574 if (error)
1575 dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
1576 __func__, error);
1562 1577
1563#else 1578 return pm_runtime_force_resume(dev);
1564#define omap2_mcspi_suspend_noirq NULL 1579}
1565#define omap2_mcspi_resume_noirq NULL
1566#endif
1567 1580
1568static const struct dev_pm_ops omap2_mcspi_pm_ops = { 1581static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1569 .suspend_noirq = omap2_mcspi_suspend_noirq, 1582 SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
1570 .resume_noirq = omap2_mcspi_resume_noirq, 1583 omap2_mcspi_resume)
1571 .runtime_resume = omap_mcspi_runtime_resume, 1584 .runtime_resume = omap_mcspi_runtime_resume,
1572}; 1585};
1573 1586
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index e90b17775284..09a940066c0e 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -1005,35 +1005,38 @@ enum i8254_mode {
1005 * and INSN_DEVICE_CONFIG_GET_ROUTES. 1005 * and INSN_DEVICE_CONFIG_GET_ROUTES.
1006 */ 1006 */
1007#define NI_NAMES_BASE 0x8000u 1007#define NI_NAMES_BASE 0x8000u
1008
1009#define _TERM_N(base, n, x) ((base) + ((x) & ((n) - 1)))
1010
1008/* 1011/*
1009 * not necessarily all allowed 64 PFIs are valid--certainly not for all devices 1012 * not necessarily all allowed 64 PFIs are valid--certainly not for all devices
1010 */ 1013 */
1011#define NI_PFI(x) (NI_NAMES_BASE + ((x) & 0x3f)) 1014#define NI_PFI(x) _TERM_N(NI_NAMES_BASE, 64, x)
1012/* 8 trigger lines by standard, Some devices cannot talk to all eight. */ 1015/* 8 trigger lines by standard, Some devices cannot talk to all eight. */
1013#define TRIGGER_LINE(x) (NI_PFI(-1) + 1 + ((x) & 0x7)) 1016#define TRIGGER_LINE(x) _TERM_N(NI_PFI(-1) + 1, 8, x)
1014/* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */ 1017/* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */
1015#define NI_RTSI_BRD(x) (TRIGGER_LINE(-1) + 1 + ((x) & 0x3)) 1018#define NI_RTSI_BRD(x) _TERM_N(TRIGGER_LINE(-1) + 1, 4, x)
1016 1019
1017/* *** Counter/timer names : 8 counters max *** */ 1020/* *** Counter/timer names : 8 counters max *** */
1018#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1) 1021#define NI_MAX_COUNTERS 8
1019#define NI_MAX_COUNTERS 7 1022#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1)
1020#define NI_CtrSource(x) (NI_COUNTER_NAMES_BASE + ((x) & NI_MAX_COUNTERS)) 1023#define NI_CtrSource(x) _TERM_N(NI_COUNTER_NAMES_BASE, NI_MAX_COUNTERS, x)
1021/* Gate, Aux, A,B,Z are all treated, at times as gates */ 1024/* Gate, Aux, A,B,Z are all treated, at times as gates */
1022#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1) 1025#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1)
1023#define NI_CtrGate(x) (NI_GATES_NAMES_BASE + ((x) & NI_MAX_COUNTERS)) 1026#define NI_CtrGate(x) _TERM_N(NI_GATES_NAMES_BASE, NI_MAX_COUNTERS, x)
1024#define NI_CtrAux(x) (NI_CtrGate(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1027#define NI_CtrAux(x) _TERM_N(NI_CtrGate(-1) + 1, NI_MAX_COUNTERS, x)
1025#define NI_CtrA(x) (NI_CtrAux(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1028#define NI_CtrA(x) _TERM_N(NI_CtrAux(-1) + 1, NI_MAX_COUNTERS, x)
1026#define NI_CtrB(x) (NI_CtrA(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1029#define NI_CtrB(x) _TERM_N(NI_CtrA(-1) + 1, NI_MAX_COUNTERS, x)
1027#define NI_CtrZ(x) (NI_CtrB(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1030#define NI_CtrZ(x) _TERM_N(NI_CtrB(-1) + 1, NI_MAX_COUNTERS, x)
1028#define NI_GATES_NAMES_MAX NI_CtrZ(-1) 1031#define NI_GATES_NAMES_MAX NI_CtrZ(-1)
1029#define NI_CtrArmStartTrigger(x) (NI_CtrZ(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1032#define NI_CtrArmStartTrigger(x) _TERM_N(NI_CtrZ(-1) + 1, NI_MAX_COUNTERS, x)
1030#define NI_CtrInternalOutput(x) \ 1033#define NI_CtrInternalOutput(x) \
1031 (NI_CtrArmStartTrigger(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1034 _TERM_N(NI_CtrArmStartTrigger(-1) + 1, NI_MAX_COUNTERS, x)
1032/** external pin(s) labeled conveniently as Ctr<i>Out. */ 1035/** external pin(s) labeled conveniently as Ctr<i>Out. */
1033#define NI_CtrOut(x) (NI_CtrInternalOutput(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1036#define NI_CtrOut(x) _TERM_N(NI_CtrInternalOutput(-1) + 1, NI_MAX_COUNTERS, x)
1034/** For Buffered sampling of ctr -- x series capability. */ 1037/** For Buffered sampling of ctr -- x series capability. */
1035#define NI_CtrSampleClock(x) (NI_CtrOut(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1038#define NI_CtrSampleClock(x) _TERM_N(NI_CtrOut(-1) + 1, NI_MAX_COUNTERS, x)
1036#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1) 1039#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1)
1037 1040
1038enum ni_common_signal_names { 1041enum ni_common_signal_names {
1039 /* PXI_Star: this is a non-NI-specific signal */ 1042 /* PXI_Star: this is a non-NI-specific signal */
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 2d1e0325d04d..5edf59ac6706 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2843,7 +2843,8 @@ static int ni_ao_insn_config(struct comedi_device *dev,
2843 return ni_ao_arm(dev, s); 2843 return ni_ao_arm(dev, s);
2844 case INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS: 2844 case INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS:
2845 /* we don't care about actual channels */ 2845 /* we don't care about actual channels */
2846 data[1] = board->ao_speed; 2846 /* data[3] : chanlist_len */
2847 data[1] = board->ao_speed * data[3];
2847 data[2] = 0; 2848 data[2] = 0;
2848 return 0; 2849 return 0;
2849 default: 2850 default:
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index a53231b08d30..e3425bf082ae 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -310,6 +310,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
310 ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2); 310 ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
311 break; 311 break;
312 } 312 }
313 /* fall through */
313 314
314 case IPIPEIF_SDRAM_YUV: 315 case IPIPEIF_SDRAM_YUV:
315 /* Set clock divider */ 316 /* Set clock divider */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 82558455384a..c912c70b3ef7 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -108,17 +108,6 @@ static int cedrus_request_validate(struct media_request *req)
108 unsigned int count; 108 unsigned int count;
109 unsigned int i; 109 unsigned int i;
110 110
111 count = vb2_request_buffer_cnt(req);
112 if (!count) {
113 v4l2_info(&ctx->dev->v4l2_dev,
114 "No buffer was provided with the request\n");
115 return -ENOENT;
116 } else if (count > 1) {
117 v4l2_info(&ctx->dev->v4l2_dev,
118 "More than one buffer was provided with the request\n");
119 return -EINVAL;
120 }
121
122 list_for_each_entry(obj, &req->objects, list) { 111 list_for_each_entry(obj, &req->objects, list) {
123 struct vb2_buffer *vb; 112 struct vb2_buffer *vb;
124 113
@@ -133,6 +122,17 @@ static int cedrus_request_validate(struct media_request *req)
133 if (!ctx) 122 if (!ctx)
134 return -ENOENT; 123 return -ENOENT;
135 124
125 count = vb2_request_buffer_cnt(req);
126 if (!count) {
127 v4l2_info(&ctx->dev->v4l2_dev,
128 "No buffer was provided with the request\n");
129 return -ENOENT;
130 } else if (count > 1) {
131 v4l2_info(&ctx->dev->v4l2_dev,
132 "More than one buffer was provided with the request\n");
133 return -EINVAL;
134 }
135
136 parent_hdl = &ctx->hdl; 136 parent_hdl = &ctx->hdl;
137 137
138 hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl); 138 hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
@@ -253,7 +253,7 @@ static const struct v4l2_m2m_ops cedrus_m2m_ops = {
253 253
254static const struct media_device_ops cedrus_m2m_media_ops = { 254static const struct media_device_ops cedrus_m2m_media_ops = {
255 .req_validate = cedrus_request_validate, 255 .req_validate = cedrus_request_validate,
256 .req_queue = vb2_m2m_request_queue, 256 .req_queue = v4l2_m2m_request_queue,
257}; 257};
258 258
259static int cedrus_probe(struct platform_device *pdev) 259static int cedrus_probe(struct platform_device *pdev)
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 6a18cf73c85e..18936cdb1083 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -351,7 +351,7 @@ static ssize_t set_datatype_show(struct device *dev,
351 351
352 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) { 352 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
353 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type) 353 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
354 return snprintf(buf, PAGE_SIZE, ch_data_type[i].name); 354 return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name);
355 } 355 }
356 return snprintf(buf, PAGE_SIZE, "unconfigured\n"); 356 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
357} 357}
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index df6ebf41bdea..5831f816c17b 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -335,6 +335,8 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
335 /* tx desc */ 335 /* tx desc */
336 src = sg->src_addr; 336 src = sg->src_addr;
337 for (i = 0; i < chan->desc->num_sgs; i++) { 337 for (i = 0; i < chan->desc->num_sgs; i++) {
338 tx_desc = &chan->tx_ring[chan->tx_idx];
339
338 if (len > HSDMA_MAX_PLEN) 340 if (len > HSDMA_MAX_PLEN)
339 tlen = HSDMA_MAX_PLEN; 341 tlen = HSDMA_MAX_PLEN;
340 else 342 else
@@ -344,7 +346,6 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
344 tx_desc->addr1 = src; 346 tx_desc->addr1 = src;
345 tx_desc->flags |= HSDMA_DESC_PLEN1(tlen); 347 tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
346 } else { 348 } else {
347 tx_desc = &chan->tx_ring[chan->tx_idx];
348 tx_desc->addr0 = src; 349 tx_desc->addr0 = src;
349 tx_desc->flags = HSDMA_DESC_PLEN0(tlen); 350 tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
350 351
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index b8566ed898f1..aa98fbb17013 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -82,7 +82,7 @@ static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev,
82 struct property *prop; 82 struct property *prop;
83 const char *function_name, *group_name; 83 const char *function_name, *group_name;
84 int ret; 84 int ret;
85 int ngroups; 85 int ngroups = 0;
86 unsigned int reserved_maps = 0; 86 unsigned int reserved_maps = 0;
87 87
88 for_each_node_with_property(np_config, "group") 88 for_each_node_with_property(np_config, "group")
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 85077947b9b8..85aba8a503cd 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -109,12 +109,12 @@ static void update_recvframe_phyinfo(union recv_frame *precvframe,
109 rx_bssid = get_hdr_bssid(wlanhdr); 109 rx_bssid = get_hdr_bssid(wlanhdr);
110 pkt_info.bssid_match = ((!IsFrameTypeCtrl(wlanhdr)) && 110 pkt_info.bssid_match = ((!IsFrameTypeCtrl(wlanhdr)) &&
111 !pattrib->icv_err && !pattrib->crc_err && 111 !pattrib->icv_err && !pattrib->crc_err &&
112 !ether_addr_equal(rx_bssid, my_bssid)); 112 ether_addr_equal(rx_bssid, my_bssid));
113 113
114 rx_ra = get_ra(wlanhdr); 114 rx_ra = get_ra(wlanhdr);
115 my_hwaddr = myid(&padapter->eeprompriv); 115 my_hwaddr = myid(&padapter->eeprompriv);
116 pkt_info.to_self = pkt_info.bssid_match && 116 pkt_info.to_self = pkt_info.bssid_match &&
117 !ether_addr_equal(rx_ra, my_hwaddr); 117 ether_addr_equal(rx_ra, my_hwaddr);
118 118
119 119
120 pkt_info.is_beacon = pkt_info.bssid_match && 120 pkt_info.is_beacon = pkt_info.bssid_match &&
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index af2234798fa8..db553f2e4c0b 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1277,7 +1277,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
1277 1277
1278 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); 1278 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
1279 sinfo->tx_packets = psta->sta_stats.tx_pkts; 1279 sinfo->tx_packets = psta->sta_stats.tx_pkts;
1280 1280 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
1281 } 1281 }
1282 1282
1283 /* for Ad-Hoc/AP mode */ 1283 /* for Ad-Hoc/AP mode */
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 28bfdbdc6e76..b8631baf128d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev,
2289exit: 2289exit:
2290 kfree(ptmp); 2290 kfree(ptmp);
2291 2291
2292 return 0; 2292 return ret;
2293} 2293}
2294 2294
2295static int rtw_wx_write32(struct net_device *dev, 2295static int rtw_wx_write32(struct net_device *dev,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index ea789376de0f..45de21c210c1 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1795,6 +1795,7 @@ vchiq_compat_ioctl_await_completion(struct file *file,
1795 struct vchiq_await_completion32 args32; 1795 struct vchiq_await_completion32 args32;
1796 struct vchiq_completion_data32 completion32; 1796 struct vchiq_completion_data32 completion32;
1797 unsigned int *msgbufcount32; 1797 unsigned int *msgbufcount32;
1798 unsigned int msgbufcount_native;
1798 compat_uptr_t msgbuf32; 1799 compat_uptr_t msgbuf32;
1799 void *msgbuf; 1800 void *msgbuf;
1800 void **msgbufptr; 1801 void **msgbufptr;
@@ -1906,7 +1907,11 @@ vchiq_compat_ioctl_await_completion(struct file *file,
1906 sizeof(completion32))) 1907 sizeof(completion32)))
1907 return -EFAULT; 1908 return -EFAULT;
1908 1909
1909 args32.msgbufcount--; 1910 if (get_user(msgbufcount_native, &args->msgbufcount))
1911 return -EFAULT;
1912
1913 if (!msgbufcount_native)
1914 args32.msgbufcount--;
1910 1915
1911 msgbufcount32 = 1916 msgbufcount32 =
1912 &((struct vchiq_await_completion32 __user *)arg)->msgbufcount; 1917 &((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 52ff854f0d6c..cd96994dc094 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -863,6 +863,30 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
863} 863}
864static DEVICE_ATTR(key, 0600, key_show, key_store); 864static DEVICE_ATTR(key, 0600, key_show, key_store);
865 865
866static void nvm_authenticate_start(struct tb_switch *sw)
867{
868 struct pci_dev *root_port;
869
870 /*
871 * During host router NVM upgrade we should not allow root port to
872 * go into D3cold because some root ports cannot trigger PME
873 * itself. To be on the safe side keep the root port in D0 during
874 * the whole upgrade process.
875 */
876 root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
877 if (root_port)
878 pm_runtime_get_noresume(&root_port->dev);
879}
880
881static void nvm_authenticate_complete(struct tb_switch *sw)
882{
883 struct pci_dev *root_port;
884
885 root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
886 if (root_port)
887 pm_runtime_put(&root_port->dev);
888}
889
866static ssize_t nvm_authenticate_show(struct device *dev, 890static ssize_t nvm_authenticate_show(struct device *dev,
867 struct device_attribute *attr, char *buf) 891 struct device_attribute *attr, char *buf)
868{ 892{
@@ -912,10 +936,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
912 936
913 sw->nvm->authenticating = true; 937 sw->nvm->authenticating = true;
914 938
915 if (!tb_route(sw)) 939 if (!tb_route(sw)) {
940 /*
941 * Keep root port from suspending as long as the
942 * NVM upgrade process is running.
943 */
944 nvm_authenticate_start(sw);
916 ret = nvm_authenticate_host(sw); 945 ret = nvm_authenticate_host(sw);
917 else 946 if (ret)
947 nvm_authenticate_complete(sw);
948 } else {
918 ret = nvm_authenticate_device(sw); 949 ret = nvm_authenticate_device(sw);
950 }
919 pm_runtime_mark_last_busy(&sw->dev); 951 pm_runtime_mark_last_busy(&sw->dev);
920 pm_runtime_put_autosuspend(&sw->dev); 952 pm_runtime_put_autosuspend(&sw->dev);
921 } 953 }
@@ -1334,6 +1366,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
1334 if (ret <= 0) 1366 if (ret <= 0)
1335 return ret; 1367 return ret;
1336 1368
1369 /* Now we can allow root port to suspend again */
1370 if (!tb_route(sw))
1371 nvm_authenticate_complete(sw);
1372
1337 if (status) { 1373 if (status) {
1338 tb_sw_info(sw, "switch flash authentication failed\n"); 1374 tb_sw_info(sw, "switch flash authentication failed\n");
1339 tb_switch_set_uuid(sw); 1375 tb_switch_set_uuid(sw);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 85644669fbe7..0a357db4b31b 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -961,6 +961,8 @@ int __uio_register_device(struct module *owner,
961 if (ret) 961 if (ret)
962 goto err_uio_dev_add_attributes; 962 goto err_uio_dev_add_attributes;
963 963
964 info->uio_dev = idev;
965
964 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { 966 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
965 /* 967 /*
966 * Note that we deliberately don't use devm_request_irq 968 * Note that we deliberately don't use devm_request_irq
@@ -972,11 +974,12 @@ int __uio_register_device(struct module *owner,
972 */ 974 */
973 ret = request_irq(info->irq, uio_interrupt, 975 ret = request_irq(info->irq, uio_interrupt,
974 info->irq_flags, info->name, idev); 976 info->irq_flags, info->name, idev);
975 if (ret) 977 if (ret) {
978 info->uio_dev = NULL;
976 goto err_request_irq; 979 goto err_request_irq;
980 }
977 } 981 }
978 982
979 info->uio_dev = idev;
980 return 0; 983 return 0;
981 984
982err_request_irq: 985err_request_irq:
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 47d75c20c211..1b68fed464cb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1696,6 +1696,9 @@ static const struct usb_device_id acm_ids[] = {
1696 { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ 1696 { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
1697 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1697 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1698 }, 1698 },
1699 { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
1700 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1701 },
1699 { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */ 1702 { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
1700 .driver_info = QUIRK_CONTROL_LINE_STATE, }, 1703 .driver_info = QUIRK_CONTROL_LINE_STATE, },
1701 { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ 1704 { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c6077d582d29..0f9381b69a3b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2794,6 +2794,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2794 int i, status; 2794 int i, status;
2795 u16 portchange, portstatus; 2795 u16 portchange, portstatus;
2796 struct usb_port *port_dev = hub->ports[port1 - 1]; 2796 struct usb_port *port_dev = hub->ports[port1 - 1];
2797 int reset_recovery_time;
2797 2798
2798 if (!hub_is_superspeed(hub->hdev)) { 2799 if (!hub_is_superspeed(hub->hdev)) {
2799 if (warm) { 2800 if (warm) {
@@ -2849,7 +2850,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2849 USB_PORT_FEAT_C_BH_PORT_RESET); 2850 USB_PORT_FEAT_C_BH_PORT_RESET);
2850 usb_clear_port_feature(hub->hdev, port1, 2851 usb_clear_port_feature(hub->hdev, port1,
2851 USB_PORT_FEAT_C_PORT_LINK_STATE); 2852 USB_PORT_FEAT_C_PORT_LINK_STATE);
2852 usb_clear_port_feature(hub->hdev, port1, 2853
2854 if (udev)
2855 usb_clear_port_feature(hub->hdev, port1,
2853 USB_PORT_FEAT_C_CONNECTION); 2856 USB_PORT_FEAT_C_CONNECTION);
2854 2857
2855 /* 2858 /*
@@ -2885,11 +2888,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2885 2888
2886done: 2889done:
2887 if (status == 0) { 2890 if (status == 0) {
2888 /* TRSTRCY = 10 ms; plus some extra */
2889 if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM) 2891 if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM)
2890 usleep_range(10000, 12000); 2892 usleep_range(10000, 12000);
2891 else 2893 else {
2892 msleep(10 + 40); 2894 /* TRSTRCY = 10 ms; plus some extra */
2895 reset_recovery_time = 10 + 40;
2896
2897 /* Hub needs extra delay after resetting its port. */
2898 if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET)
2899 reset_recovery_time += 100;
2900
2901 msleep(reset_recovery_time);
2902 }
2893 2903
2894 if (udev) { 2904 if (udev) {
2895 struct usb_hcd *hcd = bus_to_hcd(udev->bus); 2905 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 178d6c6063c0..0690fcff0ea2 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -128,6 +128,9 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
128 case 'n': 128 case 'n':
129 flags |= USB_QUIRK_DELAY_CTRL_MSG; 129 flags |= USB_QUIRK_DELAY_CTRL_MSG;
130 break; 130 break;
131 case 'o':
132 flags |= USB_QUIRK_HUB_SLOW_RESET;
133 break;
131 /* Ignore unrecognized flag characters */ 134 /* Ignore unrecognized flag characters */
132 } 135 }
133 } 136 }
@@ -206,6 +209,9 @@ static const struct usb_device_id usb_quirk_list[] = {
206 /* Microsoft LifeCam-VX700 v2.0 */ 209 /* Microsoft LifeCam-VX700 v2.0 */
207 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 210 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
208 211
212 /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
213 { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
214
209 /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ 215 /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
210 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, 216 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
211 { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, 217 { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -380,6 +386,9 @@ static const struct usb_device_id usb_quirk_list[] = {
380 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 386 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
381 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 387 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
382 388
389 /* Terminus Technology Inc. Hub */
390 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
391
383 /* Corsair K70 RGB */ 392 /* Corsair K70 RGB */
384 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 393 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
385 394
@@ -391,6 +400,9 @@ static const struct usb_device_id usb_quirk_list[] = {
391 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | 400 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
392 USB_QUIRK_DELAY_CTRL_MSG }, 401 USB_QUIRK_DELAY_CTRL_MSG },
393 402
403 /* Corsair K70 LUX RGB */
404 { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
405
394 /* Corsair K70 LUX */ 406 /* Corsair K70 LUX */
395 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, 407 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
396 408
@@ -411,6 +423,11 @@ static const struct usb_device_id usb_quirk_list[] = {
411 { USB_DEVICE(0x2040, 0x7200), .driver_info = 423 { USB_DEVICE(0x2040, 0x7200), .driver_info =
412 USB_QUIRK_CONFIG_INTF_STRINGS }, 424 USB_QUIRK_CONFIG_INTF_STRINGS },
413 425
426 /* Raydium Touchscreen */
427 { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
428
429 { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
430
414 /* DJI CineSSD */ 431 /* DJI CineSSD */
415 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, 432 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
416 433
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index d257c541e51b..7afc10872f1f 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -120,6 +120,7 @@ static int dwc2_pci_probe(struct pci_dev *pci,
120 dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO); 120 dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO);
121 if (!dwc2) { 121 if (!dwc2) {
122 dev_err(dev, "couldn't allocate dwc2 device\n"); 122 dev_err(dev, "couldn't allocate dwc2 device\n");
123 ret = -ENOMEM;
123 goto err; 124 goto err;
124 } 125 }
125 126
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index becfbb87f791..2f2048aa5fde 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1499,6 +1499,7 @@ static int dwc3_probe(struct platform_device *pdev)
1499 1499
1500err5: 1500err5:
1501 dwc3_event_buffers_cleanup(dwc); 1501 dwc3_event_buffers_cleanup(dwc);
1502 dwc3_ulpi_exit(dwc);
1502 1503
1503err4: 1504err4:
1504 dwc3_free_scratch_buffers(dwc); 1505 dwc3_free_scratch_buffers(dwc);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 1286076a8890..842795856bf4 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -283,8 +283,10 @@ err:
283static void dwc3_pci_remove(struct pci_dev *pci) 283static void dwc3_pci_remove(struct pci_dev *pci)
284{ 284{
285 struct dwc3_pci *dwc = pci_get_drvdata(pci); 285 struct dwc3_pci *dwc = pci_get_drvdata(pci);
286 struct pci_dev *pdev = dwc->pci;
286 287
287 gpiod_remove_lookup_table(&platform_bytcr_gpios); 288 if (pdev->device == PCI_DEVICE_ID_INTEL_BYT)
289 gpiod_remove_lookup_table(&platform_bytcr_gpios);
288#ifdef CONFIG_PM 290#ifdef CONFIG_PM
289 cancel_work_sync(&dwc->wakeup_work); 291 cancel_work_sync(&dwc->wakeup_work);
290#endif 292#endif
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 679c12e14522..9f92ee03dde7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1081,7 +1081,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
1081 /* Now prepare one extra TRB to align transfer size */ 1081 /* Now prepare one extra TRB to align transfer size */
1082 trb = &dep->trb_pool[dep->trb_enqueue]; 1082 trb = &dep->trb_pool[dep->trb_enqueue];
1083 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 1083 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
1084 maxp - rem, false, 0, 1084 maxp - rem, false, 1,
1085 req->request.stream_id, 1085 req->request.stream_id,
1086 req->request.short_not_ok, 1086 req->request.short_not_ok,
1087 req->request.no_interrupt); 1087 req->request.no_interrupt);
@@ -1125,7 +1125,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1125 /* Now prepare one extra TRB to align transfer size */ 1125 /* Now prepare one extra TRB to align transfer size */
1126 trb = &dep->trb_pool[dep->trb_enqueue]; 1126 trb = &dep->trb_pool[dep->trb_enqueue];
1127 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, 1127 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
1128 false, 0, req->request.stream_id, 1128 false, 1, req->request.stream_id,
1129 req->request.short_not_ok, 1129 req->request.short_not_ok,
1130 req->request.no_interrupt); 1130 req->request.no_interrupt);
1131 } else if (req->request.zero && req->request.length && 1131 } else if (req->request.zero && req->request.length &&
@@ -1141,7 +1141,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1141 /* Now prepare one extra TRB to handle ZLP */ 1141 /* Now prepare one extra TRB to handle ZLP */
1142 trb = &dep->trb_pool[dep->trb_enqueue]; 1142 trb = &dep->trb_pool[dep->trb_enqueue];
1143 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, 1143 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
1144 false, 0, req->request.stream_id, 1144 false, 1, req->request.stream_id,
1145 req->request.short_not_ok, 1145 req->request.short_not_ok,
1146 req->request.no_interrupt); 1146 req->request.no_interrupt);
1147 } else { 1147 } else {
@@ -1470,9 +1470,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1470 unsigned transfer_in_flight; 1470 unsigned transfer_in_flight;
1471 unsigned started; 1471 unsigned started;
1472 1472
1473 if (dep->flags & DWC3_EP_STALL)
1474 return 0;
1475
1476 if (dep->number > 1) 1473 if (dep->number > 1)
1477 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1474 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1478 else 1475 else
@@ -1494,8 +1491,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1494 else 1491 else
1495 dep->flags |= DWC3_EP_STALL; 1492 dep->flags |= DWC3_EP_STALL;
1496 } else { 1493 } else {
1497 if (!(dep->flags & DWC3_EP_STALL))
1498 return 0;
1499 1494
1500 ret = dwc3_send_clear_stall_ep_cmd(dep); 1495 ret = dwc3_send_clear_stall_ep_cmd(dep);
1501 if (ret) 1496 if (ret)
@@ -2259,7 +2254,7 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
2259 * with one TRB pending in the ring. We need to manually clear HWO bit 2254 * with one TRB pending in the ring. We need to manually clear HWO bit
2260 * from that TRB. 2255 * from that TRB.
2261 */ 2256 */
2262 if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { 2257 if ((req->zero || req->unaligned) && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
2263 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2258 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
2264 return 1; 2259 return 1;
2265 } 2260 }
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 3ada83d81bda..31e8bf3578c8 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -215,7 +215,6 @@ struct ffs_io_data {
215 215
216 struct mm_struct *mm; 216 struct mm_struct *mm;
217 struct work_struct work; 217 struct work_struct work;
218 struct work_struct cancellation_work;
219 218
220 struct usb_ep *ep; 219 struct usb_ep *ep;
221 struct usb_request *req; 220 struct usb_request *req;
@@ -1073,31 +1072,22 @@ ffs_epfile_open(struct inode *inode, struct file *file)
1073 return 0; 1072 return 0;
1074} 1073}
1075 1074
1076static void ffs_aio_cancel_worker(struct work_struct *work)
1077{
1078 struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
1079 cancellation_work);
1080
1081 ENTER();
1082
1083 usb_ep_dequeue(io_data->ep, io_data->req);
1084}
1085
1086static int ffs_aio_cancel(struct kiocb *kiocb) 1075static int ffs_aio_cancel(struct kiocb *kiocb)
1087{ 1076{
1088 struct ffs_io_data *io_data = kiocb->private; 1077 struct ffs_io_data *io_data = kiocb->private;
1089 struct ffs_data *ffs = io_data->ffs; 1078 struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
1090 int value; 1079 int value;
1091 1080
1092 ENTER(); 1081 ENTER();
1093 1082
1094 if (likely(io_data && io_data->ep && io_data->req)) { 1083 spin_lock_irq(&epfile->ffs->eps_lock);
1095 INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker); 1084
1096 queue_work(ffs->io_completion_wq, &io_data->cancellation_work); 1085 if (likely(io_data && io_data->ep && io_data->req))
1097 value = -EINPROGRESS; 1086 value = usb_ep_dequeue(io_data->ep, io_data->req);
1098 } else { 1087 else
1099 value = -EINVAL; 1088 value = -EINVAL;
1100 } 1089
1090 spin_unlock_irq(&epfile->ffs->eps_lock);
1101 1091
1102 return value; 1092 return value;
1103} 1093}
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 1000d864929c..0f026d445e31 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -401,12 +401,12 @@ done:
401static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) 401static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
402{ 402{
403 struct usb_request *req; 403 struct usb_request *req;
404 struct usb_request *tmp;
405 unsigned long flags; 404 unsigned long flags;
406 405
407 /* fill unused rxq slots with some skb */ 406 /* fill unused rxq slots with some skb */
408 spin_lock_irqsave(&dev->req_lock, flags); 407 spin_lock_irqsave(&dev->req_lock, flags);
409 list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) { 408 while (!list_empty(&dev->rx_reqs)) {
409 req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
410 list_del_init(&req->list); 410 list_del_init(&req->list);
411 spin_unlock_irqrestore(&dev->req_lock, flags); 411 spin_unlock_irqrestore(&dev->req_lock, flags);
412 412
@@ -1125,7 +1125,6 @@ void gether_disconnect(struct gether *link)
1125{ 1125{
1126 struct eth_dev *dev = link->ioport; 1126 struct eth_dev *dev = link->ioport;
1127 struct usb_request *req; 1127 struct usb_request *req;
1128 struct usb_request *tmp;
1129 1128
1130 WARN_ON(!dev); 1129 WARN_ON(!dev);
1131 if (!dev) 1130 if (!dev)
@@ -1142,7 +1141,8 @@ void gether_disconnect(struct gether *link)
1142 */ 1141 */
1143 usb_ep_disable(link->in_ep); 1142 usb_ep_disable(link->in_ep);
1144 spin_lock(&dev->req_lock); 1143 spin_lock(&dev->req_lock);
1145 list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) { 1144 while (!list_empty(&dev->tx_reqs)) {
1145 req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
1146 list_del(&req->list); 1146 list_del(&req->list);
1147 1147
1148 spin_unlock(&dev->req_lock); 1148 spin_unlock(&dev->req_lock);
@@ -1154,7 +1154,8 @@ void gether_disconnect(struct gether *link)
1154 1154
1155 usb_ep_disable(link->out_ep); 1155 usb_ep_disable(link->out_ep);
1156 spin_lock(&dev->req_lock); 1156 spin_lock(&dev->req_lock);
1157 list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) { 1157 while (!list_empty(&dev->rx_reqs)) {
1158 req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
1158 list_del(&req->list); 1159 list_del(&req->list);
1159 1160
1160 spin_unlock(&dev->req_lock); 1161 spin_unlock(&dev->req_lock);
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 3a16431da321..fcf13ef33b31 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void)
2033{ 2033{
2034 return machine_is_omap_innovator() 2034 return machine_is_omap_innovator()
2035 || machine_is_omap_osk() 2035 || machine_is_omap_osk()
2036 || machine_is_omap_palmte()
2036 || machine_is_sx1() 2037 || machine_is_sx1()
2037 /* No known omap7xx boards with vbus sense */ 2038 /* No known omap7xx boards with vbus sense */
2038 || cpu_is_omap7xx(); 2039 || cpu_is_omap7xx();
@@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void)
2041static int omap_udc_start(struct usb_gadget *g, 2042static int omap_udc_start(struct usb_gadget *g,
2042 struct usb_gadget_driver *driver) 2043 struct usb_gadget_driver *driver)
2043{ 2044{
2044 int status = -ENODEV; 2045 int status;
2045 struct omap_ep *ep; 2046 struct omap_ep *ep;
2046 unsigned long flags; 2047 unsigned long flags;
2047 2048
@@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g,
2079 goto done; 2080 goto done;
2080 } 2081 }
2081 } else { 2082 } else {
2083 status = 0;
2082 if (can_pullup(udc)) 2084 if (can_pullup(udc))
2083 pullup_enable(udc); 2085 pullup_enable(udc);
2084 else 2086 else
@@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
2593 2595
2594static void omap_udc_release(struct device *dev) 2596static void omap_udc_release(struct device *dev)
2595{ 2597{
2596 complete(udc->done); 2598 pullup_disable(udc);
2599 if (!IS_ERR_OR_NULL(udc->transceiver)) {
2600 usb_put_phy(udc->transceiver);
2601 udc->transceiver = NULL;
2602 }
2603 omap_writew(0, UDC_SYSCON1);
2604 remove_proc_file();
2605 if (udc->dc_clk) {
2606 if (udc->clk_requested)
2607 omap_udc_enable_clock(0);
2608 clk_put(udc->hhc_clk);
2609 clk_put(udc->dc_clk);
2610 }
2611 if (udc->done)
2612 complete(udc->done);
2597 kfree(udc); 2613 kfree(udc);
2598 udc = NULL;
2599} 2614}
2600 2615
2601static int 2616static int
@@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
2627 udc->gadget.speed = USB_SPEED_UNKNOWN; 2642 udc->gadget.speed = USB_SPEED_UNKNOWN;
2628 udc->gadget.max_speed = USB_SPEED_FULL; 2643 udc->gadget.max_speed = USB_SPEED_FULL;
2629 udc->gadget.name = driver_name; 2644 udc->gadget.name = driver_name;
2645 udc->gadget.quirk_ep_out_aligned_size = 1;
2630 udc->transceiver = xceiv; 2646 udc->transceiver = xceiv;
2631 2647
2632 /* ep0 is special; put it right after the SETUP buffer */ 2648 /* ep0 is special; put it right after the SETUP buffer */
@@ -2867,8 +2883,8 @@ bad_on_1710:
2867 udc->clr_halt = UDC_RESET_EP; 2883 udc->clr_halt = UDC_RESET_EP;
2868 2884
2869 /* USB general purpose IRQ: ep0, state changes, dma, etc */ 2885 /* USB general purpose IRQ: ep0, state changes, dma, etc */
2870 status = request_irq(pdev->resource[1].start, omap_udc_irq, 2886 status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
2871 0, driver_name, udc); 2887 omap_udc_irq, 0, driver_name, udc);
2872 if (status != 0) { 2888 if (status != 0) {
2873 ERR("can't get irq %d, err %d\n", 2889 ERR("can't get irq %d, err %d\n",
2874 (int) pdev->resource[1].start, status); 2890 (int) pdev->resource[1].start, status);
@@ -2876,20 +2892,20 @@ bad_on_1710:
2876 } 2892 }
2877 2893
2878 /* USB "non-iso" IRQ (PIO for all but ep0) */ 2894 /* USB "non-iso" IRQ (PIO for all but ep0) */
2879 status = request_irq(pdev->resource[2].start, omap_udc_pio_irq, 2895 status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
2880 0, "omap_udc pio", udc); 2896 omap_udc_pio_irq, 0, "omap_udc pio", udc);
2881 if (status != 0) { 2897 if (status != 0) {
2882 ERR("can't get irq %d, err %d\n", 2898 ERR("can't get irq %d, err %d\n",
2883 (int) pdev->resource[2].start, status); 2899 (int) pdev->resource[2].start, status);
2884 goto cleanup2; 2900 goto cleanup1;
2885 } 2901 }
2886#ifdef USE_ISO 2902#ifdef USE_ISO
2887 status = request_irq(pdev->resource[3].start, omap_udc_iso_irq, 2903 status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
2888 0, "omap_udc iso", udc); 2904 omap_udc_iso_irq, 0, "omap_udc iso", udc);
2889 if (status != 0) { 2905 if (status != 0) {
2890 ERR("can't get irq %d, err %d\n", 2906 ERR("can't get irq %d, err %d\n",
2891 (int) pdev->resource[3].start, status); 2907 (int) pdev->resource[3].start, status);
2892 goto cleanup3; 2908 goto cleanup1;
2893 } 2909 }
2894#endif 2910#endif
2895 if (cpu_is_omap16xx() || cpu_is_omap7xx()) { 2911 if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
@@ -2900,23 +2916,8 @@ bad_on_1710:
2900 } 2916 }
2901 2917
2902 create_proc_file(); 2918 create_proc_file();
2903 status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget, 2919 return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
2904 omap_udc_release); 2920 omap_udc_release);
2905 if (status)
2906 goto cleanup4;
2907
2908 return 0;
2909
2910cleanup4:
2911 remove_proc_file();
2912
2913#ifdef USE_ISO
2914cleanup3:
2915 free_irq(pdev->resource[2].start, udc);
2916#endif
2917
2918cleanup2:
2919 free_irq(pdev->resource[1].start, udc);
2920 2921
2921cleanup1: 2922cleanup1:
2922 kfree(udc); 2923 kfree(udc);
@@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev)
2943{ 2944{
2944 DECLARE_COMPLETION_ONSTACK(done); 2945 DECLARE_COMPLETION_ONSTACK(done);
2945 2946
2946 if (!udc)
2947 return -ENODEV;
2948
2949 usb_del_gadget_udc(&udc->gadget);
2950 if (udc->driver)
2951 return -EBUSY;
2952
2953 udc->done = &done; 2947 udc->done = &done;
2954 2948
2955 pullup_disable(udc); 2949 usb_del_gadget_udc(&udc->gadget);
2956 if (!IS_ERR_OR_NULL(udc->transceiver)) {
2957 usb_put_phy(udc->transceiver);
2958 udc->transceiver = NULL;
2959 }
2960 omap_writew(0, UDC_SYSCON1);
2961
2962 remove_proc_file();
2963
2964#ifdef USE_ISO
2965 free_irq(pdev->resource[3].start, udc);
2966#endif
2967 free_irq(pdev->resource[2].start, udc);
2968 free_irq(pdev->resource[1].start, udc);
2969 2950
2970 if (udc->dc_clk) { 2951 wait_for_completion(&done);
2971 if (udc->clk_requested)
2972 omap_udc_enable_clock(0);
2973 clk_put(udc->hhc_clk);
2974 clk_put(udc->dc_clk);
2975 }
2976 2952
2977 release_mem_region(pdev->resource[0].start, 2953 release_mem_region(pdev->resource[0].start,
2978 pdev->resource[0].end - pdev->resource[0].start + 1); 2954 pdev->resource[0].end - pdev->resource[0].start + 1);
2979 2955
2980 wait_for_completion(&done);
2981
2982 return 0; 2956 return 0;
2983} 2957}
2984 2958
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 27f00160332e..3c4abb5a1c3f 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -325,14 +325,16 @@ static int xhci_histb_remove(struct platform_device *dev)
325 struct xhci_hcd_histb *histb = platform_get_drvdata(dev); 325 struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
326 struct usb_hcd *hcd = histb->hcd; 326 struct usb_hcd *hcd = histb->hcd;
327 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 327 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
328 struct usb_hcd *shared_hcd = xhci->shared_hcd;
328 329
329 xhci->xhc_state |= XHCI_STATE_REMOVING; 330 xhci->xhc_state |= XHCI_STATE_REMOVING;
330 331
331 usb_remove_hcd(xhci->shared_hcd); 332 usb_remove_hcd(shared_hcd);
333 xhci->shared_hcd = NULL;
332 device_wakeup_disable(&dev->dev); 334 device_wakeup_disable(&dev->dev);
333 335
334 usb_remove_hcd(hcd); 336 usb_remove_hcd(hcd);
335 usb_put_hcd(xhci->shared_hcd); 337 usb_put_hcd(shared_hcd);
336 338
337 xhci_histb_host_disable(histb); 339 xhci_histb_host_disable(histb);
338 usb_put_hcd(hcd); 340 usb_put_hcd(hcd);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 12eea73d9f20..94aca1b5ac8a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -876,7 +876,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
876 status |= USB_PORT_STAT_SUSPEND; 876 status |= USB_PORT_STAT_SUSPEND;
877 } 877 }
878 if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && 878 if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
879 !DEV_SUPERSPEED_ANY(raw_port_status)) { 879 !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
880 if ((raw_port_status & PORT_RESET) || 880 if ((raw_port_status & PORT_RESET) ||
881 !(raw_port_status & PORT_PE)) 881 !(raw_port_status & PORT_PE))
882 return 0xffffffff; 882 return 0xffffffff;
@@ -921,7 +921,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
921 time_left = wait_for_completion_timeout( 921 time_left = wait_for_completion_timeout(
922 &bus_state->rexit_done[wIndex], 922 &bus_state->rexit_done[wIndex],
923 msecs_to_jiffies( 923 msecs_to_jiffies(
924 XHCI_MAX_REXIT_TIMEOUT)); 924 XHCI_MAX_REXIT_TIMEOUT_MS));
925 spin_lock_irqsave(&xhci->lock, flags); 925 spin_lock_irqsave(&xhci->lock, flags);
926 926
927 if (time_left) { 927 if (time_left) {
@@ -935,7 +935,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
935 } else { 935 } else {
936 int port_status = readl(port->addr); 936 int port_status = readl(port->addr);
937 xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", 937 xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
938 XHCI_MAX_REXIT_TIMEOUT, 938 XHCI_MAX_REXIT_TIMEOUT_MS,
939 port_status); 939 port_status);
940 status |= USB_PORT_STAT_SUSPEND; 940 status |= USB_PORT_STAT_SUSPEND;
941 clear_bit(wIndex, &bus_state->rexit_ports); 941 clear_bit(wIndex, &bus_state->rexit_ports);
@@ -1474,15 +1474,18 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1474 unsigned long flags; 1474 unsigned long flags;
1475 struct xhci_hub *rhub; 1475 struct xhci_hub *rhub;
1476 struct xhci_port **ports; 1476 struct xhci_port **ports;
1477 u32 portsc_buf[USB_MAXCHILDREN];
1478 bool wake_enabled;
1477 1479
1478 rhub = xhci_get_rhub(hcd); 1480 rhub = xhci_get_rhub(hcd);
1479 ports = rhub->ports; 1481 ports = rhub->ports;
1480 max_ports = rhub->num_ports; 1482 max_ports = rhub->num_ports;
1481 bus_state = &xhci->bus_state[hcd_index(hcd)]; 1483 bus_state = &xhci->bus_state[hcd_index(hcd)];
1484 wake_enabled = hcd->self.root_hub->do_remote_wakeup;
1482 1485
1483 spin_lock_irqsave(&xhci->lock, flags); 1486 spin_lock_irqsave(&xhci->lock, flags);
1484 1487
1485 if (hcd->self.root_hub->do_remote_wakeup) { 1488 if (wake_enabled) {
1486 if (bus_state->resuming_ports || /* USB2 */ 1489 if (bus_state->resuming_ports || /* USB2 */
1487 bus_state->port_remote_wakeup) { /* USB3 */ 1490 bus_state->port_remote_wakeup) { /* USB3 */
1488 spin_unlock_irqrestore(&xhci->lock, flags); 1491 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1490,26 +1493,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1490 return -EBUSY; 1493 return -EBUSY;
1491 } 1494 }
1492 } 1495 }
1493 1496 /*
1494 port_index = max_ports; 1497 * Prepare ports for suspend, but don't write anything before all ports
1498 * are checked and we know bus suspend can proceed
1499 */
1495 bus_state->bus_suspended = 0; 1500 bus_state->bus_suspended = 0;
1501 port_index = max_ports;
1496 while (port_index--) { 1502 while (port_index--) {
1497 /* suspend the port if the port is not suspended */
1498 u32 t1, t2; 1503 u32 t1, t2;
1499 int slot_id;
1500 1504
1501 t1 = readl(ports[port_index]->addr); 1505 t1 = readl(ports[port_index]->addr);
1502 t2 = xhci_port_state_to_neutral(t1); 1506 t2 = xhci_port_state_to_neutral(t1);
1507 portsc_buf[port_index] = 0;
1503 1508
1504 if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) { 1509 /* Bail out if a USB3 port has a new device in link training */
1505 xhci_dbg(xhci, "port %d not suspended\n", port_index); 1510 if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1506 slot_id = xhci_find_slot_id_by_port(hcd, xhci, 1511 bus_state->bus_suspended = 0;
1507 port_index + 1); 1512 spin_unlock_irqrestore(&xhci->lock, flags);
1508 if (slot_id) { 1513 xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
1514 return -EBUSY;
1515 }
1516
1517 /* suspend ports in U0, or bail out for new connect changes */
1518 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
1519 if ((t1 & PORT_CSC) && wake_enabled) {
1520 bus_state->bus_suspended = 0;
1509 spin_unlock_irqrestore(&xhci->lock, flags); 1521 spin_unlock_irqrestore(&xhci->lock, flags);
1510 xhci_stop_device(xhci, slot_id, 1); 1522 xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
1511 spin_lock_irqsave(&xhci->lock, flags); 1523 return -EBUSY;
1512 } 1524 }
1525 xhci_dbg(xhci, "port %d not suspended\n", port_index);
1513 t2 &= ~PORT_PLS_MASK; 1526 t2 &= ~PORT_PLS_MASK;
1514 t2 |= PORT_LINK_STROBE | XDEV_U3; 1527 t2 |= PORT_LINK_STROBE | XDEV_U3;
1515 set_bit(port_index, &bus_state->bus_suspended); 1528 set_bit(port_index, &bus_state->bus_suspended);
@@ -1518,7 +1531,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1518 * including the USB 3.0 roothub, but only if CONFIG_PM 1531 * including the USB 3.0 roothub, but only if CONFIG_PM
1519 * is enabled, so also enable remote wake here. 1532 * is enabled, so also enable remote wake here.
1520 */ 1533 */
1521 if (hcd->self.root_hub->do_remote_wakeup) { 1534 if (wake_enabled) {
1522 if (t1 & PORT_CONNECT) { 1535 if (t1 & PORT_CONNECT) {
1523 t2 |= PORT_WKOC_E | PORT_WKDISC_E; 1536 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
1524 t2 &= ~PORT_WKCONN_E; 1537 t2 &= ~PORT_WKCONN_E;
@@ -1538,7 +1551,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1538 1551
1539 t1 = xhci_port_state_to_neutral(t1); 1552 t1 = xhci_port_state_to_neutral(t1);
1540 if (t1 != t2) 1553 if (t1 != t2)
1541 writel(t2, ports[port_index]->addr); 1554 portsc_buf[port_index] = t2;
1555 }
1556
1557 /* write port settings, stopping and suspending ports if needed */
1558 port_index = max_ports;
1559 while (port_index--) {
1560 if (!portsc_buf[port_index])
1561 continue;
1562 if (test_bit(port_index, &bus_state->bus_suspended)) {
1563 int slot_id;
1564
1565 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1566 port_index + 1);
1567 if (slot_id) {
1568 spin_unlock_irqrestore(&xhci->lock, flags);
1569 xhci_stop_device(xhci, slot_id, 1);
1570 spin_lock_irqsave(&xhci->lock, flags);
1571 }
1572 }
1573 writel(portsc_buf[port_index], ports[port_index]->addr);
1542 } 1574 }
1543 hcd->state = HC_STATE_SUSPENDED; 1575 hcd->state = HC_STATE_SUSPENDED;
1544 bus_state->next_statechange = jiffies + msecs_to_jiffies(10); 1576 bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 71d0d33c3286..60987c787e44 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -590,12 +590,14 @@ static int xhci_mtk_remove(struct platform_device *dev)
590 struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev); 590 struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev);
591 struct usb_hcd *hcd = mtk->hcd; 591 struct usb_hcd *hcd = mtk->hcd;
592 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 592 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
593 struct usb_hcd *shared_hcd = xhci->shared_hcd;
593 594
594 usb_remove_hcd(xhci->shared_hcd); 595 usb_remove_hcd(shared_hcd);
596 xhci->shared_hcd = NULL;
595 device_init_wakeup(&dev->dev, false); 597 device_init_wakeup(&dev->dev, false);
596 598
597 usb_remove_hcd(hcd); 599 usb_remove_hcd(hcd);
598 usb_put_hcd(xhci->shared_hcd); 600 usb_put_hcd(shared_hcd);
599 usb_put_hcd(hcd); 601 usb_put_hcd(hcd);
600 xhci_mtk_sch_exit(mtk); 602 xhci_mtk_sch_exit(mtk);
601 xhci_mtk_clks_disable(mtk); 603 xhci_mtk_clks_disable(mtk);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 01c57055c0c5..a9515265db4d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -248,6 +248,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
248 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) 248 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
249 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; 249 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
250 250
251 if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
252 pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
253 pdev->device == 0x9026)
254 xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
255
251 if (xhci->quirks & XHCI_RESET_ON_RESUME) 256 if (xhci->quirks & XHCI_RESET_ON_RESUME)
252 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 257 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
253 "QUIRK: Resetting on resume"); 258 "QUIRK: Resetting on resume");
@@ -380,6 +385,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
380 if (xhci->shared_hcd) { 385 if (xhci->shared_hcd) {
381 usb_remove_hcd(xhci->shared_hcd); 386 usb_remove_hcd(xhci->shared_hcd);
382 usb_put_hcd(xhci->shared_hcd); 387 usb_put_hcd(xhci->shared_hcd);
388 xhci->shared_hcd = NULL;
383 } 389 }
384 390
385 /* Workaround for spurious wakeups at shutdown with HSW */ 391 /* Workaround for spurious wakeups at shutdown with HSW */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 32b5574ad5c5..ef09cb06212f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -362,14 +362,16 @@ static int xhci_plat_remove(struct platform_device *dev)
362 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 362 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
363 struct clk *clk = xhci->clk; 363 struct clk *clk = xhci->clk;
364 struct clk *reg_clk = xhci->reg_clk; 364 struct clk *reg_clk = xhci->reg_clk;
365 struct usb_hcd *shared_hcd = xhci->shared_hcd;
365 366
366 xhci->xhc_state |= XHCI_STATE_REMOVING; 367 xhci->xhc_state |= XHCI_STATE_REMOVING;
367 368
368 usb_remove_hcd(xhci->shared_hcd); 369 usb_remove_hcd(shared_hcd);
370 xhci->shared_hcd = NULL;
369 usb_phy_shutdown(hcd->usb_phy); 371 usb_phy_shutdown(hcd->usb_phy);
370 372
371 usb_remove_hcd(hcd); 373 usb_remove_hcd(hcd);
372 usb_put_hcd(xhci->shared_hcd); 374 usb_put_hcd(shared_hcd);
373 375
374 clk_disable_unprepare(clk); 376 clk_disable_unprepare(clk);
375 clk_disable_unprepare(reg_clk); 377 clk_disable_unprepare(reg_clk);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a8d92c90fb58..65750582133f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1521,6 +1521,35 @@ static void handle_device_notification(struct xhci_hcd *xhci,
1521 usb_wakeup_notification(udev->parent, udev->portnum); 1521 usb_wakeup_notification(udev->parent, udev->portnum);
1522} 1522}
1523 1523
1524/*
1525 * Quirk hanlder for errata seen on Cavium ThunderX2 processor XHCI
1526 * Controller.
1527 * As per ThunderX2errata-129 USB 2 device may come up as USB 1
1528 * If a connection to a USB 1 device is followed by another connection
1529 * to a USB 2 device.
1530 *
1531 * Reset the PHY after the USB device is disconnected if device speed
1532 * is less than HCD_USB3.
1533 * Retry the reset sequence max of 4 times checking the PLL lock status.
1534 *
1535 */
1536static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
1537{
1538 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1539 u32 pll_lock_check;
1540 u32 retry_count = 4;
1541
1542 do {
1543 /* Assert PHY reset */
1544 writel(0x6F, hcd->regs + 0x1048);
1545 udelay(10);
1546 /* De-assert the PHY reset */
1547 writel(0x7F, hcd->regs + 0x1048);
1548 udelay(200);
1549 pll_lock_check = readl(hcd->regs + 0x1070);
1550 } while (!(pll_lock_check & 0x1) && --retry_count);
1551}
1552
1524static void handle_port_status(struct xhci_hcd *xhci, 1553static void handle_port_status(struct xhci_hcd *xhci,
1525 union xhci_trb *event) 1554 union xhci_trb *event)
1526{ 1555{
@@ -1556,6 +1585,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
1556 goto cleanup; 1585 goto cleanup;
1557 } 1586 }
1558 1587
1588 /* We might get interrupts after shared_hcd is removed */
1589 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
1590 xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
1591 bogus_port_status = true;
1592 goto cleanup;
1593 }
1594
1559 hcd = port->rhub->hcd; 1595 hcd = port->rhub->hcd;
1560 bus_state = &xhci->bus_state[hcd_index(hcd)]; 1596 bus_state = &xhci->bus_state[hcd_index(hcd)];
1561 hcd_portnum = port->hcd_portnum; 1597 hcd_portnum = port->hcd_portnum;
@@ -1639,7 +1675,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
1639 * RExit to a disconnect state). If so, let the the driver know it's 1675 * RExit to a disconnect state). If so, let the the driver know it's
1640 * out of the RExit state. 1676 * out of the RExit state.
1641 */ 1677 */
1642 if (!DEV_SUPERSPEED_ANY(portsc) && 1678 if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
1643 test_and_clear_bit(hcd_portnum, 1679 test_and_clear_bit(hcd_portnum,
1644 &bus_state->rexit_ports)) { 1680 &bus_state->rexit_ports)) {
1645 complete(&bus_state->rexit_done[hcd_portnum]); 1681 complete(&bus_state->rexit_done[hcd_portnum]);
@@ -1647,8 +1683,12 @@ static void handle_port_status(struct xhci_hcd *xhci,
1647 goto cleanup; 1683 goto cleanup;
1648 } 1684 }
1649 1685
1650 if (hcd->speed < HCD_USB3) 1686 if (hcd->speed < HCD_USB3) {
1651 xhci_test_and_clear_bit(xhci, port, PORT_PLC); 1687 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1688 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
1689 (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
1690 xhci_cavium_reset_phy_quirk(xhci);
1691 }
1652 1692
1653cleanup: 1693cleanup:
1654 /* Update event ring dequeue pointer before dropping the lock */ 1694 /* Update event ring dequeue pointer before dropping the lock */
@@ -2266,6 +2306,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2266 goto cleanup; 2306 goto cleanup;
2267 case COMP_RING_UNDERRUN: 2307 case COMP_RING_UNDERRUN:
2268 case COMP_RING_OVERRUN: 2308 case COMP_RING_OVERRUN:
2309 case COMP_STOPPED_LENGTH_INVALID:
2269 goto cleanup; 2310 goto cleanup;
2270 default: 2311 default:
2271 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", 2312 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 6b5db344de30..938ff06c0349 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1303,6 +1303,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
1303 1303
1304 usb_remove_hcd(xhci->shared_hcd); 1304 usb_remove_hcd(xhci->shared_hcd);
1305 usb_put_hcd(xhci->shared_hcd); 1305 usb_put_hcd(xhci->shared_hcd);
1306 xhci->shared_hcd = NULL;
1306 usb_remove_hcd(tegra->hcd); 1307 usb_remove_hcd(tegra->hcd);
1307 usb_put_hcd(tegra->hcd); 1308 usb_put_hcd(tegra->hcd);
1308 1309
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 0420eefa647a..c928dbbff881 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -719,8 +719,6 @@ static void xhci_stop(struct usb_hcd *hcd)
719 719
720 /* Only halt host and free memory after both hcds are removed */ 720 /* Only halt host and free memory after both hcds are removed */
721 if (!usb_hcd_is_primary_hcd(hcd)) { 721 if (!usb_hcd_is_primary_hcd(hcd)) {
722 /* usb core will free this hcd shortly, unset pointer */
723 xhci->shared_hcd = NULL;
724 mutex_unlock(&xhci->mutex); 722 mutex_unlock(&xhci->mutex);
725 return; 723 return;
726 } 724 }
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index bf0b3692dc9a..260b259b72bc 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1680,7 +1680,7 @@ struct xhci_bus_state {
1680 * It can take up to 20 ms to transition from RExit to U0 on the 1680 * It can take up to 20 ms to transition from RExit to U0 on the
1681 * Intel Lynx Point LP xHCI host. 1681 * Intel Lynx Point LP xHCI host.
1682 */ 1682 */
1683#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000) 1683#define XHCI_MAX_REXIT_TIMEOUT_MS 20
1684 1684
1685static inline unsigned int hcd_index(struct usb_hcd *hcd) 1685static inline unsigned int hcd_index(struct usb_hcd *hcd)
1686{ 1686{
@@ -1849,6 +1849,7 @@ struct xhci_hcd {
1849#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) 1849#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
1850#define XHCI_ZERO_64B_REGS BIT_ULL(32) 1850#define XHCI_ZERO_64B_REGS BIT_ULL(32)
1851#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) 1851#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
1852#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
1852 1853
1853 unsigned int num_active_eps; 1854 unsigned int num_active_eps;
1854 unsigned int limit_active_eps; 1855 unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index bd539f3058bc..85b48c6ddc7e 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -50,6 +50,7 @@ static const struct usb_device_id appledisplay_table[] = {
50 { APPLEDISPLAY_DEVICE(0x9219) }, 50 { APPLEDISPLAY_DEVICE(0x9219) },
51 { APPLEDISPLAY_DEVICE(0x921c) }, 51 { APPLEDISPLAY_DEVICE(0x921c) },
52 { APPLEDISPLAY_DEVICE(0x921d) }, 52 { APPLEDISPLAY_DEVICE(0x921d) },
53 { APPLEDISPLAY_DEVICE(0x9222) },
53 { APPLEDISPLAY_DEVICE(0x9236) }, 54 { APPLEDISPLAY_DEVICE(0x9236) },
54 55
55 /* Terminating entry */ 56 /* Terminating entry */
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index d17cd95b55bb..6b2140f966ef 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -27,4 +27,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
27 "USB Card Reader", 27 "USB Card Reader",
28 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), 28 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
29 29
30UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
31 "Realtek",
32 "USB Card Reader",
33 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
34
35UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
36 "Realtek",
37 "USB Card Reader",
38 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
39
30#endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */ 40#endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fdfc64f5acea..221b7333d067 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
251 kfree(resource); 251 kfree(resource);
252} 252}
253 253
254/*
255 * Host memory not allocated to dom0. We can use this range for hotplug-based
256 * ballooning.
257 *
258 * It's a type-less resource. Setting IORESOURCE_MEM will make resource
259 * management algorithms (arch_remove_reservations()) look into guest e820,
260 * which we don't want.
261 */
262static struct resource hostmem_resource = {
263 .name = "Host RAM",
264};
265
266void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
267{}
268
269static struct resource *additional_memory_resource(phys_addr_t size) 254static struct resource *additional_memory_resource(phys_addr_t size)
270{ 255{
271 struct resource *res, *res_hostmem; 256 struct resource *res;
272 int ret = -ENOMEM; 257 int ret;
273 258
274 res = kzalloc(sizeof(*res), GFP_KERNEL); 259 res = kzalloc(sizeof(*res), GFP_KERNEL);
275 if (!res) 260 if (!res)
@@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
278 res->name = "System RAM"; 263 res->name = "System RAM";
279 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 264 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
280 265
281 res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL); 266 ret = allocate_resource(&iomem_resource, res,
282 if (res_hostmem) { 267 size, 0, -1,
283 /* Try to grab a range from hostmem */ 268 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
284 res_hostmem->name = "Host memory"; 269 if (ret < 0) {
285 ret = allocate_resource(&hostmem_resource, res_hostmem, 270 pr_err("Cannot allocate new System RAM resource\n");
286 size, 0, -1, 271 kfree(res);
287 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); 272 return NULL;
288 }
289
290 if (!ret) {
291 /*
292 * Insert this resource into iomem. Because hostmem_resource
293 * tracks portion of guest e820 marked as UNUSABLE noone else
294 * should try to use it.
295 */
296 res->start = res_hostmem->start;
297 res->end = res_hostmem->end;
298 ret = insert_resource(&iomem_resource, res);
299 if (ret < 0) {
300 pr_err("Can't insert iomem_resource [%llx - %llx]\n",
301 res->start, res->end);
302 release_memory_resource(res_hostmem);
303 res_hostmem = NULL;
304 res->start = res->end = 0;
305 }
306 }
307
308 if (ret) {
309 ret = allocate_resource(&iomem_resource, res,
310 size, 0, -1,
311 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
312 if (ret < 0) {
313 pr_err("Cannot allocate new System RAM resource\n");
314 kfree(res);
315 return NULL;
316 }
317 } 273 }
318 274
319#ifdef CONFIG_SPARSEMEM 275#ifdef CONFIG_SPARSEMEM
@@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
325 pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", 281 pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
326 pfn, limit); 282 pfn, limit);
327 release_memory_resource(res); 283 release_memory_resource(res);
328 release_memory_resource(res_hostmem);
329 return NULL; 284 return NULL;
330 } 285 }
331 } 286 }
@@ -750,8 +705,6 @@ static int __init balloon_init(void)
750 set_online_page_callback(&xen_online_page); 705 set_online_page_callback(&xen_online_page);
751 register_memory_notifier(&xen_memory_nb); 706 register_memory_notifier(&xen_memory_nb);
752 register_sysctl_table(xen_root); 707 register_sysctl_table(xen_root);
753
754 arch_xen_balloon_init(&hostmem_resource);
755#endif 708#endif
756 709
757#ifdef CONFIG_XEN_PV 710#ifdef CONFIG_XEN_PV
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 2f11ca72a281..77224d8f3e6f 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
385out_error: 385out_error:
386 if (*evtchn >= 0) 386 if (*evtchn >= 0)
387 xenbus_free_evtchn(pvcalls_front_dev, *evtchn); 387 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
388 kfree(map->active.data.in); 388 free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
389 kfree(map->active.ring); 389 free_page((unsigned long)map->active.ring);
390 return ret; 390 return ret;
391} 391}
392 392
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 23f1387b3ef7..e7df65d32c91 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -36,6 +36,7 @@
36#include <asm/xen/hypervisor.h> 36#include <asm/xen/hypervisor.h>
37 37
38#include <xen/xen.h> 38#include <xen/xen.h>
39#include <xen/xen-ops.h>
39#include <xen/page.h> 40#include <xen/page.h>
40#include <xen/interface/xen.h> 41#include <xen/interface/xen.h>
41#include <xen/interface/memory.h> 42#include <xen/interface/memory.h>
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 43dea3b00c29..8a2562e3a316 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1075,8 +1075,6 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
1075 if (fc->ac.error < 0) 1075 if (fc->ac.error < 0)
1076 return; 1076 return;
1077 1077
1078 d_drop(new_dentry);
1079
1080 inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key, 1078 inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
1081 newfid, newstatus, newcb, fc->cbi); 1079 newfid, newstatus, newcb, fc->cbi);
1082 if (IS_ERR(inode)) { 1080 if (IS_ERR(inode)) {
@@ -1090,7 +1088,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
1090 vnode = AFS_FS_I(inode); 1088 vnode = AFS_FS_I(inode);
1091 set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); 1089 set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
1092 afs_vnode_commit_status(fc, vnode, 0); 1090 afs_vnode_commit_status(fc, vnode, 0);
1093 d_add(new_dentry, inode); 1091 d_instantiate(new_dentry, inode);
1094} 1092}
1095 1093
1096/* 1094/*
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index d049cb459742..fde6b4d4121e 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -61,8 +61,11 @@ void afs_fileserver_probe_result(struct afs_call *call)
61 afs_io_error(call, afs_io_error_fs_probe_fail); 61 afs_io_error(call, afs_io_error_fs_probe_fail);
62 goto out; 62 goto out;
63 case -ECONNRESET: /* Responded, but call expired. */ 63 case -ECONNRESET: /* Responded, but call expired. */
64 case -ERFKILL:
65 case -EADDRNOTAVAIL:
64 case -ENETUNREACH: 66 case -ENETUNREACH:
65 case -EHOSTUNREACH: 67 case -EHOSTUNREACH:
68 case -EHOSTDOWN:
66 case -ECONNREFUSED: 69 case -ECONNREFUSED:
67 case -ETIMEDOUT: 70 case -ETIMEDOUT:
68 case -ETIME: 71 case -ETIME:
@@ -132,12 +135,14 @@ out:
132static int afs_do_probe_fileserver(struct afs_net *net, 135static int afs_do_probe_fileserver(struct afs_net *net,
133 struct afs_server *server, 136 struct afs_server *server,
134 struct key *key, 137 struct key *key,
135 unsigned int server_index) 138 unsigned int server_index,
139 struct afs_error *_e)
136{ 140{
137 struct afs_addr_cursor ac = { 141 struct afs_addr_cursor ac = {
138 .index = 0, 142 .index = 0,
139 }; 143 };
140 int ret; 144 bool in_progress = false;
145 int err;
141 146
142 _enter("%pU", &server->uuid); 147 _enter("%pU", &server->uuid);
143 148
@@ -151,15 +156,17 @@ static int afs_do_probe_fileserver(struct afs_net *net,
151 server->probe.rtt = UINT_MAX; 156 server->probe.rtt = UINT_MAX;
152 157
153 for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) { 158 for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
154 ret = afs_fs_get_capabilities(net, server, &ac, key, server_index, 159 err = afs_fs_get_capabilities(net, server, &ac, key, server_index,
155 true); 160 true);
156 if (ret != -EINPROGRESS) { 161 if (err == -EINPROGRESS)
157 afs_fs_probe_done(server); 162 in_progress = true;
158 return ret; 163 else
159 } 164 afs_prioritise_error(_e, err, ac.abort_code);
160 } 165 }
161 166
162 return 0; 167 if (!in_progress)
168 afs_fs_probe_done(server);
169 return in_progress;
163} 170}
164 171
165/* 172/*
@@ -169,21 +176,23 @@ int afs_probe_fileservers(struct afs_net *net, struct key *key,
169 struct afs_server_list *list) 176 struct afs_server_list *list)
170{ 177{
171 struct afs_server *server; 178 struct afs_server *server;
172 int i, ret; 179 struct afs_error e;
180 bool in_progress = false;
181 int i;
173 182
183 e.error = 0;
184 e.responded = false;
174 for (i = 0; i < list->nr_servers; i++) { 185 for (i = 0; i < list->nr_servers; i++) {
175 server = list->servers[i].server; 186 server = list->servers[i].server;
176 if (test_bit(AFS_SERVER_FL_PROBED, &server->flags)) 187 if (test_bit(AFS_SERVER_FL_PROBED, &server->flags))
177 continue; 188 continue;
178 189
179 if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags)) { 190 if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags) &&
180 ret = afs_do_probe_fileserver(net, server, key, i); 191 afs_do_probe_fileserver(net, server, key, i, &e))
181 if (ret) 192 in_progress = true;
182 return ret;
183 }
184 } 193 }
185 194
186 return 0; 195 return in_progress ? 0 : e.error;
187} 196}
188 197
189/* 198/*
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 4c6d8e1112c2..6b17d3620414 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -382,7 +382,7 @@ void afs_zap_data(struct afs_vnode *vnode)
382int afs_validate(struct afs_vnode *vnode, struct key *key) 382int afs_validate(struct afs_vnode *vnode, struct key *key)
383{ 383{
384 time64_t now = ktime_get_real_seconds(); 384 time64_t now = ktime_get_real_seconds();
385 bool valid = false; 385 bool valid;
386 int ret; 386 int ret;
387 387
388 _enter("{v={%llx:%llu} fl=%lx},%x", 388 _enter("{v={%llx:%llu} fl=%lx},%x",
@@ -402,15 +402,21 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
402 vnode->cb_v_break = vnode->volume->cb_v_break; 402 vnode->cb_v_break = vnode->volume->cb_v_break;
403 valid = false; 403 valid = false;
404 } else if (vnode->status.type == AFS_FTYPE_DIR && 404 } else if (vnode->status.type == AFS_FTYPE_DIR &&
405 test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) && 405 (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
406 vnode->cb_expires_at - 10 > now) { 406 vnode->cb_expires_at - 10 <= now)) {
407 valid = true; 407 valid = false;
408 } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) && 408 } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
409 vnode->cb_expires_at - 10 > now) { 409 vnode->cb_expires_at - 10 <= now) {
410 valid = false;
411 } else {
410 valid = true; 412 valid = true;
411 } 413 }
412 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { 414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
413 valid = true; 415 valid = true;
416 } else {
417 vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
418 vnode->cb_v_break = vnode->volume->cb_v_break;
419 valid = false;
414 } 420 }
415 421
416 read_sequnlock_excl(&vnode->cb_lock); 422 read_sequnlock_excl(&vnode->cb_lock);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5da3b09b7518..8871b9e8645f 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -696,6 +696,14 @@ struct afs_interface {
696}; 696};
697 697
698/* 698/*
699 * Error prioritisation and accumulation.
700 */
701struct afs_error {
702 short error; /* Accumulated error */
703 bool responded; /* T if server responded */
704};
705
706/*
699 * Cursor for iterating over a server's address list. 707 * Cursor for iterating over a server's address list.
700 */ 708 */
701struct afs_addr_cursor { 709struct afs_addr_cursor {
@@ -1015,6 +1023,7 @@ static inline void __afs_stat(atomic_t *s)
1015 * misc.c 1023 * misc.c
1016 */ 1024 */
1017extern int afs_abort_to_error(u32); 1025extern int afs_abort_to_error(u32);
1026extern void afs_prioritise_error(struct afs_error *, int, u32);
1018 1027
1019/* 1028/*
1020 * mntpt.c 1029 * mntpt.c
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 700a5fa7f4ec..bbb1fd51b019 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -118,3 +118,55 @@ int afs_abort_to_error(u32 abort_code)
118 default: return -EREMOTEIO; 118 default: return -EREMOTEIO;
119 } 119 }
120} 120}
121
122/*
123 * Select the error to report from a set of errors.
124 */
125void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
126{
127 switch (error) {
128 case 0:
129 return;
130 default:
131 if (e->error == -ETIMEDOUT ||
132 e->error == -ETIME)
133 return;
134 case -ETIMEDOUT:
135 case -ETIME:
136 if (e->error == -ENOMEM ||
137 e->error == -ENONET)
138 return;
139 case -ENOMEM:
140 case -ENONET:
141 if (e->error == -ERFKILL)
142 return;
143 case -ERFKILL:
144 if (e->error == -EADDRNOTAVAIL)
145 return;
146 case -EADDRNOTAVAIL:
147 if (e->error == -ENETUNREACH)
148 return;
149 case -ENETUNREACH:
150 if (e->error == -EHOSTUNREACH)
151 return;
152 case -EHOSTUNREACH:
153 if (e->error == -EHOSTDOWN)
154 return;
155 case -EHOSTDOWN:
156 if (e->error == -ECONNREFUSED)
157 return;
158 case -ECONNREFUSED:
159 if (e->error == -ECONNRESET)
160 return;
161 case -ECONNRESET: /* Responded, but call expired. */
162 if (e->responded)
163 return;
164 e->error = error;
165 return;
166
167 case -ECONNABORTED:
168 e->responded = true;
169 e->error = afs_abort_to_error(abort_code);
170 return;
171 }
172}
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 00504254c1c2..c3ae324781f8 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -136,7 +136,8 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
136 struct afs_addr_list *alist; 136 struct afs_addr_list *alist;
137 struct afs_server *server; 137 struct afs_server *server;
138 struct afs_vnode *vnode = fc->vnode; 138 struct afs_vnode *vnode = fc->vnode;
139 u32 rtt, abort_code; 139 struct afs_error e;
140 u32 rtt;
140 int error = fc->ac.error, i; 141 int error = fc->ac.error, i;
141 142
142 _enter("%lx[%d],%lx[%d],%d,%d", 143 _enter("%lx[%d],%lx[%d],%d,%d",
@@ -306,8 +307,11 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
306 if (fc->error != -EDESTADDRREQ) 307 if (fc->error != -EDESTADDRREQ)
307 goto iterate_address; 308 goto iterate_address;
308 /* Fall through */ 309 /* Fall through */
310 case -ERFKILL:
311 case -EADDRNOTAVAIL:
309 case -ENETUNREACH: 312 case -ENETUNREACH:
310 case -EHOSTUNREACH: 313 case -EHOSTUNREACH:
314 case -EHOSTDOWN:
311 case -ECONNREFUSED: 315 case -ECONNREFUSED:
312 _debug("no conn"); 316 _debug("no conn");
313 fc->error = error; 317 fc->error = error;
@@ -446,50 +450,15 @@ no_more_servers:
446 if (fc->flags & AFS_FS_CURSOR_VBUSY) 450 if (fc->flags & AFS_FS_CURSOR_VBUSY)
447 goto restart_from_beginning; 451 goto restart_from_beginning;
448 452
449 abort_code = 0; 453 e.error = -EDESTADDRREQ;
450 error = -EDESTADDRREQ; 454 e.responded = false;
451 for (i = 0; i < fc->server_list->nr_servers; i++) { 455 for (i = 0; i < fc->server_list->nr_servers; i++) {
452 struct afs_server *s = fc->server_list->servers[i].server; 456 struct afs_server *s = fc->server_list->servers[i].server;
453 int probe_error = READ_ONCE(s->probe.error);
454 457
455 switch (probe_error) { 458 afs_prioritise_error(&e, READ_ONCE(s->probe.error),
456 case 0: 459 s->probe.abort_code);
457 continue;
458 default:
459 if (error == -ETIMEDOUT ||
460 error == -ETIME)
461 continue;
462 case -ETIMEDOUT:
463 case -ETIME:
464 if (error == -ENOMEM ||
465 error == -ENONET)
466 continue;
467 case -ENOMEM:
468 case -ENONET:
469 if (error == -ENETUNREACH)
470 continue;
471 case -ENETUNREACH:
472 if (error == -EHOSTUNREACH)
473 continue;
474 case -EHOSTUNREACH:
475 if (error == -ECONNREFUSED)
476 continue;
477 case -ECONNREFUSED:
478 if (error == -ECONNRESET)
479 continue;
480 case -ECONNRESET: /* Responded, but call expired. */
481 if (error == -ECONNABORTED)
482 continue;
483 case -ECONNABORTED:
484 abort_code = s->probe.abort_code;
485 error = probe_error;
486 continue;
487 }
488 } 460 }
489 461
490 if (error == -ECONNABORTED)
491 error = afs_abort_to_error(abort_code);
492
493failed_set_error: 462failed_set_error:
494 fc->error = error; 463 fc->error = error;
495failed: 464failed:
@@ -553,8 +522,11 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
553 _leave(" = f [abort]"); 522 _leave(" = f [abort]");
554 return false; 523 return false;
555 524
525 case -ERFKILL:
526 case -EADDRNOTAVAIL:
556 case -ENETUNREACH: 527 case -ENETUNREACH:
557 case -EHOSTUNREACH: 528 case -EHOSTUNREACH:
529 case -EHOSTDOWN:
558 case -ECONNREFUSED: 530 case -ECONNREFUSED:
559 case -ETIMEDOUT: 531 case -ETIMEDOUT:
560 case -ETIME: 532 case -ETIME:
@@ -633,6 +605,7 @@ int afs_end_vnode_operation(struct afs_fs_cursor *fc)
633 struct afs_net *net = afs_v2net(fc->vnode); 605 struct afs_net *net = afs_v2net(fc->vnode);
634 606
635 if (fc->error == -EDESTADDRREQ || 607 if (fc->error == -EDESTADDRREQ ||
608 fc->error == -EADDRNOTAVAIL ||
636 fc->error == -ENETUNREACH || 609 fc->error == -ENETUNREACH ||
637 fc->error == -EHOSTUNREACH) 610 fc->error == -EHOSTUNREACH)
638 afs_dump_edestaddrreq(fc); 611 afs_dump_edestaddrreq(fc);
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 59970886690f..a7b44863d502 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -576,6 +576,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
576{ 576{
577 signed long rtt2, timeout; 577 signed long rtt2, timeout;
578 long ret; 578 long ret;
579 bool stalled = false;
579 u64 rtt; 580 u64 rtt;
580 u32 life, last_life; 581 u32 life, last_life;
581 582
@@ -609,12 +610,20 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
609 610
610 life = rxrpc_kernel_check_life(call->net->socket, call->rxcall); 611 life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
611 if (timeout == 0 && 612 if (timeout == 0 &&
612 life == last_life && signal_pending(current)) 613 life == last_life && signal_pending(current)) {
614 if (stalled)
613 break; 615 break;
616 __set_current_state(TASK_RUNNING);
617 rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
618 timeout = rtt2;
619 stalled = true;
620 continue;
621 }
614 622
615 if (life != last_life) { 623 if (life != last_life) {
616 timeout = rtt2; 624 timeout = rtt2;
617 last_life = life; 625 last_life = life;
626 stalled = false;
618 } 627 }
619 628
620 timeout = schedule_timeout(timeout); 629 timeout = schedule_timeout(timeout);
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
index c0f616bd70cb..f0b032976487 100644
--- a/fs/afs/vl_probe.c
+++ b/fs/afs/vl_probe.c
@@ -61,8 +61,11 @@ void afs_vlserver_probe_result(struct afs_call *call)
61 afs_io_error(call, afs_io_error_vl_probe_fail); 61 afs_io_error(call, afs_io_error_vl_probe_fail);
62 goto out; 62 goto out;
63 case -ECONNRESET: /* Responded, but call expired. */ 63 case -ECONNRESET: /* Responded, but call expired. */
64 case -ERFKILL:
65 case -EADDRNOTAVAIL:
64 case -ENETUNREACH: 66 case -ENETUNREACH:
65 case -EHOSTUNREACH: 67 case -EHOSTUNREACH:
68 case -EHOSTDOWN:
66 case -ECONNREFUSED: 69 case -ECONNREFUSED:
67 case -ETIMEDOUT: 70 case -ETIMEDOUT:
68 case -ETIME: 71 case -ETIME:
@@ -129,15 +132,17 @@ out:
129 * Probe all of a vlserver's addresses to find out the best route and to 132 * Probe all of a vlserver's addresses to find out the best route and to
130 * query its capabilities. 133 * query its capabilities.
131 */ 134 */
132static int afs_do_probe_vlserver(struct afs_net *net, 135static bool afs_do_probe_vlserver(struct afs_net *net,
133 struct afs_vlserver *server, 136 struct afs_vlserver *server,
134 struct key *key, 137 struct key *key,
135 unsigned int server_index) 138 unsigned int server_index,
139 struct afs_error *_e)
136{ 140{
137 struct afs_addr_cursor ac = { 141 struct afs_addr_cursor ac = {
138 .index = 0, 142 .index = 0,
139 }; 143 };
140 int ret; 144 bool in_progress = false;
145 int err;
141 146
142 _enter("%s", server->name); 147 _enter("%s", server->name);
143 148
@@ -151,15 +156,17 @@ static int afs_do_probe_vlserver(struct afs_net *net,
151 server->probe.rtt = UINT_MAX; 156 server->probe.rtt = UINT_MAX;
152 157
153 for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) { 158 for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
154 ret = afs_vl_get_capabilities(net, &ac, key, server, 159 err = afs_vl_get_capabilities(net, &ac, key, server,
155 server_index, true); 160 server_index, true);
156 if (ret != -EINPROGRESS) { 161 if (err == -EINPROGRESS)
157 afs_vl_probe_done(server); 162 in_progress = true;
158 return ret; 163 else
159 } 164 afs_prioritise_error(_e, err, ac.abort_code);
160 } 165 }
161 166
162 return 0; 167 if (!in_progress)
168 afs_vl_probe_done(server);
169 return in_progress;
163} 170}
164 171
165/* 172/*
@@ -169,21 +176,23 @@ int afs_send_vl_probes(struct afs_net *net, struct key *key,
169 struct afs_vlserver_list *vllist) 176 struct afs_vlserver_list *vllist)
170{ 177{
171 struct afs_vlserver *server; 178 struct afs_vlserver *server;
172 int i, ret; 179 struct afs_error e;
180 bool in_progress = false;
181 int i;
173 182
183 e.error = 0;
184 e.responded = false;
174 for (i = 0; i < vllist->nr_servers; i++) { 185 for (i = 0; i < vllist->nr_servers; i++) {
175 server = vllist->servers[i].server; 186 server = vllist->servers[i].server;
176 if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags)) 187 if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
177 continue; 188 continue;
178 189
179 if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags)) { 190 if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags) &&
180 ret = afs_do_probe_vlserver(net, server, key, i); 191 afs_do_probe_vlserver(net, server, key, i, &e))
181 if (ret) 192 in_progress = true;
182 return ret;
183 }
184 } 193 }
185 194
186 return 0; 195 return in_progress ? 0 : e.error;
187} 196}
188 197
189/* 198/*
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index b64a284b99d2..7adde83a0648 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -71,8 +71,9 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
71{ 71{
72 struct afs_addr_list *alist; 72 struct afs_addr_list *alist;
73 struct afs_vlserver *vlserver; 73 struct afs_vlserver *vlserver;
74 struct afs_error e;
74 u32 rtt; 75 u32 rtt;
75 int error = vc->ac.error, abort_code, i; 76 int error = vc->ac.error, i;
76 77
77 _enter("%lx[%d],%lx[%d],%d,%d", 78 _enter("%lx[%d],%lx[%d],%d,%d",
78 vc->untried, vc->index, 79 vc->untried, vc->index,
@@ -119,8 +120,11 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
119 goto failed; 120 goto failed;
120 } 121 }
121 122
123 case -ERFKILL:
124 case -EADDRNOTAVAIL:
122 case -ENETUNREACH: 125 case -ENETUNREACH:
123 case -EHOSTUNREACH: 126 case -EHOSTUNREACH:
127 case -EHOSTDOWN:
124 case -ECONNREFUSED: 128 case -ECONNREFUSED:
125 case -ETIMEDOUT: 129 case -ETIMEDOUT:
126 case -ETIME: 130 case -ETIME:
@@ -235,50 +239,15 @@ no_more_servers:
235 if (vc->flags & AFS_VL_CURSOR_RETRY) 239 if (vc->flags & AFS_VL_CURSOR_RETRY)
236 goto restart_from_beginning; 240 goto restart_from_beginning;
237 241
238 abort_code = 0; 242 e.error = -EDESTADDRREQ;
239 error = -EDESTADDRREQ; 243 e.responded = false;
240 for (i = 0; i < vc->server_list->nr_servers; i++) { 244 for (i = 0; i < vc->server_list->nr_servers; i++) {
241 struct afs_vlserver *s = vc->server_list->servers[i].server; 245 struct afs_vlserver *s = vc->server_list->servers[i].server;
242 int probe_error = READ_ONCE(s->probe.error);
243 246
244 switch (probe_error) { 247 afs_prioritise_error(&e, READ_ONCE(s->probe.error),
245 case 0: 248 s->probe.abort_code);
246 continue;
247 default:
248 if (error == -ETIMEDOUT ||
249 error == -ETIME)
250 continue;
251 case -ETIMEDOUT:
252 case -ETIME:
253 if (error == -ENOMEM ||
254 error == -ENONET)
255 continue;
256 case -ENOMEM:
257 case -ENONET:
258 if (error == -ENETUNREACH)
259 continue;
260 case -ENETUNREACH:
261 if (error == -EHOSTUNREACH)
262 continue;
263 case -EHOSTUNREACH:
264 if (error == -ECONNREFUSED)
265 continue;
266 case -ECONNREFUSED:
267 if (error == -ECONNRESET)
268 continue;
269 case -ECONNRESET: /* Responded, but call expired. */
270 if (error == -ECONNABORTED)
271 continue;
272 case -ECONNABORTED:
273 abort_code = s->probe.abort_code;
274 error = probe_error;
275 continue;
276 }
277 } 249 }
278 250
279 if (error == -ECONNABORTED)
280 error = afs_abort_to_error(abort_code);
281
282failed_set_error: 251failed_set_error:
283 vc->error = error; 252 vc->error = error;
284failed: 253failed:
@@ -341,6 +310,7 @@ int afs_end_vlserver_operation(struct afs_vl_cursor *vc)
341 struct afs_net *net = vc->cell->net; 310 struct afs_net *net = vc->cell->net;
342 311
343 if (vc->error == -EDESTADDRREQ || 312 if (vc->error == -EDESTADDRREQ ||
313 vc->error == -EADDRNOTAVAIL ||
344 vc->error == -ENETUNREACH || 314 vc->error == -ENETUNREACH ||
345 vc->error == -EHOSTUNREACH) 315 vc->error == -EHOSTUNREACH)
346 afs_vl_dump_edestaddrreq(vc); 316 afs_vl_dump_edestaddrreq(vc);
diff --git a/fs/aio.c b/fs/aio.c
index 301e6314183b..97f983592925 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
1436 ret = ioprio_check_cap(iocb->aio_reqprio); 1436 ret = ioprio_check_cap(iocb->aio_reqprio);
1437 if (ret) { 1437 if (ret) {
1438 pr_debug("aio ioprio check cap error: %d\n", ret); 1438 pr_debug("aio ioprio check cap error: %d\n", ret);
1439 fput(req->ki_filp);
1439 return ret; 1440 return ret;
1440 } 1441 }
1441 1442
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3f0b6d1936e8..6d776717d8b3 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -477,9 +477,9 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
477 int mirror_num = 0; 477 int mirror_num = 0;
478 int failed_mirror = 0; 478 int failed_mirror = 0;
479 479
480 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
481 io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree; 480 io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
482 while (1) { 481 while (1) {
482 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
483 ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE, 483 ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
484 mirror_num); 484 mirror_num);
485 if (!ret) { 485 if (!ret) {
@@ -493,15 +493,6 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
493 break; 493 break;
494 } 494 }
495 495
496 /*
497 * This buffer's crc is fine, but its contents are corrupted, so
498 * there is no reason to read the other copies, they won't be
499 * any less wrong.
500 */
501 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) ||
502 ret == -EUCLEAN)
503 break;
504
505 num_copies = btrfs_num_copies(fs_info, 496 num_copies = btrfs_num_copies(fs_info,
506 eb->start, eb->len); 497 eb->start, eb->len);
507 if (num_copies == 1) 498 if (num_copies == 1)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a3c22e16509b..58e93bce3036 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2089,6 +2089,30 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2089 atomic_inc(&root->log_batch); 2089 atomic_inc(&root->log_batch);
2090 2090
2091 /* 2091 /*
2092 * Before we acquired the inode's lock, someone may have dirtied more
2093 * pages in the target range. We need to make sure that writeback for
2094 * any such pages does not start while we are logging the inode, because
2095 * if it does, any of the following might happen when we are not doing a
2096 * full inode sync:
2097 *
2098 * 1) We log an extent after its writeback finishes but before its
2099 * checksums are added to the csum tree, leading to -EIO errors
2100 * when attempting to read the extent after a log replay.
2101 *
2102 * 2) We can end up logging an extent before its writeback finishes.
2103 * Therefore after the log replay we will have a file extent item
2104 * pointing to an unwritten extent (and no data checksums as well).
2105 *
2106 * So trigger writeback for any eventual new dirty pages and then we
2107 * wait for all ordered extents to complete below.
2108 */
2109 ret = start_ordered_ops(inode, start, end);
2110 if (ret) {
2111 inode_unlock(inode);
2112 goto out;
2113 }
2114
2115 /*
2092 * We have to do this here to avoid the priority inversion of waiting on 2116 * We have to do this here to avoid the priority inversion of waiting on
2093 * IO of a lower priority task while holding a transaciton open. 2117 * IO of a lower priority task while holding a transaciton open.
2094 */ 2118 */
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 45868fd76209..f70825af6438 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2659,7 +2659,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2659 int i; 2659 int i;
2660 u64 *i_qgroups; 2660 u64 *i_qgroups;
2661 struct btrfs_fs_info *fs_info = trans->fs_info; 2661 struct btrfs_fs_info *fs_info = trans->fs_info;
2662 struct btrfs_root *quota_root = fs_info->quota_root; 2662 struct btrfs_root *quota_root;
2663 struct btrfs_qgroup *srcgroup; 2663 struct btrfs_qgroup *srcgroup;
2664 struct btrfs_qgroup *dstgroup; 2664 struct btrfs_qgroup *dstgroup;
2665 u32 level_size = 0; 2665 u32 level_size = 0;
@@ -2669,6 +2669,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2669 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2669 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2670 goto out; 2670 goto out;
2671 2671
2672 quota_root = fs_info->quota_root;
2672 if (!quota_root) { 2673 if (!quota_root) {
2673 ret = -EINVAL; 2674 ret = -EINVAL;
2674 goto out; 2675 goto out;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 924116f654a1..a3f75b8926d4 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3959,6 +3959,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3959restart: 3959restart:
3960 if (update_backref_cache(trans, &rc->backref_cache)) { 3960 if (update_backref_cache(trans, &rc->backref_cache)) {
3961 btrfs_end_transaction(trans); 3961 btrfs_end_transaction(trans);
3962 trans = NULL;
3962 continue; 3963 continue;
3963 } 3964 }
3964 3965
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 094cc1444a90..5be83b5a1b43 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3340,7 +3340,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3340 kfree(m); 3340 kfree(m);
3341} 3341}
3342 3342
3343static void tail_append_pending_moves(struct pending_dir_move *moves, 3343static void tail_append_pending_moves(struct send_ctx *sctx,
3344 struct pending_dir_move *moves,
3344 struct list_head *stack) 3345 struct list_head *stack)
3345{ 3346{
3346 if (list_empty(&moves->list)) { 3347 if (list_empty(&moves->list)) {
@@ -3351,6 +3352,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
3351 list_add_tail(&moves->list, stack); 3352 list_add_tail(&moves->list, stack);
3352 list_splice_tail(&list, stack); 3353 list_splice_tail(&list, stack);
3353 } 3354 }
3355 if (!RB_EMPTY_NODE(&moves->node)) {
3356 rb_erase(&moves->node, &sctx->pending_dir_moves);
3357 RB_CLEAR_NODE(&moves->node);
3358 }
3354} 3359}
3355 3360
3356static int apply_children_dir_moves(struct send_ctx *sctx) 3361static int apply_children_dir_moves(struct send_ctx *sctx)
@@ -3365,7 +3370,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
3365 return 0; 3370 return 0;
3366 3371
3367 INIT_LIST_HEAD(&stack); 3372 INIT_LIST_HEAD(&stack);
3368 tail_append_pending_moves(pm, &stack); 3373 tail_append_pending_moves(sctx, pm, &stack);
3369 3374
3370 while (!list_empty(&stack)) { 3375 while (!list_empty(&stack)) {
3371 pm = list_first_entry(&stack, struct pending_dir_move, list); 3376 pm = list_first_entry(&stack, struct pending_dir_move, list);
@@ -3376,7 +3381,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
3376 goto out; 3381 goto out;
3377 pm = get_pending_dir_moves(sctx, parent_ino); 3382 pm = get_pending_dir_moves(sctx, parent_ino);
3378 if (pm) 3383 if (pm)
3379 tail_append_pending_moves(pm, &stack); 3384 tail_append_pending_moves(sctx, pm, &stack);
3380 } 3385 }
3381 return 0; 3386 return 0;
3382 3387
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index cbc9d0d2c12d..645fc81e2a94 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2237,6 +2237,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2237 vol = memdup_user((void __user *)arg, sizeof(*vol)); 2237 vol = memdup_user((void __user *)arg, sizeof(*vol));
2238 if (IS_ERR(vol)) 2238 if (IS_ERR(vol))
2239 return PTR_ERR(vol); 2239 return PTR_ERR(vol);
2240 vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2240 2241
2241 switch (cmd) { 2242 switch (cmd) {
2242 case BTRFS_IOC_SCAN_DEV: 2243 case BTRFS_IOC_SCAN_DEV:
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 95983c744164..1645fcfd9691 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -244,11 +244,13 @@ wait_for_old_object:
244 244
245 ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)); 245 ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
246 246
247 cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_retry); 247 cache->cache.ops->put_object(&xobject->fscache,
248 (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
248 goto try_again; 249 goto try_again;
249 250
250requeue: 251requeue:
251 cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo); 252 cache->cache.ops->put_object(&xobject->fscache,
253 (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
252 _leave(" = -ETIMEDOUT"); 254 _leave(" = -ETIMEDOUT");
253 return -ETIMEDOUT; 255 return -ETIMEDOUT;
254} 256}
@@ -336,7 +338,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
336try_again: 338try_again:
337 /* first step is to make up a grave dentry in the graveyard */ 339 /* first step is to make up a grave dentry in the graveyard */
338 sprintf(nbuffer, "%08x%08x", 340 sprintf(nbuffer, "%08x%08x",
339 (uint32_t) get_seconds(), 341 (uint32_t) ktime_get_real_seconds(),
340 (uint32_t) atomic_inc_return(&cache->gravecounter)); 342 (uint32_t) atomic_inc_return(&cache->gravecounter));
341 343
342 /* do the multiway lock magic */ 344 /* do the multiway lock magic */
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 40f7595aad10..8a577409d030 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -535,7 +535,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
535 netpage->index, cachefiles_gfp); 535 netpage->index, cachefiles_gfp);
536 if (ret < 0) { 536 if (ret < 0) {
537 if (ret == -EEXIST) { 537 if (ret == -EEXIST) {
538 put_page(backpage);
539 backpage = NULL;
538 put_page(netpage); 540 put_page(netpage);
541 netpage = NULL;
539 fscache_retrieval_complete(op, 1); 542 fscache_retrieval_complete(op, 1);
540 continue; 543 continue;
541 } 544 }
@@ -608,7 +611,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
608 netpage->index, cachefiles_gfp); 611 netpage->index, cachefiles_gfp);
609 if (ret < 0) { 612 if (ret < 0) {
610 if (ret == -EEXIST) { 613 if (ret == -EEXIST) {
614 put_page(backpage);
615 backpage = NULL;
611 put_page(netpage); 616 put_page(netpage);
617 netpage = NULL;
612 fscache_retrieval_complete(op, 1); 618 fscache_retrieval_complete(op, 1);
613 continue; 619 continue;
614 } 620 }
@@ -962,11 +968,8 @@ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
962 __releases(&object->fscache.cookie->lock) 968 __releases(&object->fscache.cookie->lock)
963{ 969{
964 struct cachefiles_object *object; 970 struct cachefiles_object *object;
965 struct cachefiles_cache *cache;
966 971
967 object = container_of(_object, struct cachefiles_object, fscache); 972 object = container_of(_object, struct cachefiles_object, fscache);
968 cache = container_of(object->fscache.cache,
969 struct cachefiles_cache, cache);
970 973
971 _enter("%p,{%lu}", object, page->index); 974 _enter("%p,{%lu}", object, page->index);
972 975
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 0a29a00aed2e..511e6c68156a 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -135,7 +135,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
135 struct dentry *dentry = object->dentry; 135 struct dentry *dentry = object->dentry;
136 int ret; 136 int ret;
137 137
138 ASSERT(dentry); 138 if (!dentry)
139 return -ESTALE;
139 140
140 _enter("%p,#%d", object, auxdata->len); 141 _enter("%p,#%d", object, auxdata->len);
141 142
diff --git a/fs/dax.c b/fs/dax.c
index 616e36ea6aaa..9bcce89ea18e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,12 +98,6 @@ static void *dax_make_entry(pfn_t pfn, unsigned long flags)
98 return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); 98 return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
99} 99}
100 100
101static void *dax_make_page_entry(struct page *page)
102{
103 pfn_t pfn = page_to_pfn_t(page);
104 return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
105}
106
107static bool dax_is_locked(void *entry) 101static bool dax_is_locked(void *entry)
108{ 102{
109 return xa_to_value(entry) & DAX_LOCKED; 103 return xa_to_value(entry) & DAX_LOCKED;
@@ -116,12 +110,12 @@ static unsigned int dax_entry_order(void *entry)
116 return 0; 110 return 0;
117} 111}
118 112
119static int dax_is_pmd_entry(void *entry) 113static unsigned long dax_is_pmd_entry(void *entry)
120{ 114{
121 return xa_to_value(entry) & DAX_PMD; 115 return xa_to_value(entry) & DAX_PMD;
122} 116}
123 117
124static int dax_is_pte_entry(void *entry) 118static bool dax_is_pte_entry(void *entry)
125{ 119{
126 return !(xa_to_value(entry) & DAX_PMD); 120 return !(xa_to_value(entry) & DAX_PMD);
127} 121}
@@ -222,9 +216,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
222 ewait.wait.func = wake_exceptional_entry_func; 216 ewait.wait.func = wake_exceptional_entry_func;
223 217
224 for (;;) { 218 for (;;) {
225 entry = xas_load(xas); 219 entry = xas_find_conflict(xas);
226 if (!entry || xa_is_internal(entry) || 220 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
227 WARN_ON_ONCE(!xa_is_value(entry)) ||
228 !dax_is_locked(entry)) 221 !dax_is_locked(entry))
229 return entry; 222 return entry;
230 223
@@ -255,6 +248,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
255{ 248{
256 void *old; 249 void *old;
257 250
251 BUG_ON(dax_is_locked(entry));
258 xas_reset(xas); 252 xas_reset(xas);
259 xas_lock_irq(xas); 253 xas_lock_irq(xas);
260 old = xas_store(xas, entry); 254 old = xas_store(xas, entry);
@@ -352,16 +346,27 @@ static struct page *dax_busy_page(void *entry)
352 return NULL; 346 return NULL;
353} 347}
354 348
349/*
350 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
351 * @page: The page whose entry we want to lock
352 *
353 * Context: Process context.
354 * Return: %true if the entry was locked or does not need to be locked.
355 */
355bool dax_lock_mapping_entry(struct page *page) 356bool dax_lock_mapping_entry(struct page *page)
356{ 357{
357 XA_STATE(xas, NULL, 0); 358 XA_STATE(xas, NULL, 0);
358 void *entry; 359 void *entry;
360 bool locked;
359 361
362 /* Ensure page->mapping isn't freed while we look at it */
363 rcu_read_lock();
360 for (;;) { 364 for (;;) {
361 struct address_space *mapping = READ_ONCE(page->mapping); 365 struct address_space *mapping = READ_ONCE(page->mapping);
362 366
367 locked = false;
363 if (!dax_mapping(mapping)) 368 if (!dax_mapping(mapping))
364 return false; 369 break;
365 370
366 /* 371 /*
367 * In the device-dax case there's no need to lock, a 372 * In the device-dax case there's no need to lock, a
@@ -370,8 +375,9 @@ bool dax_lock_mapping_entry(struct page *page)
370 * otherwise we would not have a valid pfn_to_page() 375 * otherwise we would not have a valid pfn_to_page()
371 * translation. 376 * translation.
372 */ 377 */
378 locked = true;
373 if (S_ISCHR(mapping->host->i_mode)) 379 if (S_ISCHR(mapping->host->i_mode))
374 return true; 380 break;
375 381
376 xas.xa = &mapping->i_pages; 382 xas.xa = &mapping->i_pages;
377 xas_lock_irq(&xas); 383 xas_lock_irq(&xas);
@@ -382,28 +388,35 @@ bool dax_lock_mapping_entry(struct page *page)
382 xas_set(&xas, page->index); 388 xas_set(&xas, page->index);
383 entry = xas_load(&xas); 389 entry = xas_load(&xas);
384 if (dax_is_locked(entry)) { 390 if (dax_is_locked(entry)) {
391 rcu_read_unlock();
385 entry = get_unlocked_entry(&xas); 392 entry = get_unlocked_entry(&xas);
386 /* Did the page move while we slept? */ 393 xas_unlock_irq(&xas);
387 if (dax_to_pfn(entry) != page_to_pfn(page)) { 394 put_unlocked_entry(&xas, entry);
388 xas_unlock_irq(&xas); 395 rcu_read_lock();
389 continue; 396 continue;
390 }
391 } 397 }
392 dax_lock_entry(&xas, entry); 398 dax_lock_entry(&xas, entry);
393 xas_unlock_irq(&xas); 399 xas_unlock_irq(&xas);
394 return true; 400 break;
395 } 401 }
402 rcu_read_unlock();
403 return locked;
396} 404}
397 405
398void dax_unlock_mapping_entry(struct page *page) 406void dax_unlock_mapping_entry(struct page *page)
399{ 407{
400 struct address_space *mapping = page->mapping; 408 struct address_space *mapping = page->mapping;
401 XA_STATE(xas, &mapping->i_pages, page->index); 409 XA_STATE(xas, &mapping->i_pages, page->index);
410 void *entry;
402 411
403 if (S_ISCHR(mapping->host->i_mode)) 412 if (S_ISCHR(mapping->host->i_mode))
404 return; 413 return;
405 414
406 dax_unlock_entry(&xas, dax_make_page_entry(page)); 415 rcu_read_lock();
416 entry = xas_load(&xas);
417 rcu_read_unlock();
418 entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
419 dax_unlock_entry(&xas, entry);
407} 420}
408 421
409/* 422/*
@@ -445,11 +458,9 @@ static void *grab_mapping_entry(struct xa_state *xas,
445retry: 458retry:
446 xas_lock_irq(xas); 459 xas_lock_irq(xas);
447 entry = get_unlocked_entry(xas); 460 entry = get_unlocked_entry(xas);
448 if (xa_is_internal(entry))
449 goto fallback;
450 461
451 if (entry) { 462 if (entry) {
452 if (WARN_ON_ONCE(!xa_is_value(entry))) { 463 if (!xa_is_value(entry)) {
453 xas_set_err(xas, EIO); 464 xas_set_err(xas, EIO);
454 goto out_unlock; 465 goto out_unlock;
455 } 466 }
@@ -1628,8 +1639,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1628 /* Did we race with someone splitting entry or so? */ 1639 /* Did we race with someone splitting entry or so? */
1629 if (!entry || 1640 if (!entry ||
1630 (order == 0 && !dax_is_pte_entry(entry)) || 1641 (order == 0 && !dax_is_pte_entry(entry)) ||
1631 (order == PMD_ORDER && (xa_is_internal(entry) || 1642 (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
1632 !dax_is_pmd_entry(entry)))) {
1633 put_unlocked_entry(&xas, entry); 1643 put_unlocked_entry(&xas, entry);
1634 xas_unlock_irq(&xas); 1644 xas_unlock_irq(&xas);
1635 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 1645 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 722d17c88edb..41a0e97252ae 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -325,8 +325,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
325 */ 325 */
326 dio->iocb->ki_pos += transferred; 326 dio->iocb->ki_pos += transferred;
327 327
328 if (dio->op == REQ_OP_WRITE) 328 if (ret > 0 && dio->op == REQ_OP_WRITE)
329 ret = generic_write_sync(dio->iocb, transferred); 329 ret = generic_write_sync(dio->iocb, ret);
330 dio->iocb->ki_complete(dio->iocb, ret, 0); 330 dio->iocb->ki_complete(dio->iocb, ret, 0);
331 } 331 }
332 332
diff --git a/fs/exec.c b/fs/exec.c
index fc281b738a98..acc3a5536384 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,6 +62,7 @@
62#include <linux/oom.h> 62#include <linux/oom.h>
63#include <linux/compat.h> 63#include <linux/compat.h>
64#include <linux/vmalloc.h> 64#include <linux/vmalloc.h>
65#include <linux/freezer.h>
65 66
66#include <linux/uaccess.h> 67#include <linux/uaccess.h>
67#include <asm/mmu_context.h> 68#include <asm/mmu_context.h>
@@ -1083,7 +1084,7 @@ static int de_thread(struct task_struct *tsk)
1083 while (sig->notify_count) { 1084 while (sig->notify_count) {
1084 __set_current_state(TASK_KILLABLE); 1085 __set_current_state(TASK_KILLABLE);
1085 spin_unlock_irq(lock); 1086 spin_unlock_irq(lock);
1086 schedule(); 1087 freezable_schedule();
1087 if (unlikely(__fatal_signal_pending(tsk))) 1088 if (unlikely(__fatal_signal_pending(tsk)))
1088 goto killed; 1089 goto killed;
1089 spin_lock_irq(lock); 1090 spin_lock_irq(lock);
@@ -1111,7 +1112,7 @@ static int de_thread(struct task_struct *tsk)
1111 __set_current_state(TASK_KILLABLE); 1112 __set_current_state(TASK_KILLABLE);
1112 write_unlock_irq(&tasklist_lock); 1113 write_unlock_irq(&tasklist_lock);
1113 cgroup_threadgroup_change_end(tsk); 1114 cgroup_threadgroup_change_end(tsk);
1114 schedule(); 1115 freezable_schedule();
1115 if (unlikely(__fatal_signal_pending(tsk))) 1116 if (unlikely(__fatal_signal_pending(tsk)))
1116 goto killed; 1117 goto killed;
1117 } 1118 }
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 645158dc33f1..c69927bed4ef 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
77 struct dentry *parent = dget_parent(dentry); 77 struct dentry *parent = dget_parent(dentry);
78 78
79 dput(dentry); 79 dput(dentry);
80 if (IS_ROOT(dentry)) { 80 if (dentry == parent) {
81 dput(parent); 81 dput(parent);
82 return false; 82 return false;
83 } 83 }
@@ -147,6 +147,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
147 tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf)); 147 tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
148 if (IS_ERR(tmp)) { 148 if (IS_ERR(tmp)) {
149 dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp)); 149 dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
150 err = PTR_ERR(tmp);
150 goto out_err; 151 goto out_err;
151 } 152 }
152 if (tmp != dentry) { 153 if (tmp != dentry) {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index cb91baa4275d..eb11502e3fcd 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -892,6 +892,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
892 if (sb->s_magic != EXT2_SUPER_MAGIC) 892 if (sb->s_magic != EXT2_SUPER_MAGIC)
893 goto cantfind_ext2; 893 goto cantfind_ext2;
894 894
895 opts.s_mount_opt = 0;
895 /* Set defaults before we parse the mount options */ 896 /* Set defaults before we parse the mount options */
896 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 897 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
897 if (def_mount_opts & EXT2_DEFM_DEBUG) 898 if (def_mount_opts & EXT2_DEFM_DEBUG)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 62d9a659a8ff..dd8f10db82e9 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -612,9 +612,9 @@ skip_replace:
612 } 612 }
613 613
614cleanup: 614cleanup:
615 brelse(bh);
616 if (!(bh && header == HDR(bh))) 615 if (!(bh && header == HDR(bh)))
617 kfree(header); 616 kfree(header);
617 brelse(bh);
618 up_write(&EXT2_I(inode)->xattr_sem); 618 up_write(&EXT2_I(inode)->xattr_sem);
619 619
620 return error; 620 return error;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9edc920f651f..6d9cb1719de5 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -730,6 +730,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
730 730
731 if (awaken) 731 if (awaken)
732 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); 732 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
733 if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
734 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
735
733 736
734 /* Prevent a race with our last child, which has to signal EV_CLEARED 737 /* Prevent a race with our last child, which has to signal EV_CLEARED
735 * before dropping our spinlock. 738 * before dropping our spinlock.
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 98b96ffb95ed..19017d296173 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -338,13 +338,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
338 338
339 nidx -= len * 8; 339 nidx -= len * 8;
340 i = node->next; 340 i = node->next;
341 hfs_bnode_put(node);
342 if (!i) { 341 if (!i) {
343 /* panic */; 342 /* panic */;
344 pr_crit("unable to free bnode %u. bmap not found!\n", 343 pr_crit("unable to free bnode %u. bmap not found!\n",
345 node->this); 344 node->this);
345 hfs_bnode_put(node);
346 return; 346 return;
347 } 347 }
348 hfs_bnode_put(node);
348 node = hfs_bnode_find(tree, i); 349 node = hfs_bnode_find(tree, i);
349 if (IS_ERR(node)) 350 if (IS_ERR(node))
350 return; 351 return;
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 236efe51eca6..66774f4cb4fd 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -466,14 +466,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
466 466
467 nidx -= len * 8; 467 nidx -= len * 8;
468 i = node->next; 468 i = node->next;
469 hfs_bnode_put(node);
470 if (!i) { 469 if (!i) {
471 /* panic */; 470 /* panic */;
472 pr_crit("unable to free bnode %u. " 471 pr_crit("unable to free bnode %u. "
473 "bmap not found!\n", 472 "bmap not found!\n",
474 node->this); 473 node->this);
474 hfs_bnode_put(node);
475 return; 475 return;
476 } 476 }
477 hfs_bnode_put(node);
477 node = hfs_bnode_find(tree, i); 478 node = hfs_bnode_find(tree, i);
478 if (IS_ERR(node)) 479 if (IS_ERR(node))
479 return; 480 return;
diff --git a/fs/iomap.c b/fs/iomap.c
index 64ce240217a1..3ffb776fbebe 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -142,13 +142,14 @@ static void
142iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, 142iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
143 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) 143 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
144{ 144{
145 loff_t orig_pos = *pos;
146 loff_t isize = i_size_read(inode);
145 unsigned block_bits = inode->i_blkbits; 147 unsigned block_bits = inode->i_blkbits;
146 unsigned block_size = (1 << block_bits); 148 unsigned block_size = (1 << block_bits);
147 unsigned poff = offset_in_page(*pos); 149 unsigned poff = offset_in_page(*pos);
148 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); 150 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
149 unsigned first = poff >> block_bits; 151 unsigned first = poff >> block_bits;
150 unsigned last = (poff + plen - 1) >> block_bits; 152 unsigned last = (poff + plen - 1) >> block_bits;
151 unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;
152 153
153 /* 154 /*
154 * If the block size is smaller than the page size we need to check the 155 * If the block size is smaller than the page size we need to check the
@@ -183,8 +184,12 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
183 * handle both halves separately so that we properly zero data in the 184 * handle both halves separately so that we properly zero data in the
184 * page cache for blocks that are entirely outside of i_size. 185 * page cache for blocks that are entirely outside of i_size.
185 */ 186 */
186 if (first <= end && last > end) 187 if (orig_pos <= isize && orig_pos + length > isize) {
187 plen -= (last - end) * block_size; 188 unsigned end = offset_in_page(isize - 1) >> block_bits;
189
190 if (first <= end && last > end)
191 plen -= (last - end) * block_size;
192 }
188 193
189 *offp = poff; 194 *offp = poff;
190 *lenp = plen; 195 *lenp = plen;
@@ -1580,7 +1585,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1580 struct bio *bio; 1585 struct bio *bio;
1581 bool need_zeroout = false; 1586 bool need_zeroout = false;
1582 bool use_fua = false; 1587 bool use_fua = false;
1583 int nr_pages, ret; 1588 int nr_pages, ret = 0;
1584 size_t copied = 0; 1589 size_t copied = 0;
1585 1590
1586 if ((pos | length | align) & ((1 << blkbits) - 1)) 1591 if ((pos | length | align) & ((1 << blkbits) - 1))
@@ -1596,12 +1601,13 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1596 1601
1597 if (iomap->flags & IOMAP_F_NEW) { 1602 if (iomap->flags & IOMAP_F_NEW) {
1598 need_zeroout = true; 1603 need_zeroout = true;
1599 } else { 1604 } else if (iomap->type == IOMAP_MAPPED) {
1600 /* 1605 /*
1601 * Use a FUA write if we need datasync semantics, this 1606 * Use a FUA write if we need datasync semantics, this is a pure
1602 * is a pure data IO that doesn't require any metadata 1607 * data IO that doesn't require any metadata updates (including
1603 * updates and the underlying device supports FUA. This 1608 * after IO completion such as unwritten extent conversion) and
1604 * allows us to avoid cache flushes on IO completion. 1609 * the underlying device supports FUA. This allows us to avoid
1610 * cache flushes on IO completion.
1605 */ 1611 */
1606 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) && 1612 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1607 (dio->flags & IOMAP_DIO_WRITE_FUA) && 1613 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
@@ -1644,8 +1650,14 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1644 1650
1645 ret = bio_iov_iter_get_pages(bio, &iter); 1651 ret = bio_iov_iter_get_pages(bio, &iter);
1646 if (unlikely(ret)) { 1652 if (unlikely(ret)) {
1653 /*
1654 * We have to stop part way through an IO. We must fall
1655 * through to the sub-block tail zeroing here, otherwise
1656 * this short IO may expose stale data in the tail of
1657 * the block we haven't written data to.
1658 */
1647 bio_put(bio); 1659 bio_put(bio);
1648 return copied ? copied : ret; 1660 goto zero_tail;
1649 } 1661 }
1650 1662
1651 n = bio->bi_iter.bi_size; 1663 n = bio->bi_iter.bi_size;
@@ -1676,13 +1688,21 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1676 dio->submit.cookie = submit_bio(bio); 1688 dio->submit.cookie = submit_bio(bio);
1677 } while (nr_pages); 1689 } while (nr_pages);
1678 1690
1679 if (need_zeroout) { 1691 /*
1692 * We need to zeroout the tail of a sub-block write if the extent type
1693 * requires zeroing or the write extends beyond EOF. If we don't zero
1694 * the block tail in the latter case, we can expose stale data via mmap
1695 * reads of the EOF block.
1696 */
1697zero_tail:
1698 if (need_zeroout ||
1699 ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
1680 /* zero out from the end of the write to the end of the block */ 1700 /* zero out from the end of the write to the end of the block */
1681 pad = pos & (fs_block_size - 1); 1701 pad = pos & (fs_block_size - 1);
1682 if (pad) 1702 if (pad)
1683 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); 1703 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1684 } 1704 }
1685 return copied; 1705 return copied ? copied : ret;
1686} 1706}
1687 1707
1688static loff_t 1708static loff_t
@@ -1857,6 +1877,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1857 dio->wait_for_completion = true; 1877 dio->wait_for_completion = true;
1858 ret = 0; 1878 ret = 0;
1859 } 1879 }
1880
1881 /*
1882 * Splicing to pipes can fail on a full pipe. We have to
1883 * swallow this to make it look like a short IO
1884 * otherwise the higher splice layers will completely
1885 * mishandle the error and stop moving data.
1886 */
1887 if (ret == -EFAULT)
1888 ret = 0;
1860 break; 1889 break;
1861 } 1890 }
1862 pos += ret; 1891 pos += ret;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 7b861bbc0b43..315967354954 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -686,20 +686,24 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
686{ 686{
687 struct cb_offloadargs *args = data; 687 struct cb_offloadargs *args = data;
688 struct nfs_server *server; 688 struct nfs_server *server;
689 struct nfs4_copy_state *copy; 689 struct nfs4_copy_state *copy, *tmp_copy;
690 bool found = false; 690 bool found = false;
691 691
692 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
693 if (!copy)
694 return htonl(NFS4ERR_SERVERFAULT);
695
692 spin_lock(&cps->clp->cl_lock); 696 spin_lock(&cps->clp->cl_lock);
693 rcu_read_lock(); 697 rcu_read_lock();
694 list_for_each_entry_rcu(server, &cps->clp->cl_superblocks, 698 list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
695 client_link) { 699 client_link) {
696 list_for_each_entry(copy, &server->ss_copies, copies) { 700 list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
697 if (memcmp(args->coa_stateid.other, 701 if (memcmp(args->coa_stateid.other,
698 copy->stateid.other, 702 tmp_copy->stateid.other,
699 sizeof(args->coa_stateid.other))) 703 sizeof(args->coa_stateid.other)))
700 continue; 704 continue;
701 nfs4_copy_cb_args(copy, args); 705 nfs4_copy_cb_args(tmp_copy, args);
702 complete(&copy->completion); 706 complete(&tmp_copy->completion);
703 found = true; 707 found = true;
704 goto out; 708 goto out;
705 } 709 }
@@ -707,15 +711,11 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
707out: 711out:
708 rcu_read_unlock(); 712 rcu_read_unlock();
709 if (!found) { 713 if (!found) {
710 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
711 if (!copy) {
712 spin_unlock(&cps->clp->cl_lock);
713 return htonl(NFS4ERR_SERVERFAULT);
714 }
715 memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE); 714 memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
716 nfs4_copy_cb_args(copy, args); 715 nfs4_copy_cb_args(copy, args);
717 list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids); 716 list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
718 } 717 } else
718 kfree(copy);
719 spin_unlock(&cps->clp->cl_lock); 719 spin_unlock(&cps->clp->cl_lock);
720 720
721 return 0; 721 return 0;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 86bcba40ca61..74b36ed883ca 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1361,12 +1361,7 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1361 task)) 1361 task))
1362 return; 1362 return;
1363 1363
1364 if (ff_layout_read_prepare_common(task, hdr)) 1364 ff_layout_read_prepare_common(task, hdr);
1365 return;
1366
1367 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1368 hdr->args.lock_context, FMODE_READ) == -EIO)
1369 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1370} 1365}
1371 1366
1372static void ff_layout_read_call_done(struct rpc_task *task, void *data) 1367static void ff_layout_read_call_done(struct rpc_task *task, void *data)
@@ -1542,12 +1537,7 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1542 task)) 1537 task))
1543 return; 1538 return;
1544 1539
1545 if (ff_layout_write_prepare_common(task, hdr)) 1540 ff_layout_write_prepare_common(task, hdr);
1546 return;
1547
1548 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1549 hdr->args.lock_context, FMODE_WRITE) == -EIO)
1550 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1551} 1541}
1552 1542
1553static void ff_layout_write_call_done(struct rpc_task *task, void *data) 1543static void ff_layout_write_call_done(struct rpc_task *task, void *data)
@@ -1742,6 +1732,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1742 fh = nfs4_ff_layout_select_ds_fh(lseg, idx); 1732 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1743 if (fh) 1733 if (fh)
1744 hdr->args.fh = fh; 1734 hdr->args.fh = fh;
1735
1736 if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
1737 goto out_failed;
1738
1745 /* 1739 /*
1746 * Note that if we ever decide to split across DSes, 1740 * Note that if we ever decide to split across DSes,
1747 * then we may need to handle dense-like offsets. 1741 * then we may need to handle dense-like offsets.
@@ -1804,6 +1798,9 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1804 if (fh) 1798 if (fh)
1805 hdr->args.fh = fh; 1799 hdr->args.fh = fh;
1806 1800
1801 if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
1802 goto out_failed;
1803
1807 /* 1804 /*
1808 * Note that if we ever decide to split across DSes, 1805 * Note that if we ever decide to split across DSes,
1809 * then we may need to handle dense-like offsets. 1806 * then we may need to handle dense-like offsets.
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index 411798346e48..de50a342d5a5 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -215,6 +215,10 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
215 unsigned int maxnum); 215 unsigned int maxnum);
216struct nfs_fh * 216struct nfs_fh *
217nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx); 217nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
218int
219nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
220 u32 mirror_idx,
221 nfs4_stateid *stateid);
218 222
219struct nfs4_pnfs_ds * 223struct nfs4_pnfs_ds *
220nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, 224nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 74d8d5352438..d23347389626 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -370,6 +370,25 @@ out:
370 return fh; 370 return fh;
371} 371}
372 372
373int
374nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
375 u32 mirror_idx,
376 nfs4_stateid *stateid)
377{
378 struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
379
380 if (!ff_layout_mirror_valid(lseg, mirror, false)) {
381 pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
382 __func__, mirror_idx);
383 goto out;
384 }
385
386 nfs4_stateid_copy(stateid, &mirror->stateid);
387 return 1;
388out:
389 return 0;
390}
391
373/** 392/**
374 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call 393 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
375 * @lseg: the layout segment we're operating on 394 * @lseg: the layout segment we're operating on
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index ac5b784a1de0..fed06fd9998d 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -137,31 +137,32 @@ static int handle_async_copy(struct nfs42_copy_res *res,
137 struct file *dst, 137 struct file *dst,
138 nfs4_stateid *src_stateid) 138 nfs4_stateid *src_stateid)
139{ 139{
140 struct nfs4_copy_state *copy; 140 struct nfs4_copy_state *copy, *tmp_copy;
141 int status = NFS4_OK; 141 int status = NFS4_OK;
142 bool found_pending = false; 142 bool found_pending = false;
143 struct nfs_open_context *ctx = nfs_file_open_context(dst); 143 struct nfs_open_context *ctx = nfs_file_open_context(dst);
144 144
145 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
146 if (!copy)
147 return -ENOMEM;
148
145 spin_lock(&server->nfs_client->cl_lock); 149 spin_lock(&server->nfs_client->cl_lock);
146 list_for_each_entry(copy, &server->nfs_client->pending_cb_stateids, 150 list_for_each_entry(tmp_copy, &server->nfs_client->pending_cb_stateids,
147 copies) { 151 copies) {
148 if (memcmp(&res->write_res.stateid, &copy->stateid, 152 if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
149 NFS4_STATEID_SIZE)) 153 NFS4_STATEID_SIZE))
150 continue; 154 continue;
151 found_pending = true; 155 found_pending = true;
152 list_del(&copy->copies); 156 list_del(&tmp_copy->copies);
153 break; 157 break;
154 } 158 }
155 if (found_pending) { 159 if (found_pending) {
156 spin_unlock(&server->nfs_client->cl_lock); 160 spin_unlock(&server->nfs_client->cl_lock);
161 kfree(copy);
162 copy = tmp_copy;
157 goto out; 163 goto out;
158 } 164 }
159 165
160 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
161 if (!copy) {
162 spin_unlock(&server->nfs_client->cl_lock);
163 return -ENOMEM;
164 }
165 memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE); 166 memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
166 init_completion(&copy->completion); 167 init_completion(&copy->completion);
167 copy->parent_state = ctx->state; 168 copy->parent_state = ctx->state;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8d59c9655ec4..1b994b527518 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -41,6 +41,8 @@ enum nfs4_client_state {
41 NFS4CLNT_MOVED, 41 NFS4CLNT_MOVED,
42 NFS4CLNT_LEASE_MOVED, 42 NFS4CLNT_LEASE_MOVED,
43 NFS4CLNT_DELEGATION_EXPIRED, 43 NFS4CLNT_DELEGATION_EXPIRED,
44 NFS4CLNT_RUN_MANAGER,
45 NFS4CLNT_DELEGRETURN_RUNNING,
44}; 46};
45 47
46#define NFS4_RENEW_TIMEOUT 0x01 48#define NFS4_RENEW_TIMEOUT 0x01
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index ffea57885394..d8decf2ec48f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1210,6 +1210,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
1210 struct task_struct *task; 1210 struct task_struct *task;
1211 char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; 1211 char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
1212 1212
1213 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
1213 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) 1214 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
1214 return; 1215 return;
1215 __module_get(THIS_MODULE); 1216 __module_get(THIS_MODULE);
@@ -2503,6 +2504,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
2503 2504
2504 /* Ensure exclusive access to NFSv4 state */ 2505 /* Ensure exclusive access to NFSv4 state */
2505 do { 2506 do {
2507 clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
2506 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 2508 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
2507 section = "purge state"; 2509 section = "purge state";
2508 status = nfs4_purge_lease(clp); 2510 status = nfs4_purge_lease(clp);
@@ -2593,14 +2595,18 @@ static void nfs4_state_manager(struct nfs_client *clp)
2593 } 2595 }
2594 2596
2595 nfs4_end_drain_session(clp); 2597 nfs4_end_drain_session(clp);
2596 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) { 2598 nfs4_clear_state_manager_bit(clp);
2597 nfs_client_return_marked_delegations(clp); 2599
2598 continue; 2600 if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
2601 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
2602 nfs_client_return_marked_delegations(clp);
2603 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
2604 }
2605 clear_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state);
2599 } 2606 }
2600 2607
2601 nfs4_clear_state_manager_bit(clp);
2602 /* Did we race with an attempt to give us more work? */ 2608 /* Did we race with an attempt to give us more work? */
2603 if (clp->cl_state == 0) 2609 if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
2604 return; 2610 return;
2605 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) 2611 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
2606 return; 2612 return;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index de99db518571..f2129a5d9f23 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -266,9 +266,7 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
266 return; 266 return;
267 267
268 if (nbh == NULL) { /* blocksize == pagesize */ 268 if (nbh == NULL) { /* blocksize == pagesize */
269 xa_lock_irq(&btnc->i_pages); 269 xa_erase_irq(&btnc->i_pages, newkey);
270 __xa_erase(&btnc->i_pages, newkey);
271 xa_unlock_irq(&btnc->i_pages);
272 unlock_page(ctxt->bh->b_page); 270 unlock_page(ctxt->bh->b_page);
273 } else 271 } else
274 brelse(nbh); 272 brelse(nbh);
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 9f88188060db..4bf8d5854b27 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -125,10 +125,10 @@ check_err:
125 125
126check_gen: 126check_gen:
127 if (handle->ih_generation != inode->i_generation) { 127 if (handle->ih_generation != inode->i_generation) {
128 iput(inode);
129 trace_ocfs2_get_dentry_generation((unsigned long long)blkno, 128 trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
130 handle->ih_generation, 129 handle->ih_generation,
131 inode->i_generation); 130 inode->i_generation);
131 iput(inode);
132 result = ERR_PTR(-ESTALE); 132 result = ERR_PTR(-ESTALE);
133 goto bail; 133 goto bail;
134 } 134 }
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 3f1685d7d43b..1565dd8e8856 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -157,18 +157,14 @@ out:
157} 157}
158 158
159/* 159/*
160 * lock allocators, and reserving appropriate number of bits for 160 * lock allocator, and reserve appropriate number of bits for
161 * meta blocks and data clusters. 161 * meta blocks.
162 *
163 * in some cases, we don't need to reserve clusters, just let data_ac
164 * be NULL.
165 */ 162 */
166static int ocfs2_lock_allocators_move_extents(struct inode *inode, 163static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
167 struct ocfs2_extent_tree *et, 164 struct ocfs2_extent_tree *et,
168 u32 clusters_to_move, 165 u32 clusters_to_move,
169 u32 extents_to_split, 166 u32 extents_to_split,
170 struct ocfs2_alloc_context **meta_ac, 167 struct ocfs2_alloc_context **meta_ac,
171 struct ocfs2_alloc_context **data_ac,
172 int extra_blocks, 168 int extra_blocks,
173 int *credits) 169 int *credits)
174{ 170{
@@ -193,13 +189,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
193 goto out; 189 goto out;
194 } 190 }
195 191
196 if (data_ac) {
197 ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
198 if (ret) {
199 mlog_errno(ret);
200 goto out;
201 }
202 }
203 192
204 *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el); 193 *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
205 194
@@ -259,10 +248,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
259 } 248 }
260 } 249 }
261 250
262 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1, 251 ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
263 &context->meta_ac, 252 *len, 1,
264 &context->data_ac, 253 &context->meta_ac,
265 extra_blocks, &credits); 254 extra_blocks, &credits);
266 if (ret) { 255 if (ret) {
267 mlog_errno(ret); 256 mlog_errno(ret);
268 goto out; 257 goto out;
@@ -285,6 +274,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
285 } 274 }
286 } 275 }
287 276
277 /*
278 * Make sure ocfs2_reserve_cluster is called after
279 * __ocfs2_flush_truncate_log, otherwise, dead lock may happen.
280 *
281 * If ocfs2_reserve_cluster is called
282 * before __ocfs2_flush_truncate_log, dead lock on global bitmap
283 * may happen.
284 *
285 */
286 ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
287 if (ret) {
288 mlog_errno(ret);
289 goto out_unlock_mutex;
290 }
291
288 handle = ocfs2_start_trans(osb, credits); 292 handle = ocfs2_start_trans(osb, credits);
289 if (IS_ERR(handle)) { 293 if (IS_ERR(handle)) {
290 ret = PTR_ERR(handle); 294 ret = PTR_ERR(handle);
@@ -617,9 +621,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
617 } 621 }
618 } 622 }
619 623
620 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1, 624 ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
621 &context->meta_ac, 625 len, 1,
622 NULL, extra_blocks, &credits); 626 &context->meta_ac,
627 extra_blocks, &credits);
623 if (ret) { 628 if (ret) {
624 mlog_errno(ret); 629 mlog_errno(ret);
625 goto out; 630 goto out;
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index ffcff6516e89..e02a9039b5ea 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -816,17 +816,14 @@ static int ramoops_probe(struct platform_device *pdev)
816 816
817 cxt->pstore.data = cxt; 817 cxt->pstore.data = cxt;
818 /* 818 /*
819 * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we 819 * Since bufsize is only used for dmesg crash dumps, it
820 * have to handle dumps, we must have at least record_size buffer. And 820 * must match the size of the dprz record (after PRZ header
821 * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be 821 * and ECC bytes have been accounted for).
822 * ZERO_SIZE_PTR).
823 */ 822 */
824 if (cxt->console_size) 823 cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
825 cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */ 824 cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
826 cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
827 cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
828 if (!cxt->pstore.buf) { 825 if (!cxt->pstore.buf) {
829 pr_err("cannot allocate pstore buffer\n"); 826 pr_err("cannot allocate pstore crash dump buffer\n");
830 err = -ENOMEM; 827 err = -ENOMEM;
831 goto fail_clear; 828 goto fail_clear;
832 } 829 }
diff --git a/fs/read_write.c b/fs/read_write.c
index bfcb4ced5664..4dae0399c75a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -2094,17 +2094,18 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
2094 off = same->src_offset; 2094 off = same->src_offset;
2095 len = same->src_length; 2095 len = same->src_length;
2096 2096
2097 ret = -EISDIR;
2098 if (S_ISDIR(src->i_mode)) 2097 if (S_ISDIR(src->i_mode))
2099 goto out; 2098 return -EISDIR;
2100 2099
2101 ret = -EINVAL;
2102 if (!S_ISREG(src->i_mode)) 2100 if (!S_ISREG(src->i_mode))
2103 goto out; 2101 return -EINVAL;
2102
2103 if (!file->f_op->remap_file_range)
2104 return -EOPNOTSUPP;
2104 2105
2105 ret = remap_verify_area(file, off, len, false); 2106 ret = remap_verify_area(file, off, len, false);
2106 if (ret < 0) 2107 if (ret < 0)
2107 goto out; 2108 return ret;
2108 ret = 0; 2109 ret = 0;
2109 2110
2110 if (off + len > i_size_read(src)) 2111 if (off + len > i_size_read(src))
@@ -2147,10 +2148,8 @@ next_fdput:
2147 fdput(dst_fd); 2148 fdput(dst_fd);
2148next_loop: 2149next_loop:
2149 if (fatal_signal_pending(current)) 2150 if (fatal_signal_pending(current))
2150 goto out; 2151 break;
2151 } 2152 }
2152
2153out:
2154 return ret; 2153 return ret;
2155} 2154}
2156EXPORT_SYMBOL(vfs_dedupe_file_range); 2155EXPORT_SYMBOL(vfs_dedupe_file_range);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 499a20a5a010..273736f41be3 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
275 } 275 }
276 } 276 }
277 brelse(bh); 277 brelse(bh);
278 return 0; 278 return err;
279} 279}
280 280
281int sysv_write_inode(struct inode *inode, struct writeback_control *wbc) 281int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 8f2f56d9a1bb..e3d684ea3203 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -827,16 +827,20 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
827 827
828 828
829 ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32); 829 ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
830 if (ret < 0) 830 if (ret < 0) {
831 goto out_bh; 831 strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
832 832 pr_warn("incorrect volume identification, setting to "
833 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret); 833 "'InvalidName'\n");
834 } else {
835 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
836 }
834 udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); 837 udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
835 838
836 ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128); 839 ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
837 if (ret < 0) 840 if (ret < 0) {
841 ret = 0;
838 goto out_bh; 842 goto out_bh;
839 843 }
840 outstr[ret] = 0; 844 outstr[ret] = 0;
841 udf_debug("volSetIdent[] = '%s'\n", outstr); 845 udf_debug("volSetIdent[] = '%s'\n", outstr);
842 846
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 45234791fec2..5fcfa96463eb 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -351,6 +351,11 @@ try_again:
351 return u_len; 351 return u_len;
352} 352}
353 353
354/*
355 * Convert CS0 dstring to output charset. Warning: This function may truncate
356 * input string if it is too long as it is used for informational strings only
357 * and it is better to truncate the string than to refuse mounting a media.
358 */
354int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len, 359int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
355 const uint8_t *ocu_i, int i_len) 360 const uint8_t *ocu_i, int i_len)
356{ 361{
@@ -359,9 +364,12 @@ int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
359 if (i_len > 0) { 364 if (i_len > 0) {
360 s_len = ocu_i[i_len - 1]; 365 s_len = ocu_i[i_len - 1];
361 if (s_len >= i_len) { 366 if (s_len >= i_len) {
362 pr_err("incorrect dstring lengths (%d/%d)\n", 367 pr_warn("incorrect dstring lengths (%d/%d),"
363 s_len, i_len); 368 " truncating\n", s_len, i_len);
364 return -EINVAL; 369 s_len = i_len - 1;
370 /* 2-byte encoding? Need to round properly... */
371 if (ocu_i[0] == 16)
372 s_len -= (s_len - 1) & 2;
365 } 373 }
366 } 374 }
367 375
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 356d2b8568c1..cd58939dc977 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1361,6 +1361,19 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1361 ret = -EINVAL; 1361 ret = -EINVAL;
1362 if (!vma_can_userfault(cur)) 1362 if (!vma_can_userfault(cur))
1363 goto out_unlock; 1363 goto out_unlock;
1364
1365 /*
1366 * UFFDIO_COPY will fill file holes even without
1367 * PROT_WRITE. This check enforces that if this is a
1368 * MAP_SHARED, the process has write permission to the backing
1369 * file. If VM_MAYWRITE is set it also enforces that on a
1370 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
1371 * F_WRITE_SEAL can be taken until the vma is destroyed.
1372 */
1373 ret = -EPERM;
1374 if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
1375 goto out_unlock;
1376
1364 /* 1377 /*
1365 * If this vma contains ending address, and huge pages 1378 * If this vma contains ending address, and huge pages
1366 * check alignment. 1379 * check alignment.
@@ -1406,6 +1419,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1406 BUG_ON(!vma_can_userfault(vma)); 1419 BUG_ON(!vma_can_userfault(vma));
1407 BUG_ON(vma->vm_userfaultfd_ctx.ctx && 1420 BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1408 vma->vm_userfaultfd_ctx.ctx != ctx); 1421 vma->vm_userfaultfd_ctx.ctx != ctx);
1422 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1409 1423
1410 /* 1424 /*
1411 * Nothing to do: this vma is already registered into this 1425 * Nothing to do: this vma is already registered into this
@@ -1552,6 +1566,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1552 cond_resched(); 1566 cond_resched();
1553 1567
1554 BUG_ON(!vma_can_userfault(vma)); 1568 BUG_ON(!vma_can_userfault(vma));
1569 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1555 1570
1556 /* 1571 /*
1557 * Nothing to do: this vma is already registered into this 1572 * Nothing to do: this vma is already registered into this
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 74d7228e755b..19e921d1586f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1694,10 +1694,13 @@ xfs_bmap_add_extent_delay_real(
1694 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1694 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1695 /* 1695 /*
1696 * Filling in all of a previously delayed allocation extent. 1696 * Filling in all of a previously delayed allocation extent.
1697 * The right neighbor is contiguous, the left is not. 1697 * The right neighbor is contiguous, the left is not. Take care
1698 * with delay -> unwritten extent allocation here because the
1699 * delalloc record we are overwriting is always written.
1698 */ 1700 */
1699 PREV.br_startblock = new->br_startblock; 1701 PREV.br_startblock = new->br_startblock;
1700 PREV.br_blockcount += RIGHT.br_blockcount; 1702 PREV.br_blockcount += RIGHT.br_blockcount;
1703 PREV.br_state = new->br_state;
1701 1704
1702 xfs_iext_next(ifp, &bma->icur); 1705 xfs_iext_next(ifp, &bma->icur);
1703 xfs_iext_remove(bma->ip, &bma->icur, state); 1706 xfs_iext_remove(bma->ip, &bma->icur, state);
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 86c50208a143..7fbf8af0b159 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -538,15 +538,18 @@ xfs_inobt_rec_check_count(
538 538
539static xfs_extlen_t 539static xfs_extlen_t
540xfs_inobt_max_size( 540xfs_inobt_max_size(
541 struct xfs_mount *mp) 541 struct xfs_mount *mp,
542 xfs_agnumber_t agno)
542{ 543{
544 xfs_agblock_t agblocks = xfs_ag_block_count(mp, agno);
545
543 /* Bail out if we're uninitialized, which can happen in mkfs. */ 546 /* Bail out if we're uninitialized, which can happen in mkfs. */
544 if (mp->m_inobt_mxr[0] == 0) 547 if (mp->m_inobt_mxr[0] == 0)
545 return 0; 548 return 0;
546 549
547 return xfs_btree_calc_size(mp->m_inobt_mnr, 550 return xfs_btree_calc_size(mp->m_inobt_mnr,
548 (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock / 551 (uint64_t)agblocks * mp->m_sb.sb_inopblock /
549 XFS_INODES_PER_CHUNK); 552 XFS_INODES_PER_CHUNK);
550} 553}
551 554
552static int 555static int
@@ -594,7 +597,7 @@ xfs_finobt_calc_reserves(
594 if (error) 597 if (error)
595 return error; 598 return error;
596 599
597 *ask += xfs_inobt_max_size(mp); 600 *ask += xfs_inobt_max_size(mp, agno);
598 *used += tree_len; 601 *used += tree_len;
599 return 0; 602 return 0;
600} 603}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5d263dfdb3bc..404e581f1ea1 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1042,7 +1042,7 @@ out_trans_cancel:
1042 goto out_unlock; 1042 goto out_unlock;
1043} 1043}
1044 1044
1045static int 1045int
1046xfs_flush_unmap_range( 1046xfs_flush_unmap_range(
1047 struct xfs_inode *ip, 1047 struct xfs_inode *ip,
1048 xfs_off_t offset, 1048 xfs_off_t offset,
@@ -1195,13 +1195,7 @@ xfs_prepare_shift(
1195 * Writeback and invalidate cache for the remainder of the file as we're 1195 * Writeback and invalidate cache for the remainder of the file as we're
1196 * about to shift down every extent from offset to EOF. 1196 * about to shift down every extent from offset to EOF.
1197 */ 1197 */
1198 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1); 1198 error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1199 if (error)
1200 return error;
1201 error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1202 offset >> PAGE_SHIFT, -1);
1203 if (error)
1204 return error;
1205 1199
1206 /* 1200 /*
1207 * Clean out anything hanging around in the cow fork now that 1201 * Clean out anything hanging around in the cow fork now that
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 87363d136bb6..7a78229cf1a7 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -80,4 +80,7 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
80 int whichfork, xfs_extnum_t *nextents, 80 int whichfork, xfs_extnum_t *nextents,
81 xfs_filblks_t *count); 81 xfs_filblks_t *count);
82 82
83int xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset,
84 xfs_off_t len);
85
83#endif /* __XFS_BMAP_UTIL_H__ */ 86#endif /* __XFS_BMAP_UTIL_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 12d8455bfbb2..010db5f8fb00 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1233,9 +1233,23 @@ xfs_buf_iodone(
1233} 1233}
1234 1234
1235/* 1235/*
1236 * Requeue a failed buffer for writeback 1236 * Requeue a failed buffer for writeback.
1237 * 1237 *
1238 * Return true if the buffer has been re-queued properly, false otherwise 1238 * We clear the log item failed state here as well, but we have to be careful
1239 * about reference counts because the only active reference counts on the buffer
1240 * may be the failed log items. Hence if we clear the log item failed state
1241 * before queuing the buffer for IO we can release all active references to
1242 * the buffer and free it, leading to use after free problems in
1243 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
1244 * order we process them in - the buffer is locked, and we own the buffer list
1245 * so nothing on them is going to change while we are performing this action.
1246 *
1247 * Hence we can safely queue the buffer for IO before we clear the failed log
1248 * item state, therefore always having an active reference to the buffer and
1249 * avoiding the transient zero-reference state that leads to use-after-free.
1250 *
1251 * Return true if the buffer was added to the buffer list, false if it was
1252 * already on the buffer list.
1239 */ 1253 */
1240bool 1254bool
1241xfs_buf_resubmit_failed_buffers( 1255xfs_buf_resubmit_failed_buffers(
@@ -1243,16 +1257,16 @@ xfs_buf_resubmit_failed_buffers(
1243 struct list_head *buffer_list) 1257 struct list_head *buffer_list)
1244{ 1258{
1245 struct xfs_log_item *lip; 1259 struct xfs_log_item *lip;
1260 bool ret;
1261
1262 ret = xfs_buf_delwri_queue(bp, buffer_list);
1246 1263
1247 /* 1264 /*
1248 * Clear XFS_LI_FAILED flag from all items before resubmit 1265 * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
1249 *
1250 * XFS_LI_FAILED set/clear is protected by ail_lock, caller this
1251 * function already have it acquired 1266 * function already have it acquired
1252 */ 1267 */
1253 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) 1268 list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
1254 xfs_clear_li_failed(lip); 1269 xfs_clear_li_failed(lip);
1255 1270
1256 /* Add this buffer back to the delayed write list */ 1271 return ret;
1257 return xfs_buf_delwri_queue(bp, buffer_list);
1258} 1272}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 53c9ab8fb777..e47425071e65 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -920,7 +920,7 @@ out_unlock:
920} 920}
921 921
922 922
923loff_t 923STATIC loff_t
924xfs_file_remap_range( 924xfs_file_remap_range(
925 struct file *file_in, 925 struct file *file_in,
926 loff_t pos_in, 926 loff_t pos_in,
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index ecdb086bc23e..322a852ce284 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -296,6 +296,7 @@ xfs_reflink_reserve_cow(
296 if (error) 296 if (error)
297 return error; 297 return error;
298 298
299 xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
299 trace_xfs_reflink_cow_alloc(ip, &got); 300 trace_xfs_reflink_cow_alloc(ip, &got);
300 return 0; 301 return 0;
301} 302}
@@ -1351,10 +1352,19 @@ xfs_reflink_remap_prep(
1351 if (ret) 1352 if (ret)
1352 goto out_unlock; 1353 goto out_unlock;
1353 1354
1354 /* Zap any page cache for the destination file's range. */ 1355 /*
1355 truncate_inode_pages_range(&inode_out->i_data, 1356 * If pos_out > EOF, we may have dirtied blocks between EOF and
1356 round_down(pos_out, PAGE_SIZE), 1357 * pos_out. In that case, we need to extend the flush and unmap to cover
1357 round_up(pos_out + *len, PAGE_SIZE) - 1); 1358 * from EOF to the end of the copy length.
1359 */
1360 if (pos_out > XFS_ISIZE(dest)) {
1361 loff_t flen = *len + (pos_out - XFS_ISIZE(dest));
1362 ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
1363 } else {
1364 ret = xfs_flush_unmap_range(dest, pos_out, *len);
1365 }
1366 if (ret)
1367 goto out_unlock;
1358 1368
1359 return 1; 1369 return 1;
1360out_unlock: 1370out_unlock:
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 3043e5ed6495..8a6532aae779 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -280,7 +280,10 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
280 ), 280 ),
281 TP_fast_assign( 281 TP_fast_assign(
282 __entry->dev = bp->b_target->bt_dev; 282 __entry->dev = bp->b_target->bt_dev;
283 __entry->bno = bp->b_bn; 283 if (bp->b_bn == XFS_BUF_DADDR_NULL)
284 __entry->bno = bp->b_maps[0].bm_bn;
285 else
286 __entry->bno = bp->b_bn;
284 __entry->nblks = bp->b_length; 287 __entry->nblks = bp->b_length;
285 __entry->hold = atomic_read(&bp->b_hold); 288 __entry->hold = atomic_read(&bp->b_hold);
286 __entry->pincount = atomic_read(&bp->b_pin_count); 289 __entry->pincount = atomic_read(&bp->b_pin_count);
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index a83e1f632eb7..f01623aef2f7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
169 169
170void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, 170void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
171 unsigned int idx); 171 unsigned int idx);
172struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
172unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); 173unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
173void can_free_echo_skb(struct net_device *dev, unsigned int idx); 174void can_free_echo_skb(struct net_device *dev, unsigned int idx);
174 175
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..8268811a697e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
41int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); 41int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
42int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); 42int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
43int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); 43int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
44int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); 44int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
45 struct sk_buff *skb, u32 timestamp);
46unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
47 unsigned int idx, u32 timestamp);
48int can_rx_offload_queue_tail(struct can_rx_offload *offload,
49 struct sk_buff *skb);
45void can_rx_offload_reset(struct can_rx_offload *offload); 50void can_rx_offload_reset(struct can_rx_offload *offload);
46void can_rx_offload_del(struct can_rx_offload *offload); 51void can_rx_offload_del(struct can_rx_offload *offload);
47void can_rx_offload_enable(struct can_rx_offload *offload); 52void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index bd73e7a91410..9e66bfe369aa 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,7 +5,7 @@
5#include <linux/dma-mapping.h> 5#include <linux/dma-mapping.h>
6#include <linux/mem_encrypt.h> 6#include <linux/mem_encrypt.h>
7 7
8#define DIRECT_MAPPING_ERROR 0 8#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0)
9 9
10#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA 10#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
11#include <asm/dma-direct.h> 11#include <asm/dma-direct.h>
diff --git a/include/linux/filter.h b/include/linux/filter.h
index de629b706d1d..448dcc448f1f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -866,6 +866,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
866 866
867void bpf_jit_free(struct bpf_prog *fp); 867void bpf_jit_free(struct bpf_prog *fp);
868 868
869int bpf_jit_get_func_addr(const struct bpf_prog *prog,
870 const struct bpf_insn *insn, bool extra_pass,
871 u64 *func_addr, bool *func_addr_fixed);
872
869struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); 873struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
870void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); 874void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
871 875
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 34cf0fdd7dc7..610815e3f1aa 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
196static inline void fscache_retrieval_complete(struct fscache_retrieval *op, 196static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
197 int n_pages) 197 int n_pages)
198{ 198{
199 atomic_sub(n_pages, &op->n_pages); 199 if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
200 if (atomic_read(&op->n_pages) <= 0)
201 fscache_op_complete(&op->op, false); 200 fscache_op_complete(&op->op, false);
202} 201}
203 202
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a397907e8d72..dd16e8218db3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -777,8 +777,8 @@ struct ftrace_ret_stack {
777extern void return_to_handler(void); 777extern void return_to_handler(void);
778 778
779extern int 779extern int
780ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, 780function_graph_enter(unsigned long ret, unsigned long func,
781 unsigned long frame_pointer, unsigned long *retp); 781 unsigned long frame_pointer, unsigned long *retp);
782 782
783unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, 783unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
784 unsigned long ret, unsigned long *retp); 784 unsigned long ret, unsigned long *retp);
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 331dc377c275..dc12f5c4b076 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
177* @attr_usage_id: Attribute usage id as per spec 177* @attr_usage_id: Attribute usage id as per spec
178* @report_id: Report id to look for 178* @report_id: Report id to look for
179* @flag: Synchronous or asynchronous read 179* @flag: Synchronous or asynchronous read
180* @is_signed: If true then fields < 32 bits will be sign-extended
180* 181*
181* Issues a synchronous or asynchronous read request for an input attribute. 182* Issues a synchronous or asynchronous read request for an input attribute.
182* Returns data upto 32 bits. 183* Returns data upto 32 bits.
@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
190int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, 191int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
191 u32 usage_id, 192 u32 usage_id,
192 u32 attr_usage_id, u32 report_id, 193 u32 attr_usage_id, u32 report_id,
193 enum sensor_hub_read_flags flag 194 enum sensor_hub_read_flags flag,
195 bool is_signed
194); 196);
195 197
196/** 198/**
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 387c70df6f29..a355d61940f2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1139,34 +1139,6 @@ static inline u32 hid_report_len(struct hid_report *report)
1139int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, 1139int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1140 int interrupt); 1140 int interrupt);
1141 1141
1142
1143/**
1144 * struct hid_scroll_counter - Utility class for processing high-resolution
1145 * scroll events.
1146 * @dev: the input device for which events should be reported.
1147 * @microns_per_hi_res_unit: the amount moved by the user's finger for each
1148 * high-resolution unit reported by the mouse, in
1149 * microns.
1150 * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
1151 * multiple of its lower resolution. For example, if
1152 * moving the wheel by one "notch" would result in a
1153 * value of 1 in low-resolution mode but 8 in
1154 * high-resolution, the multiplier is 8.
1155 * @remainder: counts the number of high-resolution units moved since the last
1156 * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
1157 * only be used by class methods.
1158 */
1159struct hid_scroll_counter {
1160 struct input_dev *dev;
1161 int microns_per_hi_res_unit;
1162 int resolution_multiplier;
1163
1164 int remainder;
1165};
1166
1167void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
1168 int hi_res_value);
1169
1170/* HID quirks API */ 1142/* HID quirks API */
1171unsigned long hid_lookup_quirk(const struct hid_device *hdev); 1143unsigned long hid_lookup_quirk(const struct hid_device *hdev);
1172int hid_quirks_init(char **quirks_param, __u16 bus, int count); 1144int hid_quirks_init(char **quirks_param, __u16 bus, int count);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dbff9ff28f2c..34e17e6f8942 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -2473,14 +2473,15 @@ struct mlx5_ifc_xrc_srqc_bits {
2473 2473
2474 u8 wq_signature[0x1]; 2474 u8 wq_signature[0x1];
2475 u8 cont_srq[0x1]; 2475 u8 cont_srq[0x1];
2476 u8 dbr_umem_valid[0x1]; 2476 u8 reserved_at_22[0x1];
2477 u8 rlky[0x1]; 2477 u8 rlky[0x1];
2478 u8 basic_cyclic_rcv_wqe[0x1]; 2478 u8 basic_cyclic_rcv_wqe[0x1];
2479 u8 log_rq_stride[0x3]; 2479 u8 log_rq_stride[0x3];
2480 u8 xrcd[0x18]; 2480 u8 xrcd[0x18];
2481 2481
2482 u8 page_offset[0x6]; 2482 u8 page_offset[0x6];
2483 u8 reserved_at_46[0x2]; 2483 u8 reserved_at_46[0x1];
2484 u8 dbr_umem_valid[0x1];
2484 u8 cqn[0x18]; 2485 u8 cqn[0x18];
2485 2486
2486 u8 reserved_at_60[0x20]; 2487 u8 reserved_at_60[0x20];
@@ -6689,9 +6690,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
6689 6690
6690 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 6691 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
6691 6692
6692 u8 reserved_at_280[0x40]; 6693 u8 reserved_at_280[0x60];
6694
6693 u8 xrc_srq_umem_valid[0x1]; 6695 u8 xrc_srq_umem_valid[0x1];
6694 u8 reserved_at_2c1[0x5bf]; 6696 u8 reserved_at_2e1[0x1f];
6697
6698 u8 reserved_at_300[0x580];
6695 6699
6696 u8 pas[0][0x40]; 6700 u8 pas[0][0x40];
6697}; 6701};
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index c79e859408e6..fd458389f7d1 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
406 } 406 }
407 /* fall through */ 407 /* fall through */
408 case NET_DIM_START_MEASURE: 408 case NET_DIM_START_MEASURE:
409 net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
410 &dim->start_sample);
409 dim->state = NET_DIM_MEASURE_IN_PROGRESS; 411 dim->state = NET_DIM_MEASURE_IN_PROGRESS;
410 break; 412 break;
411 case NET_DIM_APPLY_NEW_PROFILE: 413 case NET_DIM_APPLY_NEW_PROFILE:
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index b8d95564bd53..14edb795ab43 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
21 struct nf_conntrack_tuple tuple; 21 struct nf_conntrack_tuple tuple;
22}; 22};
23 23
24enum grep_conntrack {
25 GRE_CT_UNREPLIED,
26 GRE_CT_REPLIED,
27 GRE_CT_MAX
28};
29
30struct netns_proto_gre {
31 struct nf_proto_net nf;
32 rwlock_t keymap_lock;
33 struct list_head keymap_list;
34 unsigned int gre_timeouts[GRE_CT_MAX];
35};
36
24/* add new tuple->key_reply pair to keymap */ 37/* add new tuple->key_reply pair to keymap */
25int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, 38int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
26 struct nf_conntrack_tuple *t); 39 struct nf_conntrack_tuple *t);
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index f92a47e18034..a93841bfb9f7 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -17,6 +17,8 @@
17#define __DAVINCI_GPIO_PLATFORM_H 17#define __DAVINCI_GPIO_PLATFORM_H
18 18
19struct davinci_gpio_platform_data { 19struct davinci_gpio_platform_data {
20 bool no_auto_base;
21 u32 base;
20 u32 ngpio; 22 u32 ngpio;
21 u32 gpio_unbanked; 23 u32 gpio_unbanked;
22}; 24};
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 8e0725aac0aa..7006008d5b72 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,6 +1,7 @@
1#ifndef _LINUX_PSI_H 1#ifndef _LINUX_PSI_H
2#define _LINUX_PSI_H 2#define _LINUX_PSI_H
3 3
4#include <linux/jump_label.h>
4#include <linux/psi_types.h> 5#include <linux/psi_types.h>
5#include <linux/sched.h> 6#include <linux/sched.h>
6 7
@@ -9,7 +10,7 @@ struct css_set;
9 10
10#ifdef CONFIG_PSI 11#ifdef CONFIG_PSI
11 12
12extern bool psi_disabled; 13extern struct static_key_false psi_disabled;
13 14
14void psi_init(void); 15void psi_init(void);
15 16
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index a15bc4d48752..30fcec375a3a 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -90,7 +90,10 @@ struct pstore_record {
90 * 90 *
91 * @buf_lock: spinlock to serialize access to @buf 91 * @buf_lock: spinlock to serialize access to @buf
92 * @buf: preallocated crash dump buffer 92 * @buf: preallocated crash dump buffer
93 * @bufsize: size of @buf available for crash dump writes 93 * @bufsize: size of @buf available for crash dump bytes (must match
94 * smallest number of bytes available for writing to a
95 * backend entry, since compressed bytes don't take kindly
96 * to being truncated)
94 * 97 *
95 * @read_mutex: serializes @open, @read, @close, and @erase callbacks 98 * @read_mutex: serializes @open, @read, @close, and @erase callbacks
96 * @flags: bitfield of frontends the backend can accept writes for 99 * @flags: bitfield of frontends the backend can accept writes for
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 6c2ffed907f5..de20ede2c5c8 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
64#define PTRACE_MODE_NOAUDIT 0x04 64#define PTRACE_MODE_NOAUDIT 0x04
65#define PTRACE_MODE_FSCREDS 0x08 65#define PTRACE_MODE_FSCREDS 0x08
66#define PTRACE_MODE_REALCREDS 0x10 66#define PTRACE_MODE_REALCREDS 0x10
67#define PTRACE_MODE_SCHED 0x20
68#define PTRACE_MODE_IBPB 0x40
69 67
70/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ 68/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
71#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) 69#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
72#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) 70#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
73#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) 71#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
74#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) 72#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
75#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
76 73
77/** 74/**
78 * ptrace_may_access - check whether the caller is permitted to access 75 * ptrace_may_access - check whether the caller is permitted to access
@@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
90 */ 87 */
91extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); 88extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
92 89
93/**
94 * ptrace_may_access - check whether the caller is permitted to access
95 * a target task.
96 * @task: target task
97 * @mode: selects type of access and caller credentials
98 *
99 * Returns true on success, false on denial.
100 *
101 * Similar to ptrace_may_access(). Only to be called from context switch
102 * code. Does not call into audit and the regular LSM hooks due to locking
103 * constraints.
104 */
105extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
106
107static inline int ptrace_reparented(struct task_struct *child) 90static inline int ptrace_reparented(struct task_struct *child)
108{ 91{
109 return !same_thread_group(child->real_parent, child->parent); 92 return !same_thread_group(child->real_parent, child->parent);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a51c13c2b1a0..291a9bd5b97f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1116,6 +1116,7 @@ struct task_struct {
1116#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1116#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1117 /* Index of current stored address in ret_stack: */ 1117 /* Index of current stored address in ret_stack: */
1118 int curr_ret_stack; 1118 int curr_ret_stack;
1119 int curr_ret_depth;
1119 1120
1120 /* Stack of return addresses for return function tracing: */ 1121 /* Stack of return addresses for return function tracing: */
1121 struct ftrace_ret_stack *ret_stack; 1122 struct ftrace_ret_stack *ret_stack;
@@ -1453,6 +1454,8 @@ static inline bool is_percpu_thread(void)
1453#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1454#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1454#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 1455#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
1455#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ 1456#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
1457#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
1458#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
1456 1459
1457#define TASK_PFA_TEST(name, func) \ 1460#define TASK_PFA_TEST(name, func) \
1458 static inline bool task_##func(struct task_struct *p) \ 1461 static inline bool task_##func(struct task_struct *p) \
@@ -1484,6 +1487,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1484TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1487TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1485TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1488TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1486 1489
1490TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1491TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1492TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1493
1494TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1495TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1496
1487static inline void 1497static inline void
1488current_restore_flags(unsigned long orig_flags, unsigned long flags) 1498current_restore_flags(unsigned long orig_flags, unsigned long flags)
1489{ 1499{
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..59d3736c454c
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_SCHED_SMT_H
3#define _LINUX_SCHED_SMT_H
4
5#include <linux/static_key.h>
6
7#ifdef CONFIG_SCHED_SMT
8extern struct static_key_false sched_smt_present;
9
10static __always_inline bool sched_smt_active(void)
11{
12 return static_branch_likely(&sched_smt_present);
13}
14#else
15static inline bool sched_smt_active(void) { return false; }
16#endif
17
18void arch_smt_update(void);
19
20#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0ba687454267..0d1b2c3f127b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
1326 } 1326 }
1327} 1327}
1328 1328
1329static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1330{
1331 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1332 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1333}
1334
1335static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1336{
1337 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1338}
1339
1340static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1341{
1342 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1343}
1344
1329/* Release a reference on a zerocopy structure */ 1345/* Release a reference on a zerocopy structure */
1330static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) 1346static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1331{ 1347{
@@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1335 if (uarg->callback == sock_zerocopy_callback) { 1351 if (uarg->callback == sock_zerocopy_callback) {
1336 uarg->zerocopy = uarg->zerocopy && zerocopy; 1352 uarg->zerocopy = uarg->zerocopy && zerocopy;
1337 sock_zerocopy_put(uarg); 1353 sock_zerocopy_put(uarg);
1338 } else { 1354 } else if (!skb_zcopy_is_nouarg(skb)) {
1339 uarg->callback(uarg, zerocopy); 1355 uarg->callback(uarg, zerocopy);
1340 } 1356 }
1341 1357
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 8ed77bb4ed86..a9b0280687d5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -196,6 +196,7 @@ struct tcp_sock {
196 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ 196 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
197 u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ 197 u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
198 u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ 198 u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
199 u32 compressed_ack_rcv_nxt;
199 200
200 u32 tsoffset; /* timestamp offset */ 201 u32 tsoffset; /* timestamp offset */
201 202
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 40b0b4c1bf7b..df20f8bdbfa3 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
83 * tracehook_report_syscall_entry - task is about to attempt a system call 83 * tracehook_report_syscall_entry - task is about to attempt a system call
84 * @regs: user register state of current task 84 * @regs: user register state of current task
85 * 85 *
86 * This will be called if %TIF_SYSCALL_TRACE has been set, when the 86 * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set,
87 * current task has just entered the kernel for a system call. 87 * when the current task has just entered the kernel for a system call.
88 * Full user register state is available here. Changing the values 88 * Full user register state is available here. Changing the values
89 * in @regs can affect the system call number and arguments to be tried. 89 * in @regs can affect the system call number and arguments to be tried.
90 * It is safe to block here, preventing the system call from beginning. 90 * It is safe to block here, preventing the system call from beginning.
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 538ba1a58f5b..e9de8ad0bad7 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
166 struct tracepoint_func *it_func_ptr; \ 166 struct tracepoint_func *it_func_ptr; \
167 void *it_func; \ 167 void *it_func; \
168 void *__data; \ 168 void *__data; \
169 int __maybe_unused idx = 0; \ 169 int __maybe_unused __idx = 0; \
170 \ 170 \
171 if (!(cond)) \ 171 if (!(cond)) \
172 return; \ 172 return; \
@@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
182 * doesn't work from the idle path. \ 182 * doesn't work from the idle path. \
183 */ \ 183 */ \
184 if (rcuidle) { \ 184 if (rcuidle) { \
185 idx = srcu_read_lock_notrace(&tracepoint_srcu); \ 185 __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
186 rcu_irq_enter_irqson(); \ 186 rcu_irq_enter_irqson(); \
187 } \ 187 } \
188 \ 188 \
@@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
198 \ 198 \
199 if (rcuidle) { \ 199 if (rcuidle) { \
200 rcu_irq_exit_irqson(); \ 200 rcu_irq_exit_irqson(); \
201 srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ 201 srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
202 } \ 202 } \
203 \ 203 \
204 preempt_enable_notrace(); \ 204 preempt_enable_notrace(); \
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index b7a99ce56bc9..a1be64c9940f 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -66,4 +66,7 @@
66/* Device needs a pause after every control message. */ 66/* Device needs a pause after every control message. */
67#define USB_QUIRK_DELAY_CTRL_MSG BIT(13) 67#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
68 68
69/* Hub needs extra delay after resetting its port. */
70#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
71
69#endif /* __LINUX_USB_QUIRKS_H */ 72#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d9514928ddac..564892e19f8c 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -289,9 +289,7 @@ struct xarray {
289void xa_init_flags(struct xarray *, gfp_t flags); 289void xa_init_flags(struct xarray *, gfp_t flags);
290void *xa_load(struct xarray *, unsigned long index); 290void *xa_load(struct xarray *, unsigned long index);
291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); 291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
292void *xa_cmpxchg(struct xarray *, unsigned long index, 292void *xa_erase(struct xarray *, unsigned long index);
293 void *old, void *entry, gfp_t);
294int xa_reserve(struct xarray *, unsigned long index, gfp_t);
295void *xa_store_range(struct xarray *, unsigned long first, unsigned long last, 293void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
296 void *entry, gfp_t); 294 void *entry, gfp_t);
297bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t); 295bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -344,65 +342,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
344} 342}
345 343
346/** 344/**
347 * xa_erase() - Erase this entry from the XArray.
348 * @xa: XArray.
349 * @index: Index of entry.
350 *
351 * This function is the equivalent of calling xa_store() with %NULL as
352 * the third argument. The XArray does not need to allocate memory, so
353 * the user does not need to provide GFP flags.
354 *
355 * Context: Process context. Takes and releases the xa_lock.
356 * Return: The entry which used to be at this index.
357 */
358static inline void *xa_erase(struct xarray *xa, unsigned long index)
359{
360 return xa_store(xa, index, NULL, 0);
361}
362
363/**
364 * xa_insert() - Store this entry in the XArray unless another entry is
365 * already present.
366 * @xa: XArray.
367 * @index: Index into array.
368 * @entry: New entry.
369 * @gfp: Memory allocation flags.
370 *
371 * If you would rather see the existing entry in the array, use xa_cmpxchg().
372 * This function is for users who don't care what the entry is, only that
373 * one is present.
374 *
375 * Context: Process context. Takes and releases the xa_lock.
376 * May sleep if the @gfp flags permit.
377 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
378 * -ENOMEM if memory could not be allocated.
379 */
380static inline int xa_insert(struct xarray *xa, unsigned long index,
381 void *entry, gfp_t gfp)
382{
383 void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
384 if (!curr)
385 return 0;
386 if (xa_is_err(curr))
387 return xa_err(curr);
388 return -EEXIST;
389}
390
391/**
392 * xa_release() - Release a reserved entry.
393 * @xa: XArray.
394 * @index: Index of entry.
395 *
396 * After calling xa_reserve(), you can call this function to release the
397 * reservation. If the entry at @index has been stored to, this function
398 * will do nothing.
399 */
400static inline void xa_release(struct xarray *xa, unsigned long index)
401{
402 xa_cmpxchg(xa, index, NULL, NULL, 0);
403}
404
405/**
406 * xa_for_each() - Iterate over a portion of an XArray. 345 * xa_for_each() - Iterate over a portion of an XArray.
407 * @xa: XArray. 346 * @xa: XArray.
408 * @entry: Entry retrieved from array. 347 * @entry: Entry retrieved from array.
@@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
455void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, 394void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
456 void *entry, gfp_t); 395 void *entry, gfp_t);
457int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); 396int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
397int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
458void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); 398void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
459void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); 399void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
460 400
@@ -487,6 +427,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
487} 427}
488 428
489/** 429/**
430 * xa_store_bh() - Store this entry in the XArray.
431 * @xa: XArray.
432 * @index: Index into array.
433 * @entry: New entry.
434 * @gfp: Memory allocation flags.
435 *
436 * This function is like calling xa_store() except it disables softirqs
437 * while holding the array lock.
438 *
439 * Context: Any context. Takes and releases the xa_lock while
440 * disabling softirqs.
441 * Return: The entry which used to be at this index.
442 */
443static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
444 void *entry, gfp_t gfp)
445{
446 void *curr;
447
448 xa_lock_bh(xa);
449 curr = __xa_store(xa, index, entry, gfp);
450 xa_unlock_bh(xa);
451
452 return curr;
453}
454
455/**
456 * xa_store_irq() - Erase this entry from the XArray.
457 * @xa: XArray.
458 * @index: Index into array.
459 * @entry: New entry.
460 * @gfp: Memory allocation flags.
461 *
462 * This function is like calling xa_store() except it disables interrupts
463 * while holding the array lock.
464 *
465 * Context: Process context. Takes and releases the xa_lock while
466 * disabling interrupts.
467 * Return: The entry which used to be at this index.
468 */
469static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
470 void *entry, gfp_t gfp)
471{
472 void *curr;
473
474 xa_lock_irq(xa);
475 curr = __xa_store(xa, index, entry, gfp);
476 xa_unlock_irq(xa);
477
478 return curr;
479}
480
481/**
490 * xa_erase_bh() - Erase this entry from the XArray. 482 * xa_erase_bh() - Erase this entry from the XArray.
491 * @xa: XArray. 483 * @xa: XArray.
492 * @index: Index of entry. 484 * @index: Index of entry.
@@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
495 * the third argument. The XArray does not need to allocate memory, so 487 * the third argument. The XArray does not need to allocate memory, so
496 * the user does not need to provide GFP flags. 488 * the user does not need to provide GFP flags.
497 * 489 *
498 * Context: Process context. Takes and releases the xa_lock while 490 * Context: Any context. Takes and releases the xa_lock while
499 * disabling softirqs. 491 * disabling softirqs.
500 * Return: The entry which used to be at this index. 492 * Return: The entry which used to be at this index.
501 */ 493 */
@@ -535,6 +527,61 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
535} 527}
536 528
537/** 529/**
530 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
531 * @xa: XArray.
532 * @index: Index into array.
533 * @old: Old value to test against.
534 * @entry: New value to place in array.
535 * @gfp: Memory allocation flags.
536 *
537 * If the entry at @index is the same as @old, replace it with @entry.
538 * If the return value is equal to @old, then the exchange was successful.
539 *
540 * Context: Any context. Takes and releases the xa_lock. May sleep
541 * if the @gfp flags permit.
542 * Return: The old value at this index or xa_err() if an error happened.
543 */
544static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
545 void *old, void *entry, gfp_t gfp)
546{
547 void *curr;
548
549 xa_lock(xa);
550 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
551 xa_unlock(xa);
552
553 return curr;
554}
555
556/**
557 * xa_insert() - Store this entry in the XArray unless another entry is
558 * already present.
559 * @xa: XArray.
560 * @index: Index into array.
561 * @entry: New entry.
562 * @gfp: Memory allocation flags.
563 *
564 * If you would rather see the existing entry in the array, use xa_cmpxchg().
565 * This function is for users who don't care what the entry is, only that
566 * one is present.
567 *
568 * Context: Process context. Takes and releases the xa_lock.
569 * May sleep if the @gfp flags permit.
570 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
571 * -ENOMEM if memory could not be allocated.
572 */
573static inline int xa_insert(struct xarray *xa, unsigned long index,
574 void *entry, gfp_t gfp)
575{
576 void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
577 if (!curr)
578 return 0;
579 if (xa_is_err(curr))
580 return xa_err(curr);
581 return -EEXIST;
582}
583
584/**
538 * xa_alloc() - Find somewhere to store this entry in the XArray. 585 * xa_alloc() - Find somewhere to store this entry in the XArray.
539 * @xa: XArray. 586 * @xa: XArray.
540 * @id: Pointer to ID. 587 * @id: Pointer to ID.
@@ -575,7 +622,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
575 * Updates the @id pointer with the index, then stores the entry at that 622 * Updates the @id pointer with the index, then stores the entry at that
576 * index. A concurrent lookup will not see an uninitialised @id. 623 * index. A concurrent lookup will not see an uninitialised @id.
577 * 624 *
578 * Context: Process context. Takes and releases the xa_lock while 625 * Context: Any context. Takes and releases the xa_lock while
579 * disabling softirqs. May sleep if the @gfp flags permit. 626 * disabling softirqs. May sleep if the @gfp flags permit.
580 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if 627 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
581 * there is no more space in the XArray. 628 * there is no more space in the XArray.
@@ -621,6 +668,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
621 return err; 668 return err;
622} 669}
623 670
671/**
672 * xa_reserve() - Reserve this index in the XArray.
673 * @xa: XArray.
674 * @index: Index into array.
675 * @gfp: Memory allocation flags.
676 *
677 * Ensures there is somewhere to store an entry at @index in the array.
678 * If there is already something stored at @index, this function does
679 * nothing. If there was nothing there, the entry is marked as reserved.
680 * Loading from a reserved entry returns a %NULL pointer.
681 *
682 * If you do not use the entry that you have reserved, call xa_release()
683 * or xa_erase() to free any unnecessary memory.
684 *
685 * Context: Any context. Takes and releases the xa_lock.
686 * May sleep if the @gfp flags permit.
687 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
688 */
689static inline
690int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
691{
692 int ret;
693
694 xa_lock(xa);
695 ret = __xa_reserve(xa, index, gfp);
696 xa_unlock(xa);
697
698 return ret;
699}
700
701/**
702 * xa_reserve_bh() - Reserve this index in the XArray.
703 * @xa: XArray.
704 * @index: Index into array.
705 * @gfp: Memory allocation flags.
706 *
707 * A softirq-disabling version of xa_reserve().
708 *
709 * Context: Any context. Takes and releases the xa_lock while
710 * disabling softirqs.
711 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
712 */
713static inline
714int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
715{
716 int ret;
717
718 xa_lock_bh(xa);
719 ret = __xa_reserve(xa, index, gfp);
720 xa_unlock_bh(xa);
721
722 return ret;
723}
724
725/**
726 * xa_reserve_irq() - Reserve this index in the XArray.
727 * @xa: XArray.
728 * @index: Index into array.
729 * @gfp: Memory allocation flags.
730 *
731 * An interrupt-disabling version of xa_reserve().
732 *
733 * Context: Process context. Takes and releases the xa_lock while
734 * disabling interrupts.
735 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
736 */
737static inline
738int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
739{
740 int ret;
741
742 xa_lock_irq(xa);
743 ret = __xa_reserve(xa, index, gfp);
744 xa_unlock_irq(xa);
745
746 return ret;
747}
748
749/**
750 * xa_release() - Release a reserved entry.
751 * @xa: XArray.
752 * @index: Index of entry.
753 *
754 * After calling xa_reserve(), you can call this function to release the
755 * reservation. If the entry at @index has been stored to, this function
756 * will do nothing.
757 */
758static inline void xa_release(struct xarray *xa, unsigned long index)
759{
760 xa_cmpxchg(xa, index, NULL, NULL, 0);
761}
762
624/* Everything below here is the Advanced API. Proceed with caution. */ 763/* Everything below here is the Advanced API. Proceed with caution. */
625 764
626/* 765/*
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 58c1ecf3d648..5467264771ec 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
624 624
625/* v4l2 request helper */ 625/* v4l2 request helper */
626 626
627void vb2_m2m_request_queue(struct media_request *req); 627void v4l2_m2m_request_queue(struct media_request *req);
628 628
629/* v4l2 ioctl helpers */ 629/* v4l2 ioctl helpers */
630 630
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index de587948042a..1adefe42c0a6 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
77 struct sockaddr_rxrpc *, struct key *); 77 struct sockaddr_rxrpc *, struct key *);
78int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, 78int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
79 enum rxrpc_call_completion *, u32 *); 79 enum rxrpc_call_completion *, u32 *);
80u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *); 80u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
81void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
81u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); 82u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
82bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, 83bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
83 ktime_t *); 84 ktime_t *);
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index cd24be4c4a99..13d55206bb9f 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
9 const struct nf_nat_range2 *range, 9 const struct nf_nat_range2 *range,
10 const struct net_device *out); 10 const struct net_device *out);
11 11
12void nf_nat_masquerade_ipv4_register_notifier(void); 12int nf_nat_masquerade_ipv4_register_notifier(void);
13void nf_nat_masquerade_ipv4_unregister_notifier(void); 13void nf_nat_masquerade_ipv4_unregister_notifier(void);
14 14
15#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */ 15#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 0c3b5ebf0bb8..2917bf95c437 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -5,7 +5,7 @@
5unsigned int 5unsigned int
6nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, 6nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
7 const struct net_device *out); 7 const struct net_device *out);
8void nf_nat_masquerade_ipv6_register_notifier(void); 8int nf_nat_masquerade_ipv6_register_notifier(void);
9void nf_nat_masquerade_ipv6_unregister_notifier(void); 9void nf_nat_masquerade_ipv6_unregister_notifier(void);
10 10
11#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */ 11#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8c2caa370e0f..ab9242e51d9e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
608 SCTP_DEFAULT_MINSEGMENT)); 608 SCTP_DEFAULT_MINSEGMENT));
609} 609}
610 610
611static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
612{
613 __u32 pmtu = sctp_dst_mtu(t->dst);
614
615 if (t->pathmtu == pmtu)
616 return true;
617
618 t->pathmtu = pmtu;
619
620 return false;
621}
622
611#endif /* __net_sctp_h__ */ 623#endif /* __net_sctp_h__ */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f1dab1f4b194..70c10a8f3e90 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1192,7 +1192,7 @@ struct snd_soc_pcm_runtime {
1192 ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \ 1192 ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
1193 (i)++) 1193 (i)++)
1194#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \ 1194#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
1195 for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);) 1195 for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
1196 1196
1197 1197
1198/* mixer control */ 1198/* mixer control */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 573d5b901fb1..5b50fe4906d2 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -181,6 +181,7 @@ enum rxrpc_timer_trace {
181enum rxrpc_propose_ack_trace { 181enum rxrpc_propose_ack_trace {
182 rxrpc_propose_ack_client_tx_end, 182 rxrpc_propose_ack_client_tx_end,
183 rxrpc_propose_ack_input_data, 183 rxrpc_propose_ack_input_data,
184 rxrpc_propose_ack_ping_for_check_life,
184 rxrpc_propose_ack_ping_for_keepalive, 185 rxrpc_propose_ack_ping_for_keepalive,
185 rxrpc_propose_ack_ping_for_lost_ack, 186 rxrpc_propose_ack_ping_for_lost_ack,
186 rxrpc_propose_ack_ping_for_lost_reply, 187 rxrpc_propose_ack_ping_for_lost_reply,
@@ -380,6 +381,7 @@ enum rxrpc_tx_point {
380#define rxrpc_propose_ack_traces \ 381#define rxrpc_propose_ack_traces \
381 EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ 382 EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
382 EM(rxrpc_propose_ack_input_data, "DataIn ") \ 383 EM(rxrpc_propose_ack_input_data, "DataIn ") \
384 EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
383 EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \ 385 EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
384 EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ 386 EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
385 EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ 387 EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f07b270d4fc4..9a4bdfadab07 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
107#ifdef CREATE_TRACE_POINTS 107#ifdef CREATE_TRACE_POINTS
108static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) 108static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
109{ 109{
110 unsigned int state;
111
110#ifdef CONFIG_SCHED_DEBUG 112#ifdef CONFIG_SCHED_DEBUG
111 BUG_ON(p != current); 113 BUG_ON(p != current);
112#endif /* CONFIG_SCHED_DEBUG */ 114#endif /* CONFIG_SCHED_DEBUG */
@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
118 if (preempt) 120 if (preempt)
119 return TASK_REPORT_MAX; 121 return TASK_REPORT_MAX;
120 122
121 return 1 << task_state_index(p); 123 /*
124 * task_state_index() uses fls() and returns a value from 0-8 range.
125 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
126 * it for left shift operation to get the correct task->state
127 * mapping.
128 */
129 state = task_state_index(p);
130
131 return state ? (1 << (state - 1)) : state;
122} 132}
123#endif /* CREATE_TRACE_POINTS */ 133#endif /* CREATE_TRACE_POINTS */
124 134
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 6d180cc60a5d..3eb5a4c3d60a 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -716,7 +716,6 @@
716 * the situation described above. 716 * the situation described above.
717 */ 717 */
718#define REL_RESERVED 0x0a 718#define REL_RESERVED 0x0a
719#define REL_WHEEL_HI_RES 0x0b
720#define REL_MAX 0x0f 719#define REL_MAX 0x0f
721#define REL_CNT (REL_MAX+1) 720#define REL_CNT (REL_MAX+1)
722 721
@@ -753,15 +752,6 @@
753 752
754#define ABS_MISC 0x28 753#define ABS_MISC 0x28
755 754
756/*
757 * 0x2e is reserved and should not be used in input drivers.
758 * It was used by HID as ABS_MISC+6 and userspace needs to detect if
759 * the next ABS_* event is correct or is just ABS_MISC + n.
760 * We define here ABS_RESERVED so userspace can rely on it and detect
761 * the situation described above.
762 */
763#define ABS_RESERVED 0x2e
764
765#define ABS_MT_SLOT 0x2f /* MT slot being modified */ 755#define ABS_MT_SLOT 0x2f /* MT slot being modified */
766#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ 756#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
767#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ 757#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b17201edfa09 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
212#define PR_SET_SPECULATION_CTRL 53 212#define PR_SET_SPECULATION_CTRL 53
213/* Speculation control variants */ 213/* Speculation control variants */
214# define PR_SPEC_STORE_BYPASS 0 214# define PR_SPEC_STORE_BYPASS 0
215# define PR_SPEC_INDIRECT_BRANCH 1
215/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ 216/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
216# define PR_SPEC_NOT_AFFECTED 0 217# define PR_SPEC_NOT_AFFECTED 0
217# define PR_SPEC_PRCTL (1UL << 0) 218# define PR_SPEC_PRCTL (1UL << 0)
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 51b095898f4b..998983a6e6b7 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -50,6 +50,8 @@
50#ifndef __LINUX_V4L2_CONTROLS_H 50#ifndef __LINUX_V4L2_CONTROLS_H
51#define __LINUX_V4L2_CONTROLS_H 51#define __LINUX_V4L2_CONTROLS_H
52 52
53#include <linux/types.h>
54
53/* Control classes */ 55/* Control classes */
54#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */ 56#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
55#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */ 57#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
@@ -1110,6 +1112,7 @@ struct v4l2_mpeg2_sequence {
1110 __u8 profile_and_level_indication; 1112 __u8 profile_and_level_indication;
1111 __u8 progressive_sequence; 1113 __u8 progressive_sequence;
1112 __u8 chroma_format; 1114 __u8 chroma_format;
1115 __u8 pad;
1113}; 1116};
1114 1117
1115struct v4l2_mpeg2_picture { 1118struct v4l2_mpeg2_picture {
@@ -1128,6 +1131,7 @@ struct v4l2_mpeg2_picture {
1128 __u8 alternate_scan; 1131 __u8 alternate_scan;
1129 __u8 repeat_first_field; 1132 __u8 repeat_first_field;
1130 __u8 progressive_frame; 1133 __u8 progressive_frame;
1134 __u8 pad;
1131}; 1135};
1132 1136
1133struct v4l2_ctrl_mpeg2_slice_params { 1137struct v4l2_ctrl_mpeg2_slice_params {
@@ -1142,6 +1146,7 @@ struct v4l2_ctrl_mpeg2_slice_params {
1142 1146
1143 __u8 backward_ref_index; 1147 __u8 backward_ref_index;
1144 __u8 forward_ref_index; 1148 __u8 forward_ref_index;
1149 __u8 pad;
1145}; 1150};
1146 1151
1147struct v4l2_ctrl_mpeg2_quantization { 1152struct v4l2_ctrl_mpeg2_quantization {
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 61f410fd74e4..4914b93a23f2 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
44{ 44{
45} 45}
46#endif 46#endif
47
48#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
49struct resource;
50void arch_xen_balloon_init(struct resource *hostmem_resource);
51#endif
diff --git a/init/Kconfig b/init/Kconfig
index a4112e95724a..cf5b5a0dcbc2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -509,6 +509,15 @@ config PSI
509 509
510 Say N if unsure. 510 Say N if unsure.
511 511
512config PSI_DEFAULT_DISABLED
513 bool "Require boot parameter to enable pressure stall information tracking"
514 default n
515 depends on PSI
516 help
517 If set, pressure stall information tracking will be disabled
518 per default but can be enabled through passing psi_enable=1
519 on the kernel commandline during boot.
520
512endmenu # "CPU/Task time and stats accounting" 521endmenu # "CPU/Task time and stats accounting"
513 522
514config CPU_ISOLATION 523config CPU_ISOLATION
diff --git a/init/initramfs.c b/init/initramfs.c
index 640557788026..f6f4a1e4cd54 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -291,16 +291,6 @@ static int __init do_reset(void)
291 return 1; 291 return 1;
292} 292}
293 293
294static int __init maybe_link(void)
295{
296 if (nlink >= 2) {
297 char *old = find_link(major, minor, ino, mode, collected);
298 if (old)
299 return (ksys_link(old, collected) < 0) ? -1 : 1;
300 }
301 return 0;
302}
303
304static void __init clean_path(char *path, umode_t fmode) 294static void __init clean_path(char *path, umode_t fmode)
305{ 295{
306 struct kstat st; 296 struct kstat st;
@@ -313,6 +303,18 @@ static void __init clean_path(char *path, umode_t fmode)
313 } 303 }
314} 304}
315 305
306static int __init maybe_link(void)
307{
308 if (nlink >= 2) {
309 char *old = find_link(major, minor, ino, mode, collected);
310 if (old) {
311 clean_path(collected, 0);
312 return (ksys_link(old, collected) < 0) ? -1 : 1;
313 }
314 }
315 return 0;
316}
317
316static __initdata int wfd; 318static __initdata int wfd;
317 319
318static int __init do_name(void) 320static int __init do_name(void)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1a796e0799ec..b1a3545d0ec8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -672,6 +672,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
672 bpf_prog_unlock_free(fp); 672 bpf_prog_unlock_free(fp);
673} 673}
674 674
675int bpf_jit_get_func_addr(const struct bpf_prog *prog,
676 const struct bpf_insn *insn, bool extra_pass,
677 u64 *func_addr, bool *func_addr_fixed)
678{
679 s16 off = insn->off;
680 s32 imm = insn->imm;
681 u8 *addr;
682
683 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
684 if (!*func_addr_fixed) {
685 /* Place-holder address till the last pass has collected
686 * all addresses for JITed subprograms in which case we
687 * can pick them up from prog->aux.
688 */
689 if (!extra_pass)
690 addr = NULL;
691 else if (prog->aux->func &&
692 off >= 0 && off < prog->aux->func_cnt)
693 addr = (u8 *)prog->aux->func[off]->bpf_func;
694 else
695 return -EINVAL;
696 } else {
697 /* Address of a BPF helper call. Since part of the core
698 * kernel, it's always at a fixed location. __bpf_call_base
699 * and the helper with imm relative to it are both in core
700 * kernel.
701 */
702 addr = (u8 *)__bpf_call_base + imm;
703 }
704
705 *func_addr = (unsigned long)addr;
706 return 0;
707}
708
675static int bpf_jit_blind_insn(const struct bpf_insn *from, 709static int bpf_jit_blind_insn(const struct bpf_insn *from,
676 const struct bpf_insn *aux, 710 const struct bpf_insn *aux,
677 struct bpf_insn *to_buff) 711 struct bpf_insn *to_buff)
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index c97a8f968638..bed9d48a7ae9 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -139,7 +139,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
139 return -ENOENT; 139 return -ENOENT;
140 140
141 new = kmalloc_node(sizeof(struct bpf_storage_buffer) + 141 new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
142 map->value_size, __GFP_ZERO | GFP_USER, 142 map->value_size,
143 __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
143 map->numa_node); 144 map->numa_node);
144 if (!new) 145 if (!new)
145 return -ENOMEM; 146 return -ENOMEM;
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 8bbd72d3a121..b384ea9f3254 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -7,6 +7,7 @@
7#include <linux/bpf.h> 7#include <linux/bpf.h>
8#include <linux/list.h> 8#include <linux/list.h>
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/capability.h>
10#include "percpu_freelist.h" 11#include "percpu_freelist.h"
11 12
12#define QUEUE_STACK_CREATE_FLAG_MASK \ 13#define QUEUE_STACK_CREATE_FLAG_MASK \
@@ -45,8 +46,12 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
45/* Called from syscall */ 46/* Called from syscall */
46static int queue_stack_map_alloc_check(union bpf_attr *attr) 47static int queue_stack_map_alloc_check(union bpf_attr *attr)
47{ 48{
49 if (!capable(CAP_SYS_ADMIN))
50 return -EPERM;
51
48 /* check sanity of attributes */ 52 /* check sanity of attributes */
49 if (attr->max_entries == 0 || attr->key_size != 0 || 53 if (attr->max_entries == 0 || attr->key_size != 0 ||
54 attr->value_size == 0 ||
50 attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK) 55 attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
51 return -EINVAL; 56 return -EINVAL;
52 57
@@ -63,15 +68,10 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
63{ 68{
64 int ret, numa_node = bpf_map_attr_numa_node(attr); 69 int ret, numa_node = bpf_map_attr_numa_node(attr);
65 struct bpf_queue_stack *qs; 70 struct bpf_queue_stack *qs;
66 u32 size, value_size; 71 u64 size, queue_size, cost;
67 u64 queue_size, cost;
68
69 size = attr->max_entries + 1;
70 value_size = attr->value_size;
71
72 queue_size = sizeof(*qs) + (u64) value_size * size;
73 72
74 cost = queue_size; 73 size = (u64) attr->max_entries + 1;
74 cost = queue_size = sizeof(*qs) + size * attr->value_size;
75 if (cost >= U32_MAX - PAGE_SIZE) 75 if (cost >= U32_MAX - PAGE_SIZE)
76 return ERR_PTR(-E2BIG); 76 return ERR_PTR(-E2BIG);
77 77
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1971ca325fb4..6dd419550aba 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5650,7 +5650,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
5650 return; 5650 return;
5651 /* NOTE: fake 'exit' subprog should be updated as well. */ 5651 /* NOTE: fake 'exit' subprog should be updated as well. */
5652 for (i = 0; i <= env->subprog_cnt; i++) { 5652 for (i = 0; i <= env->subprog_cnt; i++) {
5653 if (env->subprog_info[i].start < off) 5653 if (env->subprog_info[i].start <= off)
5654 continue; 5654 continue;
5655 env->subprog_info[i].start += len - 1; 5655 env->subprog_info[i].start += len - 1;
5656 } 5656 }
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3c7f3b4c453c..91d5c38eb7e5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,6 +10,7 @@
10#include <linux/sched/signal.h> 10#include <linux/sched/signal.h>
11#include <linux/sched/hotplug.h> 11#include <linux/sched/hotplug.h>
12#include <linux/sched/task.h> 12#include <linux/sched/task.h>
13#include <linux/sched/smt.h>
13#include <linux/unistd.h> 14#include <linux/unistd.h>
14#include <linux/cpu.h> 15#include <linux/cpu.h>
15#include <linux/oom.h> 16#include <linux/oom.h>
@@ -367,6 +368,12 @@ static void lockdep_release_cpus_lock(void)
367 368
368#endif /* CONFIG_HOTPLUG_CPU */ 369#endif /* CONFIG_HOTPLUG_CPU */
369 370
371/*
372 * Architectures that need SMT-specific errata handling during SMT hotplug
373 * should override this.
374 */
375void __weak arch_smt_update(void) { }
376
370#ifdef CONFIG_HOTPLUG_SMT 377#ifdef CONFIG_HOTPLUG_SMT
371enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; 378enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
372EXPORT_SYMBOL_GPL(cpu_smt_control); 379EXPORT_SYMBOL_GPL(cpu_smt_control);
@@ -1011,6 +1018,7 @@ out:
1011 * concurrent CPU hotplug via cpu_add_remove_lock. 1018 * concurrent CPU hotplug via cpu_add_remove_lock.
1012 */ 1019 */
1013 lockup_detector_cleanup(); 1020 lockup_detector_cleanup();
1021 arch_smt_update();
1014 return ret; 1022 return ret;
1015} 1023}
1016 1024
@@ -1139,6 +1147,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1139 ret = cpuhp_up_callbacks(cpu, st, target); 1147 ret = cpuhp_up_callbacks(cpu, st, target);
1140out: 1148out:
1141 cpus_write_unlock(); 1149 cpus_write_unlock();
1150 arch_smt_update();
1142 return ret; 1151 return ret;
1143} 1152}
1144 1153
@@ -2055,12 +2064,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
2055 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2064 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2056} 2065}
2057 2066
2058/*
2059 * Architectures that need SMT-specific errata handling during SMT hotplug
2060 * should override this.
2061 */
2062void __weak arch_smt_update(void) { };
2063
2064static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) 2067static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2065{ 2068{
2066 int cpu, ret = 0; 2069 int cpu, ret = 0;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 5731daa09a32..045930e32c0e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -679,7 +679,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
679 } 679 }
680 680
681 if (!dev_is_dma_coherent(dev) && 681 if (!dev_is_dma_coherent(dev) &&
682 (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 682 (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0 &&
683 dev_addr != DIRECT_MAPPING_ERROR)
683 arch_sync_dma_for_device(dev, phys, size, dir); 684 arch_sync_dma_for_device(dev, phys, size, dir);
684 685
685 return dev_addr; 686 return dev_addr;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 96d4bee83489..322e97bbb437 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -829,7 +829,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
829 BUG_ON((uprobe->offset & ~PAGE_MASK) + 829 BUG_ON((uprobe->offset & ~PAGE_MASK) +
830 UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); 830 UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
831 831
832 smp_wmb(); /* pairs with rmb() in find_active_uprobe() */ 832 smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
833 set_bit(UPROBE_COPY_INSN, &uprobe->flags); 833 set_bit(UPROBE_COPY_INSN, &uprobe->flags);
834 834
835 out: 835 out:
@@ -2178,10 +2178,18 @@ static void handle_swbp(struct pt_regs *regs)
2178 * After we hit the bp, _unregister + _register can install the 2178 * After we hit the bp, _unregister + _register can install the
2179 * new and not-yet-analyzed uprobe at the same address, restart. 2179 * new and not-yet-analyzed uprobe at the same address, restart.
2180 */ 2180 */
2181 smp_rmb(); /* pairs with wmb() in install_breakpoint() */
2182 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) 2181 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
2183 goto out; 2182 goto out;
2184 2183
2184 /*
2185 * Pairs with the smp_wmb() in prepare_uprobe().
2186 *
2187 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2188 * we must also see the stores to &uprobe->arch performed by the
2189 * prepare_uprobe() call.
2190 */
2191 smp_rmb();
2192
2185 /* Tracing handlers use ->utask to communicate with fetch methods */ 2193 /* Tracing handlers use ->utask to communicate with fetch methods */
2186 if (!get_utask()) 2194 if (!get_utask())
2187 goto out; 2195 goto out;
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3ebd09efe72a..97959d7b77e2 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -56,7 +56,7 @@ struct kcov {
56 struct task_struct *t; 56 struct task_struct *t;
57}; 57};
58 58
59static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t) 59static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
60{ 60{
61 unsigned int mode; 61 unsigned int mode;
62 62
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
78 return mode == needed_mode; 78 return mode == needed_mode;
79} 79}
80 80
81static unsigned long canonicalize_ip(unsigned long ip) 81static notrace unsigned long canonicalize_ip(unsigned long ip)
82{ 82{
83#ifdef CONFIG_RANDOMIZE_BASE 83#ifdef CONFIG_RANDOMIZE_BASE
84 ip -= kaslr_offset(); 84 ip -= kaslr_offset();
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 80b34dffdfb9..c2cee9db5204 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -261,9 +261,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
261 261
262static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) 262static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
263{ 263{
264 if (mode & PTRACE_MODE_SCHED)
265 return false;
266
267 if (mode & PTRACE_MODE_NOAUDIT) 264 if (mode & PTRACE_MODE_NOAUDIT)
268 return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE); 265 return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
269 else 266 else
@@ -331,16 +328,9 @@ ok:
331 !ptrace_has_cap(mm->user_ns, mode))) 328 !ptrace_has_cap(mm->user_ns, mode)))
332 return -EPERM; 329 return -EPERM;
333 330
334 if (mode & PTRACE_MODE_SCHED)
335 return 0;
336 return security_ptrace_access_check(task, mode); 331 return security_ptrace_access_check(task, mode);
337} 332}
338 333
339bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
340{
341 return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
342}
343
344bool ptrace_may_access(struct task_struct *task, unsigned int mode) 334bool ptrace_may_access(struct task_struct *task, unsigned int mode)
345{ 335{
346 int err; 336 int err;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 091e089063be..6fedf3a98581 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5738,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
5738 5738
5739#ifdef CONFIG_SCHED_SMT 5739#ifdef CONFIG_SCHED_SMT
5740 /* 5740 /*
5741 * The sched_smt_present static key needs to be evaluated on every 5741 * When going up, increment the number of cores with SMT present.
5742 * hotplug event because at boot time SMT might be disabled when
5743 * the number of booted CPUs is limited.
5744 *
5745 * If then later a sibling gets hotplugged, then the key would stay
5746 * off and SMT scheduling would never be functional.
5747 */ 5742 */
5748 if (cpumask_weight(cpu_smt_mask(cpu)) > 1) 5743 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
5749 static_branch_enable_cpuslocked(&sched_smt_present); 5744 static_branch_inc_cpuslocked(&sched_smt_present);
5750#endif 5745#endif
5751 set_cpu_active(cpu, true); 5746 set_cpu_active(cpu, true);
5752 5747
@@ -5790,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
5790 */ 5785 */
5791 synchronize_rcu_mult(call_rcu, call_rcu_sched); 5786 synchronize_rcu_mult(call_rcu, call_rcu_sched);
5792 5787
5788#ifdef CONFIG_SCHED_SMT
5789 /*
5790 * When going down, decrement the number of cores with SMT present.
5791 */
5792 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
5793 static_branch_dec_cpuslocked(&sched_smt_present);
5794#endif
5795
5793 if (!sched_smp_initialized) 5796 if (!sched_smp_initialized)
5794 return 0; 5797 return 0;
5795 5798
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 3d7355d7c3e3..fe24de3fbc93 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -136,8 +136,18 @@
136 136
137static int psi_bug __read_mostly; 137static int psi_bug __read_mostly;
138 138
139bool psi_disabled __read_mostly; 139DEFINE_STATIC_KEY_FALSE(psi_disabled);
140core_param(psi_disabled, psi_disabled, bool, 0644); 140
141#ifdef CONFIG_PSI_DEFAULT_DISABLED
142bool psi_enable;
143#else
144bool psi_enable = true;
145#endif
146static int __init setup_psi(char *str)
147{
148 return kstrtobool(str, &psi_enable) == 0;
149}
150__setup("psi=", setup_psi);
141 151
142/* Running averages - we need to be higher-res than loadavg */ 152/* Running averages - we need to be higher-res than loadavg */
143#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */ 153#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
@@ -169,8 +179,10 @@ static void group_init(struct psi_group *group)
169 179
170void __init psi_init(void) 180void __init psi_init(void)
171{ 181{
172 if (psi_disabled) 182 if (!psi_enable) {
183 static_branch_enable(&psi_disabled);
173 return; 184 return;
185 }
174 186
175 psi_period = jiffies_to_nsecs(PSI_FREQ); 187 psi_period = jiffies_to_nsecs(PSI_FREQ);
176 group_init(&psi_system); 188 group_init(&psi_system);
@@ -549,7 +561,7 @@ void psi_memstall_enter(unsigned long *flags)
549 struct rq_flags rf; 561 struct rq_flags rf;
550 struct rq *rq; 562 struct rq *rq;
551 563
552 if (psi_disabled) 564 if (static_branch_likely(&psi_disabled))
553 return; 565 return;
554 566
555 *flags = current->flags & PF_MEMSTALL; 567 *flags = current->flags & PF_MEMSTALL;
@@ -579,7 +591,7 @@ void psi_memstall_leave(unsigned long *flags)
579 struct rq_flags rf; 591 struct rq_flags rf;
580 struct rq *rq; 592 struct rq *rq;
581 593
582 if (psi_disabled) 594 if (static_branch_likely(&psi_disabled))
583 return; 595 return;
584 596
585 if (*flags) 597 if (*flags)
@@ -600,7 +612,7 @@ void psi_memstall_leave(unsigned long *flags)
600#ifdef CONFIG_CGROUPS 612#ifdef CONFIG_CGROUPS
601int psi_cgroup_alloc(struct cgroup *cgroup) 613int psi_cgroup_alloc(struct cgroup *cgroup)
602{ 614{
603 if (psi_disabled) 615 if (static_branch_likely(&psi_disabled))
604 return 0; 616 return 0;
605 617
606 cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu); 618 cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
@@ -612,7 +624,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
612 624
613void psi_cgroup_free(struct cgroup *cgroup) 625void psi_cgroup_free(struct cgroup *cgroup)
614{ 626{
615 if (psi_disabled) 627 if (static_branch_likely(&psi_disabled))
616 return; 628 return;
617 629
618 cancel_delayed_work_sync(&cgroup->psi.clock_work); 630 cancel_delayed_work_sync(&cgroup->psi.clock_work);
@@ -637,7 +649,7 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
637 struct rq_flags rf; 649 struct rq_flags rf;
638 struct rq *rq; 650 struct rq *rq;
639 651
640 if (psi_disabled) { 652 if (static_branch_likely(&psi_disabled)) {
641 /* 653 /*
642 * Lame to do this here, but the scheduler cannot be locked 654 * Lame to do this here, but the scheduler cannot be locked
643 * from the outside, so we move cgroups from inside sched/. 655 * from the outside, so we move cgroups from inside sched/.
@@ -673,7 +685,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
673{ 685{
674 int full; 686 int full;
675 687
676 if (psi_disabled) 688 if (static_branch_likely(&psi_disabled))
677 return -EOPNOTSUPP; 689 return -EOPNOTSUPP;
678 690
679 update_stats(group); 691 update_stats(group);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 618577fc9aa8..4e524ab589c9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -23,6 +23,7 @@
23#include <linux/sched/prio.h> 23#include <linux/sched/prio.h>
24#include <linux/sched/rt.h> 24#include <linux/sched/rt.h>
25#include <linux/sched/signal.h> 25#include <linux/sched/signal.h>
26#include <linux/sched/smt.h>
26#include <linux/sched/stat.h> 27#include <linux/sched/stat.h>
27#include <linux/sched/sysctl.h> 28#include <linux/sched/sysctl.h>
28#include <linux/sched/task.h> 29#include <linux/sched/task.h>
@@ -936,9 +937,6 @@ static inline int cpu_of(struct rq *rq)
936 937
937 938
938#ifdef CONFIG_SCHED_SMT 939#ifdef CONFIG_SCHED_SMT
939
940extern struct static_key_false sched_smt_present;
941
942extern void __update_idle_core(struct rq *rq); 940extern void __update_idle_core(struct rq *rq);
943 941
944static inline void update_idle_core(struct rq *rq) 942static inline void update_idle_core(struct rq *rq)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 4904c4677000..aa0de240fb41 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
66{ 66{
67 int clear = 0, set = TSK_RUNNING; 67 int clear = 0, set = TSK_RUNNING;
68 68
69 if (psi_disabled) 69 if (static_branch_likely(&psi_disabled))
70 return; 70 return;
71 71
72 if (!wakeup || p->sched_psi_wake_requeue) { 72 if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
86{ 86{
87 int clear = TSK_RUNNING, set = 0; 87 int clear = TSK_RUNNING, set = 0;
88 88
89 if (psi_disabled) 89 if (static_branch_likely(&psi_disabled))
90 return; 90 return;
91 91
92 if (!sleep) { 92 if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
102 102
103static inline void psi_ttwu_dequeue(struct task_struct *p) 103static inline void psi_ttwu_dequeue(struct task_struct *p)
104{ 104{
105 if (psi_disabled) 105 if (static_branch_likely(&psi_disabled))
106 return; 106 return;
107 /* 107 /*
108 * Is the task being migrated during a wakeup? Make sure to 108 * Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
128 128
129static inline void psi_task_tick(struct rq *rq) 129static inline void psi_task_tick(struct rq *rq)
130{ 130{
131 if (psi_disabled) 131 if (static_branch_likely(&psi_disabled))
132 return; 132 return;
133 133
134 if (unlikely(rq->curr->flags & PF_MEMSTALL)) 134 if (unlikely(rq->curr->flags & PF_MEMSTALL))
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index e42892926244..08cb57eed389 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/stackleak.h> 13#include <linux/stackleak.h>
14#include <linux/kprobes.h>
14 15
15#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE 16#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
16#include <linux/jump_label.h> 17#include <linux/jump_label.h>
@@ -47,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write,
47#define skip_erasing() false 48#define skip_erasing() false
48#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */ 49#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
49 50
50asmlinkage void stackleak_erase(void) 51asmlinkage void notrace stackleak_erase(void)
51{ 52{
52 /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */ 53 /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
53 unsigned long kstack_ptr = current->lowest_stack; 54 unsigned long kstack_ptr = current->lowest_stack;
@@ -101,6 +102,7 @@ asmlinkage void stackleak_erase(void)
101 /* Reset the 'lowest_stack' value for the next syscall */ 102 /* Reset the 'lowest_stack' value for the next syscall */
102 current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64; 103 current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
103} 104}
105NOKPROBE_SYMBOL(stackleak_erase);
104 106
105void __used stackleak_track_stack(void) 107void __used stackleak_track_stack(void)
106{ 108{
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 08fcfe440c63..9864a35c8bb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
196 i++; 196 i++;
197 } else if (fmt[i] == 'p' || fmt[i] == 's') { 197 } else if (fmt[i] == 'p' || fmt[i] == 's') {
198 mod[fmt_cnt]++; 198 mod[fmt_cnt]++;
199 i++; 199 /* disallow any further format extensions */
200 if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0) 200 if (fmt[i + 1] != 0 &&
201 !isspace(fmt[i + 1]) &&
202 !ispunct(fmt[i + 1]))
201 return -EINVAL; 203 return -EINVAL;
202 fmt_cnt++; 204 fmt_cnt++;
203 if (fmt[i - 1] == 's') { 205 if (fmt[i] == 's') {
204 if (str_seen) 206 if (str_seen)
205 /* allow only one '%s' per fmt string */ 207 /* allow only one '%s' per fmt string */
206 return -EINVAL; 208 return -EINVAL;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f536f601bd46..77734451cb05 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -817,7 +817,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
817#ifdef CONFIG_FUNCTION_GRAPH_TRACER 817#ifdef CONFIG_FUNCTION_GRAPH_TRACER
818static int profile_graph_entry(struct ftrace_graph_ent *trace) 818static int profile_graph_entry(struct ftrace_graph_ent *trace)
819{ 819{
820 int index = trace->depth; 820 int index = current->curr_ret_stack;
821 821
822 function_profile_call(trace->func, 0, NULL, NULL); 822 function_profile_call(trace->func, 0, NULL, NULL);
823 823
@@ -852,7 +852,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
852 if (!fgraph_graph_time) { 852 if (!fgraph_graph_time) {
853 int index; 853 int index;
854 854
855 index = trace->depth; 855 index = current->curr_ret_stack;
856 856
857 /* Append this call time to the parent time to subtract */ 857 /* Append this call time to the parent time to subtract */
858 if (index) 858 if (index)
@@ -6814,6 +6814,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
6814 atomic_set(&t->tracing_graph_pause, 0); 6814 atomic_set(&t->tracing_graph_pause, 0);
6815 atomic_set(&t->trace_overrun, 0); 6815 atomic_set(&t->trace_overrun, 0);
6816 t->curr_ret_stack = -1; 6816 t->curr_ret_stack = -1;
6817 t->curr_ret_depth = -1;
6817 /* Make sure the tasks see the -1 first: */ 6818 /* Make sure the tasks see the -1 first: */
6818 smp_wmb(); 6819 smp_wmb();
6819 t->ret_stack = ret_stack_list[start++]; 6820 t->ret_stack = ret_stack_list[start++];
@@ -7038,6 +7039,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
7038void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) 7039void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
7039{ 7040{
7040 t->curr_ret_stack = -1; 7041 t->curr_ret_stack = -1;
7042 t->curr_ret_depth = -1;
7041 /* 7043 /*
7042 * The idle task has no parent, it either has its own 7044 * The idle task has no parent, it either has its own
7043 * stack or no stack at all. 7045 * stack or no stack at all.
@@ -7068,6 +7070,7 @@ void ftrace_graph_init_task(struct task_struct *t)
7068 /* Make sure we do not use the parent ret_stack */ 7070 /* Make sure we do not use the parent ret_stack */
7069 t->ret_stack = NULL; 7071 t->ret_stack = NULL;
7070 t->curr_ret_stack = -1; 7072 t->curr_ret_stack = -1;
7073 t->curr_ret_depth = -1;
7071 7074
7072 if (ftrace_graph_active) { 7075 if (ftrace_graph_active) {
7073 struct ftrace_ret_stack *ret_stack; 7076 struct ftrace_ret_stack *ret_stack;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3b8c0e24ab30..447bd96ee658 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -512,12 +512,44 @@ enum {
512 * can only be modified by current, we can reuse trace_recursion. 512 * can only be modified by current, we can reuse trace_recursion.
513 */ 513 */
514 TRACE_IRQ_BIT, 514 TRACE_IRQ_BIT,
515
516 /* Set if the function is in the set_graph_function file */
517 TRACE_GRAPH_BIT,
518
519 /*
520 * In the very unlikely case that an interrupt came in
521 * at a start of graph tracing, and we want to trace
522 * the function in that interrupt, the depth can be greater
523 * than zero, because of the preempted start of a previous
524 * trace. In an even more unlikely case, depth could be 2
525 * if a softirq interrupted the start of graph tracing,
526 * followed by an interrupt preempting a start of graph
527 * tracing in the softirq, and depth can even be 3
528 * if an NMI came in at the start of an interrupt function
529 * that preempted a softirq start of a function that
530 * preempted normal context!!!! Luckily, it can't be
531 * greater than 3, so the next two bits are a mask
532 * of what the depth is when we set TRACE_GRAPH_BIT
533 */
534
535 TRACE_GRAPH_DEPTH_START_BIT,
536 TRACE_GRAPH_DEPTH_END_BIT,
515}; 537};
516 538
517#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) 539#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
518#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) 540#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
519#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) 541#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
520 542
543#define trace_recursion_depth() \
544 (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
545#define trace_recursion_set_depth(depth) \
546 do { \
547 current->trace_recursion &= \
548 ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
549 current->trace_recursion |= \
550 ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
551 } while (0)
552
521#define TRACE_CONTEXT_BITS 4 553#define TRACE_CONTEXT_BITS 4
522 554
523#define TRACE_FTRACE_START TRACE_FTRACE_BIT 555#define TRACE_FTRACE_START TRACE_FTRACE_BIT
@@ -843,8 +875,9 @@ extern void __trace_graph_return(struct trace_array *tr,
843extern struct ftrace_hash *ftrace_graph_hash; 875extern struct ftrace_hash *ftrace_graph_hash;
844extern struct ftrace_hash *ftrace_graph_notrace_hash; 876extern struct ftrace_hash *ftrace_graph_notrace_hash;
845 877
846static inline int ftrace_graph_addr(unsigned long addr) 878static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
847{ 879{
880 unsigned long addr = trace->func;
848 int ret = 0; 881 int ret = 0;
849 882
850 preempt_disable_notrace(); 883 preempt_disable_notrace();
@@ -855,6 +888,14 @@ static inline int ftrace_graph_addr(unsigned long addr)
855 } 888 }
856 889
857 if (ftrace_lookup_ip(ftrace_graph_hash, addr)) { 890 if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
891
892 /*
893 * This needs to be cleared on the return functions
894 * when the depth is zero.
895 */
896 trace_recursion_set(TRACE_GRAPH_BIT);
897 trace_recursion_set_depth(trace->depth);
898
858 /* 899 /*
859 * If no irqs are to be traced, but a set_graph_function 900 * If no irqs are to be traced, but a set_graph_function
860 * is set, and called by an interrupt handler, we still 901 * is set, and called by an interrupt handler, we still
@@ -872,6 +913,13 @@ out:
872 return ret; 913 return ret;
873} 914}
874 915
916static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
917{
918 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
919 trace->depth == trace_recursion_depth())
920 trace_recursion_clear(TRACE_GRAPH_BIT);
921}
922
875static inline int ftrace_graph_notrace_addr(unsigned long addr) 923static inline int ftrace_graph_notrace_addr(unsigned long addr)
876{ 924{
877 int ret = 0; 925 int ret = 0;
@@ -885,7 +933,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
885 return ret; 933 return ret;
886} 934}
887#else 935#else
888static inline int ftrace_graph_addr(unsigned long addr) 936static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
889{ 937{
890 return 1; 938 return 1;
891} 939}
@@ -894,6 +942,8 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
894{ 942{
895 return 0; 943 return 0;
896} 944}
945static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
946{ }
897#endif /* CONFIG_DYNAMIC_FTRACE */ 947#endif /* CONFIG_DYNAMIC_FTRACE */
898 948
899extern unsigned int fgraph_max_depth; 949extern unsigned int fgraph_max_depth;
@@ -901,7 +951,8 @@ extern unsigned int fgraph_max_depth;
901static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace) 951static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
902{ 952{
903 /* trace it when it is-nested-in or is a function enabled. */ 953 /* trace it when it is-nested-in or is a function enabled. */
904 return !(trace->depth || ftrace_graph_addr(trace->func)) || 954 return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
955 ftrace_graph_addr(trace)) ||
905 (trace->depth < 0) || 956 (trace->depth < 0) ||
906 (fgraph_max_depth && trace->depth >= fgraph_max_depth); 957 (fgraph_max_depth && trace->depth >= fgraph_max_depth);
907} 958}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 169b3c44ee97..086af4f5c3e8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
118 struct trace_seq *s, u32 flags); 118 struct trace_seq *s, u32 flags);
119 119
120/* Add a function return address to the trace stack on thread info.*/ 120/* Add a function return address to the trace stack on thread info.*/
121int 121static int
122ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, 122ftrace_push_return_trace(unsigned long ret, unsigned long func,
123 unsigned long frame_pointer, unsigned long *retp) 123 unsigned long frame_pointer, unsigned long *retp)
124{ 124{
125 unsigned long long calltime; 125 unsigned long long calltime;
@@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
178 current->ret_stack[index].retp = retp; 178 current->ret_stack[index].retp = retp;
179#endif 179#endif
180 *depth = current->curr_ret_stack; 180 return 0;
181}
182
183int function_graph_enter(unsigned long ret, unsigned long func,
184 unsigned long frame_pointer, unsigned long *retp)
185{
186 struct ftrace_graph_ent trace;
187
188 trace.func = func;
189 trace.depth = ++current->curr_ret_depth;
190
191 if (ftrace_push_return_trace(ret, func,
192 frame_pointer, retp))
193 goto out;
194
195 /* Only trace if the calling function expects to */
196 if (!ftrace_graph_entry(&trace))
197 goto out_ret;
181 198
182 return 0; 199 return 0;
200 out_ret:
201 current->curr_ret_stack--;
202 out:
203 current->curr_ret_depth--;
204 return -EBUSY;
183} 205}
184 206
185/* Retrieve a function return address to the trace stack on thread info.*/ 207/* Retrieve a function return address to the trace stack on thread info.*/
@@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
241 trace->func = current->ret_stack[index].func; 263 trace->func = current->ret_stack[index].func;
242 trace->calltime = current->ret_stack[index].calltime; 264 trace->calltime = current->ret_stack[index].calltime;
243 trace->overrun = atomic_read(&current->trace_overrun); 265 trace->overrun = atomic_read(&current->trace_overrun);
244 trace->depth = index; 266 trace->depth = current->curr_ret_depth--;
267 /*
268 * We still want to trace interrupts coming in if
269 * max_depth is set to 1. Make sure the decrement is
270 * seen before ftrace_graph_return.
271 */
272 barrier();
245} 273}
246 274
247/* 275/*
@@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
255 283
256 ftrace_pop_return_trace(&trace, &ret, frame_pointer); 284 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
257 trace.rettime = trace_clock_local(); 285 trace.rettime = trace_clock_local();
286 ftrace_graph_return(&trace);
287 /*
288 * The ftrace_graph_return() may still access the current
289 * ret_stack structure, we need to make sure the update of
290 * curr_ret_stack is after that.
291 */
258 barrier(); 292 barrier();
259 current->curr_ret_stack--; 293 current->curr_ret_stack--;
260 /* 294 /*
@@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
267 return ret; 301 return ret;
268 } 302 }
269 303
270 /*
271 * The trace should run after decrementing the ret counter
272 * in case an interrupt were to come in. We don't want to
273 * lose the interrupt if max_depth is set.
274 */
275 ftrace_graph_return(&trace);
276
277 if (unlikely(!ret)) { 304 if (unlikely(!ret)) {
278 ftrace_graph_stop(); 305 ftrace_graph_stop();
279 WARN_ON(1); 306 WARN_ON(1);
@@ -482,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
482 int cpu; 509 int cpu;
483 int pc; 510 int pc;
484 511
512 ftrace_graph_addr_finish(trace);
513
485 local_irq_save(flags); 514 local_irq_save(flags);
486 cpu = raw_smp_processor_id(); 515 cpu = raw_smp_processor_id();
487 data = per_cpu_ptr(tr->trace_buffer.data, cpu); 516 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -505,6 +534,8 @@ void set_graph_array(struct trace_array *tr)
505 534
506static void trace_graph_thresh_return(struct ftrace_graph_ret *trace) 535static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
507{ 536{
537 ftrace_graph_addr_finish(trace);
538
508 if (tracing_thresh && 539 if (tracing_thresh &&
509 (trace->rettime - trace->calltime < tracing_thresh)) 540 (trace->rettime - trace->calltime < tracing_thresh))
510 return; 541 return;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b7357f9f82a3..98ea6d28df15 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -208,6 +208,8 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
208 unsigned long flags; 208 unsigned long flags;
209 int pc; 209 int pc;
210 210
211 ftrace_graph_addr_finish(trace);
212
211 if (!func_prolog_dec(tr, &data, &flags)) 213 if (!func_prolog_dec(tr, &data, &flags))
212 return; 214 return;
213 215
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index a86b303e6c67..7d04b9890755 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -270,6 +270,8 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
270 unsigned long flags; 270 unsigned long flags;
271 int pc; 271 int pc;
272 272
273 ftrace_graph_addr_finish(trace);
274
273 if (!func_prolog_preempt_disable(tr, &data, &pc)) 275 if (!func_prolog_preempt_disable(tr, &data, &pc))
274 return; 276 return;
275 277
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 70935ed91125..14afeeb7d6ef 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -135,7 +135,6 @@ static void fill_pool(void)
135 if (!new) 135 if (!new)
136 return; 136 return;
137 137
138 kmemleak_ignore(new);
139 raw_spin_lock_irqsave(&pool_lock, flags); 138 raw_spin_lock_irqsave(&pool_lock, flags);
140 hlist_add_head(&new->node, &obj_pool); 139 hlist_add_head(&new->node, &obj_pool);
141 debug_objects_allocated++; 140 debug_objects_allocated++;
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
1128 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL); 1127 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1129 if (!obj) 1128 if (!obj)
1130 goto free; 1129 goto free;
1131 kmemleak_ignore(obj);
1132 hlist_add_head(&obj->node, &objects); 1130 hlist_add_head(&obj->node, &objects);
1133 } 1131 }
1134 1132
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
1184 1182
1185 obj_cache = kmem_cache_create("debug_objects_cache", 1183 obj_cache = kmem_cache_create("debug_objects_cache",
1186 sizeof (struct debug_obj), 0, 1184 sizeof (struct debug_obj), 0,
1187 SLAB_DEBUG_OBJECTS, NULL); 1185 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1186 NULL);
1188 1187
1189 if (!obj_cache || debug_objects_replace_static_objects()) { 1188 if (!obj_cache || debug_objects_replace_static_objects()) {
1190 debug_objects_enabled = 0; 1189 debug_objects_enabled = 0;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7ebccb5c1637..54c248526b55 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -560,6 +560,38 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
560 return bytes; 560 return bytes;
561} 561}
562 562
563static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
564 __wsum *csum, struct iov_iter *i)
565{
566 struct pipe_inode_info *pipe = i->pipe;
567 size_t n, r;
568 size_t off = 0;
569 __wsum sum = *csum, next;
570 int idx;
571
572 if (!sanity(i))
573 return 0;
574
575 bytes = n = push_pipe(i, bytes, &idx, &r);
576 if (unlikely(!n))
577 return 0;
578 for ( ; n; idx = next_idx(idx, pipe), r = 0) {
579 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
580 char *p = kmap_atomic(pipe->bufs[idx].page);
581 next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
582 sum = csum_block_add(sum, next, off);
583 kunmap_atomic(p);
584 i->idx = idx;
585 i->iov_offset = r + chunk;
586 n -= chunk;
587 off += chunk;
588 addr += chunk;
589 }
590 i->count -= bytes;
591 *csum = sum;
592 return bytes;
593}
594
563size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 595size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
564{ 596{
565 const char *from = addr; 597 const char *from = addr;
@@ -1438,8 +1470,12 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
1438 const char *from = addr; 1470 const char *from = addr;
1439 __wsum sum, next; 1471 __wsum sum, next;
1440 size_t off = 0; 1472 size_t off = 0;
1473
1474 if (unlikely(iov_iter_is_pipe(i)))
1475 return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
1476
1441 sum = *csum; 1477 sum = *csum;
1442 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 1478 if (unlikely(iov_iter_is_discard(i))) {
1443 WARN_ON(1); /* for now */ 1479 WARN_ON(1); /* for now */
1444 return 0; 1480 return 0;
1445 } 1481 }
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index b984806d7d7b..7cab9a9869ac 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev,
837 if (req->fw->size > PAGE_SIZE) { 837 if (req->fw->size > PAGE_SIZE) {
838 pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); 838 pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
839 rc = -EINVAL; 839 rc = -EINVAL;
840 goto out;
840 } 841 }
841 memcpy(buf, req->fw->data, req->fw->size); 842 memcpy(buf, req->fw->data, req->fw->size);
842 843
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 626f580b4ff7..5144899d3c6b 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -99,7 +99,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
99 const char *q = *result++; 99 const char *q = *result++;
100 size_t amount = strlen(q); 100 size_t amount = strlen(q);
101 101
102 strncpy(p, q, amount); 102 memcpy(p, q, amount);
103 p += amount; 103 p += amount;
104 104
105 *p++ = ' '; 105 *p++ = ' ';
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e3ddd836491f..d82d022111e0 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1214,7 +1214,6 @@ void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
1214 1214
1215 dev_info(test_dev->dev, "removing interface\n"); 1215 dev_info(test_dev->dev, "removing interface\n");
1216 misc_deregister(&test_dev->misc_dev); 1216 misc_deregister(&test_dev->misc_dev);
1217 kfree(&test_dev->misc_dev.name);
1218 1217
1219 mutex_unlock(&test_dev->config_mutex); 1218 mutex_unlock(&test_dev->config_mutex);
1220 mutex_unlock(&test_dev->trigger_mutex); 1219 mutex_unlock(&test_dev->trigger_mutex);
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index aa47754150ce..0598e86af8fc 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -208,15 +208,19 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
208 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2)); 208 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
209 209
210 /* We should see two elements in the array */ 210 /* We should see two elements in the array */
211 rcu_read_lock();
211 xas_for_each(&xas, entry, ULONG_MAX) 212 xas_for_each(&xas, entry, ULONG_MAX)
212 seen++; 213 seen++;
214 rcu_read_unlock();
213 XA_BUG_ON(xa, seen != 2); 215 XA_BUG_ON(xa, seen != 2);
214 216
215 /* One of which is marked */ 217 /* One of which is marked */
216 xas_set(&xas, 0); 218 xas_set(&xas, 0);
217 seen = 0; 219 seen = 0;
220 rcu_read_lock();
218 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) 221 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
219 seen++; 222 seen++;
223 rcu_read_unlock();
220 XA_BUG_ON(xa, seen != 1); 224 XA_BUG_ON(xa, seen != 1);
221 } 225 }
222 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); 226 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
@@ -373,6 +377,12 @@ static noinline void check_reserve(struct xarray *xa)
373 xa_erase_index(xa, 12345678); 377 xa_erase_index(xa, 12345678);
374 XA_BUG_ON(xa, !xa_empty(xa)); 378 XA_BUG_ON(xa, !xa_empty(xa));
375 379
380 /* And so does xa_insert */
381 xa_reserve(xa, 12345678, GFP_KERNEL);
382 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
383 xa_erase_index(xa, 12345678);
384 XA_BUG_ON(xa, !xa_empty(xa));
385
376 /* Can iterate through a reserved entry */ 386 /* Can iterate through a reserved entry */
377 xa_store_index(xa, 5, GFP_KERNEL); 387 xa_store_index(xa, 5, GFP_KERNEL);
378 xa_reserve(xa, 6, GFP_KERNEL); 388 xa_reserve(xa, 6, GFP_KERNEL);
@@ -436,7 +446,9 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
436 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 446 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
437 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 447 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
438 448
449 xas_lock(&xas);
439 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index)); 450 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
451 xas_unlock(&xas);
440 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min)); 452 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
441 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min)); 453 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
442 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 454 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
@@ -452,9 +464,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
452 XA_STATE(xas, xa, index); 464 XA_STATE(xas, xa, index);
453 xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); 465 xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
454 466
467 xas_lock(&xas);
455 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0)); 468 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
456 XA_BUG_ON(xa, xas.xa_index != index); 469 XA_BUG_ON(xa, xas.xa_index != index);
457 XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); 470 XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
471 xas_unlock(&xas);
458 XA_BUG_ON(xa, !xa_empty(xa)); 472 XA_BUG_ON(xa, !xa_empty(xa));
459} 473}
460#endif 474#endif
@@ -498,7 +512,7 @@ static noinline void check_multi_store(struct xarray *xa)
498 rcu_read_unlock(); 512 rcu_read_unlock();
499 513
500 /* We can erase multiple values with a single store */ 514 /* We can erase multiple values with a single store */
501 xa_store_order(xa, 0, 63, NULL, GFP_KERNEL); 515 xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
502 XA_BUG_ON(xa, !xa_empty(xa)); 516 XA_BUG_ON(xa, !xa_empty(xa));
503 517
504 /* Even when the first slot is empty but the others aren't */ 518 /* Even when the first slot is empty but the others aren't */
@@ -702,7 +716,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
702 } 716 }
703} 717}
704 718
705static noinline void check_find(struct xarray *xa) 719static noinline void check_find_1(struct xarray *xa)
706{ 720{
707 unsigned long i, j, k; 721 unsigned long i, j, k;
708 722
@@ -748,6 +762,34 @@ static noinline void check_find(struct xarray *xa)
748 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); 762 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
749 } 763 }
750 XA_BUG_ON(xa, !xa_empty(xa)); 764 XA_BUG_ON(xa, !xa_empty(xa));
765}
766
767static noinline void check_find_2(struct xarray *xa)
768{
769 void *entry;
770 unsigned long i, j, index = 0;
771
772 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
773 XA_BUG_ON(xa, true);
774 }
775
776 for (i = 0; i < 1024; i++) {
777 xa_store_index(xa, index, GFP_KERNEL);
778 j = 0;
779 index = 0;
780 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
781 XA_BUG_ON(xa, xa_mk_value(index) != entry);
782 XA_BUG_ON(xa, index != j++);
783 }
784 }
785
786 xa_destroy(xa);
787}
788
789static noinline void check_find(struct xarray *xa)
790{
791 check_find_1(xa);
792 check_find_2(xa);
751 check_multi_find(xa); 793 check_multi_find(xa);
752 check_multi_find_2(xa); 794 check_multi_find_2(xa);
753} 795}
@@ -1067,7 +1109,7 @@ static noinline void check_store_range(struct xarray *xa)
1067 __check_store_range(xa, 4095 + i, 4095 + j); 1109 __check_store_range(xa, 4095 + i, 4095 + j);
1068 __check_store_range(xa, 4096 + i, 4096 + j); 1110 __check_store_range(xa, 4096 + i, 4096 + j);
1069 __check_store_range(xa, 123456 + i, 123456 + j); 1111 __check_store_range(xa, 123456 + i, 123456 + j);
1070 __check_store_range(xa, UINT_MAX + i, UINT_MAX + j); 1112 __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
1071 } 1113 }
1072 } 1114 }
1073} 1115}
@@ -1146,10 +1188,12 @@ static noinline void check_account(struct xarray *xa)
1146 XA_STATE(xas, xa, 1 << order); 1188 XA_STATE(xas, xa, 1 << order);
1147 1189
1148 xa_store_order(xa, 0, order, xa, GFP_KERNEL); 1190 xa_store_order(xa, 0, order, xa, GFP_KERNEL);
1191 rcu_read_lock();
1149 xas_load(&xas); 1192 xas_load(&xas);
1150 XA_BUG_ON(xa, xas.xa_node->count == 0); 1193 XA_BUG_ON(xa, xas.xa_node->count == 0);
1151 XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); 1194 XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
1152 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1195 XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
1196 rcu_read_unlock();
1153 1197
1154 xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order), 1198 xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
1155 GFP_KERNEL); 1199 GFP_KERNEL);
diff --git a/lib/xarray.c b/lib/xarray.c
index 8b176f009c08..bbacca576593 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -610,8 +610,8 @@ static int xas_expand(struct xa_state *xas, void *head)
610 * (see the xa_cmpxchg() implementation for an example). 610 * (see the xa_cmpxchg() implementation for an example).
611 * 611 *
612 * Return: If the slot already existed, returns the contents of this slot. 612 * Return: If the slot already existed, returns the contents of this slot.
613 * If the slot was newly created, returns NULL. If it failed to create the 613 * If the slot was newly created, returns %NULL. If it failed to create the
614 * slot, returns NULL and indicates the error in @xas. 614 * slot, returns %NULL and indicates the error in @xas.
615 */ 615 */
616static void *xas_create(struct xa_state *xas) 616static void *xas_create(struct xa_state *xas)
617{ 617{
@@ -1334,44 +1334,31 @@ void *__xa_erase(struct xarray *xa, unsigned long index)
1334 XA_STATE(xas, xa, index); 1334 XA_STATE(xas, xa, index);
1335 return xas_result(&xas, xas_store(&xas, NULL)); 1335 return xas_result(&xas, xas_store(&xas, NULL));
1336} 1336}
1337EXPORT_SYMBOL_GPL(__xa_erase); 1337EXPORT_SYMBOL(__xa_erase);
1338 1338
1339/** 1339/**
1340 * xa_store() - Store this entry in the XArray. 1340 * xa_erase() - Erase this entry from the XArray.
1341 * @xa: XArray. 1341 * @xa: XArray.
1342 * @index: Index into array. 1342 * @index: Index of entry.
1343 * @entry: New entry.
1344 * @gfp: Memory allocation flags.
1345 * 1343 *
1346 * After this function returns, loads from this index will return @entry. 1344 * This function is the equivalent of calling xa_store() with %NULL as
1347 * Storing into an existing multislot entry updates the entry of every index. 1345 * the third argument. The XArray does not need to allocate memory, so
1348 * The marks associated with @index are unaffected unless @entry is %NULL. 1346 * the user does not need to provide GFP flags.
1349 * 1347 *
1350 * Context: Process context. Takes and releases the xa_lock. May sleep 1348 * Context: Any context. Takes and releases the xa_lock.
1351 * if the @gfp flags permit. 1349 * Return: The entry which used to be at this index.
1352 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1353 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1354 * failed.
1355 */ 1350 */
1356void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) 1351void *xa_erase(struct xarray *xa, unsigned long index)
1357{ 1352{
1358 XA_STATE(xas, xa, index); 1353 void *entry;
1359 void *curr;
1360
1361 if (WARN_ON_ONCE(xa_is_internal(entry)))
1362 return XA_ERROR(-EINVAL);
1363 1354
1364 do { 1355 xa_lock(xa);
1365 xas_lock(&xas); 1356 entry = __xa_erase(xa, index);
1366 curr = xas_store(&xas, entry); 1357 xa_unlock(xa);
1367 if (xa_track_free(xa) && entry)
1368 xas_clear_mark(&xas, XA_FREE_MARK);
1369 xas_unlock(&xas);
1370 } while (xas_nomem(&xas, gfp));
1371 1358
1372 return xas_result(&xas, curr); 1359 return entry;
1373} 1360}
1374EXPORT_SYMBOL(xa_store); 1361EXPORT_SYMBOL(xa_erase);
1375 1362
1376/** 1363/**
1377 * __xa_store() - Store this entry in the XArray. 1364 * __xa_store() - Store this entry in the XArray.
@@ -1395,10 +1382,12 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1395 1382
1396 if (WARN_ON_ONCE(xa_is_internal(entry))) 1383 if (WARN_ON_ONCE(xa_is_internal(entry)))
1397 return XA_ERROR(-EINVAL); 1384 return XA_ERROR(-EINVAL);
1385 if (xa_track_free(xa) && !entry)
1386 entry = XA_ZERO_ENTRY;
1398 1387
1399 do { 1388 do {
1400 curr = xas_store(&xas, entry); 1389 curr = xas_store(&xas, entry);
1401 if (xa_track_free(xa) && entry) 1390 if (xa_track_free(xa))
1402 xas_clear_mark(&xas, XA_FREE_MARK); 1391 xas_clear_mark(&xas, XA_FREE_MARK);
1403 } while (__xas_nomem(&xas, gfp)); 1392 } while (__xas_nomem(&xas, gfp));
1404 1393
@@ -1407,45 +1396,33 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1407EXPORT_SYMBOL(__xa_store); 1396EXPORT_SYMBOL(__xa_store);
1408 1397
1409/** 1398/**
1410 * xa_cmpxchg() - Conditionally replace an entry in the XArray. 1399 * xa_store() - Store this entry in the XArray.
1411 * @xa: XArray. 1400 * @xa: XArray.
1412 * @index: Index into array. 1401 * @index: Index into array.
1413 * @old: Old value to test against. 1402 * @entry: New entry.
1414 * @entry: New value to place in array.
1415 * @gfp: Memory allocation flags. 1403 * @gfp: Memory allocation flags.
1416 * 1404 *
1417 * If the entry at @index is the same as @old, replace it with @entry. 1405 * After this function returns, loads from this index will return @entry.
1418 * If the return value is equal to @old, then the exchange was successful. 1406 * Storing into an existing multislot entry updates the entry of every index.
1407 * The marks associated with @index are unaffected unless @entry is %NULL.
1419 * 1408 *
1420 * Context: Process context. Takes and releases the xa_lock. May sleep 1409 * Context: Any context. Takes and releases the xa_lock.
1421 * if the @gfp flags permit. 1410 * May sleep if the @gfp flags permit.
1422 * Return: The old value at this index or xa_err() if an error happened. 1411 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1412 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1413 * failed.
1423 */ 1414 */
1424void *xa_cmpxchg(struct xarray *xa, unsigned long index, 1415void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1425 void *old, void *entry, gfp_t gfp)
1426{ 1416{
1427 XA_STATE(xas, xa, index);
1428 void *curr; 1417 void *curr;
1429 1418
1430 if (WARN_ON_ONCE(xa_is_internal(entry))) 1419 xa_lock(xa);
1431 return XA_ERROR(-EINVAL); 1420 curr = __xa_store(xa, index, entry, gfp);
1432 1421 xa_unlock(xa);
1433 do {
1434 xas_lock(&xas);
1435 curr = xas_load(&xas);
1436 if (curr == XA_ZERO_ENTRY)
1437 curr = NULL;
1438 if (curr == old) {
1439 xas_store(&xas, entry);
1440 if (xa_track_free(xa) && entry)
1441 xas_clear_mark(&xas, XA_FREE_MARK);
1442 }
1443 xas_unlock(&xas);
1444 } while (xas_nomem(&xas, gfp));
1445 1422
1446 return xas_result(&xas, curr); 1423 return curr;
1447} 1424}
1448EXPORT_SYMBOL(xa_cmpxchg); 1425EXPORT_SYMBOL(xa_store);
1449 1426
1450/** 1427/**
1451 * __xa_cmpxchg() - Store this entry in the XArray. 1428 * __xa_cmpxchg() - Store this entry in the XArray.
@@ -1471,6 +1448,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1471 1448
1472 if (WARN_ON_ONCE(xa_is_internal(entry))) 1449 if (WARN_ON_ONCE(xa_is_internal(entry)))
1473 return XA_ERROR(-EINVAL); 1450 return XA_ERROR(-EINVAL);
1451 if (xa_track_free(xa) && !entry)
1452 entry = XA_ZERO_ENTRY;
1474 1453
1475 do { 1454 do {
1476 curr = xas_load(&xas); 1455 curr = xas_load(&xas);
@@ -1478,7 +1457,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1478 curr = NULL; 1457 curr = NULL;
1479 if (curr == old) { 1458 if (curr == old) {
1480 xas_store(&xas, entry); 1459 xas_store(&xas, entry);
1481 if (xa_track_free(xa) && entry) 1460 if (xa_track_free(xa))
1482 xas_clear_mark(&xas, XA_FREE_MARK); 1461 xas_clear_mark(&xas, XA_FREE_MARK);
1483 } 1462 }
1484 } while (__xas_nomem(&xas, gfp)); 1463 } while (__xas_nomem(&xas, gfp));
@@ -1488,7 +1467,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1488EXPORT_SYMBOL(__xa_cmpxchg); 1467EXPORT_SYMBOL(__xa_cmpxchg);
1489 1468
1490/** 1469/**
1491 * xa_reserve() - Reserve this index in the XArray. 1470 * __xa_reserve() - Reserve this index in the XArray.
1492 * @xa: XArray. 1471 * @xa: XArray.
1493 * @index: Index into array. 1472 * @index: Index into array.
1494 * @gfp: Memory allocation flags. 1473 * @gfp: Memory allocation flags.
@@ -1496,33 +1475,32 @@ EXPORT_SYMBOL(__xa_cmpxchg);
1496 * Ensures there is somewhere to store an entry at @index in the array. 1475 * Ensures there is somewhere to store an entry at @index in the array.
1497 * If there is already something stored at @index, this function does 1476 * If there is already something stored at @index, this function does
1498 * nothing. If there was nothing there, the entry is marked as reserved. 1477 * nothing. If there was nothing there, the entry is marked as reserved.
1499 * Loads from @index will continue to see a %NULL pointer until a 1478 * Loading from a reserved entry returns a %NULL pointer.
1500 * subsequent store to @index.
1501 * 1479 *
1502 * If you do not use the entry that you have reserved, call xa_release() 1480 * If you do not use the entry that you have reserved, call xa_release()
1503 * or xa_erase() to free any unnecessary memory. 1481 * or xa_erase() to free any unnecessary memory.
1504 * 1482 *
1505 * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe 1483 * Context: Any context. Expects the xa_lock to be held on entry. May
1506 * if specified in XArray flags. May sleep if the @gfp flags permit. 1484 * release the lock, sleep and reacquire the lock if the @gfp flags permit.
1507 * Return: 0 if the reservation succeeded or -ENOMEM if it failed. 1485 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1508 */ 1486 */
1509int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) 1487int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
1510{ 1488{
1511 XA_STATE(xas, xa, index); 1489 XA_STATE(xas, xa, index);
1512 unsigned int lock_type = xa_lock_type(xa);
1513 void *curr; 1490 void *curr;
1514 1491
1515 do { 1492 do {
1516 xas_lock_type(&xas, lock_type);
1517 curr = xas_load(&xas); 1493 curr = xas_load(&xas);
1518 if (!curr) 1494 if (!curr) {
1519 xas_store(&xas, XA_ZERO_ENTRY); 1495 xas_store(&xas, XA_ZERO_ENTRY);
1520 xas_unlock_type(&xas, lock_type); 1496 if (xa_track_free(xa))
1521 } while (xas_nomem(&xas, gfp)); 1497 xas_clear_mark(&xas, XA_FREE_MARK);
1498 }
1499 } while (__xas_nomem(&xas, gfp));
1522 1500
1523 return xas_error(&xas); 1501 return xas_error(&xas);
1524} 1502}
1525EXPORT_SYMBOL(xa_reserve); 1503EXPORT_SYMBOL(__xa_reserve);
1526 1504
1527#ifdef CONFIG_XARRAY_MULTI 1505#ifdef CONFIG_XARRAY_MULTI
1528static void xas_set_range(struct xa_state *xas, unsigned long first, 1506static void xas_set_range(struct xa_state *xas, unsigned long first,
@@ -1587,8 +1565,9 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
1587 do { 1565 do {
1588 xas_lock(&xas); 1566 xas_lock(&xas);
1589 if (entry) { 1567 if (entry) {
1590 unsigned int order = (last == ~0UL) ? 64 : 1568 unsigned int order = BITS_PER_LONG;
1591 ilog2(last + 1); 1569 if (last + 1)
1570 order = __ffs(last + 1);
1592 xas_set_order(&xas, last, order); 1571 xas_set_order(&xas, last, order);
1593 xas_create(&xas); 1572 xas_create(&xas);
1594 if (xas_error(&xas)) 1573 if (xas_error(&xas))
@@ -1662,7 +1641,7 @@ EXPORT_SYMBOL(__xa_alloc);
1662 * @index: Index of entry. 1641 * @index: Index of entry.
1663 * @mark: Mark number. 1642 * @mark: Mark number.
1664 * 1643 *
1665 * Attempting to set a mark on a NULL entry does not succeed. 1644 * Attempting to set a mark on a %NULL entry does not succeed.
1666 * 1645 *
1667 * Context: Any context. Expects xa_lock to be held on entry. 1646 * Context: Any context. Expects xa_lock to be held on entry.
1668 */ 1647 */
@@ -1674,7 +1653,7 @@ void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1674 if (entry) 1653 if (entry)
1675 xas_set_mark(&xas, mark); 1654 xas_set_mark(&xas, mark);
1676} 1655}
1677EXPORT_SYMBOL_GPL(__xa_set_mark); 1656EXPORT_SYMBOL(__xa_set_mark);
1678 1657
1679/** 1658/**
1680 * __xa_clear_mark() - Clear this mark on this entry while locked. 1659 * __xa_clear_mark() - Clear this mark on this entry while locked.
@@ -1692,7 +1671,7 @@ void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1692 if (entry) 1671 if (entry)
1693 xas_clear_mark(&xas, mark); 1672 xas_clear_mark(&xas, mark);
1694} 1673}
1695EXPORT_SYMBOL_GPL(__xa_clear_mark); 1674EXPORT_SYMBOL(__xa_clear_mark);
1696 1675
1697/** 1676/**
1698 * xa_get_mark() - Inquire whether this mark is set on this entry. 1677 * xa_get_mark() - Inquire whether this mark is set on this entry.
@@ -1732,7 +1711,7 @@ EXPORT_SYMBOL(xa_get_mark);
1732 * @index: Index of entry. 1711 * @index: Index of entry.
1733 * @mark: Mark number. 1712 * @mark: Mark number.
1734 * 1713 *
1735 * Attempting to set a mark on a NULL entry does not succeed. 1714 * Attempting to set a mark on a %NULL entry does not succeed.
1736 * 1715 *
1737 * Context: Process context. Takes and releases the xa_lock. 1716 * Context: Process context. Takes and releases the xa_lock.
1738 */ 1717 */
@@ -1829,6 +1808,8 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
1829 entry = xas_find_marked(&xas, max, filter); 1808 entry = xas_find_marked(&xas, max, filter);
1830 else 1809 else
1831 entry = xas_find(&xas, max); 1810 entry = xas_find(&xas, max);
1811 if (xas.xa_node == XAS_BOUNDS)
1812 break;
1832 if (xas.xa_shift) { 1813 if (xas.xa_shift) {
1833 if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) 1814 if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
1834 continue; 1815 continue;
@@ -1899,7 +1880,7 @@ static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
1899 * 1880 *
1900 * The @filter may be an XArray mark value, in which case entries which are 1881 * The @filter may be an XArray mark value, in which case entries which are
1901 * marked with that mark will be copied. It may also be %XA_PRESENT, in 1882 * marked with that mark will be copied. It may also be %XA_PRESENT, in
1902 * which case all entries which are not NULL will be copied. 1883 * which case all entries which are not %NULL will be copied.
1903 * 1884 *
1904 * The entries returned may not represent a snapshot of the XArray at a 1885 * The entries returned may not represent a snapshot of the XArray at a
1905 * moment in time. For example, if another thread stores to index 5, then 1886 * moment in time. For example, if another thread stores to index 5, then
diff --git a/mm/gup.c b/mm/gup.c
index aa43620a3270..8cb68a50dbdf 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -702,12 +702,11 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
702 if (!vma || start >= vma->vm_end) { 702 if (!vma || start >= vma->vm_end) {
703 vma = find_extend_vma(mm, start); 703 vma = find_extend_vma(mm, start);
704 if (!vma && in_gate_area(mm, start)) { 704 if (!vma && in_gate_area(mm, start)) {
705 int ret;
706 ret = get_gate_page(mm, start & PAGE_MASK, 705 ret = get_gate_page(mm, start & PAGE_MASK,
707 gup_flags, &vma, 706 gup_flags, &vma,
708 pages ? &pages[i] : NULL); 707 pages ? &pages[i] : NULL);
709 if (ret) 708 if (ret)
710 return i ? : ret; 709 goto out;
711 ctx.page_mask = 0; 710 ctx.page_mask = 0;
712 goto next_page; 711 goto next_page;
713 } 712 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 55478ab3c83b..622cced74fd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2350,7 +2350,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
2350 } 2350 }
2351} 2351}
2352 2352
2353static void freeze_page(struct page *page) 2353static void unmap_page(struct page *page)
2354{ 2354{
2355 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | 2355 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
2356 TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; 2356 TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
@@ -2365,7 +2365,7 @@ static void freeze_page(struct page *page)
2365 VM_BUG_ON_PAGE(!unmap_success, page); 2365 VM_BUG_ON_PAGE(!unmap_success, page);
2366} 2366}
2367 2367
2368static void unfreeze_page(struct page *page) 2368static void remap_page(struct page *page)
2369{ 2369{
2370 int i; 2370 int i;
2371 if (PageTransHuge(page)) { 2371 if (PageTransHuge(page)) {
@@ -2402,6 +2402,12 @@ static void __split_huge_page_tail(struct page *head, int tail,
2402 (1L << PG_unevictable) | 2402 (1L << PG_unevictable) |
2403 (1L << PG_dirty))); 2403 (1L << PG_dirty)));
2404 2404
2405 /* ->mapping in first tail page is compound_mapcount */
2406 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2407 page_tail);
2408 page_tail->mapping = head->mapping;
2409 page_tail->index = head->index + tail;
2410
2405 /* Page flags must be visible before we make the page non-compound. */ 2411 /* Page flags must be visible before we make the page non-compound. */
2406 smp_wmb(); 2412 smp_wmb();
2407 2413
@@ -2422,12 +2428,6 @@ static void __split_huge_page_tail(struct page *head, int tail,
2422 if (page_is_idle(head)) 2428 if (page_is_idle(head))
2423 set_page_idle(page_tail); 2429 set_page_idle(page_tail);
2424 2430
2425 /* ->mapping in first tail page is compound_mapcount */
2426 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2427 page_tail);
2428 page_tail->mapping = head->mapping;
2429
2430 page_tail->index = head->index + tail;
2431 page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 2431 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2432 2432
2433 /* 2433 /*
@@ -2439,12 +2439,11 @@ static void __split_huge_page_tail(struct page *head, int tail,
2439} 2439}
2440 2440
2441static void __split_huge_page(struct page *page, struct list_head *list, 2441static void __split_huge_page(struct page *page, struct list_head *list,
2442 unsigned long flags) 2442 pgoff_t end, unsigned long flags)
2443{ 2443{
2444 struct page *head = compound_head(page); 2444 struct page *head = compound_head(page);
2445 struct zone *zone = page_zone(head); 2445 struct zone *zone = page_zone(head);
2446 struct lruvec *lruvec; 2446 struct lruvec *lruvec;
2447 pgoff_t end = -1;
2448 int i; 2447 int i;
2449 2448
2450 lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); 2449 lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
@@ -2452,9 +2451,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2452 /* complete memcg works before add pages to LRU */ 2451 /* complete memcg works before add pages to LRU */
2453 mem_cgroup_split_huge_fixup(head); 2452 mem_cgroup_split_huge_fixup(head);
2454 2453
2455 if (!PageAnon(page))
2456 end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
2457
2458 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 2454 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
2459 __split_huge_page_tail(head, i, lruvec, list); 2455 __split_huge_page_tail(head, i, lruvec, list);
2460 /* Some pages can be beyond i_size: drop them from page cache */ 2456 /* Some pages can be beyond i_size: drop them from page cache */
@@ -2483,7 +2479,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2483 2479
2484 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2480 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2485 2481
2486 unfreeze_page(head); 2482 remap_page(head);
2487 2483
2488 for (i = 0; i < HPAGE_PMD_NR; i++) { 2484 for (i = 0; i < HPAGE_PMD_NR; i++) {
2489 struct page *subpage = head + i; 2485 struct page *subpage = head + i;
@@ -2626,6 +2622,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2626 int count, mapcount, extra_pins, ret; 2622 int count, mapcount, extra_pins, ret;
2627 bool mlocked; 2623 bool mlocked;
2628 unsigned long flags; 2624 unsigned long flags;
2625 pgoff_t end;
2629 2626
2630 VM_BUG_ON_PAGE(is_huge_zero_page(page), page); 2627 VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
2631 VM_BUG_ON_PAGE(!PageLocked(page), page); 2628 VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -2648,6 +2645,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2648 ret = -EBUSY; 2645 ret = -EBUSY;
2649 goto out; 2646 goto out;
2650 } 2647 }
2648 end = -1;
2651 mapping = NULL; 2649 mapping = NULL;
2652 anon_vma_lock_write(anon_vma); 2650 anon_vma_lock_write(anon_vma);
2653 } else { 2651 } else {
@@ -2661,10 +2659,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2661 2659
2662 anon_vma = NULL; 2660 anon_vma = NULL;
2663 i_mmap_lock_read(mapping); 2661 i_mmap_lock_read(mapping);
2662
2663 /*
2664 *__split_huge_page() may need to trim off pages beyond EOF:
2665 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2666 * which cannot be nested inside the page tree lock. So note
2667 * end now: i_size itself may be changed at any moment, but
2668 * head page lock is good enough to serialize the trimming.
2669 */
2670 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2664 } 2671 }
2665 2672
2666 /* 2673 /*
2667 * Racy check if we can split the page, before freeze_page() will 2674 * Racy check if we can split the page, before unmap_page() will
2668 * split PMDs 2675 * split PMDs
2669 */ 2676 */
2670 if (!can_split_huge_page(head, &extra_pins)) { 2677 if (!can_split_huge_page(head, &extra_pins)) {
@@ -2673,7 +2680,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2673 } 2680 }
2674 2681
2675 mlocked = PageMlocked(page); 2682 mlocked = PageMlocked(page);
2676 freeze_page(head); 2683 unmap_page(head);
2677 VM_BUG_ON_PAGE(compound_mapcount(head), head); 2684 VM_BUG_ON_PAGE(compound_mapcount(head), head);
2678 2685
2679 /* Make sure the page is not on per-CPU pagevec as it takes pin */ 2686 /* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -2707,7 +2714,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2707 if (mapping) 2714 if (mapping)
2708 __dec_node_page_state(page, NR_SHMEM_THPS); 2715 __dec_node_page_state(page, NR_SHMEM_THPS);
2709 spin_unlock(&pgdata->split_queue_lock); 2716 spin_unlock(&pgdata->split_queue_lock);
2710 __split_huge_page(page, list, flags); 2717 __split_huge_page(page, list, end, flags);
2711 if (PageSwapCache(head)) { 2718 if (PageSwapCache(head)) {
2712 swp_entry_t entry = { .val = page_private(head) }; 2719 swp_entry_t entry = { .val = page_private(head) };
2713 2720
@@ -2727,7 +2734,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2727fail: if (mapping) 2734fail: if (mapping)
2728 xa_unlock(&mapping->i_pages); 2735 xa_unlock(&mapping->i_pages);
2729 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2736 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2730 unfreeze_page(head); 2737 remap_page(head);
2731 ret = -EBUSY; 2738 ret = -EBUSY;
2732 } 2739 }
2733 2740
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7f2a28ab46d5..705a3e9cc910 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4080,7 +4080,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4080 4080
4081 /* fallback to copy_from_user outside mmap_sem */ 4081 /* fallback to copy_from_user outside mmap_sem */
4082 if (unlikely(ret)) { 4082 if (unlikely(ret)) {
4083 ret = -EFAULT; 4083 ret = -ENOENT;
4084 *pagep = page; 4084 *pagep = page;
4085 /* don't free the page */ 4085 /* don't free the page */
4086 goto out; 4086 goto out;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c13625c1ad5e..8e2ff195ecb3 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1287 * collapse_shmem - collapse small tmpfs/shmem pages into huge one. 1287 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
1288 * 1288 *
1289 * Basic scheme is simple, details are more complex: 1289 * Basic scheme is simple, details are more complex:
1290 * - allocate and freeze a new huge page; 1290 * - allocate and lock a new huge page;
1291 * - scan page cache replacing old pages with the new one 1291 * - scan page cache replacing old pages with the new one
1292 * + swap in pages if necessary; 1292 * + swap in pages if necessary;
1293 * + fill in gaps; 1293 * + fill in gaps;
@@ -1295,11 +1295,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1295 * - if replacing succeeds: 1295 * - if replacing succeeds:
1296 * + copy data over; 1296 * + copy data over;
1297 * + free old pages; 1297 * + free old pages;
1298 * + unfreeze huge page; 1298 * + unlock huge page;
1299 * - if replacing failed; 1299 * - if replacing failed;
1300 * + put all pages back and unfreeze them; 1300 * + put all pages back and unfreeze them;
1301 * + restore gaps in the page cache; 1301 * + restore gaps in the page cache;
1302 * + free huge page; 1302 * + unlock and free huge page;
1303 */ 1303 */
1304static void collapse_shmem(struct mm_struct *mm, 1304static void collapse_shmem(struct mm_struct *mm,
1305 struct address_space *mapping, pgoff_t start, 1305 struct address_space *mapping, pgoff_t start,
@@ -1329,19 +1329,6 @@ static void collapse_shmem(struct mm_struct *mm,
1329 goto out; 1329 goto out;
1330 } 1330 }
1331 1331
1332 new_page->index = start;
1333 new_page->mapping = mapping;
1334 __SetPageSwapBacked(new_page);
1335 __SetPageLocked(new_page);
1336 BUG_ON(!page_ref_freeze(new_page, 1));
1337
1338 /*
1339 * At this point the new_page is 'frozen' (page_count() is zero),
1340 * locked and not up-to-date. It's safe to insert it into the page
1341 * cache, because nobody would be able to map it or use it in other
1342 * way until we unfreeze it.
1343 */
1344
1345 /* This will be less messy when we use multi-index entries */ 1332 /* This will be less messy when we use multi-index entries */
1346 do { 1333 do {
1347 xas_lock_irq(&xas); 1334 xas_lock_irq(&xas);
@@ -1349,19 +1336,44 @@ static void collapse_shmem(struct mm_struct *mm,
1349 if (!xas_error(&xas)) 1336 if (!xas_error(&xas))
1350 break; 1337 break;
1351 xas_unlock_irq(&xas); 1338 xas_unlock_irq(&xas);
1352 if (!xas_nomem(&xas, GFP_KERNEL)) 1339 if (!xas_nomem(&xas, GFP_KERNEL)) {
1340 mem_cgroup_cancel_charge(new_page, memcg, true);
1341 result = SCAN_FAIL;
1353 goto out; 1342 goto out;
1343 }
1354 } while (1); 1344 } while (1);
1355 1345
1346 __SetPageLocked(new_page);
1347 __SetPageSwapBacked(new_page);
1348 new_page->index = start;
1349 new_page->mapping = mapping;
1350
1351 /*
1352 * At this point the new_page is locked and not up-to-date.
1353 * It's safe to insert it into the page cache, because nobody would
1354 * be able to map it or use it in another way until we unlock it.
1355 */
1356
1356 xas_set(&xas, start); 1357 xas_set(&xas, start);
1357 for (index = start; index < end; index++) { 1358 for (index = start; index < end; index++) {
1358 struct page *page = xas_next(&xas); 1359 struct page *page = xas_next(&xas);
1359 1360
1360 VM_BUG_ON(index != xas.xa_index); 1361 VM_BUG_ON(index != xas.xa_index);
1361 if (!page) { 1362 if (!page) {
1363 /*
1364 * Stop if extent has been truncated or hole-punched,
1365 * and is now completely empty.
1366 */
1367 if (index == start) {
1368 if (!xas_next_entry(&xas, end - 1)) {
1369 result = SCAN_TRUNCATED;
1370 goto xa_locked;
1371 }
1372 xas_set(&xas, index);
1373 }
1362 if (!shmem_charge(mapping->host, 1)) { 1374 if (!shmem_charge(mapping->host, 1)) {
1363 result = SCAN_FAIL; 1375 result = SCAN_FAIL;
1364 break; 1376 goto xa_locked;
1365 } 1377 }
1366 xas_store(&xas, new_page + (index % HPAGE_PMD_NR)); 1378 xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
1367 nr_none++; 1379 nr_none++;
@@ -1376,13 +1388,12 @@ static void collapse_shmem(struct mm_struct *mm,
1376 result = SCAN_FAIL; 1388 result = SCAN_FAIL;
1377 goto xa_unlocked; 1389 goto xa_unlocked;
1378 } 1390 }
1379 xas_lock_irq(&xas);
1380 xas_set(&xas, index);
1381 } else if (trylock_page(page)) { 1391 } else if (trylock_page(page)) {
1382 get_page(page); 1392 get_page(page);
1393 xas_unlock_irq(&xas);
1383 } else { 1394 } else {
1384 result = SCAN_PAGE_LOCK; 1395 result = SCAN_PAGE_LOCK;
1385 break; 1396 goto xa_locked;
1386 } 1397 }
1387 1398
1388 /* 1399 /*
@@ -1391,17 +1402,24 @@ static void collapse_shmem(struct mm_struct *mm,
1391 */ 1402 */
1392 VM_BUG_ON_PAGE(!PageLocked(page), page); 1403 VM_BUG_ON_PAGE(!PageLocked(page), page);
1393 VM_BUG_ON_PAGE(!PageUptodate(page), page); 1404 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1394 VM_BUG_ON_PAGE(PageTransCompound(page), page); 1405
1406 /*
1407 * If file was truncated then extended, or hole-punched, before
1408 * we locked the first page, then a THP might be there already.
1409 */
1410 if (PageTransCompound(page)) {
1411 result = SCAN_PAGE_COMPOUND;
1412 goto out_unlock;
1413 }
1395 1414
1396 if (page_mapping(page) != mapping) { 1415 if (page_mapping(page) != mapping) {
1397 result = SCAN_TRUNCATED; 1416 result = SCAN_TRUNCATED;
1398 goto out_unlock; 1417 goto out_unlock;
1399 } 1418 }
1400 xas_unlock_irq(&xas);
1401 1419
1402 if (isolate_lru_page(page)) { 1420 if (isolate_lru_page(page)) {
1403 result = SCAN_DEL_PAGE_LRU; 1421 result = SCAN_DEL_PAGE_LRU;
1404 goto out_isolate_failed; 1422 goto out_unlock;
1405 } 1423 }
1406 1424
1407 if (page_mapped(page)) 1425 if (page_mapped(page))
@@ -1421,7 +1439,9 @@ static void collapse_shmem(struct mm_struct *mm,
1421 */ 1439 */
1422 if (!page_ref_freeze(page, 3)) { 1440 if (!page_ref_freeze(page, 3)) {
1423 result = SCAN_PAGE_COUNT; 1441 result = SCAN_PAGE_COUNT;
1424 goto out_lru; 1442 xas_unlock_irq(&xas);
1443 putback_lru_page(page);
1444 goto out_unlock;
1425 } 1445 }
1426 1446
1427 /* 1447 /*
@@ -1433,71 +1453,74 @@ static void collapse_shmem(struct mm_struct *mm,
1433 /* Finally, replace with the new page. */ 1453 /* Finally, replace with the new page. */
1434 xas_store(&xas, new_page + (index % HPAGE_PMD_NR)); 1454 xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
1435 continue; 1455 continue;
1436out_lru:
1437 xas_unlock_irq(&xas);
1438 putback_lru_page(page);
1439out_isolate_failed:
1440 unlock_page(page);
1441 put_page(page);
1442 goto xa_unlocked;
1443out_unlock: 1456out_unlock:
1444 unlock_page(page); 1457 unlock_page(page);
1445 put_page(page); 1458 put_page(page);
1446 break; 1459 goto xa_unlocked;
1447 } 1460 }
1448 xas_unlock_irq(&xas);
1449 1461
1462 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1463 if (nr_none) {
1464 struct zone *zone = page_zone(new_page);
1465
1466 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1467 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1468 }
1469
1470xa_locked:
1471 xas_unlock_irq(&xas);
1450xa_unlocked: 1472xa_unlocked:
1473
1451 if (result == SCAN_SUCCEED) { 1474 if (result == SCAN_SUCCEED) {
1452 struct page *page, *tmp; 1475 struct page *page, *tmp;
1453 struct zone *zone = page_zone(new_page);
1454 1476
1455 /* 1477 /*
1456 * Replacing old pages with new one has succeeded, now we 1478 * Replacing old pages with new one has succeeded, now we
1457 * need to copy the content and free the old pages. 1479 * need to copy the content and free the old pages.
1458 */ 1480 */
1481 index = start;
1459 list_for_each_entry_safe(page, tmp, &pagelist, lru) { 1482 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1483 while (index < page->index) {
1484 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1485 index++;
1486 }
1460 copy_highpage(new_page + (page->index % HPAGE_PMD_NR), 1487 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1461 page); 1488 page);
1462 list_del(&page->lru); 1489 list_del(&page->lru);
1463 unlock_page(page);
1464 page_ref_unfreeze(page, 1);
1465 page->mapping = NULL; 1490 page->mapping = NULL;
1491 page_ref_unfreeze(page, 1);
1466 ClearPageActive(page); 1492 ClearPageActive(page);
1467 ClearPageUnevictable(page); 1493 ClearPageUnevictable(page);
1494 unlock_page(page);
1468 put_page(page); 1495 put_page(page);
1496 index++;
1469 } 1497 }
1470 1498 while (index < end) {
1471 local_irq_disable(); 1499 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1472 __inc_node_page_state(new_page, NR_SHMEM_THPS); 1500 index++;
1473 if (nr_none) {
1474 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1475 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1476 } 1501 }
1477 local_irq_enable();
1478 1502
1479 /*
1480 * Remove pte page tables, so we can re-fault
1481 * the page as huge.
1482 */
1483 retract_page_tables(mapping, start);
1484
1485 /* Everything is ready, let's unfreeze the new_page */
1486 set_page_dirty(new_page);
1487 SetPageUptodate(new_page); 1503 SetPageUptodate(new_page);
1488 page_ref_unfreeze(new_page, HPAGE_PMD_NR); 1504 page_ref_add(new_page, HPAGE_PMD_NR - 1);
1505 set_page_dirty(new_page);
1489 mem_cgroup_commit_charge(new_page, memcg, false, true); 1506 mem_cgroup_commit_charge(new_page, memcg, false, true);
1490 lru_cache_add_anon(new_page); 1507 lru_cache_add_anon(new_page);
1491 unlock_page(new_page);
1492 1508
1509 /*
1510 * Remove pte page tables, so we can re-fault the page as huge.
1511 */
1512 retract_page_tables(mapping, start);
1493 *hpage = NULL; 1513 *hpage = NULL;
1494 1514
1495 khugepaged_pages_collapsed++; 1515 khugepaged_pages_collapsed++;
1496 } else { 1516 } else {
1497 struct page *page; 1517 struct page *page;
1518
1498 /* Something went wrong: roll back page cache changes */ 1519 /* Something went wrong: roll back page cache changes */
1499 shmem_uncharge(mapping->host, nr_none);
1500 xas_lock_irq(&xas); 1520 xas_lock_irq(&xas);
1521 mapping->nrpages -= nr_none;
1522 shmem_uncharge(mapping->host, nr_none);
1523
1501 xas_set(&xas, start); 1524 xas_set(&xas, start);
1502 xas_for_each(&xas, page, end - 1) { 1525 xas_for_each(&xas, page, end - 1) {
1503 page = list_first_entry_or_null(&pagelist, 1526 page = list_first_entry_or_null(&pagelist,
@@ -1519,19 +1542,18 @@ xa_unlocked:
1519 xas_store(&xas, page); 1542 xas_store(&xas, page);
1520 xas_pause(&xas); 1543 xas_pause(&xas);
1521 xas_unlock_irq(&xas); 1544 xas_unlock_irq(&xas);
1522 putback_lru_page(page);
1523 unlock_page(page); 1545 unlock_page(page);
1546 putback_lru_page(page);
1524 xas_lock_irq(&xas); 1547 xas_lock_irq(&xas);
1525 } 1548 }
1526 VM_BUG_ON(nr_none); 1549 VM_BUG_ON(nr_none);
1527 xas_unlock_irq(&xas); 1550 xas_unlock_irq(&xas);
1528 1551
1529 /* Unfreeze new_page, caller would take care about freeing it */
1530 page_ref_unfreeze(new_page, 1);
1531 mem_cgroup_cancel_charge(new_page, memcg, true); 1552 mem_cgroup_cancel_charge(new_page, memcg, true);
1532 unlock_page(new_page);
1533 new_page->mapping = NULL; 1553 new_page->mapping = NULL;
1534 } 1554 }
1555
1556 unlock_page(new_page);
1535out: 1557out:
1536 VM_BUG_ON(!list_empty(&pagelist)); 1558 VM_BUG_ON(!list_empty(&pagelist));
1537 /* TODO: tracepoints */ 1559 /* TODO: tracepoints */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6847177dc4a1..2ec9cc407216 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5813,8 +5813,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
5813 unsigned long size) 5813 unsigned long size)
5814{ 5814{
5815 struct pglist_data *pgdat = zone->zone_pgdat; 5815 struct pglist_data *pgdat = zone->zone_pgdat;
5816 int zone_idx = zone_idx(zone) + 1;
5816 5817
5817 pgdat->nr_zones = zone_idx(zone) + 1; 5818 if (zone_idx > pgdat->nr_zones)
5819 pgdat->nr_zones = zone_idx;
5818 5820
5819 zone->zone_start_pfn = zone_start_pfn; 5821 zone->zone_start_pfn = zone_start_pfn;
5820 5822
diff --git a/mm/rmap.c b/mm/rmap.c
index 1e79fac3186b..85b7f9423352 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1627,16 +1627,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1627 address + PAGE_SIZE); 1627 address + PAGE_SIZE);
1628 } else { 1628 } else {
1629 /* 1629 /*
1630 * We should not need to notify here as we reach this 1630 * This is a locked file-backed page, thus it cannot
1631 * case only from freeze_page() itself only call from 1631 * be removed from the page cache and replaced by a new
1632 * split_huge_page_to_list() so everything below must 1632 * page before mmu_notifier_invalidate_range_end, so no
1633 * be true:
1634 * - page is not anonymous
1635 * - page is locked
1636 *
1637 * So as it is a locked file back page thus it can not
1638 * be remove from the page cache and replace by a new
1639 * page before mmu_notifier_invalidate_range_end so no
1640 * concurrent thread might update its page table to 1633 * concurrent thread might update its page table to
1641 * point at new page while a device still is using this 1634 * point at new page while a device still is using this
1642 * page. 1635 * page.
diff --git a/mm/shmem.c b/mm/shmem.c
index d44991ea5ed4..cddc72ac44d8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -297,12 +297,14 @@ bool shmem_charge(struct inode *inode, long pages)
297 if (!shmem_inode_acct_block(inode, pages)) 297 if (!shmem_inode_acct_block(inode, pages))
298 return false; 298 return false;
299 299
300 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
301 inode->i_mapping->nrpages += pages;
302
300 spin_lock_irqsave(&info->lock, flags); 303 spin_lock_irqsave(&info->lock, flags);
301 info->alloced += pages; 304 info->alloced += pages;
302 inode->i_blocks += pages * BLOCKS_PER_PAGE; 305 inode->i_blocks += pages * BLOCKS_PER_PAGE;
303 shmem_recalc_inode(inode); 306 shmem_recalc_inode(inode);
304 spin_unlock_irqrestore(&info->lock, flags); 307 spin_unlock_irqrestore(&info->lock, flags);
305 inode->i_mapping->nrpages += pages;
306 308
307 return true; 309 return true;
308} 310}
@@ -312,6 +314,8 @@ void shmem_uncharge(struct inode *inode, long pages)
312 struct shmem_inode_info *info = SHMEM_I(inode); 314 struct shmem_inode_info *info = SHMEM_I(inode);
313 unsigned long flags; 315 unsigned long flags;
314 316
317 /* nrpages adjustment done by __delete_from_page_cache() or caller */
318
315 spin_lock_irqsave(&info->lock, flags); 319 spin_lock_irqsave(&info->lock, flags);
316 info->alloced -= pages; 320 info->alloced -= pages;
317 inode->i_blocks -= pages * BLOCKS_PER_PAGE; 321 inode->i_blocks -= pages * BLOCKS_PER_PAGE;
@@ -1509,11 +1513,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1509{ 1513{
1510 struct page *oldpage, *newpage; 1514 struct page *oldpage, *newpage;
1511 struct address_space *swap_mapping; 1515 struct address_space *swap_mapping;
1516 swp_entry_t entry;
1512 pgoff_t swap_index; 1517 pgoff_t swap_index;
1513 int error; 1518 int error;
1514 1519
1515 oldpage = *pagep; 1520 oldpage = *pagep;
1516 swap_index = page_private(oldpage); 1521 entry.val = page_private(oldpage);
1522 swap_index = swp_offset(entry);
1517 swap_mapping = page_mapping(oldpage); 1523 swap_mapping = page_mapping(oldpage);
1518 1524
1519 /* 1525 /*
@@ -1532,7 +1538,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1532 __SetPageLocked(newpage); 1538 __SetPageLocked(newpage);
1533 __SetPageSwapBacked(newpage); 1539 __SetPageSwapBacked(newpage);
1534 SetPageUptodate(newpage); 1540 SetPageUptodate(newpage);
1535 set_page_private(newpage, swap_index); 1541 set_page_private(newpage, entry.val);
1536 SetPageSwapCache(newpage); 1542 SetPageSwapCache(newpage);
1537 1543
1538 /* 1544 /*
@@ -2214,6 +2220,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2214 struct page *page; 2220 struct page *page;
2215 pte_t _dst_pte, *dst_pte; 2221 pte_t _dst_pte, *dst_pte;
2216 int ret; 2222 int ret;
2223 pgoff_t offset, max_off;
2217 2224
2218 ret = -ENOMEM; 2225 ret = -ENOMEM;
2219 if (!shmem_inode_acct_block(inode, 1)) 2226 if (!shmem_inode_acct_block(inode, 1))
@@ -2236,7 +2243,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2236 *pagep = page; 2243 *pagep = page;
2237 shmem_inode_unacct_blocks(inode, 1); 2244 shmem_inode_unacct_blocks(inode, 1);
2238 /* don't free the page */ 2245 /* don't free the page */
2239 return -EFAULT; 2246 return -ENOENT;
2240 } 2247 }
2241 } else { /* mfill_zeropage_atomic */ 2248 } else { /* mfill_zeropage_atomic */
2242 clear_highpage(page); 2249 clear_highpage(page);
@@ -2251,6 +2258,12 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2251 __SetPageSwapBacked(page); 2258 __SetPageSwapBacked(page);
2252 __SetPageUptodate(page); 2259 __SetPageUptodate(page);
2253 2260
2261 ret = -EFAULT;
2262 offset = linear_page_index(dst_vma, dst_addr);
2263 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2264 if (unlikely(offset >= max_off))
2265 goto out_release;
2266
2254 ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false); 2267 ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
2255 if (ret) 2268 if (ret)
2256 goto out_release; 2269 goto out_release;
@@ -2265,9 +2278,25 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2265 _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 2278 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2266 if (dst_vma->vm_flags & VM_WRITE) 2279 if (dst_vma->vm_flags & VM_WRITE)
2267 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 2280 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2281 else {
2282 /*
2283 * We don't set the pte dirty if the vma has no
2284 * VM_WRITE permission, so mark the page dirty or it
2285 * could be freed from under us. We could do it
2286 * unconditionally before unlock_page(), but doing it
2287 * only if VM_WRITE is not set is faster.
2288 */
2289 set_page_dirty(page);
2290 }
2268 2291
2269 ret = -EEXIST;
2270 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 2292 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2293
2294 ret = -EFAULT;
2295 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2296 if (unlikely(offset >= max_off))
2297 goto out_release_uncharge_unlock;
2298
2299 ret = -EEXIST;
2271 if (!pte_none(*dst_pte)) 2300 if (!pte_none(*dst_pte))
2272 goto out_release_uncharge_unlock; 2301 goto out_release_uncharge_unlock;
2273 2302
@@ -2285,13 +2314,15 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2285 2314
2286 /* No need to invalidate - it was non-present before */ 2315 /* No need to invalidate - it was non-present before */
2287 update_mmu_cache(dst_vma, dst_addr, dst_pte); 2316 update_mmu_cache(dst_vma, dst_addr, dst_pte);
2288 unlock_page(page);
2289 pte_unmap_unlock(dst_pte, ptl); 2317 pte_unmap_unlock(dst_pte, ptl);
2318 unlock_page(page);
2290 ret = 0; 2319 ret = 0;
2291out: 2320out:
2292 return ret; 2321 return ret;
2293out_release_uncharge_unlock: 2322out_release_uncharge_unlock:
2294 pte_unmap_unlock(dst_pte, ptl); 2323 pte_unmap_unlock(dst_pte, ptl);
2324 ClearPageDirty(page);
2325 delete_from_page_cache(page);
2295out_release_uncharge: 2326out_release_uncharge:
2296 mem_cgroup_cancel_charge(page, memcg, false); 2327 mem_cgroup_cancel_charge(page, memcg, false);
2297out_release: 2328out_release:
diff --git a/mm/truncate.c b/mm/truncate.c
index 45d68e90b703..798e7ccfb030 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -517,9 +517,13 @@ void truncate_inode_pages_final(struct address_space *mapping)
517 */ 517 */
518 xa_lock_irq(&mapping->i_pages); 518 xa_lock_irq(&mapping->i_pages);
519 xa_unlock_irq(&mapping->i_pages); 519 xa_unlock_irq(&mapping->i_pages);
520
521 truncate_inode_pages(mapping, 0);
522 } 520 }
521
522 /*
523 * Cleancache needs notification even if there are no pages or shadow
524 * entries.
525 */
526 truncate_inode_pages(mapping, 0);
523} 527}
524EXPORT_SYMBOL(truncate_inode_pages_final); 528EXPORT_SYMBOL(truncate_inode_pages_final);
525 529
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 5029f241908f..458acda96f20 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -33,6 +33,8 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
33 void *page_kaddr; 33 void *page_kaddr;
34 int ret; 34 int ret;
35 struct page *page; 35 struct page *page;
36 pgoff_t offset, max_off;
37 struct inode *inode;
36 38
37 if (!*pagep) { 39 if (!*pagep) {
38 ret = -ENOMEM; 40 ret = -ENOMEM;
@@ -48,7 +50,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
48 50
49 /* fallback to copy_from_user outside mmap_sem */ 51 /* fallback to copy_from_user outside mmap_sem */
50 if (unlikely(ret)) { 52 if (unlikely(ret)) {
51 ret = -EFAULT; 53 ret = -ENOENT;
52 *pagep = page; 54 *pagep = page;
53 /* don't free the page */ 55 /* don't free the page */
54 goto out; 56 goto out;
@@ -73,8 +75,17 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
73 if (dst_vma->vm_flags & VM_WRITE) 75 if (dst_vma->vm_flags & VM_WRITE)
74 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 76 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
75 77
76 ret = -EEXIST;
77 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 78 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
79 if (dst_vma->vm_file) {
80 /* the shmem MAP_PRIVATE case requires checking the i_size */
81 inode = dst_vma->vm_file->f_inode;
82 offset = linear_page_index(dst_vma, dst_addr);
83 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
84 ret = -EFAULT;
85 if (unlikely(offset >= max_off))
86 goto out_release_uncharge_unlock;
87 }
88 ret = -EEXIST;
78 if (!pte_none(*dst_pte)) 89 if (!pte_none(*dst_pte))
79 goto out_release_uncharge_unlock; 90 goto out_release_uncharge_unlock;
80 91
@@ -108,11 +119,22 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm,
108 pte_t _dst_pte, *dst_pte; 119 pte_t _dst_pte, *dst_pte;
109 spinlock_t *ptl; 120 spinlock_t *ptl;
110 int ret; 121 int ret;
122 pgoff_t offset, max_off;
123 struct inode *inode;
111 124
112 _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr), 125 _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
113 dst_vma->vm_page_prot)); 126 dst_vma->vm_page_prot));
114 ret = -EEXIST;
115 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 127 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
128 if (dst_vma->vm_file) {
129 /* the shmem MAP_PRIVATE case requires checking the i_size */
130 inode = dst_vma->vm_file->f_inode;
131 offset = linear_page_index(dst_vma, dst_addr);
132 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
133 ret = -EFAULT;
134 if (unlikely(offset >= max_off))
135 goto out_unlock;
136 }
137 ret = -EEXIST;
116 if (!pte_none(*dst_pte)) 138 if (!pte_none(*dst_pte))
117 goto out_unlock; 139 goto out_unlock;
118 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 140 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
@@ -205,8 +227,9 @@ retry:
205 if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) 227 if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
206 goto out_unlock; 228 goto out_unlock;
207 /* 229 /*
208 * Only allow __mcopy_atomic_hugetlb on userfaultfd 230 * Check the vma is registered in uffd, this is
209 * registered ranges. 231 * required to enforce the VM_MAYWRITE check done at
232 * uffd registration time.
210 */ 233 */
211 if (!dst_vma->vm_userfaultfd_ctx.ctx) 234 if (!dst_vma->vm_userfaultfd_ctx.ctx)
212 goto out_unlock; 235 goto out_unlock;
@@ -274,7 +297,7 @@ retry:
274 297
275 cond_resched(); 298 cond_resched();
276 299
277 if (unlikely(err == -EFAULT)) { 300 if (unlikely(err == -ENOENT)) {
278 up_read(&dst_mm->mmap_sem); 301 up_read(&dst_mm->mmap_sem);
279 BUG_ON(!page); 302 BUG_ON(!page);
280 303
@@ -380,7 +403,17 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
380{ 403{
381 ssize_t err; 404 ssize_t err;
382 405
383 if (vma_is_anonymous(dst_vma)) { 406 /*
407 * The normal page fault path for a shmem will invoke the
408 * fault, fill the hole in the file and COW it right away. The
409 * result generates plain anonymous memory. So when we are
410 * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll
411 * generate anonymous memory directly without actually filling
412 * the hole. For the MAP_PRIVATE case the robustness check
413 * only happens in the pagetable (to verify it's still none)
414 * and not in the radix tree.
415 */
416 if (!(dst_vma->vm_flags & VM_SHARED)) {
384 if (!zeropage) 417 if (!zeropage)
385 err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, 418 err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
386 dst_addr, src_addr, page); 419 dst_addr, src_addr, page);
@@ -449,13 +482,9 @@ retry:
449 if (!dst_vma) 482 if (!dst_vma)
450 goto out_unlock; 483 goto out_unlock;
451 /* 484 /*
452 * Be strict and only allow __mcopy_atomic on userfaultfd 485 * Check the vma is registered in uffd, this is required to
453 * registered ranges to prevent userland errors going 486 * enforce the VM_MAYWRITE check done at uffd registration
454 * unnoticed. As far as the VM consistency is concerned, it 487 * time.
455 * would be perfectly safe to remove this check, but there's
456 * no useful usage for __mcopy_atomic ouside of userfaultfd
457 * registered ranges. This is after all why these are ioctls
458 * belonging to the userfaultfd and not syscalls.
459 */ 488 */
460 if (!dst_vma->vm_userfaultfd_ctx.ctx) 489 if (!dst_vma->vm_userfaultfd_ctx.ctx)
461 goto out_unlock; 490 goto out_unlock;
@@ -489,7 +518,8 @@ retry:
489 * dst_vma. 518 * dst_vma.
490 */ 519 */
491 err = -ENOMEM; 520 err = -ENOMEM;
492 if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma))) 521 if (!(dst_vma->vm_flags & VM_SHARED) &&
522 unlikely(anon_vma_prepare(dst_vma)))
493 goto out_unlock; 523 goto out_unlock;
494 524
495 while (src_addr < src_start + len) { 525 while (src_addr < src_start + len) {
@@ -530,7 +560,7 @@ retry:
530 src_addr, &page, zeropage); 560 src_addr, &page, zeropage);
531 cond_resched(); 561 cond_resched();
532 562
533 if (unlikely(err == -EFAULT)) { 563 if (unlikely(err == -ENOENT)) {
534 void *page_kaddr; 564 void *page_kaddr;
535 565
536 up_read(&dst_mm->mmap_sem); 566 up_read(&dst_mm->mmap_sem);
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 9f481cfdf77d..e8090f099eb8 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -352,19 +352,21 @@ out:
352 */ 352 */
353int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface) 353int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
354{ 354{
355 static const size_t tvlv_padding = sizeof(__be32);
355 struct batadv_elp_packet *elp_packet; 356 struct batadv_elp_packet *elp_packet;
356 unsigned char *elp_buff; 357 unsigned char *elp_buff;
357 u32 random_seqno; 358 u32 random_seqno;
358 size_t size; 359 size_t size;
359 int res = -ENOMEM; 360 int res = -ENOMEM;
360 361
361 size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN; 362 size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
362 hard_iface->bat_v.elp_skb = dev_alloc_skb(size); 363 hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
363 if (!hard_iface->bat_v.elp_skb) 364 if (!hard_iface->bat_v.elp_skb)
364 goto out; 365 goto out;
365 366
366 skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN); 367 skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
367 elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN); 368 elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
369 BATADV_ELP_HLEN + tvlv_padding);
368 elp_packet = (struct batadv_elp_packet *)elp_buff; 370 elp_packet = (struct batadv_elp_packet *)elp_buff;
369 371
370 elp_packet->packet_type = BATADV_ELP; 372 elp_packet->packet_type = BATADV_ELP;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0fddc17106bd..5b71a289d04f 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
275 kfree(entry); 275 kfree(entry);
276 276
277 packet = (struct batadv_frag_packet *)skb_out->data; 277 packet = (struct batadv_frag_packet *)skb_out->data;
278 size = ntohs(packet->total_size); 278 size = ntohs(packet->total_size) + hdr_size;
279 279
280 /* Make room for the rest of the fragments. */ 280 /* Make room for the rest of the fragments. */
281 if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { 281 if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2920e06a5403..04c19a37e500 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -102,12 +102,18 @@ struct br_tunnel_info {
102 struct metadata_dst *tunnel_dst; 102 struct metadata_dst *tunnel_dst;
103}; 103};
104 104
105/* private vlan flags */
106enum {
107 BR_VLFLAG_PER_PORT_STATS = BIT(0),
108};
109
105/** 110/**
106 * struct net_bridge_vlan - per-vlan entry 111 * struct net_bridge_vlan - per-vlan entry
107 * 112 *
108 * @vnode: rhashtable member 113 * @vnode: rhashtable member
109 * @vid: VLAN id 114 * @vid: VLAN id
110 * @flags: bridge vlan flags 115 * @flags: bridge vlan flags
116 * @priv_flags: private (in-kernel) bridge vlan flags
111 * @stats: per-cpu VLAN statistics 117 * @stats: per-cpu VLAN statistics
112 * @br: if MASTER flag set, this points to a bridge struct 118 * @br: if MASTER flag set, this points to a bridge struct
113 * @port: if MASTER flag unset, this points to a port struct 119 * @port: if MASTER flag unset, this points to a port struct
@@ -127,6 +133,7 @@ struct net_bridge_vlan {
127 struct rhash_head tnode; 133 struct rhash_head tnode;
128 u16 vid; 134 u16 vid;
129 u16 flags; 135 u16 flags;
136 u16 priv_flags;
130 struct br_vlan_stats __percpu *stats; 137 struct br_vlan_stats __percpu *stats;
131 union { 138 union {
132 struct net_bridge *br; 139 struct net_bridge *br;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 8c9297a01947..e84be08b8285 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -197,7 +197,7 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu)
197 v = container_of(rcu, struct net_bridge_vlan, rcu); 197 v = container_of(rcu, struct net_bridge_vlan, rcu);
198 WARN_ON(br_vlan_is_master(v)); 198 WARN_ON(br_vlan_is_master(v));
199 /* if we had per-port stats configured then free them here */ 199 /* if we had per-port stats configured then free them here */
200 if (v->brvlan->stats != v->stats) 200 if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
201 free_percpu(v->stats); 201 free_percpu(v->stats);
202 v->stats = NULL; 202 v->stats = NULL;
203 kfree(v); 203 kfree(v);
@@ -264,6 +264,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
264 err = -ENOMEM; 264 err = -ENOMEM;
265 goto out_filt; 265 goto out_filt;
266 } 266 }
267 v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
267 } else { 268 } else {
268 v->stats = masterv->stats; 269 v->stats = masterv->stats;
269 } 270 }
diff --git a/net/can/raw.c b/net/can/raw.c
index 1051eee82581..3aab7664933f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
745 } else 745 } else
746 ifindex = ro->ifindex; 746 ifindex = ro->ifindex;
747 747
748 if (ro->fd_frames) { 748 dev = dev_get_by_index(sock_net(sk), ifindex);
749 if (!dev)
750 return -ENXIO;
751
752 err = -EINVAL;
753 if (ro->fd_frames && dev->mtu == CANFD_MTU) {
749 if (unlikely(size != CANFD_MTU && size != CAN_MTU)) 754 if (unlikely(size != CANFD_MTU && size != CAN_MTU))
750 return -EINVAL; 755 goto put_dev;
751 } else { 756 } else {
752 if (unlikely(size != CAN_MTU)) 757 if (unlikely(size != CAN_MTU))
753 return -EINVAL; 758 goto put_dev;
754 } 759 }
755 760
756 dev = dev_get_by_index(sock_net(sk), ifindex);
757 if (!dev)
758 return -ENXIO;
759
760 skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv), 761 skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
761 msg->msg_flags & MSG_DONTWAIT, &err); 762 msg->msg_flags & MSG_DONTWAIT, &err);
762 if (!skb) 763 if (!skb)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 57fcc6b4bf6e..2f126eff275d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -580,9 +580,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
580 struct bio_vec bvec; 580 struct bio_vec bvec;
581 int ret; 581 int ret;
582 582
583 /* sendpage cannot properly handle pages with page_count == 0, 583 /*
584 * we need to fallback to sendmsg if that's the case */ 584 * sendpage cannot properly handle pages with page_count == 0,
585 if (page_count(page) >= 1) 585 * we need to fall back to sendmsg if that's the case.
586 *
587 * Same goes for slab pages: skb_can_coalesce() allows
588 * coalescing neighboring slab objects into a single frag which
589 * triggers one of hardened usercopy checks.
590 */
591 if (page_count(page) >= 1 && !PageSlab(page))
586 return __ceph_tcp_sendpage(sock, page, offset, size, more); 592 return __ceph_tcp_sendpage(sock, page, offset, size, more);
587 593
588 bvec.bv_page = page; 594 bvec.bv_page = page;
diff --git a/net/core/dev.c b/net/core/dev.c
index 0ffcbdd55fa9..ddc551f24ba2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5655,6 +5655,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
5655 skb->vlan_tci = 0; 5655 skb->vlan_tci = 0;
5656 skb->dev = napi->dev; 5656 skb->dev = napi->dev;
5657 skb->skb_iif = 0; 5657 skb->skb_iif = 0;
5658
5659 /* eth_type_trans() assumes pkt_type is PACKET_HOST */
5660 skb->pkt_type = PACKET_HOST;
5661
5658 skb->encapsulation = 0; 5662 skb->encapsulation = 0;
5659 skb_shinfo(skb)->gso_type = 0; 5663 skb_shinfo(skb)->gso_type = 0;
5660 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 5664 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@ -5966,11 +5970,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
5966 if (work_done) 5970 if (work_done)
5967 timeout = n->dev->gro_flush_timeout; 5971 timeout = n->dev->gro_flush_timeout;
5968 5972
5973 /* When the NAPI instance uses a timeout and keeps postponing
5974 * it, we need to bound somehow the time packets are kept in
5975 * the GRO layer
5976 */
5977 napi_gro_flush(n, !!timeout);
5969 if (timeout) 5978 if (timeout)
5970 hrtimer_start(&n->timer, ns_to_ktime(timeout), 5979 hrtimer_start(&n->timer, ns_to_ktime(timeout),
5971 HRTIMER_MODE_REL_PINNED); 5980 HRTIMER_MODE_REL_PINNED);
5972 else
5973 napi_gro_flush(n, false);
5974 } 5981 }
5975 if (unlikely(!list_empty(&n->poll_list))) { 5982 if (unlikely(!list_empty(&n->poll_list))) {
5976 /* If n->poll_list is not empty, we need to mask irqs */ 5983 /* If n->poll_list is not empty, we need to mask irqs */
diff --git a/net/core/filter.c b/net/core/filter.c
index e521c5ebc7d1..9a1327eb25fa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4852,18 +4852,17 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
4852 } else { 4852 } else {
4853 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; 4853 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
4854 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; 4854 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
4855 u16 hnum = ntohs(tuple->ipv6.dport);
4856 int sdif = inet6_sdif(skb); 4855 int sdif = inet6_sdif(skb);
4857 4856
4858 if (proto == IPPROTO_TCP) 4857 if (proto == IPPROTO_TCP)
4859 sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0, 4858 sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
4860 src6, tuple->ipv6.sport, 4859 src6, tuple->ipv6.sport,
4861 dst6, hnum, 4860 dst6, ntohs(tuple->ipv6.dport),
4862 dif, sdif, &refcounted); 4861 dif, sdif, &refcounted);
4863 else if (likely(ipv6_bpf_stub)) 4862 else if (likely(ipv6_bpf_stub))
4864 sk = ipv6_bpf_stub->udp6_lib_lookup(net, 4863 sk = ipv6_bpf_stub->udp6_lib_lookup(net,
4865 src6, tuple->ipv6.sport, 4864 src6, tuple->ipv6.sport,
4866 dst6, hnum, 4865 dst6, tuple->ipv6.dport,
4867 dif, sdif, 4866 dif, sdif,
4868 &udp_table, skb); 4867 &udp_table, skb);
4869#endif 4868#endif
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b4ee5c8b928f..a8217e221e19 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4854,6 +4854,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4854 nf_reset(skb); 4854 nf_reset(skb);
4855 nf_reset_trace(skb); 4855 nf_reset_trace(skb);
4856 4856
4857#ifdef CONFIG_NET_SWITCHDEV
4858 skb->offload_fwd_mark = 0;
4859 skb->offload_mr_fwd_mark = 0;
4860#endif
4861
4857 if (!xnet) 4862 if (!xnet)
4858 return; 4863 return;
4859 4864
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c09219e7f230..5dbec21856f4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -939,7 +939,7 @@ static int __ip_append_data(struct sock *sk,
939 unsigned int fraglen; 939 unsigned int fraglen;
940 unsigned int fraggap; 940 unsigned int fraggap;
941 unsigned int alloclen; 941 unsigned int alloclen;
942 unsigned int pagedlen = 0; 942 unsigned int pagedlen;
943 struct sk_buff *skb_prev; 943 struct sk_buff *skb_prev;
944alloc_new_skb: 944alloc_new_skb:
945 skb_prev = skb; 945 skb_prev = skb;
@@ -956,6 +956,7 @@ alloc_new_skb:
956 if (datalen > mtu - fragheaderlen) 956 if (datalen > mtu - fragheaderlen)
957 datalen = maxfraglen - fragheaderlen; 957 datalen = maxfraglen - fragheaderlen;
958 fraglen = datalen + fragheaderlen; 958 fraglen = datalen + fragheaderlen;
959 pagedlen = 0;
959 960
960 if ((flags & MSG_MORE) && 961 if ((flags & MSG_MORE) &&
961 !(rt->dst.dev->features&NETIF_F_SG)) 962 !(rt->dst.dev->features&NETIF_F_SG))
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index dde671e97829..c248e0dccbe1 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
80 80
81 iph->version = 4; 81 iph->version = 4;
82 iph->ihl = sizeof(struct iphdr) >> 2; 82 iph->ihl = sizeof(struct iphdr) >> 2;
83 iph->frag_off = df; 83 iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
84 iph->protocol = proto; 84 iph->protocol = proto;
85 iph->tos = tos; 85 iph->tos = tos;
86 iph->daddr = dst; 86 iph->daddr = dst;
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index ce1512b02cb2..fd3f9e8a74da 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
81 int ret; 81 int ret;
82 82
83 ret = xt_register_target(&masquerade_tg_reg); 83 ret = xt_register_target(&masquerade_tg_reg);
84 if (ret)
85 return ret;
84 86
85 if (ret == 0) 87 ret = nf_nat_masquerade_ipv4_register_notifier();
86 nf_nat_masquerade_ipv4_register_notifier(); 88 if (ret)
89 xt_unregister_target(&masquerade_tg_reg);
87 90
88 return ret; 91 return ret;
89} 92}
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index a9d5e013e555..41327bb99093 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -147,28 +147,50 @@ static struct notifier_block masq_inet_notifier = {
147 .notifier_call = masq_inet_event, 147 .notifier_call = masq_inet_event,
148}; 148};
149 149
150static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); 150static int masq_refcnt;
151static DEFINE_MUTEX(masq_mutex);
151 152
152void nf_nat_masquerade_ipv4_register_notifier(void) 153int nf_nat_masquerade_ipv4_register_notifier(void)
153{ 154{
155 int ret = 0;
156
157 mutex_lock(&masq_mutex);
154 /* check if the notifier was already set */ 158 /* check if the notifier was already set */
155 if (atomic_inc_return(&masquerade_notifier_refcount) > 1) 159 if (++masq_refcnt > 1)
156 return; 160 goto out_unlock;
157 161
158 /* Register for device down reports */ 162 /* Register for device down reports */
159 register_netdevice_notifier(&masq_dev_notifier); 163 ret = register_netdevice_notifier(&masq_dev_notifier);
164 if (ret)
165 goto err_dec;
160 /* Register IP address change reports */ 166 /* Register IP address change reports */
161 register_inetaddr_notifier(&masq_inet_notifier); 167 ret = register_inetaddr_notifier(&masq_inet_notifier);
168 if (ret)
169 goto err_unregister;
170
171 mutex_unlock(&masq_mutex);
172 return ret;
173
174err_unregister:
175 unregister_netdevice_notifier(&masq_dev_notifier);
176err_dec:
177 masq_refcnt--;
178out_unlock:
179 mutex_unlock(&masq_mutex);
180 return ret;
162} 181}
163EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier); 182EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
164 183
165void nf_nat_masquerade_ipv4_unregister_notifier(void) 184void nf_nat_masquerade_ipv4_unregister_notifier(void)
166{ 185{
186 mutex_lock(&masq_mutex);
167 /* check if the notifier still has clients */ 187 /* check if the notifier still has clients */
168 if (atomic_dec_return(&masquerade_notifier_refcount) > 0) 188 if (--masq_refcnt > 0)
169 return; 189 goto out_unlock;
170 190
171 unregister_netdevice_notifier(&masq_dev_notifier); 191 unregister_netdevice_notifier(&masq_dev_notifier);
172 unregister_inetaddr_notifier(&masq_inet_notifier); 192 unregister_inetaddr_notifier(&masq_inet_notifier);
193out_unlock:
194 mutex_unlock(&masq_mutex);
173} 195}
174EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier); 196EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index f1193e1e928a..6847de1d1db8 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
69 if (ret < 0) 69 if (ret < 0)
70 return ret; 70 return ret;
71 71
72 nf_nat_masquerade_ipv4_register_notifier(); 72 ret = nf_nat_masquerade_ipv4_register_notifier();
73 if (ret)
74 nft_unregister_expr(&nft_masq_ipv4_type);
73 75
74 return ret; 76 return ret;
75} 77}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2868ef28ce52..a9d9555a973f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -579,10 +579,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
579 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 579 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
580 u32 delta_us; 580 u32 delta_us;
581 581
582 if (!delta) 582 if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
583 delta = 1; 583 if (!delta)
584 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 584 delta = 1;
585 tcp_rcv_rtt_update(tp, delta_us, 0); 585 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
586 tcp_rcv_rtt_update(tp, delta_us, 0);
587 }
586 } 588 }
587} 589}
588 590
@@ -2910,9 +2912,11 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
2910 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2912 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2911 flag & FLAG_ACKED) { 2913 flag & FLAG_ACKED) {
2912 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 2914 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
2913 u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
2914 2915
2915 seq_rtt_us = ca_rtt_us = delta_us; 2916 if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
2917 seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
2918 ca_rtt_us = seq_rtt_us;
2919 }
2916 } 2920 }
2917 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */ 2921 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
2918 if (seq_rtt_us < 0) 2922 if (seq_rtt_us < 0)
@@ -4268,7 +4272,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
4268 * If the sack array is full, forget about the last one. 4272 * If the sack array is full, forget about the last one.
4269 */ 4273 */
4270 if (this_sack >= TCP_NUM_SACKS) { 4274 if (this_sack >= TCP_NUM_SACKS) {
4271 if (tp->compressed_ack) 4275 if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
4272 tcp_send_ack(sk); 4276 tcp_send_ack(sk);
4273 this_sack--; 4277 this_sack--;
4274 tp->rx_opt.num_sacks--; 4278 tp->rx_opt.num_sacks--;
@@ -4363,6 +4367,7 @@ static bool tcp_try_coalesce(struct sock *sk,
4363 if (TCP_SKB_CB(from)->has_rxtstamp) { 4367 if (TCP_SKB_CB(from)->has_rxtstamp) {
4364 TCP_SKB_CB(to)->has_rxtstamp = true; 4368 TCP_SKB_CB(to)->has_rxtstamp = true;
4365 to->tstamp = from->tstamp; 4369 to->tstamp = from->tstamp;
4370 skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
4366 } 4371 }
4367 4372
4368 return true; 4373 return true;
@@ -5188,7 +5193,17 @@ send_now:
5188 if (!tcp_is_sack(tp) || 5193 if (!tcp_is_sack(tp) ||
5189 tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) 5194 tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
5190 goto send_now; 5195 goto send_now;
5191 tp->compressed_ack++; 5196
5197 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
5198 tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
5199 if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
5200 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
5201 tp->compressed_ack - TCP_FASTRETRANS_THRESH);
5202 tp->compressed_ack = 0;
5203 }
5204
5205 if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
5206 goto send_now;
5192 5207
5193 if (hrtimer_is_queued(&tp->compressed_ack_timer)) 5208 if (hrtimer_is_queued(&tp->compressed_ack_timer))
5194 return; 5209 return;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9c34b97d365d..3f510cad0b3e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
180{ 180{
181 struct tcp_sock *tp = tcp_sk(sk); 181 struct tcp_sock *tp = tcp_sk(sk);
182 182
183 if (unlikely(tp->compressed_ack)) { 183 if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
184 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, 184 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
185 tp->compressed_ack); 185 tp->compressed_ack - TCP_FASTRETRANS_THRESH);
186 tp->compressed_ack = 0; 186 tp->compressed_ack = TCP_FASTRETRANS_THRESH;
187 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) 187 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
188 __sock_put(sk); 188 __sock_put(sk);
189 } 189 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 676020663ce8..091c53925e4d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -40,15 +40,17 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
40{ 40{
41 struct inet_connection_sock *icsk = inet_csk(sk); 41 struct inet_connection_sock *icsk = inet_csk(sk);
42 u32 elapsed, start_ts; 42 u32 elapsed, start_ts;
43 s32 remaining;
43 44
44 start_ts = tcp_retransmit_stamp(sk); 45 start_ts = tcp_retransmit_stamp(sk);
45 if (!icsk->icsk_user_timeout || !start_ts) 46 if (!icsk->icsk_user_timeout || !start_ts)
46 return icsk->icsk_rto; 47 return icsk->icsk_rto;
47 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts; 48 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
48 if (elapsed >= icsk->icsk_user_timeout) 49 remaining = icsk->icsk_user_timeout - elapsed;
50 if (remaining <= 0)
49 return 1; /* user timeout has passed; fire ASAP */ 51 return 1; /* user timeout has passed; fire ASAP */
50 else 52
51 return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed)); 53 return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
52} 54}
53 55
54/** 56/**
@@ -209,7 +211,7 @@ static bool retransmits_timed_out(struct sock *sk,
209 (boundary - linear_backoff_thresh) * TCP_RTO_MAX; 211 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
210 timeout = jiffies_to_msecs(timeout); 212 timeout = jiffies_to_msecs(timeout);
211 } 213 }
212 return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout; 214 return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
213} 215}
214 216
215/* A write timeout has occurred. Process the after effects. */ 217/* A write timeout has occurred. Process the after effects. */
@@ -740,7 +742,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
740 742
741 bh_lock_sock(sk); 743 bh_lock_sock(sk);
742 if (!sock_owned_by_user(sk)) { 744 if (!sock_owned_by_user(sk)) {
743 if (tp->compressed_ack) 745 if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
744 tcp_send_ack(sk); 746 tcp_send_ack(sk);
745 } else { 747 } else {
746 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, 748 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 63a808d5af15..045597b9a7c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
179static void addrconf_dad_work(struct work_struct *w); 179static void addrconf_dad_work(struct work_struct *w);
180static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, 180static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
181 bool send_na); 181 bool send_na);
182static void addrconf_dad_run(struct inet6_dev *idev); 182static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
183static void addrconf_rs_timer(struct timer_list *t); 183static void addrconf_rs_timer(struct timer_list *t);
184static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 184static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
185static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 185static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3439 void *ptr) 3439 void *ptr)
3440{ 3440{
3441 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3441 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3442 struct netdev_notifier_change_info *change_info;
3442 struct netdev_notifier_changeupper_info *info; 3443 struct netdev_notifier_changeupper_info *info;
3443 struct inet6_dev *idev = __in6_dev_get(dev); 3444 struct inet6_dev *idev = __in6_dev_get(dev);
3444 struct net *net = dev_net(dev); 3445 struct net *net = dev_net(dev);
@@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3513 break; 3514 break;
3514 } 3515 }
3515 3516
3516 if (idev) { 3517 if (!IS_ERR_OR_NULL(idev)) {
3517 if (idev->if_flags & IF_READY) { 3518 if (idev->if_flags & IF_READY) {
3518 /* device is already configured - 3519 /* device is already configured -
3519 * but resend MLD reports, we might 3520 * but resend MLD reports, we might
@@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3521 * multicast snooping switches 3522 * multicast snooping switches
3522 */ 3523 */
3523 ipv6_mc_up(idev); 3524 ipv6_mc_up(idev);
3525 change_info = ptr;
3526 if (change_info->flags_changed & IFF_NOARP)
3527 addrconf_dad_run(idev, true);
3524 rt6_sync_up(dev, RTNH_F_LINKDOWN); 3528 rt6_sync_up(dev, RTNH_F_LINKDOWN);
3525 break; 3529 break;
3526 } 3530 }
@@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3555 3559
3556 if (!IS_ERR_OR_NULL(idev)) { 3560 if (!IS_ERR_OR_NULL(idev)) {
3557 if (run_pending) 3561 if (run_pending)
3558 addrconf_dad_run(idev); 3562 addrconf_dad_run(idev, false);
3559 3563
3560 /* Device has an address by now */ 3564 /* Device has an address by now */
3561 rt6_sync_up(dev, RTNH_F_DEAD); 3565 rt6_sync_up(dev, RTNH_F_DEAD);
@@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4173 addrconf_verify_rtnl(); 4177 addrconf_verify_rtnl();
4174} 4178}
4175 4179
4176static void addrconf_dad_run(struct inet6_dev *idev) 4180static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4177{ 4181{
4178 struct inet6_ifaddr *ifp; 4182 struct inet6_ifaddr *ifp;
4179 4183
4180 read_lock_bh(&idev->lock); 4184 read_lock_bh(&idev->lock);
4181 list_for_each_entry(ifp, &idev->addr_list, if_list) { 4185 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4182 spin_lock(&ifp->lock); 4186 spin_lock(&ifp->lock);
4183 if (ifp->flags & IFA_F_TENTATIVE && 4187 if ((ifp->flags & IFA_F_TENTATIVE &&
4184 ifp->state == INET6_IFADDR_STATE_DAD) 4188 ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4189 if (restart)
4190 ifp->state = INET6_IFADDR_STATE_PREDAD;
4185 addrconf_dad_kick(ifp); 4191 addrconf_dad_kick(ifp);
4192 }
4186 spin_unlock(&ifp->lock); 4193 spin_unlock(&ifp->lock);
4187 } 4194 }
4188 read_unlock_bh(&idev->lock); 4195 read_unlock_bh(&idev->lock);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 89e0d5118afe..827a3f5ff3bb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1354,7 +1354,7 @@ emsgsize:
1354 unsigned int fraglen; 1354 unsigned int fraglen;
1355 unsigned int fraggap; 1355 unsigned int fraggap;
1356 unsigned int alloclen; 1356 unsigned int alloclen;
1357 unsigned int pagedlen = 0; 1357 unsigned int pagedlen;
1358alloc_new_skb: 1358alloc_new_skb:
1359 /* There's no room in the current skb */ 1359 /* There's no room in the current skb */
1360 if (skb) 1360 if (skb)
@@ -1378,6 +1378,7 @@ alloc_new_skb:
1378 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen) 1378 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1379 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len; 1379 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1380 fraglen = datalen + fragheaderlen; 1380 fraglen = datalen + fragheaderlen;
1381 pagedlen = 0;
1381 1382
1382 if ((flags & MSG_MORE) && 1383 if ((flags & MSG_MORE) &&
1383 !(rt->dst.dev->features&NETIF_F_SG)) 1384 !(rt->dst.dev->features&NETIF_F_SG))
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 5ae8e1c51079..8b075f0bc351 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -24,7 +24,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
24 unsigned int hh_len; 24 unsigned int hh_len;
25 struct dst_entry *dst; 25 struct dst_entry *dst;
26 struct flowi6 fl6 = { 26 struct flowi6 fl6 = {
27 .flowi6_oif = sk ? sk->sk_bound_dev_if : 0, 27 .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
28 rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
28 .flowi6_mark = skb->mark, 29 .flowi6_mark = skb->mark,
29 .flowi6_uid = sock_net_uid(net, sk), 30 .flowi6_uid = sock_net_uid(net, sk),
30 .daddr = iph->daddr, 31 .daddr = iph->daddr,
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 491f808e356a..29c7f1915a96 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -58,8 +58,12 @@ static int __init masquerade_tg6_init(void)
58 int err; 58 int err;
59 59
60 err = xt_register_target(&masquerade_tg6_reg); 60 err = xt_register_target(&masquerade_tg6_reg);
61 if (err == 0) 61 if (err)
62 nf_nat_masquerade_ipv6_register_notifier(); 62 return err;
63
64 err = nf_nat_masquerade_ipv6_register_notifier();
65 if (err)
66 xt_unregister_target(&masquerade_tg6_reg);
63 67
64 return err; 68 return err;
65} 69}
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 3e4bf2286abe..0ad0da5a2600 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -132,8 +132,8 @@ static void iterate_cleanup_work(struct work_struct *work)
132 * of ipv6 addresses being deleted), we also need to add an upper 132 * of ipv6 addresses being deleted), we also need to add an upper
133 * limit to the number of queued work items. 133 * limit to the number of queued work items.
134 */ 134 */
135static int masq_inet_event(struct notifier_block *this, 135static int masq_inet6_event(struct notifier_block *this,
136 unsigned long event, void *ptr) 136 unsigned long event, void *ptr)
137{ 137{
138 struct inet6_ifaddr *ifa = ptr; 138 struct inet6_ifaddr *ifa = ptr;
139 const struct net_device *dev; 139 const struct net_device *dev;
@@ -171,30 +171,53 @@ static int masq_inet_event(struct notifier_block *this,
171 return NOTIFY_DONE; 171 return NOTIFY_DONE;
172} 172}
173 173
174static struct notifier_block masq_inet_notifier = { 174static struct notifier_block masq_inet6_notifier = {
175 .notifier_call = masq_inet_event, 175 .notifier_call = masq_inet6_event,
176}; 176};
177 177
178static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); 178static int masq_refcnt;
179static DEFINE_MUTEX(masq_mutex);
179 180
180void nf_nat_masquerade_ipv6_register_notifier(void) 181int nf_nat_masquerade_ipv6_register_notifier(void)
181{ 182{
183 int ret = 0;
184
185 mutex_lock(&masq_mutex);
182 /* check if the notifier is already set */ 186 /* check if the notifier is already set */
183 if (atomic_inc_return(&masquerade_notifier_refcount) > 1) 187 if (++masq_refcnt > 1)
184 return; 188 goto out_unlock;
189
190 ret = register_netdevice_notifier(&masq_dev_notifier);
191 if (ret)
192 goto err_dec;
193
194 ret = register_inet6addr_notifier(&masq_inet6_notifier);
195 if (ret)
196 goto err_unregister;
185 197
186 register_netdevice_notifier(&masq_dev_notifier); 198 mutex_unlock(&masq_mutex);
187 register_inet6addr_notifier(&masq_inet_notifier); 199 return ret;
200
201err_unregister:
202 unregister_netdevice_notifier(&masq_dev_notifier);
203err_dec:
204 masq_refcnt--;
205out_unlock:
206 mutex_unlock(&masq_mutex);
207 return ret;
188} 208}
189EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier); 209EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
190 210
191void nf_nat_masquerade_ipv6_unregister_notifier(void) 211void nf_nat_masquerade_ipv6_unregister_notifier(void)
192{ 212{
213 mutex_lock(&masq_mutex);
193 /* check if the notifier still has clients */ 214 /* check if the notifier still has clients */
194 if (atomic_dec_return(&masquerade_notifier_refcount) > 0) 215 if (--masq_refcnt > 0)
195 return; 216 goto out_unlock;
196 217
197 unregister_inet6addr_notifier(&masq_inet_notifier); 218 unregister_inet6addr_notifier(&masq_inet6_notifier);
198 unregister_netdevice_notifier(&masq_dev_notifier); 219 unregister_netdevice_notifier(&masq_dev_notifier);
220out_unlock:
221 mutex_unlock(&masq_mutex);
199} 222}
200EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier); 223EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index dd0122f3cffe..e06c82e9dfcd 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -70,7 +70,9 @@ static int __init nft_masq_ipv6_module_init(void)
70 if (ret < 0) 70 if (ret < 0)
71 return ret; 71 return ret;
72 72
73 nf_nat_masquerade_ipv6_register_notifier(); 73 ret = nf_nat_masquerade_ipv6_register_notifier();
74 if (ret)
75 nft_unregister_expr(&nft_masq_ipv6_type);
74 76
75 return ret; 77 return ret;
76} 78}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2a7423c39456..059f0531f7c1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2232,8 +2232,7 @@ static void ip6_link_failure(struct sk_buff *skb)
2232 if (rt) { 2232 if (rt) {
2233 rcu_read_lock(); 2233 rcu_read_lock();
2234 if (rt->rt6i_flags & RTF_CACHE) { 2234 if (rt->rt6i_flags & RTF_CACHE) {
2235 if (dst_hold_safe(&rt->dst)) 2235 rt6_remove_exception_rt(rt);
2236 rt6_remove_exception_rt(rt);
2237 } else { 2236 } else {
2238 struct fib6_info *from; 2237 struct fib6_info *from;
2239 struct fib6_node *fn; 2238 struct fib6_node *fn;
@@ -2360,10 +2359,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2360 2359
2361void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) 2360void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2362{ 2361{
2362 int oif = sk->sk_bound_dev_if;
2363 struct dst_entry *dst; 2363 struct dst_entry *dst;
2364 2364
2365 ip6_update_pmtu(skb, sock_net(sk), mtu, 2365 if (!oif && skb->dev)
2366 sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid); 2366 oif = l3mdev_master_ifindex(skb->dev);
2367
2368 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2367 2369
2368 dst = __sk_dst_get(sk); 2370 dst = __sk_dst_get(sk);
2369 if (!dst || !dst->obsolete || 2371 if (!dst || !dst->obsolete ||
@@ -3214,8 +3216,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3214 if (cfg->fc_flags & RTF_GATEWAY && 3216 if (cfg->fc_flags & RTF_GATEWAY &&
3215 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) 3217 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3216 goto out; 3218 goto out;
3217 if (dst_hold_safe(&rt->dst)) 3219
3218 rc = rt6_remove_exception_rt(rt); 3220 rc = rt6_remove_exception_rt(rt);
3219out: 3221out:
3220 return rc; 3222 return rc;
3221} 3223}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 82cdf9020b53..26f1d435696a 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1490 goto err_sock; 1490 goto err_sock;
1491 } 1491 }
1492 1492
1493 sk = sock->sk;
1494
1495 sock_hold(sk);
1496 tunnel->sock = sk;
1497 tunnel->l2tp_net = net; 1493 tunnel->l2tp_net = net;
1498
1499 pn = l2tp_pernet(net); 1494 pn = l2tp_pernet(net);
1500 1495
1501 spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1496 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1510 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); 1505 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1511 spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1506 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1512 1507
1508 sk = sock->sk;
1509 sock_hold(sk);
1510 tunnel->sock = sk;
1511
1513 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { 1512 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1514 struct udp_tunnel_sock_cfg udp_cfg = { 1513 struct udp_tunnel_sock_cfg udp_cfg = {
1515 .sk_user_data = tunnel, 1514 .sk_user_data = tunnel,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 83395bf6dc35..432141f04af3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3980,6 +3980,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
3980 3980
3981static struct notifier_block ip_vs_dst_notifier = { 3981static struct notifier_block ip_vs_dst_notifier = {
3982 .notifier_call = ip_vs_dst_event, 3982 .notifier_call = ip_vs_dst_event,
3983#ifdef CONFIG_IP_VS_IPV6
3984 .priority = ADDRCONF_NOTIFY_PRIORITY + 5,
3985#endif
3983}; 3986};
3984 3987
3985int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs) 3988int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 02ca7df793f5..b6d0f6deea86 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -49,6 +49,7 @@ struct nf_conncount_tuple {
49 struct nf_conntrack_zone zone; 49 struct nf_conntrack_zone zone;
50 int cpu; 50 int cpu;
51 u32 jiffies32; 51 u32 jiffies32;
52 bool dead;
52 struct rcu_head rcu_head; 53 struct rcu_head rcu_head;
53}; 54};
54 55
@@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
106 conn->zone = *zone; 107 conn->zone = *zone;
107 conn->cpu = raw_smp_processor_id(); 108 conn->cpu = raw_smp_processor_id();
108 conn->jiffies32 = (u32)jiffies; 109 conn->jiffies32 = (u32)jiffies;
109 spin_lock(&list->list_lock); 110 conn->dead = false;
111 spin_lock_bh(&list->list_lock);
110 if (list->dead == true) { 112 if (list->dead == true) {
111 kmem_cache_free(conncount_conn_cachep, conn); 113 kmem_cache_free(conncount_conn_cachep, conn);
112 spin_unlock(&list->list_lock); 114 spin_unlock_bh(&list->list_lock);
113 return NF_CONNCOUNT_SKIP; 115 return NF_CONNCOUNT_SKIP;
114 } 116 }
115 list_add_tail(&conn->node, &list->head); 117 list_add_tail(&conn->node, &list->head);
116 list->count++; 118 list->count++;
117 spin_unlock(&list->list_lock); 119 spin_unlock_bh(&list->list_lock);
118 return NF_CONNCOUNT_ADDED; 120 return NF_CONNCOUNT_ADDED;
119} 121}
120EXPORT_SYMBOL_GPL(nf_conncount_add); 122EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
132{ 134{
133 bool free_entry = false; 135 bool free_entry = false;
134 136
135 spin_lock(&list->list_lock); 137 spin_lock_bh(&list->list_lock);
136 138
137 if (list->count == 0) { 139 if (conn->dead) {
138 spin_unlock(&list->list_lock); 140 spin_unlock_bh(&list->list_lock);
139 return free_entry; 141 return free_entry;
140 } 142 }
141 143
142 list->count--; 144 list->count--;
145 conn->dead = true;
143 list_del_rcu(&conn->node); 146 list_del_rcu(&conn->node);
144 if (list->count == 0) 147 if (list->count == 0) {
148 list->dead = true;
145 free_entry = true; 149 free_entry = true;
150 }
146 151
147 spin_unlock(&list->list_lock); 152 spin_unlock_bh(&list->list_lock);
148 call_rcu(&conn->rcu_head, __conn_free); 153 call_rcu(&conn->rcu_head, __conn_free);
149 return free_entry; 154 return free_entry;
150} 155}
@@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
245{ 250{
246 spin_lock_init(&list->list_lock); 251 spin_lock_init(&list->list_lock);
247 INIT_LIST_HEAD(&list->head); 252 INIT_LIST_HEAD(&list->head);
248 list->count = 1; 253 list->count = 0;
249 list->dead = false; 254 list->dead = false;
250} 255}
251EXPORT_SYMBOL_GPL(nf_conncount_list_init); 256EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
259 struct nf_conn *found_ct; 264 struct nf_conn *found_ct;
260 unsigned int collected = 0; 265 unsigned int collected = 0;
261 bool free_entry = false; 266 bool free_entry = false;
267 bool ret = false;
262 268
263 list_for_each_entry_safe(conn, conn_n, &list->head, node) { 269 list_for_each_entry_safe(conn, conn_n, &list->head, node) {
264 found = find_or_evict(net, list, conn, &free_entry); 270 found = find_or_evict(net, list, conn, &free_entry);
@@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
288 if (collected > CONNCOUNT_GC_MAX_NODES) 294 if (collected > CONNCOUNT_GC_MAX_NODES)
289 return false; 295 return false;
290 } 296 }
291 return false; 297
298 spin_lock_bh(&list->list_lock);
299 if (!list->count) {
300 list->dead = true;
301 ret = true;
302 }
303 spin_unlock_bh(&list->list_lock);
304
305 return ret;
292} 306}
293EXPORT_SYMBOL_GPL(nf_conncount_gc_list); 307EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
294 308
@@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
309 while (gc_count) { 323 while (gc_count) {
310 rbconn = gc_nodes[--gc_count]; 324 rbconn = gc_nodes[--gc_count];
311 spin_lock(&rbconn->list.list_lock); 325 spin_lock(&rbconn->list.list_lock);
312 if (rbconn->list.count == 0 && rbconn->list.dead == false) { 326 rb_erase(&rbconn->node, root);
313 rbconn->list.dead = true; 327 call_rcu(&rbconn->rcu_head, __tree_nodes_free);
314 rb_erase(&rbconn->node, root);
315 call_rcu(&rbconn->rcu_head, __tree_nodes_free);
316 }
317 spin_unlock(&rbconn->list.list_lock); 328 spin_unlock(&rbconn->list.list_lock);
318 } 329 }
319} 330}
@@ -414,6 +425,7 @@ insert_tree(struct net *net,
414 nf_conncount_list_init(&rbconn->list); 425 nf_conncount_list_init(&rbconn->list);
415 list_add(&conn->node, &rbconn->list.head); 426 list_add(&conn->node, &rbconn->list.head);
416 count = 1; 427 count = 1;
428 rbconn->list.count = count;
417 429
418 rb_link_node(&rbconn->node, parent, rbnode); 430 rb_link_node(&rbconn->node, parent, rbnode);
419 rb_insert_color(&rbconn->node, root); 431 rb_insert_color(&rbconn->node, root);
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 9b48dc8b4b88..2a5e56c6d8d9 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -43,24 +43,12 @@
43#include <linux/netfilter/nf_conntrack_proto_gre.h> 43#include <linux/netfilter/nf_conntrack_proto_gre.h>
44#include <linux/netfilter/nf_conntrack_pptp.h> 44#include <linux/netfilter/nf_conntrack_pptp.h>
45 45
46enum grep_conntrack {
47 GRE_CT_UNREPLIED,
48 GRE_CT_REPLIED,
49 GRE_CT_MAX
50};
51
52static const unsigned int gre_timeouts[GRE_CT_MAX] = { 46static const unsigned int gre_timeouts[GRE_CT_MAX] = {
53 [GRE_CT_UNREPLIED] = 30*HZ, 47 [GRE_CT_UNREPLIED] = 30*HZ,
54 [GRE_CT_REPLIED] = 180*HZ, 48 [GRE_CT_REPLIED] = 180*HZ,
55}; 49};
56 50
57static unsigned int proto_gre_net_id __read_mostly; 51static unsigned int proto_gre_net_id __read_mostly;
58struct netns_proto_gre {
59 struct nf_proto_net nf;
60 rwlock_t keymap_lock;
61 struct list_head keymap_list;
62 unsigned int gre_timeouts[GRE_CT_MAX];
63};
64 52
65static inline struct netns_proto_gre *gre_pernet(struct net *net) 53static inline struct netns_proto_gre *gre_pernet(struct net *net)
66{ 54{
@@ -402,6 +390,8 @@ static int __init nf_ct_proto_gre_init(void)
402{ 390{
403 int ret; 391 int ret;
404 392
393 BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
394
405 ret = register_pernet_subsys(&proto_gre_net_ops); 395 ret = register_pernet_subsys(&proto_gre_net_ops);
406 if (ret < 0) 396 if (ret < 0)
407 goto out_pernet; 397 goto out_pernet;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 42487d01a3ed..2e61aab6ed73 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2457,7 +2457,7 @@ err:
2457static void nf_tables_rule_destroy(const struct nft_ctx *ctx, 2457static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2458 struct nft_rule *rule) 2458 struct nft_rule *rule)
2459{ 2459{
2460 struct nft_expr *expr; 2460 struct nft_expr *expr, *next;
2461 2461
2462 /* 2462 /*
2463 * Careful: some expressions might not be initialized in case this 2463 * Careful: some expressions might not be initialized in case this
@@ -2465,8 +2465,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2465 */ 2465 */
2466 expr = nft_expr_first(rule); 2466 expr = nft_expr_first(rule);
2467 while (expr != nft_expr_last(rule) && expr->ops) { 2467 while (expr != nft_expr_last(rule) && expr->ops) {
2468 next = nft_expr_next(expr);
2468 nf_tables_expr_destroy(ctx, expr); 2469 nf_tables_expr_destroy(ctx, expr);
2469 expr = nft_expr_next(expr); 2470 expr = next;
2470 } 2471 }
2471 kfree(rule); 2472 kfree(rule);
2472} 2473}
@@ -2589,17 +2590,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2589 2590
2590 if (chain->use == UINT_MAX) 2591 if (chain->use == UINT_MAX)
2591 return -EOVERFLOW; 2592 return -EOVERFLOW;
2592 }
2593
2594 if (nla[NFTA_RULE_POSITION]) {
2595 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
2596 return -EOPNOTSUPP;
2597 2593
2598 pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION])); 2594 if (nla[NFTA_RULE_POSITION]) {
2599 old_rule = __nft_rule_lookup(chain, pos_handle); 2595 pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
2600 if (IS_ERR(old_rule)) { 2596 old_rule = __nft_rule_lookup(chain, pos_handle);
2601 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]); 2597 if (IS_ERR(old_rule)) {
2602 return PTR_ERR(old_rule); 2598 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
2599 return PTR_ERR(old_rule);
2600 }
2603 } 2601 }
2604 } 2602 }
2605 2603
@@ -2669,21 +2667,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2669 } 2667 }
2670 2668
2671 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 2669 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
2672 if (!nft_is_active_next(net, old_rule)) { 2670 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
2673 err = -ENOENT;
2674 goto err2;
2675 }
2676 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
2677 old_rule);
2678 if (trans == NULL) { 2671 if (trans == NULL) {
2679 err = -ENOMEM; 2672 err = -ENOMEM;
2680 goto err2; 2673 goto err2;
2681 } 2674 }
2682 nft_deactivate_next(net, old_rule); 2675 err = nft_delrule(&ctx, old_rule);
2683 chain->use--; 2676 if (err < 0) {
2684 2677 nft_trans_destroy(trans);
2685 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
2686 err = -ENOMEM;
2687 goto err2; 2678 goto err2;
2688 } 2679 }
2689 2680
@@ -6324,7 +6315,7 @@ static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
6324 call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old); 6315 call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
6325} 6316}
6326 6317
6327static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *chain) 6318static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
6328{ 6319{
6329 struct nft_rule **g0, **g1; 6320 struct nft_rule **g0, **g1;
6330 bool next_genbit; 6321 bool next_genbit;
@@ -6441,11 +6432,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
6441 6432
6442 /* step 2. Make rules_gen_X visible to packet path */ 6433 /* step 2. Make rules_gen_X visible to packet path */
6443 list_for_each_entry(table, &net->nft.tables, list) { 6434 list_for_each_entry(table, &net->nft.tables, list) {
6444 list_for_each_entry(chain, &table->chains, list) { 6435 list_for_each_entry(chain, &table->chains, list)
6445 if (!nft_is_active_next(net, chain)) 6436 nf_tables_commit_chain(net, chain);
6446 continue;
6447 nf_tables_commit_chain_active(net, chain);
6448 }
6449 } 6437 }
6450 6438
6451 /* 6439 /*
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index a518eb162344..109b0d27345a 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -455,7 +455,8 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
455 case IPPROTO_TCP: 455 case IPPROTO_TCP:
456 timeouts = nf_tcp_pernet(net)->timeouts; 456 timeouts = nf_tcp_pernet(net)->timeouts;
457 break; 457 break;
458 case IPPROTO_UDP: 458 case IPPROTO_UDP: /* fallthrough */
459 case IPPROTO_UDPLITE:
459 timeouts = nf_udp_pernet(net)->timeouts; 460 timeouts = nf_udp_pernet(net)->timeouts;
460 break; 461 break;
461 case IPPROTO_DCCP: 462 case IPPROTO_DCCP:
@@ -471,11 +472,21 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
471 timeouts = nf_sctp_pernet(net)->timeouts; 472 timeouts = nf_sctp_pernet(net)->timeouts;
472#endif 473#endif
473 break; 474 break;
475 case IPPROTO_GRE:
476#ifdef CONFIG_NF_CT_PROTO_GRE
477 if (l4proto->net_id) {
478 struct netns_proto_gre *net_gre;
479
480 net_gre = net_generic(net, *l4proto->net_id);
481 timeouts = net_gre->gre_timeouts;
482 }
483#endif
484 break;
474 case 255: 485 case 255:
475 timeouts = &nf_generic_pernet(net)->timeout; 486 timeouts = &nf_generic_pernet(net)->timeout;
476 break; 487 break;
477 default: 488 default:
478 WARN_ON_ONCE(1); 489 WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
479 break; 490 break;
480 } 491 }
481 492
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 9d0ede474224..7334e0b80a5e 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -520,6 +520,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
520 void *info) 520 void *info)
521{ 521{
522 struct xt_match *match = expr->ops->data; 522 struct xt_match *match = expr->ops->data;
523 struct module *me = match->me;
523 struct xt_mtdtor_param par; 524 struct xt_mtdtor_param par;
524 525
525 par.net = ctx->net; 526 par.net = ctx->net;
@@ -530,7 +531,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
530 par.match->destroy(&par); 531 par.match->destroy(&par);
531 532
532 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) 533 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
533 module_put(match->me); 534 module_put(me);
534} 535}
535 536
536static void 537static void
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index e82d9a966c45..974525eb92df 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -214,7 +214,9 @@ static int __init nft_flow_offload_module_init(void)
214{ 214{
215 int err; 215 int err;
216 216
217 register_netdevice_notifier(&flow_offload_netdev_notifier); 217 err = register_netdevice_notifier(&flow_offload_netdev_notifier);
218 if (err)
219 goto err;
218 220
219 err = nft_register_expr(&nft_flow_offload_type); 221 err = nft_register_expr(&nft_flow_offload_type);
220 if (err < 0) 222 if (err < 0)
@@ -224,6 +226,7 @@ static int __init nft_flow_offload_module_init(void)
224 226
225register_expr: 227register_expr:
226 unregister_netdevice_notifier(&flow_offload_netdev_notifier); 228 unregister_netdevice_notifier(&flow_offload_netdev_notifier);
229err:
227 return err; 230 return err;
228} 231}
229 232
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index dec843cadf46..9e05c86ba5c4 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -201,18 +201,8 @@ static __net_init int xt_rateest_net_init(struct net *net)
201 return 0; 201 return 0;
202} 202}
203 203
204static void __net_exit xt_rateest_net_exit(struct net *net)
205{
206 struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
207 int i;
208
209 for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
210 WARN_ON_ONCE(!hlist_empty(&xn->hash[i]));
211}
212
213static struct pernet_operations xt_rateest_net_ops = { 204static struct pernet_operations xt_rateest_net_ops = {
214 .init = xt_rateest_net_init, 205 .init = xt_rateest_net_init,
215 .exit = xt_rateest_net_exit,
216 .id = &xt_rateest_id, 206 .id = &xt_rateest_id,
217 .size = sizeof(struct xt_rateest_net), 207 .size = sizeof(struct xt_rateest_net),
218}; 208};
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 3e7d259e5d8d..1ad4017f9b73 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
295 295
296 /* copy match config into hashtable config */ 296 /* copy match config into hashtable config */
297 ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3); 297 ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
298 298 if (ret) {
299 if (ret) 299 vfree(hinfo);
300 return ret; 300 return ret;
301 }
301 302
302 hinfo->cfg.size = size; 303 hinfo->cfg.size = size;
303 if (hinfo->cfg.max == 0) 304 if (hinfo->cfg.max == 0)
@@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
814 int ret; 815 int ret;
815 816
816 ret = cfg_copy(&cfg, (void *)&info->cfg, 1); 817 ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
817
818 if (ret) 818 if (ret)
819 return ret; 819 return ret;
820 820
@@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
830 int ret; 830 int ret;
831 831
832 ret = cfg_copy(&cfg, (void *)&info->cfg, 2); 832 ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
833
834 if (ret) 833 if (ret)
835 return ret; 834 return ret;
836 835
@@ -921,7 +920,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
921 return ret; 920 return ret;
922 921
923 ret = cfg_copy(&cfg, (void *)&info->cfg, 1); 922 ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
924
925 if (ret) 923 if (ret)
926 return ret; 924 return ret;
927 925
@@ -940,7 +938,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
940 return ret; 938 return ret;
941 939
942 ret = cfg_copy(&cfg, (void *)&info->cfg, 2); 940 ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
943
944 if (ret) 941 if (ret)
945 return ret; 942 return ret;
946 943
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ec3095f13aae..a74650e98f42 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
2394 void *ph; 2394 void *ph;
2395 __u32 ts; 2395 __u32 ts;
2396 2396
2397 ph = skb_shinfo(skb)->destructor_arg; 2397 ph = skb_zcopy_get_nouarg(skb);
2398 packet_dec_pending(&po->tx_ring); 2398 packet_dec_pending(&po->tx_ring);
2399 2399
2400 ts = __packet_set_timestamp(po, ph, skb); 2400 ts = __packet_set_timestamp(po, ph, skb);
@@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2461 skb->mark = po->sk.sk_mark; 2461 skb->mark = po->sk.sk_mark;
2462 skb->tstamp = sockc->transmit_time; 2462 skb->tstamp = sockc->transmit_time;
2463 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); 2463 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
2464 skb_shinfo(skb)->destructor_arg = ph.raw; 2464 skb_zcopy_set_nouarg(skb, ph.raw);
2465 2465
2466 skb_reserve(skb, hlen); 2466 skb_reserve(skb, hlen);
2467 skb_reset_network_header(skb); 2467 skb_reset_network_header(skb);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 64362d078da8..a2522f9d71e2 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -375,17 +375,36 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
375 * getting ACKs from the server. Returns a number representing the life state 375 * getting ACKs from the server. Returns a number representing the life state
376 * which can be compared to that returned by a previous call. 376 * which can be compared to that returned by a previous call.
377 * 377 *
378 * If this is a client call, ping ACKs will be sent to the server to find out 378 * If the life state stalls, rxrpc_kernel_probe_life() should be called and
379 * whether it's still responsive and whether the call is still alive on the 379 * then 2RTT waited.
380 * server.
381 */ 380 */
382u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call) 381u32 rxrpc_kernel_check_life(const struct socket *sock,
382 const struct rxrpc_call *call)
383{ 383{
384 return call->acks_latest; 384 return call->acks_latest;
385} 385}
386EXPORT_SYMBOL(rxrpc_kernel_check_life); 386EXPORT_SYMBOL(rxrpc_kernel_check_life);
387 387
388/** 388/**
389 * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
390 * @sock: The socket the call is on
391 * @call: The call to check
392 *
393 * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
394 * find out whether a call is still alive by pinging it. This should cause the
395 * life state to be bumped in about 2*RTT.
396 *
397 * The must be called in TASK_RUNNING state on pain of might_sleep() objecting.
398 */
399void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
400{
401 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
402 rxrpc_propose_ack_ping_for_check_life);
403 rxrpc_send_ack_packet(call, true, NULL);
404}
405EXPORT_SYMBOL(rxrpc_kernel_probe_life);
406
407/**
389 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call. 408 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
390 * @sock: The socket the call is on 409 * @sock: The socket the call is on
391 * @call: The call to query 410 * @call: The call to query
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index da3dd0f68cc2..2b372a06b432 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
201 goto out_release; 201 goto out_release;
202 } 202 }
203 } else { 203 } else {
204 return err; 204 ret = err;
205 goto out_free;
205 } 206 }
206 207
207 p = to_pedit(*a); 208 p = to_pedit(*a);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 052855d47354..37c9b8f0e10f 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -27,10 +27,7 @@ struct tcf_police_params {
27 u32 tcfp_ewma_rate; 27 u32 tcfp_ewma_rate;
28 s64 tcfp_burst; 28 s64 tcfp_burst;
29 u32 tcfp_mtu; 29 u32 tcfp_mtu;
30 s64 tcfp_toks;
31 s64 tcfp_ptoks;
32 s64 tcfp_mtu_ptoks; 30 s64 tcfp_mtu_ptoks;
33 s64 tcfp_t_c;
34 struct psched_ratecfg rate; 31 struct psched_ratecfg rate;
35 bool rate_present; 32 bool rate_present;
36 struct psched_ratecfg peak; 33 struct psched_ratecfg peak;
@@ -41,6 +38,11 @@ struct tcf_police_params {
41struct tcf_police { 38struct tcf_police {
42 struct tc_action common; 39 struct tc_action common;
43 struct tcf_police_params __rcu *params; 40 struct tcf_police_params __rcu *params;
41
42 spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
43 s64 tcfp_toks;
44 s64 tcfp_ptoks;
45 s64 tcfp_t_c;
44}; 46};
45 47
46#define to_police(pc) ((struct tcf_police *)pc) 48#define to_police(pc) ((struct tcf_police *)pc)
@@ -122,6 +124,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
122 return ret; 124 return ret;
123 } 125 }
124 ret = ACT_P_CREATED; 126 ret = ACT_P_CREATED;
127 spin_lock_init(&(to_police(*a)->tcfp_lock));
125 } else if (!ovr) { 128 } else if (!ovr) {
126 tcf_idr_release(*a, bind); 129 tcf_idr_release(*a, bind);
127 return -EEXIST; 130 return -EEXIST;
@@ -186,12 +189,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
186 } 189 }
187 190
188 new->tcfp_burst = PSCHED_TICKS2NS(parm->burst); 191 new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
189 new->tcfp_toks = new->tcfp_burst; 192 if (new->peak_present)
190 if (new->peak_present) {
191 new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak, 193 new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
192 new->tcfp_mtu); 194 new->tcfp_mtu);
193 new->tcfp_ptoks = new->tcfp_mtu_ptoks;
194 }
195 195
196 if (tb[TCA_POLICE_AVRATE]) 196 if (tb[TCA_POLICE_AVRATE])
197 new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); 197 new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
@@ -207,7 +207,12 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
207 } 207 }
208 208
209 spin_lock_bh(&police->tcf_lock); 209 spin_lock_bh(&police->tcf_lock);
210 new->tcfp_t_c = ktime_get_ns(); 210 spin_lock_bh(&police->tcfp_lock);
211 police->tcfp_t_c = ktime_get_ns();
212 police->tcfp_toks = new->tcfp_burst;
213 if (new->peak_present)
214 police->tcfp_ptoks = new->tcfp_mtu_ptoks;
215 spin_unlock_bh(&police->tcfp_lock);
211 police->tcf_action = parm->action; 216 police->tcf_action = parm->action;
212 rcu_swap_protected(police->params, 217 rcu_swap_protected(police->params,
213 new, 218 new,
@@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
257 } 262 }
258 263
259 now = ktime_get_ns(); 264 now = ktime_get_ns();
260 toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst); 265 spin_lock_bh(&police->tcfp_lock);
266 toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
261 if (p->peak_present) { 267 if (p->peak_present) {
262 ptoks = toks + p->tcfp_ptoks; 268 ptoks = toks + police->tcfp_ptoks;
263 if (ptoks > p->tcfp_mtu_ptoks) 269 if (ptoks > p->tcfp_mtu_ptoks)
264 ptoks = p->tcfp_mtu_ptoks; 270 ptoks = p->tcfp_mtu_ptoks;
265 ptoks -= (s64)psched_l2t_ns(&p->peak, 271 ptoks -= (s64)psched_l2t_ns(&p->peak,
266 qdisc_pkt_len(skb)); 272 qdisc_pkt_len(skb));
267 } 273 }
268 toks += p->tcfp_toks; 274 toks += police->tcfp_toks;
269 if (toks > p->tcfp_burst) 275 if (toks > p->tcfp_burst)
270 toks = p->tcfp_burst; 276 toks = p->tcfp_burst;
271 toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb)); 277 toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
272 if ((toks|ptoks) >= 0) { 278 if ((toks|ptoks) >= 0) {
273 p->tcfp_t_c = now; 279 police->tcfp_t_c = now;
274 p->tcfp_toks = toks; 280 police->tcfp_toks = toks;
275 p->tcfp_ptoks = ptoks; 281 police->tcfp_ptoks = ptoks;
282 spin_unlock_bh(&police->tcfp_lock);
276 ret = p->tcfp_result; 283 ret = p->tcfp_result;
277 goto inc_drops; 284 goto inc_drops;
278 } 285 }
286 spin_unlock_bh(&police->tcfp_lock);
279 } 287 }
280 288
281inc_overlimits: 289inc_overlimits:
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 4b1af706896c..25a7cf6d380f 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -469,22 +469,29 @@ begin:
469 goto begin; 469 goto begin;
470 } 470 }
471 prefetch(&skb->end); 471 prefetch(&skb->end);
472 f->credit -= qdisc_pkt_len(skb); 472 plen = qdisc_pkt_len(skb);
473 f->credit -= plen;
473 474
474 if (ktime_to_ns(skb->tstamp) || !q->rate_enable) 475 if (!q->rate_enable)
475 goto out; 476 goto out;
476 477
477 rate = q->flow_max_rate; 478 rate = q->flow_max_rate;
478 if (skb->sk) 479
479 rate = min(skb->sk->sk_pacing_rate, rate); 480 /* If EDT time was provided for this skb, we need to
480 481 * update f->time_next_packet only if this qdisc enforces
481 if (rate <= q->low_rate_threshold) { 482 * a flow max rate.
482 f->credit = 0; 483 */
483 plen = qdisc_pkt_len(skb); 484 if (!skb->tstamp) {
484 } else { 485 if (skb->sk)
485 plen = max(qdisc_pkt_len(skb), q->quantum); 486 rate = min(skb->sk->sk_pacing_rate, rate);
486 if (f->credit > 0) 487
487 goto out; 488 if (rate <= q->low_rate_threshold) {
489 f->credit = 0;
490 } else {
491 plen = max(plen, q->quantum);
492 if (f->credit > 0)
493 goto out;
494 }
488 } 495 }
489 if (rate != ~0UL) { 496 if (rate != ~0UL) {
490 u64 len = (u64)plen * NSEC_PER_SEC; 497 u64 len = (u64)plen * NSEC_PER_SEC;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 67939ad99c01..025f48e14a91 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
118 sctp_transport_route(tp, NULL, sp); 118 sctp_transport_route(tp, NULL, sp);
119 if (asoc->param_flags & SPP_PMTUD_ENABLE) 119 if (asoc->param_flags & SPP_PMTUD_ENABLE)
120 sctp_assoc_sync_pmtu(asoc); 120 sctp_assoc_sync_pmtu(asoc);
121 } else if (!sctp_transport_pmtu_check(tp)) {
122 if (asoc->param_flags & SPP_PMTUD_ENABLE)
123 sctp_assoc_sync_pmtu(asoc);
121 } 124 }
122 125
123 if (asoc->pmtu_pending) { 126 if (asoc->pmtu_pending) {
@@ -396,25 +399,6 @@ finish:
396 return retval; 399 return retval;
397} 400}
398 401
399static void sctp_packet_release_owner(struct sk_buff *skb)
400{
401 sk_free(skb->sk);
402}
403
404static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
405{
406 skb_orphan(skb);
407 skb->sk = sk;
408 skb->destructor = sctp_packet_release_owner;
409
410 /*
411 * The data chunks have already been accounted for in sctp_sendmsg(),
412 * therefore only reserve a single byte to keep socket around until
413 * the packet has been transmitted.
414 */
415 refcount_inc(&sk->sk_wmem_alloc);
416}
417
418static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) 402static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
419{ 403{
420 if (SCTP_OUTPUT_CB(head)->last == head) 404 if (SCTP_OUTPUT_CB(head)->last == head)
@@ -426,6 +410,7 @@ static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
426 head->truesize += skb->truesize; 410 head->truesize += skb->truesize;
427 head->data_len += skb->len; 411 head->data_len += skb->len;
428 head->len += skb->len; 412 head->len += skb->len;
413 refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
429 414
430 __skb_header_release(skb); 415 __skb_header_release(skb);
431} 416}
@@ -601,7 +586,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
601 if (!head) 586 if (!head)
602 goto out; 587 goto out;
603 skb_reserve(head, packet->overhead + MAX_HEADER); 588 skb_reserve(head, packet->overhead + MAX_HEADER);
604 sctp_packet_set_owner_w(head, sk); 589 skb_set_owner_w(head, sk);
605 590
606 /* set sctp header */ 591 /* set sctp header */
607 sh = skb_push(head, sizeof(struct sctphdr)); 592 sh = skb_push(head, sizeof(struct sctphdr));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 739f3e50120d..bf618d1b41fd 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3940,32 +3940,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
3940 unsigned int optlen) 3940 unsigned int optlen)
3941{ 3941{
3942 struct sctp_assoc_value params; 3942 struct sctp_assoc_value params;
3943 struct sctp_association *asoc;
3944 int retval = -EINVAL;
3945 3943
3946 if (optlen != sizeof(params)) 3944 if (optlen != sizeof(params))
3947 goto out; 3945 return -EINVAL;
3948
3949 if (copy_from_user(&params, optval, optlen)) {
3950 retval = -EFAULT;
3951 goto out;
3952 }
3953
3954 asoc = sctp_id2assoc(sk, params.assoc_id);
3955 if (asoc) {
3956 asoc->prsctp_enable = !!params.assoc_value;
3957 } else if (!params.assoc_id) {
3958 struct sctp_sock *sp = sctp_sk(sk);
3959 3946
3960 sp->ep->prsctp_enable = !!params.assoc_value; 3947 if (copy_from_user(&params, optval, optlen))
3961 } else { 3948 return -EFAULT;
3962 goto out;
3963 }
3964 3949
3965 retval = 0; 3950 sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
3966 3951
3967out: 3952 return 0;
3968 return retval;
3969} 3953}
3970 3954
3971static int sctp_setsockopt_default_prinfo(struct sock *sk, 3955static int sctp_setsockopt_default_prinfo(struct sock *sk,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index ffb940d3b57c..3892e7630f3a 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
535 goto out; 535 goto out;
536 } 536 }
537 537
538 stream->incnt = incnt;
539 stream->outcnt = outcnt; 538 stream->outcnt = outcnt;
540 539
541 asoc->strreset_outstanding = !!out + !!in; 540 asoc->strreset_outstanding = !!out + !!in;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 80e2119f1c70..5fbaf1901571 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -127,6 +127,8 @@ static int smc_release(struct socket *sock)
127 smc = smc_sk(sk); 127 smc = smc_sk(sk);
128 128
129 /* cleanup for a dangling non-blocking connect */ 129 /* cleanup for a dangling non-blocking connect */
130 if (smc->connect_info && sk->sk_state == SMC_INIT)
131 tcp_abort(smc->clcsock->sk, ECONNABORTED);
130 flush_work(&smc->connect_work); 132 flush_work(&smc->connect_work);
131 kfree(smc->connect_info); 133 kfree(smc->connect_info);
132 smc->connect_info = NULL; 134 smc->connect_info = NULL;
@@ -547,7 +549,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
547 549
548 mutex_lock(&smc_create_lgr_pending); 550 mutex_lock(&smc_create_lgr_pending);
549 local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, 551 local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
550 ibport, &aclc->lcl, NULL, 0); 552 ibport, ntoh24(aclc->qpn), &aclc->lcl,
553 NULL, 0);
551 if (local_contact < 0) { 554 if (local_contact < 0) {
552 if (local_contact == -ENOMEM) 555 if (local_contact == -ENOMEM)
553 reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ 556 reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -618,7 +621,7 @@ static int smc_connect_ism(struct smc_sock *smc,
618 int rc = 0; 621 int rc = 0;
619 622
620 mutex_lock(&smc_create_lgr_pending); 623 mutex_lock(&smc_create_lgr_pending);
621 local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 624 local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
622 NULL, ismdev, aclc->gid); 625 NULL, ismdev, aclc->gid);
623 if (local_contact < 0) 626 if (local_contact < 0)
624 return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); 627 return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
@@ -1083,7 +1086,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
1083 int *local_contact) 1086 int *local_contact)
1084{ 1087{
1085 /* allocate connection / link group */ 1088 /* allocate connection / link group */
1086 *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 1089 *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
1087 &pclc->lcl, NULL, 0); 1090 &pclc->lcl, NULL, 0);
1088 if (*local_contact < 0) { 1091 if (*local_contact < 0) {
1089 if (*local_contact == -ENOMEM) 1092 if (*local_contact == -ENOMEM)
@@ -1107,7 +1110,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
1107 struct smc_clc_msg_smcd *pclc_smcd; 1110 struct smc_clc_msg_smcd *pclc_smcd;
1108 1111
1109 pclc_smcd = smc_get_clc_msg_smcd(pclc); 1112 pclc_smcd = smc_get_clc_msg_smcd(pclc);
1110 *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL, 1113 *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
1111 ismdev, pclc_smcd->gid); 1114 ismdev, pclc_smcd->gid);
1112 if (*local_contact < 0) { 1115 if (*local_contact < 0) {
1113 if (*local_contact == -ENOMEM) 1116 if (*local_contact == -ENOMEM)
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index ed5dcf03fe0b..db83332ac1c8 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
81 sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, 81 sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
82 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); 82 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
83 BUILD_BUG_ON_MSG( 83 BUILD_BUG_ON_MSG(
84 sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE, 84 offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
85 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); 85 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
86 BUILD_BUG_ON_MSG( 86 BUILD_BUG_ON_MSG(
87 sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, 87 sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
@@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
177int smcd_cdc_msg_send(struct smc_connection *conn) 177int smcd_cdc_msg_send(struct smc_connection *conn)
178{ 178{
179 struct smc_sock *smc = container_of(conn, struct smc_sock, conn); 179 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
180 union smc_host_cursor curs;
180 struct smcd_cdc_msg cdc; 181 struct smcd_cdc_msg cdc;
181 int rc, diff; 182 int rc, diff;
182 183
183 memset(&cdc, 0, sizeof(cdc)); 184 memset(&cdc, 0, sizeof(cdc));
184 cdc.common.type = SMC_CDC_MSG_TYPE; 185 cdc.common.type = SMC_CDC_MSG_TYPE;
185 cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap; 186 curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
186 cdc.prod_count = conn->local_tx_ctrl.prod.count; 187 cdc.prod.wrap = curs.wrap;
187 188 cdc.prod.count = curs.count;
188 cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap; 189 curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
189 cdc.cons_count = conn->local_tx_ctrl.cons.count; 190 cdc.cons.wrap = curs.wrap;
190 cdc.prod_flags = conn->local_tx_ctrl.prod_flags; 191 cdc.cons.count = curs.count;
191 cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; 192 cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
193 cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
192 rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); 194 rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
193 if (rc) 195 if (rc)
194 return rc; 196 return rc;
195 smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons, 197 smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
196 conn);
197 /* Calculate transmitted data and increment free send buffer space */ 198 /* Calculate transmitted data and increment free send buffer space */
198 diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, 199 diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
199 &conn->tx_curs_sent); 200 &conn->tx_curs_sent);
@@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
331static void smcd_cdc_rx_tsklet(unsigned long data) 332static void smcd_cdc_rx_tsklet(unsigned long data)
332{ 333{
333 struct smc_connection *conn = (struct smc_connection *)data; 334 struct smc_connection *conn = (struct smc_connection *)data;
335 struct smcd_cdc_msg *data_cdc;
334 struct smcd_cdc_msg cdc; 336 struct smcd_cdc_msg cdc;
335 struct smc_sock *smc; 337 struct smc_sock *smc;
336 338
337 if (!conn) 339 if (!conn)
338 return; 340 return;
339 341
340 memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc)); 342 data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
343 smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
344 smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
341 smc = container_of(conn, struct smc_sock, conn); 345 smc = container_of(conn, struct smc_sock, conn);
342 smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); 346 smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
343} 347}
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 934df4473a7c..b5bfe38c7f9b 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -48,21 +48,31 @@ struct smc_cdc_msg {
48 struct smc_cdc_producer_flags prod_flags; 48 struct smc_cdc_producer_flags prod_flags;
49 struct smc_cdc_conn_state_flags conn_state_flags; 49 struct smc_cdc_conn_state_flags conn_state_flags;
50 u8 reserved[18]; 50 u8 reserved[18];
51} __packed; /* format defined in RFC7609 */ 51};
52
53/* SMC-D cursor format */
54union smcd_cdc_cursor {
55 struct {
56 u16 wrap;
57 u32 count;
58 struct smc_cdc_producer_flags prod_flags;
59 struct smc_cdc_conn_state_flags conn_state_flags;
60 } __packed;
61#ifdef KERNEL_HAS_ATOMIC64
62 atomic64_t acurs; /* for atomic processing */
63#else
64 u64 acurs; /* for atomic processing */
65#endif
66} __aligned(8);
52 67
53/* CDC message for SMC-D */ 68/* CDC message for SMC-D */
54struct smcd_cdc_msg { 69struct smcd_cdc_msg {
55 struct smc_wr_rx_hdr common; /* Type = 0xFE */ 70 struct smc_wr_rx_hdr common; /* Type = 0xFE */
56 u8 res1[7]; 71 u8 res1[7];
57 u16 prod_wrap; 72 union smcd_cdc_cursor prod;
58 u32 prod_count; 73 union smcd_cdc_cursor cons;
59 u8 res2[2];
60 u16 cons_wrap;
61 u32 cons_count;
62 struct smc_cdc_producer_flags prod_flags;
63 struct smc_cdc_conn_state_flags conn_state_flags;
64 u8 res3[8]; 74 u8 res3[8];
65} __packed; 75} __aligned(8);
66 76
67static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) 77static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
68{ 78{
@@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
135#endif 145#endif
136} 146}
137 147
148static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
149 union smcd_cdc_cursor *src,
150 struct smc_connection *conn)
151{
152#ifndef KERNEL_HAS_ATOMIC64
153 unsigned long flags;
154
155 spin_lock_irqsave(&conn->acurs_lock, flags);
156 tgt->acurs = src->acurs;
157 spin_unlock_irqrestore(&conn->acurs_lock, flags);
158#else
159 atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
160#endif
161}
162
138/* calculate cursor difference between old and new, where old <= new */ 163/* calculate cursor difference between old and new, where old <= new */
139static inline int smc_curs_diff(unsigned int size, 164static inline int smc_curs_diff(unsigned int size,
140 union smc_host_cursor *old, 165 union smc_host_cursor *old,
@@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
222static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local, 247static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
223 struct smcd_cdc_msg *peer) 248 struct smcd_cdc_msg *peer)
224{ 249{
225 local->prod.wrap = peer->prod_wrap; 250 union smc_host_cursor temp;
226 local->prod.count = peer->prod_count; 251
227 local->cons.wrap = peer->cons_wrap; 252 temp.wrap = peer->prod.wrap;
228 local->cons.count = peer->cons_count; 253 temp.count = peer->prod.count;
229 local->prod_flags = peer->prod_flags; 254 atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));
230 local->conn_state_flags = peer->conn_state_flags; 255
256 temp.wrap = peer->cons.wrap;
257 temp.count = peer->cons.count;
258 atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
259 local->prod_flags = peer->cons.prod_flags;
260 local->conn_state_flags = peer->cons.conn_state_flags;
231} 261}
232 262
233static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, 263static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 18daebcef181..1c9fa7f0261a 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -184,6 +184,8 @@ free:
184 184
185 if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) 185 if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
186 smc_llc_link_inactive(lnk); 186 smc_llc_link_inactive(lnk);
187 if (lgr->is_smcd)
188 smc_ism_signal_shutdown(lgr);
187 smc_lgr_free(lgr); 189 smc_lgr_free(lgr);
188 } 190 }
189} 191}
@@ -485,7 +487,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
485} 487}
486 488
487/* Called when SMC-D device is terminated or peer is lost */ 489/* Called when SMC-D device is terminated or peer is lost */
488void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) 490void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
489{ 491{
490 struct smc_link_group *lgr, *l; 492 struct smc_link_group *lgr, *l;
491 LIST_HEAD(lgr_free_list); 493 LIST_HEAD(lgr_free_list);
@@ -495,7 +497,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
495 list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { 497 list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
496 if (lgr->is_smcd && lgr->smcd == dev && 498 if (lgr->is_smcd && lgr->smcd == dev &&
497 (!peer_gid || lgr->peer_gid == peer_gid) && 499 (!peer_gid || lgr->peer_gid == peer_gid) &&
498 !list_empty(&lgr->list)) { 500 (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
499 __smc_lgr_terminate(lgr); 501 __smc_lgr_terminate(lgr);
500 list_move(&lgr->list, &lgr_free_list); 502 list_move(&lgr->list, &lgr_free_list);
501 } 503 }
@@ -506,6 +508,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
506 list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { 508 list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
507 list_del_init(&lgr->list); 509 list_del_init(&lgr->list);
508 cancel_delayed_work_sync(&lgr->free_work); 510 cancel_delayed_work_sync(&lgr->free_work);
511 if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
512 smc_ism_signal_shutdown(lgr);
509 smc_lgr_free(lgr); 513 smc_lgr_free(lgr);
510 } 514 }
511} 515}
@@ -559,7 +563,7 @@ out:
559 563
560static bool smcr_lgr_match(struct smc_link_group *lgr, 564static bool smcr_lgr_match(struct smc_link_group *lgr,
561 struct smc_clc_msg_local *lcl, 565 struct smc_clc_msg_local *lcl,
562 enum smc_lgr_role role) 566 enum smc_lgr_role role, u32 clcqpn)
563{ 567{
564 return !memcmp(lgr->peer_systemid, lcl->id_for_peer, 568 return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
565 SMC_SYSTEMID_LEN) && 569 SMC_SYSTEMID_LEN) &&
@@ -567,7 +571,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
567 SMC_GID_SIZE) && 571 SMC_GID_SIZE) &&
568 !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, 572 !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
569 sizeof(lcl->mac)) && 573 sizeof(lcl->mac)) &&
570 lgr->role == role; 574 lgr->role == role &&
575 (lgr->role == SMC_SERV ||
576 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
571} 577}
572 578
573static bool smcd_lgr_match(struct smc_link_group *lgr, 579static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -578,7 +584,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
578 584
579/* create a new SMC connection (and a new link group if necessary) */ 585/* create a new SMC connection (and a new link group if necessary) */
580int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, 586int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
581 struct smc_ib_device *smcibdev, u8 ibport, 587 struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
582 struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, 588 struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
583 u64 peer_gid) 589 u64 peer_gid)
584{ 590{
@@ -603,7 +609,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
603 list_for_each_entry(lgr, &smc_lgr_list.list, list) { 609 list_for_each_entry(lgr, &smc_lgr_list.list, list) {
604 write_lock_bh(&lgr->conns_lock); 610 write_lock_bh(&lgr->conns_lock);
605 if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) : 611 if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
606 smcr_lgr_match(lgr, lcl, role)) && 612 smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
607 !lgr->sync_err && 613 !lgr->sync_err &&
608 lgr->vlan_id == vlan_id && 614 lgr->vlan_id == vlan_id &&
609 (role == SMC_CLNT || 615 (role == SMC_CLNT ||
@@ -1024,6 +1030,8 @@ void smc_core_exit(void)
1024 smc_llc_link_inactive(lnk); 1030 smc_llc_link_inactive(lnk);
1025 } 1031 }
1026 cancel_delayed_work_sync(&lgr->free_work); 1032 cancel_delayed_work_sync(&lgr->free_work);
1033 if (lgr->is_smcd)
1034 smc_ism_signal_shutdown(lgr);
1027 smc_lgr_free(lgr); /* free link group */ 1035 smc_lgr_free(lgr); /* free link group */
1028 } 1036 }
1029} 1037}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index c156674733c9..cf98f4d6093e 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -247,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr);
247void smc_lgr_forget(struct smc_link_group *lgr); 247void smc_lgr_forget(struct smc_link_group *lgr);
248void smc_lgr_terminate(struct smc_link_group *lgr); 248void smc_lgr_terminate(struct smc_link_group *lgr);
249void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); 249void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
250void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid); 250void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
251 unsigned short vlan);
251int smc_buf_create(struct smc_sock *smc, bool is_smcd); 252int smc_buf_create(struct smc_sock *smc, bool is_smcd);
252int smc_uncompress_bufsize(u8 compressed); 253int smc_uncompress_bufsize(u8 compressed);
253int smc_rmb_rtoken_handling(struct smc_connection *conn, 254int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -262,7 +263,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
262 263
263void smc_conn_free(struct smc_connection *conn); 264void smc_conn_free(struct smc_connection *conn);
264int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, 265int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
265 struct smc_ib_device *smcibdev, u8 ibport, 266 struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
266 struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, 267 struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
267 u64 peer_gid); 268 u64 peer_gid);
268void smcd_conn_free(struct smc_connection *conn); 269void smcd_conn_free(struct smc_connection *conn);
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index e36f21ce7252..2fff79db1a59 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -187,22 +187,28 @@ struct smc_ism_event_work {
187#define ISM_EVENT_REQUEST 0x0001 187#define ISM_EVENT_REQUEST 0x0001
188#define ISM_EVENT_RESPONSE 0x0002 188#define ISM_EVENT_RESPONSE 0x0002
189#define ISM_EVENT_REQUEST_IR 0x00000001 189#define ISM_EVENT_REQUEST_IR 0x00000001
190#define ISM_EVENT_CODE_SHUTDOWN 0x80
190#define ISM_EVENT_CODE_TESTLINK 0x83 191#define ISM_EVENT_CODE_TESTLINK 0x83
191 192
193union smcd_sw_event_info {
194 u64 info;
195 struct {
196 u8 uid[SMC_LGR_ID_SIZE];
197 unsigned short vlan_id;
198 u16 code;
199 };
200};
201
192static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) 202static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
193{ 203{
194 union { 204 union smcd_sw_event_info ev_info;
195 u64 info;
196 struct {
197 u32 uid;
198 unsigned short vlanid;
199 u16 code;
200 };
201 } ev_info;
202 205
206 ev_info.info = wrk->event.info;
203 switch (wrk->event.code) { 207 switch (wrk->event.code) {
208 case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */
209 smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
210 break;
204 case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ 211 case ISM_EVENT_CODE_TESTLINK: /* Activity timer */
205 ev_info.info = wrk->event.info;
206 if (ev_info.code == ISM_EVENT_REQUEST) { 212 if (ev_info.code == ISM_EVENT_REQUEST) {
207 ev_info.code = ISM_EVENT_RESPONSE; 213 ev_info.code = ISM_EVENT_RESPONSE;
208 wrk->smcd->ops->signal_event(wrk->smcd, 214 wrk->smcd->ops->signal_event(wrk->smcd,
@@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
215 } 221 }
216} 222}
217 223
224int smc_ism_signal_shutdown(struct smc_link_group *lgr)
225{
226 int rc;
227 union smcd_sw_event_info ev_info;
228
229 memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
230 ev_info.vlan_id = lgr->vlan_id;
231 ev_info.code = ISM_EVENT_REQUEST;
232 rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
233 ISM_EVENT_REQUEST_IR,
234 ISM_EVENT_CODE_SHUTDOWN,
235 ev_info.info);
236 return rc;
237}
238
218/* worker for SMC-D events */ 239/* worker for SMC-D events */
219static void smc_ism_event_work(struct work_struct *work) 240static void smc_ism_event_work(struct work_struct *work)
220{ 241{
@@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work)
223 244
224 switch (wrk->event.type) { 245 switch (wrk->event.type) {
225 case ISM_EVENT_GID: /* GID event, token is peer GID */ 246 case ISM_EVENT_GID: /* GID event, token is peer GID */
226 smc_smcd_terminate(wrk->smcd, wrk->event.tok); 247 smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
227 break; 248 break;
228 case ISM_EVENT_DMB: 249 case ISM_EVENT_DMB:
229 break; 250 break;
@@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
289 spin_unlock(&smcd_dev_list.lock); 310 spin_unlock(&smcd_dev_list.lock);
290 flush_workqueue(smcd->event_wq); 311 flush_workqueue(smcd->event_wq);
291 destroy_workqueue(smcd->event_wq); 312 destroy_workqueue(smcd->event_wq);
292 smc_smcd_terminate(smcd, 0); 313 smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
293 314
294 device_del(&smcd->dev); 315 device_del(&smcd->dev);
295} 316}
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index aee45b860b79..4da946cbfa29 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
45int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); 45int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
46int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, 46int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
47 void *data, size_t len); 47 void *data, size_t len);
48int smc_ism_signal_shutdown(struct smc_link_group *lgr);
48#endif 49#endif
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 3c458d279855..c2694750a6a8 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link,
215 215
216 pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); 216 pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
217 if (pend->idx < link->wr_tx_cnt) { 217 if (pend->idx < link->wr_tx_cnt) {
218 u32 idx = pend->idx;
219
218 /* clear the full struct smc_wr_tx_pend including .priv */ 220 /* clear the full struct smc_wr_tx_pend including .priv */
219 memset(&link->wr_tx_pends[pend->idx], 0, 221 memset(&link->wr_tx_pends[pend->idx], 0,
220 sizeof(link->wr_tx_pends[pend->idx])); 222 sizeof(link->wr_tx_pends[pend->idx]));
221 memset(&link->wr_tx_bufs[pend->idx], 0, 223 memset(&link->wr_tx_bufs[pend->idx], 0,
222 sizeof(link->wr_tx_bufs[pend->idx])); 224 sizeof(link->wr_tx_bufs[pend->idx]));
223 test_and_clear_bit(pend->idx, link->wr_tx_mask); 225 test_and_clear_bit(idx, link->wr_tx_mask);
224 return 1; 226 return 1;
225 } 227 }
226 228
diff --git a/net/socket.c b/net/socket.c
index 593826e11a53..334fcc617ef2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -853,7 +853,7 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
853 struct socket *sock = file->private_data; 853 struct socket *sock = file->private_data;
854 854
855 if (unlikely(!sock->ops->splice_read)) 855 if (unlikely(!sock->ops->splice_read))
856 return -EINVAL; 856 return generic_file_splice_read(file, ppos, pipe, len, flags);
857 857
858 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 858 return sock->ops->splice_read(sock, ppos, pipe, len, flags);
859} 859}
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 2830709957bd..c138d68e8a69 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
166 166
167 /* Apply trial address if we just left trial period */ 167 /* Apply trial address if we just left trial period */
168 if (!trial && !self) { 168 if (!trial && !self) {
169 tipc_net_finalize(net, tn->trial_addr); 169 tipc_sched_net_finalize(net, tn->trial_addr);
170 msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
170 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); 171 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
171 } 172 }
172 173
@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t)
300 goto exit; 301 goto exit;
301 } 302 }
302 303
303 /* Trial period over ? */ 304 /* Did we just leave trial period ? */
304 if (!time_before(jiffies, tn->addr_trial_end)) { 305 if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
305 /* Did we just leave it ? */ 306 mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
306 if (!tipc_own_addr(net)) 307 spin_unlock_bh(&d->lock);
307 tipc_net_finalize(net, tn->trial_addr); 308 tipc_sched_net_finalize(net, tn->trial_addr);
308 309 return;
309 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
310 msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
311 } 310 }
312 311
313 /* Adjust timeout interval according to discovery phase */ 312 /* Adjust timeout interval according to discovery phase */
@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t)
319 d->timer_intv = TIPC_DISC_SLOW; 318 d->timer_intv = TIPC_DISC_SLOW;
320 else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST) 319 else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
321 d->timer_intv = TIPC_DISC_FAST; 320 d->timer_intv = TIPC_DISC_FAST;
321 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
322 msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
322 } 323 }
323 324
324 mod_timer(&d->timer, jiffies + d->timer_intv); 325 mod_timer(&d->timer, jiffies + d->timer_intv);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 62199cf5a56c..f076edb74338 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -104,6 +104,14 @@
104 * - A local spin_lock protecting the queue of subscriber events. 104 * - A local spin_lock protecting the queue of subscriber events.
105*/ 105*/
106 106
107struct tipc_net_work {
108 struct work_struct work;
109 struct net *net;
110 u32 addr;
111};
112
113static void tipc_net_finalize(struct net *net, u32 addr);
114
107int tipc_net_init(struct net *net, u8 *node_id, u32 addr) 115int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
108{ 116{
109 if (tipc_own_id(net)) { 117 if (tipc_own_id(net)) {
@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
119 return 0; 127 return 0;
120} 128}
121 129
122void tipc_net_finalize(struct net *net, u32 addr) 130static void tipc_net_finalize(struct net *net, u32 addr)
123{ 131{
124 struct tipc_net *tn = tipc_net(net); 132 struct tipc_net *tn = tipc_net(net);
125 133
126 if (!cmpxchg(&tn->node_addr, 0, addr)) { 134 if (cmpxchg(&tn->node_addr, 0, addr))
127 tipc_set_node_addr(net, addr); 135 return;
128 tipc_named_reinit(net); 136 tipc_set_node_addr(net, addr);
129 tipc_sk_reinit(net); 137 tipc_named_reinit(net);
130 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, 138 tipc_sk_reinit(net);
131 TIPC_CLUSTER_SCOPE, 0, addr); 139 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
132 } 140 TIPC_CLUSTER_SCOPE, 0, addr);
141}
142
143static void tipc_net_finalize_work(struct work_struct *work)
144{
145 struct tipc_net_work *fwork;
146
147 fwork = container_of(work, struct tipc_net_work, work);
148 tipc_net_finalize(fwork->net, fwork->addr);
149 kfree(fwork);
150}
151
152void tipc_sched_net_finalize(struct net *net, u32 addr)
153{
154 struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
155
156 if (!fwork)
157 return;
158 INIT_WORK(&fwork->work, tipc_net_finalize_work);
159 fwork->net = net;
160 fwork->addr = addr;
161 schedule_work(&fwork->work);
133} 162}
134 163
135void tipc_net_stop(struct net *net) 164void tipc_net_stop(struct net *net)
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 09ad02b50bb1..b7f2e364eb99 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -42,7 +42,7 @@
42extern const struct nla_policy tipc_nl_net_policy[]; 42extern const struct nla_policy tipc_nl_net_policy[];
43 43
44int tipc_net_init(struct net *net, u8 *node_id, u32 addr); 44int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
45void tipc_net_finalize(struct net *net, u32 addr); 45void tipc_sched_net_finalize(struct net *net, u32 addr);
46void tipc_net_stop(struct net *net); 46void tipc_net_stop(struct net *net);
47int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); 47int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
48int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); 48int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2afc4f8c37a7..488019766433 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -584,12 +584,15 @@ static void tipc_node_clear_links(struct tipc_node *node)
584/* tipc_node_cleanup - delete nodes that does not 584/* tipc_node_cleanup - delete nodes that does not
585 * have active links for NODE_CLEANUP_AFTER time 585 * have active links for NODE_CLEANUP_AFTER time
586 */ 586 */
587static int tipc_node_cleanup(struct tipc_node *peer) 587static bool tipc_node_cleanup(struct tipc_node *peer)
588{ 588{
589 struct tipc_net *tn = tipc_net(peer->net); 589 struct tipc_net *tn = tipc_net(peer->net);
590 bool deleted = false; 590 bool deleted = false;
591 591
592 spin_lock_bh(&tn->node_list_lock); 592 /* If lock held by tipc_node_stop() the node will be deleted anyway */
593 if (!spin_trylock_bh(&tn->node_list_lock))
594 return false;
595
593 tipc_node_write_lock(peer); 596 tipc_node_write_lock(peer);
594 597
595 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) { 598 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 636e6131769d..b57b1be7252b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1555,16 +1555,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1555/** 1555/**
1556 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1556 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1557 * @m: descriptor for message info 1557 * @m: descriptor for message info
1558 * @msg: received message header 1558 * @skb: received message buffer
1559 * @tsk: TIPC port associated with message 1559 * @tsk: TIPC port associated with message
1560 * 1560 *
1561 * Note: Ancillary data is not captured if not requested by receiver. 1561 * Note: Ancillary data is not captured if not requested by receiver.
1562 * 1562 *
1563 * Returns 0 if successful, otherwise errno 1563 * Returns 0 if successful, otherwise errno
1564 */ 1564 */
1565static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1565static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1566 struct tipc_sock *tsk) 1566 struct tipc_sock *tsk)
1567{ 1567{
1568 struct tipc_msg *msg;
1568 u32 anc_data[3]; 1569 u32 anc_data[3];
1569 u32 err; 1570 u32 err;
1570 u32 dest_type; 1571 u32 dest_type;
@@ -1573,6 +1574,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1573 1574
1574 if (likely(m->msg_controllen == 0)) 1575 if (likely(m->msg_controllen == 0))
1575 return 0; 1576 return 0;
1577 msg = buf_msg(skb);
1576 1578
1577 /* Optionally capture errored message object(s) */ 1579 /* Optionally capture errored message object(s) */
1578 err = msg ? msg_errcode(msg) : 0; 1580 err = msg ? msg_errcode(msg) : 0;
@@ -1583,6 +1585,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1583 if (res) 1585 if (res)
1584 return res; 1586 return res;
1585 if (anc_data[1]) { 1587 if (anc_data[1]) {
1588 if (skb_linearize(skb))
1589 return -ENOMEM;
1590 msg = buf_msg(skb);
1586 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 1591 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1587 msg_data(msg)); 1592 msg_data(msg));
1588 if (res) 1593 if (res)
@@ -1744,9 +1749,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1744 1749
1745 /* Collect msg meta data, including error code and rejected data */ 1750 /* Collect msg meta data, including error code and rejected data */
1746 tipc_sk_set_orig_addr(m, skb); 1751 tipc_sk_set_orig_addr(m, skb);
1747 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1752 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1748 if (unlikely(rc)) 1753 if (unlikely(rc))
1749 goto exit; 1754 goto exit;
1755 hdr = buf_msg(skb);
1750 1756
1751 /* Capture data if non-error msg, otherwise just set return value */ 1757 /* Capture data if non-error msg, otherwise just set return value */
1752 if (likely(!err)) { 1758 if (likely(!err)) {
@@ -1856,9 +1862,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1856 /* Collect msg meta data, incl. error code and rejected data */ 1862 /* Collect msg meta data, incl. error code and rejected data */
1857 if (!copied) { 1863 if (!copied) {
1858 tipc_sk_set_orig_addr(m, skb); 1864 tipc_sk_set_orig_addr(m, skb);
1859 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1865 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1860 if (rc) 1866 if (rc)
1861 break; 1867 break;
1868 hdr = buf_msg(skb);
1862 } 1869 }
1863 1870
1864 /* Copy data if msg ok, otherwise return error/partial data */ 1871 /* Copy data if msg ok, otherwise return error/partial data */
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a8e7ba9f73e8..6a6be9f440cf 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -236,10 +236,8 @@ ifdef CONFIG_GCOV_KERNEL
236objtool_args += --no-unreachable 236objtool_args += --no-unreachable
237endif 237endif
238ifdef CONFIG_RETPOLINE 238ifdef CONFIG_RETPOLINE
239ifneq ($(RETPOLINE_CFLAGS),)
240 objtool_args += --retpoline 239 objtool_args += --retpoline
241endif 240endif
242endif
243 241
244 242
245ifdef CONFIG_MODVERSIONS 243ifdef CONFIG_MODVERSIONS
diff --git a/scripts/unifdef.c b/scripts/unifdef.c
index 7493c0ee51cc..db00e3e30a59 100644
--- a/scripts/unifdef.c
+++ b/scripts/unifdef.c
@@ -395,7 +395,7 @@ usage(void)
395 * When we have processed a group that starts off with a known-false 395 * When we have processed a group that starts off with a known-false
396 * #if/#elif sequence (which has therefore been deleted) followed by a 396 * #if/#elif sequence (which has therefore been deleted) followed by a
397 * #elif that we don't understand and therefore must keep, we edit the 397 * #elif that we don't understand and therefore must keep, we edit the
398 * latter into a #if to keep the nesting correct. We use strncpy() to 398 * latter into a #if to keep the nesting correct. We use memcpy() to
399 * overwrite the 4 byte token "elif" with "if " without a '\0' byte. 399 * overwrite the 4 byte token "elif" with "if " without a '\0' byte.
400 * 400 *
401 * When we find a true #elif in a group, the following block will 401 * When we find a true #elif in a group, the following block will
@@ -450,7 +450,7 @@ static void Idrop (void) { Fdrop(); ignoreon(); }
450static void Itrue (void) { Ftrue(); ignoreon(); } 450static void Itrue (void) { Ftrue(); ignoreon(); }
451static void Ifalse(void) { Ffalse(); ignoreon(); } 451static void Ifalse(void) { Ffalse(); ignoreon(); }
452/* modify this line */ 452/* modify this line */
453static void Mpass (void) { strncpy(keyword, "if ", 4); Pelif(); } 453static void Mpass (void) { memcpy(keyword, "if ", 4); Pelif(); }
454static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); } 454static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); }
455static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); } 455static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); }
456static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); } 456static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); }
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 74b951f55608..9cec81209617 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -80,6 +80,9 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
80 { RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ }, 80 { RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
81 { RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ }, 81 { RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
82 { RTM_NEWCACHEREPORT, NETLINK_ROUTE_SOCKET__NLMSG_READ }, 82 { RTM_NEWCACHEREPORT, NETLINK_ROUTE_SOCKET__NLMSG_READ },
83 { RTM_NEWCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
84 { RTM_DELCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
85 { RTM_GETCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
83}; 86};
84 87
85static const struct nlmsg_perm nlmsg_tcpdiag_perms[] = 88static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -158,7 +161,11 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
158 161
159 switch (sclass) { 162 switch (sclass) {
160 case SECCLASS_NETLINK_ROUTE_SOCKET: 163 case SECCLASS_NETLINK_ROUTE_SOCKET:
161 /* RTM_MAX always point to RTM_SETxxxx, ie RTM_NEWxxx + 3 */ 164 /* RTM_MAX always points to RTM_SETxxxx, ie RTM_NEWxxx + 3.
165 * If the BUILD_BUG_ON() below fails you must update the
166 * structures at the top of this file with the new mappings
167 * before updating the BUILD_BUG_ON() macro!
168 */
162 BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3)); 169 BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3));
163 err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms, 170 err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
164 sizeof(nlmsg_route_perms)); 171 sizeof(nlmsg_route_perms));
@@ -170,6 +177,10 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
170 break; 177 break;
171 178
172 case SECCLASS_NETLINK_XFRM_SOCKET: 179 case SECCLASS_NETLINK_XFRM_SOCKET:
180 /* If the BUILD_BUG_ON() below fails you must update the
181 * structures at the top of this file with the new mappings
182 * before updating the BUILD_BUG_ON() macro!
183 */
173 BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING); 184 BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
174 err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms, 185 err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
175 sizeof(nlmsg_xfrm_perms)); 186 sizeof(nlmsg_xfrm_perms));
diff --git a/sound/core/control.c b/sound/core/control.c
index 9aa15bfc7936..649d3217590e 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -348,6 +348,40 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
348 return 0; 348 return 0;
349} 349}
350 350
351/* add a new kcontrol object; call with card->controls_rwsem locked */
352static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
353{
354 struct snd_ctl_elem_id id;
355 unsigned int idx;
356 unsigned int count;
357
358 id = kcontrol->id;
359 if (id.index > UINT_MAX - kcontrol->count)
360 return -EINVAL;
361
362 if (snd_ctl_find_id(card, &id)) {
363 dev_err(card->dev,
364 "control %i:%i:%i:%s:%i is already present\n",
365 id.iface, id.device, id.subdevice, id.name, id.index);
366 return -EBUSY;
367 }
368
369 if (snd_ctl_find_hole(card, kcontrol->count) < 0)
370 return -ENOMEM;
371
372 list_add_tail(&kcontrol->list, &card->controls);
373 card->controls_count += kcontrol->count;
374 kcontrol->id.numid = card->last_numid + 1;
375 card->last_numid += kcontrol->count;
376
377 id = kcontrol->id;
378 count = kcontrol->count;
379 for (idx = 0; idx < count; idx++, id.index++, id.numid++)
380 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
381
382 return 0;
383}
384
351/** 385/**
352 * snd_ctl_add - add the control instance to the card 386 * snd_ctl_add - add the control instance to the card
353 * @card: the card instance 387 * @card: the card instance
@@ -364,45 +398,18 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
364 */ 398 */
365int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol) 399int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
366{ 400{
367 struct snd_ctl_elem_id id;
368 unsigned int idx;
369 unsigned int count;
370 int err = -EINVAL; 401 int err = -EINVAL;
371 402
372 if (! kcontrol) 403 if (! kcontrol)
373 return err; 404 return err;
374 if (snd_BUG_ON(!card || !kcontrol->info)) 405 if (snd_BUG_ON(!card || !kcontrol->info))
375 goto error; 406 goto error;
376 id = kcontrol->id;
377 if (id.index > UINT_MAX - kcontrol->count)
378 goto error;
379 407
380 down_write(&card->controls_rwsem); 408 down_write(&card->controls_rwsem);
381 if (snd_ctl_find_id(card, &id)) { 409 err = __snd_ctl_add(card, kcontrol);
382 up_write(&card->controls_rwsem);
383 dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
384 id.iface,
385 id.device,
386 id.subdevice,
387 id.name,
388 id.index);
389 err = -EBUSY;
390 goto error;
391 }
392 if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
393 up_write(&card->controls_rwsem);
394 err = -ENOMEM;
395 goto error;
396 }
397 list_add_tail(&kcontrol->list, &card->controls);
398 card->controls_count += kcontrol->count;
399 kcontrol->id.numid = card->last_numid + 1;
400 card->last_numid += kcontrol->count;
401 id = kcontrol->id;
402 count = kcontrol->count;
403 up_write(&card->controls_rwsem); 410 up_write(&card->controls_rwsem);
404 for (idx = 0; idx < count; idx++, id.index++, id.numid++) 411 if (err < 0)
405 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); 412 goto error;
406 return 0; 413 return 0;
407 414
408 error: 415 error:
@@ -1361,9 +1368,12 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1361 kctl->tlv.c = snd_ctl_elem_user_tlv; 1368 kctl->tlv.c = snd_ctl_elem_user_tlv;
1362 1369
1363 /* This function manage to free the instance on failure. */ 1370 /* This function manage to free the instance on failure. */
1364 err = snd_ctl_add(card, kctl); 1371 down_write(&card->controls_rwsem);
1365 if (err < 0) 1372 err = __snd_ctl_add(card, kctl);
1366 return err; 1373 if (err < 0) {
1374 snd_ctl_free_one(kctl);
1375 goto unlock;
1376 }
1367 offset = snd_ctl_get_ioff(kctl, &info->id); 1377 offset = snd_ctl_get_ioff(kctl, &info->id);
1368 snd_ctl_build_ioff(&info->id, kctl, offset); 1378 snd_ctl_build_ioff(&info->id, kctl, offset);
1369 /* 1379 /*
@@ -1374,10 +1384,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1374 * which locks the element. 1384 * which locks the element.
1375 */ 1385 */
1376 1386
1377 down_write(&card->controls_rwsem);
1378 card->user_ctl_count++; 1387 card->user_ctl_count++;
1379 up_write(&card->controls_rwsem);
1380 1388
1389 unlock:
1390 up_write(&card->controls_rwsem);
1381 return 0; 1391 return 0;
1382} 1392}
1383 1393
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index f8d4a419f3af..467039b342b5 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1062,8 +1062,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
1062 runtime->oss.channels = params_channels(params); 1062 runtime->oss.channels = params_channels(params);
1063 runtime->oss.rate = params_rate(params); 1063 runtime->oss.rate = params_rate(params);
1064 1064
1065 vfree(runtime->oss.buffer); 1065 kvfree(runtime->oss.buffer);
1066 runtime->oss.buffer = vmalloc(runtime->oss.period_bytes); 1066 runtime->oss.buffer = kvzalloc(runtime->oss.period_bytes, GFP_KERNEL);
1067 if (!runtime->oss.buffer) { 1067 if (!runtime->oss.buffer) {
1068 err = -ENOMEM; 1068 err = -ENOMEM;
1069 goto failure; 1069 goto failure;
@@ -2328,7 +2328,7 @@ static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
2328{ 2328{
2329 struct snd_pcm_runtime *runtime; 2329 struct snd_pcm_runtime *runtime;
2330 runtime = substream->runtime; 2330 runtime = substream->runtime;
2331 vfree(runtime->oss.buffer); 2331 kvfree(runtime->oss.buffer);
2332 runtime->oss.buffer = NULL; 2332 runtime->oss.buffer = NULL;
2333#ifdef CONFIG_SND_PCM_OSS_PLUGINS 2333#ifdef CONFIG_SND_PCM_OSS_PLUGINS
2334 snd_pcm_oss_plugin_clear(substream); 2334 snd_pcm_oss_plugin_clear(substream);
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 141c5f3a9575..31cb2acf8afc 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -66,8 +66,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t
66 return -ENXIO; 66 return -ENXIO;
67 size /= 8; 67 size /= 8;
68 if (plugin->buf_frames < frames) { 68 if (plugin->buf_frames < frames) {
69 vfree(plugin->buf); 69 kvfree(plugin->buf);
70 plugin->buf = vmalloc(size); 70 plugin->buf = kvzalloc(size, GFP_KERNEL);
71 plugin->buf_frames = frames; 71 plugin->buf_frames = frames;
72 } 72 }
73 if (!plugin->buf) { 73 if (!plugin->buf) {
@@ -191,7 +191,7 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
191 if (plugin->private_free) 191 if (plugin->private_free)
192 plugin->private_free(plugin); 192 plugin->private_free(plugin);
193 kfree(plugin->buf_channels); 193 kfree(plugin->buf_channels);
194 vfree(plugin->buf); 194 kvfree(plugin->buf);
195 kfree(plugin); 195 kfree(plugin);
196 return 0; 196 return 0;
197} 197}
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 32453f81b95a..3a5008837576 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
1531 if (err < 0) { 1531 if (err < 0) {
1532 if (chip->release_dma) 1532 if (chip->release_dma)
1533 chip->release_dma(chip, chip->dma_private_data, chip->dma1); 1533 chip->release_dma(chip, chip->dma_private_data, chip->dma1);
1534 snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1535 return err; 1534 return err;
1536 } 1535 }
1537 chip->playback_substream = substream; 1536 chip->playback_substream = substream;
@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
1572 if (err < 0) { 1571 if (err < 0) {
1573 if (chip->release_dma) 1572 if (chip->release_dma)
1574 chip->release_dma(chip, chip->dma_private_data, chip->dma2); 1573 chip->release_dma(chip, chip->dma_private_data, chip->dma2);
1575 snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1576 return err; 1574 return err;
1577 } 1575 }
1578 chip->capture_substream = substream; 1576 chip->capture_substream = substream;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index f4459d1a9d67..27b468f057dd 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
824{ 824{
825 struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); 825 struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
826 int reg = kcontrol->private_value & 0xff; 826 int reg = kcontrol->private_value & 0xff;
827 int shift = (kcontrol->private_value >> 8) & 0xff; 827 int shift = (kcontrol->private_value >> 8) & 0x0f;
828 int mask = (kcontrol->private_value >> 16) & 0xff; 828 int mask = (kcontrol->private_value >> 16) & 0xff;
829 // int invert = (kcontrol->private_value >> 24) & 0xff; 829 // int invert = (kcontrol->private_value >> 24) & 0xff;
830 unsigned short value, old, new; 830 unsigned short value, old, new;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index d8eb2b5f51ae..0bbdf1a01e76 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2169,6 +2169,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
2169 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2169 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2170 SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0), 2170 SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0),
2171 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2171 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2172 SND_PCI_QUIRK(0x1849, 0x0397, "Asrock N68C-S UCC", 0),
2173 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2172 SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0), 2174 SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
2173 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2175 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2174 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), 2176 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0a24037184c3..0a567634e5fa 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1177,6 +1177,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
1177 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), 1177 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
1178 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), 1178 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
1179 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), 1179 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
1180 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
1180 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), 1181 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
1181 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), 1182 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
1182 SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), 1183 SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
@@ -8413,7 +8414,7 @@ static void ca0132_free(struct hda_codec *codec)
8413 8414
8414 snd_hda_power_down(codec); 8415 snd_hda_power_down(codec);
8415 if (spec->mem_base) 8416 if (spec->mem_base)
8416 iounmap(spec->mem_base); 8417 pci_iounmap(codec->bus->pci, spec->mem_base);
8417 kfree(spec->spec_init_verbs); 8418 kfree(spec->spec_init_verbs);
8418 kfree(codec->spec); 8419 kfree(codec->spec);
8419} 8420}
@@ -8488,7 +8489,7 @@ static void ca0132_config(struct hda_codec *codec)
8488 break; 8489 break;
8489 case QUIRK_AE5: 8490 case QUIRK_AE5:
8490 codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__); 8491 codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__);
8491 snd_hda_apply_pincfgs(codec, r3di_pincfgs); 8492 snd_hda_apply_pincfgs(codec, ae5_pincfgs);
8492 break; 8493 break;
8493 } 8494 }
8494 8495
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index fa61674a5605..06f93032d0cc 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -388,6 +388,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
388 case 0x10ec0285: 388 case 0x10ec0285:
389 case 0x10ec0298: 389 case 0x10ec0298:
390 case 0x10ec0289: 390 case 0x10ec0289:
391 case 0x10ec0300:
391 alc_update_coef_idx(codec, 0x10, 1<<9, 0); 392 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
392 break; 393 break;
393 case 0x10ec0275: 394 case 0x10ec0275:
@@ -2830,6 +2831,7 @@ enum {
2830 ALC269_TYPE_ALC215, 2831 ALC269_TYPE_ALC215,
2831 ALC269_TYPE_ALC225, 2832 ALC269_TYPE_ALC225,
2832 ALC269_TYPE_ALC294, 2833 ALC269_TYPE_ALC294,
2834 ALC269_TYPE_ALC300,
2833 ALC269_TYPE_ALC700, 2835 ALC269_TYPE_ALC700,
2834}; 2836};
2835 2837
@@ -2864,6 +2866,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
2864 case ALC269_TYPE_ALC215: 2866 case ALC269_TYPE_ALC215:
2865 case ALC269_TYPE_ALC225: 2867 case ALC269_TYPE_ALC225:
2866 case ALC269_TYPE_ALC294: 2868 case ALC269_TYPE_ALC294:
2869 case ALC269_TYPE_ALC300:
2867 case ALC269_TYPE_ALC700: 2870 case ALC269_TYPE_ALC700:
2868 ssids = alc269_ssids; 2871 ssids = alc269_ssids;
2869 break; 2872 break;
@@ -5358,6 +5361,16 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
5358 spec->gen.preferred_dacs = preferred_pairs; 5361 spec->gen.preferred_dacs = preferred_pairs;
5359} 5362}
5360 5363
5364/* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
5365static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
5366 const struct hda_fixup *fix, int action)
5367{
5368 if (action != HDA_FIXUP_ACT_PRE_PROBE)
5369 return;
5370
5371 snd_hda_override_wcaps(codec, 0x03, 0);
5372}
5373
5361/* for hda_fixup_thinkpad_acpi() */ 5374/* for hda_fixup_thinkpad_acpi() */
5362#include "thinkpad_helper.c" 5375#include "thinkpad_helper.c"
5363 5376
@@ -5495,6 +5508,8 @@ enum {
5495 ALC255_FIXUP_DELL_HEADSET_MIC, 5508 ALC255_FIXUP_DELL_HEADSET_MIC,
5496 ALC295_FIXUP_HP_X360, 5509 ALC295_FIXUP_HP_X360,
5497 ALC221_FIXUP_HP_HEADSET_MIC, 5510 ALC221_FIXUP_HP_HEADSET_MIC,
5511 ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
5512 ALC295_FIXUP_HP_AUTO_MUTE,
5498}; 5513};
5499 5514
5500static const struct hda_fixup alc269_fixups[] = { 5515static const struct hda_fixup alc269_fixups[] = {
@@ -5659,6 +5674,8 @@ static const struct hda_fixup alc269_fixups[] = {
5659 [ALC269_FIXUP_HP_MUTE_LED_MIC3] = { 5674 [ALC269_FIXUP_HP_MUTE_LED_MIC3] = {
5660 .type = HDA_FIXUP_FUNC, 5675 .type = HDA_FIXUP_FUNC,
5661 .v.func = alc269_fixup_hp_mute_led_mic3, 5676 .v.func = alc269_fixup_hp_mute_led_mic3,
5677 .chained = true,
5678 .chain_id = ALC295_FIXUP_HP_AUTO_MUTE
5662 }, 5679 },
5663 [ALC269_FIXUP_HP_GPIO_LED] = { 5680 [ALC269_FIXUP_HP_GPIO_LED] = {
5664 .type = HDA_FIXUP_FUNC, 5681 .type = HDA_FIXUP_FUNC,
@@ -6362,6 +6379,14 @@ static const struct hda_fixup alc269_fixups[] = {
6362 .chained = true, 6379 .chained = true,
6363 .chain_id = ALC269_FIXUP_HEADSET_MIC 6380 .chain_id = ALC269_FIXUP_HEADSET_MIC
6364 }, 6381 },
6382 [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
6383 .type = HDA_FIXUP_FUNC,
6384 .v.func = alc285_fixup_invalidate_dacs,
6385 },
6386 [ALC295_FIXUP_HP_AUTO_MUTE] = {
6387 .type = HDA_FIXUP_FUNC,
6388 .v.func = alc_fixup_auto_mute_via_amp,
6389 },
6365}; 6390};
6366 6391
6367static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6392static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6481,6 +6506,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6481 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 6506 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
6482 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 6507 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
6483 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), 6508 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
6509 SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6484 SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC), 6510 SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
6485 SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360), 6511 SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
6486 SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6512 SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
@@ -6531,6 +6557,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6531 SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), 6557 SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
6532 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 6558 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
6533 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), 6559 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
6560 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
6534 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), 6561 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
6535 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 6562 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
6536 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 6563 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -7033,6 +7060,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7033 {0x12, 0x90a60130}, 7060 {0x12, 0x90a60130},
7034 {0x19, 0x03a11020}, 7061 {0x19, 0x03a11020},
7035 {0x21, 0x0321101f}), 7062 {0x21, 0x0321101f}),
7063 SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
7064 {0x12, 0x90a60130},
7065 {0x14, 0x90170110},
7066 {0x19, 0x04a11040},
7067 {0x21, 0x04211020}),
7036 SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, 7068 SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
7037 {0x12, 0x90a60120}, 7069 {0x12, 0x90a60120},
7038 {0x14, 0x90170110}, 7070 {0x14, 0x90170110},
@@ -7294,6 +7326,10 @@ static int patch_alc269(struct hda_codec *codec)
7294 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ 7326 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
7295 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ 7327 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
7296 break; 7328 break;
7329 case 0x10ec0300:
7330 spec->codec_variant = ALC269_TYPE_ALC300;
7331 spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
7332 break;
7297 case 0x10ec0700: 7333 case 0x10ec0700:
7298 case 0x10ec0701: 7334 case 0x10ec0701:
7299 case 0x10ec0703: 7335 case 0x10ec0703:
@@ -8404,6 +8440,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
8404 HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269), 8440 HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
8405 HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269), 8441 HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
8406 HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269), 8442 HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
8443 HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
8407 HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861), 8444 HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
8408 HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd), 8445 HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
8409 HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861), 8446 HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 4e9854889a95..e63d6e33df48 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -2187,11 +2187,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
2187 */ 2187 */
2188 snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE, 2188 snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
2189 AC_PWRST_D3); 2189 AC_PWRST_D3);
2190 err = snd_hdac_display_power(bus, false);
2191 if (err < 0) {
2192 dev_err(dev, "Cannot turn on display power on i915\n");
2193 return err;
2194 }
2195 2190
2196 hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev)); 2191 hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
2197 if (!hlink) { 2192 if (!hlink) {
@@ -2201,7 +2196,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
2201 2196
2202 snd_hdac_ext_bus_link_put(bus, hlink); 2197 snd_hdac_ext_bus_link_put(bus, hlink);
2203 2198
2204 return 0; 2199 err = snd_hdac_display_power(bus, false);
2200 if (err < 0)
2201 dev_err(dev, "Cannot turn off display power on i915\n");
2202
2203 return err;
2205} 2204}
2206 2205
2207static int hdac_hdmi_runtime_resume(struct device *dev) 2206static int hdac_hdmi_runtime_resume(struct device *dev)
diff --git a/sound/soc/codecs/pcm186x.h b/sound/soc/codecs/pcm186x.h
index 2c6ba55bf394..bb3f0c42a1cd 100644
--- a/sound/soc/codecs/pcm186x.h
+++ b/sound/soc/codecs/pcm186x.h
@@ -139,7 +139,7 @@ enum pcm186x_type {
139#define PCM186X_MAX_REGISTER PCM186X_CURR_TRIM_CTRL 139#define PCM186X_MAX_REGISTER PCM186X_CURR_TRIM_CTRL
140 140
141/* PCM186X_PAGE */ 141/* PCM186X_PAGE */
142#define PCM186X_RESET 0xff 142#define PCM186X_RESET 0xfe
143 143
144/* PCM186X_ADCX_INPUT_SEL_X */ 144/* PCM186X_ADCX_INPUT_SEL_X */
145#define PCM186X_ADC_INPUT_SEL_POL BIT(7) 145#define PCM186X_ADC_INPUT_SEL_POL BIT(7)
diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c
index 494d9d662be8..771b46e1974b 100644
--- a/sound/soc/codecs/pcm3060.c
+++ b/sound/soc/codecs/pcm3060.c
@@ -198,20 +198,16 @@ static const struct snd_kcontrol_new pcm3060_dapm_controls[] = {
198}; 198};
199 199
200static const struct snd_soc_dapm_widget pcm3060_dapm_widgets[] = { 200static const struct snd_soc_dapm_widget pcm3060_dapm_widgets[] = {
201 SND_SOC_DAPM_OUTPUT("OUTL+"), 201 SND_SOC_DAPM_OUTPUT("OUTL"),
202 SND_SOC_DAPM_OUTPUT("OUTR+"), 202 SND_SOC_DAPM_OUTPUT("OUTR"),
203 SND_SOC_DAPM_OUTPUT("OUTL-"),
204 SND_SOC_DAPM_OUTPUT("OUTR-"),
205 203
206 SND_SOC_DAPM_INPUT("INL"), 204 SND_SOC_DAPM_INPUT("INL"),
207 SND_SOC_DAPM_INPUT("INR"), 205 SND_SOC_DAPM_INPUT("INR"),
208}; 206};
209 207
210static const struct snd_soc_dapm_route pcm3060_dapm_map[] = { 208static const struct snd_soc_dapm_route pcm3060_dapm_map[] = {
211 { "OUTL+", NULL, "Playback" }, 209 { "OUTL", NULL, "Playback" },
212 { "OUTR+", NULL, "Playback" }, 210 { "OUTR", NULL, "Playback" },
213 { "OUTL-", NULL, "Playback" },
214 { "OUTR-", NULL, "Playback" },
215 211
216 { "Capture", NULL, "INL" }, 212 { "Capture", NULL, "INL" },
217 { "Capture", NULL, "INR" }, 213 { "Capture", NULL, "INR" },
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index a53dc174bbf0..66501b8dc46f 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -765,38 +765,41 @@ static unsigned int wm_adsp_region_to_reg(struct wm_adsp_region const *mem,
765 765
766static void wm_adsp2_show_fw_status(struct wm_adsp *dsp) 766static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
767{ 767{
768 u16 scratch[4]; 768 unsigned int scratch[4];
769 unsigned int addr = dsp->base + ADSP2_SCRATCH0;
770 unsigned int i;
769 int ret; 771 int ret;
770 772
771 ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2_SCRATCH0, 773 for (i = 0; i < ARRAY_SIZE(scratch); ++i) {
772 scratch, sizeof(scratch)); 774 ret = regmap_read(dsp->regmap, addr + i, &scratch[i]);
773 if (ret) { 775 if (ret) {
774 adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret); 776 adsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
775 return; 777 return;
778 }
776 } 779 }
777 780
778 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n", 781 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
779 be16_to_cpu(scratch[0]), 782 scratch[0], scratch[1], scratch[2], scratch[3]);
780 be16_to_cpu(scratch[1]),
781 be16_to_cpu(scratch[2]),
782 be16_to_cpu(scratch[3]));
783} 783}
784 784
785static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp) 785static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
786{ 786{
787 u32 scratch[2]; 787 unsigned int scratch[2];
788 int ret; 788 int ret;
789 789
790 ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1, 790 ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
791 scratch, sizeof(scratch)); 791 &scratch[0]);
792
793 if (ret) { 792 if (ret) {
794 adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret); 793 adsp_err(dsp, "Failed to read SCRATCH0_1: %d\n", ret);
795 return; 794 return;
796 } 795 }
797 796
798 scratch[0] = be32_to_cpu(scratch[0]); 797 ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH2_3,
799 scratch[1] = be32_to_cpu(scratch[1]); 798 &scratch[1]);
799 if (ret) {
800 adsp_err(dsp, "Failed to read SCRATCH2_3: %d\n", ret);
801 return;
802 }
800 803
801 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n", 804 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
802 scratch[0] & 0xFFFF, 805 scratch[0] & 0xFFFF,
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 0caa1f4eb94d..18e717703685 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -101,22 +101,42 @@ config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
101 codec, then enable this option by saying Y or m. This is a 101 codec, then enable this option by saying Y or m. This is a
102 recommended option 102 recommended option
103 103
104config SND_SOC_INTEL_SKYLAKE_SSP_CLK
105 tristate
106
107config SND_SOC_INTEL_SKYLAKE 104config SND_SOC_INTEL_SKYLAKE
108 tristate "SKL/BXT/KBL/GLK/CNL... Platforms" 105 tristate "SKL/BXT/KBL/GLK/CNL... Platforms"
109 depends on PCI && ACPI 106 depends on PCI && ACPI
107 select SND_SOC_INTEL_SKYLAKE_COMMON
108 help
109 If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
110 GeminiLake or CannonLake platform with the DSP enabled in the BIOS
111 then enable this option by saying Y or m.
112
113if SND_SOC_INTEL_SKYLAKE
114
115config SND_SOC_INTEL_SKYLAKE_SSP_CLK
116 tristate
117
118config SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
119 bool "HDAudio codec support"
120 help
121 If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
122 GeminiLake or CannonLake platform with an HDaudio codec
123 then enable this option by saying Y
124
125config SND_SOC_INTEL_SKYLAKE_COMMON
126 tristate
110 select SND_HDA_EXT_CORE 127 select SND_HDA_EXT_CORE
111 select SND_HDA_DSP_LOADER 128 select SND_HDA_DSP_LOADER
112 select SND_SOC_TOPOLOGY 129 select SND_SOC_TOPOLOGY
113 select SND_SOC_INTEL_SST 130 select SND_SOC_INTEL_SST
131 select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
114 select SND_SOC_ACPI_INTEL_MATCH 132 select SND_SOC_ACPI_INTEL_MATCH
115 help 133 help
116 If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/ 134 If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
117 GeminiLake or CannonLake platform with the DSP enabled in the BIOS 135 GeminiLake or CannonLake platform with the DSP enabled in the BIOS
118 then enable this option by saying Y or m. 136 then enable this option by saying Y or m.
119 137
138endif ## SND_SOC_INTEL_SKYLAKE
139
120config SND_SOC_ACPI_INTEL_MATCH 140config SND_SOC_ACPI_INTEL_MATCH
121 tristate 141 tristate
122 select SND_SOC_ACPI if ACPI 142 select SND_SOC_ACPI if ACPI
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 73ca1350aa31..b177db2a0dbb 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -293,16 +293,6 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
293 Say Y if you have such a device. 293 Say Y if you have such a device.
294 If unsure select "N". 294 If unsure select "N".
295 295
296config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
297 tristate "SKL/KBL/BXT/APL with HDA Codecs"
298 select SND_SOC_HDAC_HDMI
299 select SND_SOC_HDAC_HDA
300 help
301 This adds support for ASoC machine driver for Intel platforms
302 SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
303 Say Y or m if you have such a device. This is a recommended option.
304 If unsure select "N".
305
306config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH 296config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
307 tristate "GLK with RT5682 and MAX98357A in I2S Mode" 297 tristate "GLK with RT5682 and MAX98357A in I2S Mode"
308 depends on MFD_INTEL_LPSS && I2C && ACPI 298 depends on MFD_INTEL_LPSS && I2C && ACPI
@@ -319,4 +309,18 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
319 309
320endif ## SND_SOC_INTEL_SKYLAKE 310endif ## SND_SOC_INTEL_SKYLAKE
321 311
312if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
313
314config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
315 tristate "SKL/KBL/BXT/APL with HDA Codecs"
316 select SND_SOC_HDAC_HDMI
317 # SND_SOC_HDAC_HDA is already selected
318 help
319 This adds support for ASoC machine driver for Intel platforms
320 SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
321 Say Y or m if you have such a device. This is a recommended option.
322 If unsure select "N".
323
324endif ## SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
325
322endif ## SND_SOC_INTEL_MACH 326endif ## SND_SOC_INTEL_MACH
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index db6976f4ddaa..9d9f6e41d81c 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -19,6 +19,7 @@
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */ 20 */
21 21
22#include <linux/dmi.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -35,6 +36,8 @@
35#define CHT_PLAT_CLK_3_HZ 19200000 36#define CHT_PLAT_CLK_3_HZ 19200000
36#define CHT_CODEC_DAI "HiFi" 37#define CHT_CODEC_DAI "HiFi"
37 38
39#define QUIRK_PMC_PLT_CLK_0 0x01
40
38struct cht_mc_private { 41struct cht_mc_private {
39 struct clk *mclk; 42 struct clk *mclk;
40 struct snd_soc_jack jack; 43 struct snd_soc_jack jack;
@@ -385,11 +388,29 @@ static struct snd_soc_card snd_soc_card_cht = {
385 .num_controls = ARRAY_SIZE(cht_mc_controls), 388 .num_controls = ARRAY_SIZE(cht_mc_controls),
386}; 389};
387 390
391static const struct dmi_system_id cht_max98090_quirk_table[] = {
392 {
393 /* Swanky model Chromebook (Toshiba Chromebook 2) */
394 .matches = {
395 DMI_MATCH(DMI_PRODUCT_NAME, "Swanky"),
396 },
397 .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
398 },
399 {}
400};
401
388static int snd_cht_mc_probe(struct platform_device *pdev) 402static int snd_cht_mc_probe(struct platform_device *pdev)
389{ 403{
404 const struct dmi_system_id *dmi_id;
390 struct device *dev = &pdev->dev; 405 struct device *dev = &pdev->dev;
391 int ret_val = 0; 406 int ret_val = 0;
392 struct cht_mc_private *drv; 407 struct cht_mc_private *drv;
408 const char *mclk_name;
409 int quirks = 0;
410
411 dmi_id = dmi_first_match(cht_max98090_quirk_table);
412 if (dmi_id)
413 quirks = (unsigned long)dmi_id->driver_data;
393 414
394 drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); 415 drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
395 if (!drv) 416 if (!drv)
@@ -411,11 +432,16 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
411 snd_soc_card_cht.dev = &pdev->dev; 432 snd_soc_card_cht.dev = &pdev->dev;
412 snd_soc_card_set_drvdata(&snd_soc_card_cht, drv); 433 snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
413 434
414 drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3"); 435 if (quirks & QUIRK_PMC_PLT_CLK_0)
436 mclk_name = "pmc_plt_clk_0";
437 else
438 mclk_name = "pmc_plt_clk_3";
439
440 drv->mclk = devm_clk_get(&pdev->dev, mclk_name);
415 if (IS_ERR(drv->mclk)) { 441 if (IS_ERR(drv->mclk)) {
416 dev_err(&pdev->dev, 442 dev_err(&pdev->dev,
417 "Failed to get MCLK from pmc_plt_clk_3: %ld\n", 443 "Failed to get MCLK from %s: %ld\n",
418 PTR_ERR(drv->mclk)); 444 mclk_name, PTR_ERR(drv->mclk));
419 return PTR_ERR(drv->mclk); 445 return PTR_ERR(drv->mclk);
420 } 446 }
421 447
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 29225623b4b4..7487f388e65d 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -37,7 +37,9 @@
37#include "skl.h" 37#include "skl.h"
38#include "skl-sst-dsp.h" 38#include "skl-sst-dsp.h"
39#include "skl-sst-ipc.h" 39#include "skl-sst-ipc.h"
40#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
40#include "../../../soc/codecs/hdac_hda.h" 41#include "../../../soc/codecs/hdac_hda.h"
42#endif
41 43
42/* 44/*
43 * initialize the PCI registers 45 * initialize the PCI registers
@@ -658,6 +660,8 @@ static void skl_clock_device_unregister(struct skl *skl)
658 platform_device_unregister(skl->clk_dev); 660 platform_device_unregister(skl->clk_dev);
659} 661}
660 662
663#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
664
661#define IDISP_INTEL_VENDOR_ID 0x80860000 665#define IDISP_INTEL_VENDOR_ID 0x80860000
662 666
663/* 667/*
@@ -676,6 +680,8 @@ static void load_codec_module(struct hda_codec *codec)
676#endif 680#endif
677} 681}
678 682
683#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
684
679/* 685/*
680 * Probe the given codec address 686 * Probe the given codec address
681 */ 687 */
@@ -685,9 +691,11 @@ static int probe_codec(struct hdac_bus *bus, int addr)
685 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; 691 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
686 unsigned int res = -1; 692 unsigned int res = -1;
687 struct skl *skl = bus_to_skl(bus); 693 struct skl *skl = bus_to_skl(bus);
694#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
688 struct hdac_hda_priv *hda_codec; 695 struct hdac_hda_priv *hda_codec;
689 struct hdac_device *hdev;
690 int err; 696 int err;
697#endif
698 struct hdac_device *hdev;
691 699
692 mutex_lock(&bus->cmd_mutex); 700 mutex_lock(&bus->cmd_mutex);
693 snd_hdac_bus_send_cmd(bus, cmd); 701 snd_hdac_bus_send_cmd(bus, cmd);
@@ -697,6 +705,7 @@ static int probe_codec(struct hdac_bus *bus, int addr)
697 return -EIO; 705 return -EIO;
698 dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res); 706 dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res);
699 707
708#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
700 hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec), 709 hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec),
701 GFP_KERNEL); 710 GFP_KERNEL);
702 if (!hda_codec) 711 if (!hda_codec)
@@ -715,6 +724,13 @@ static int probe_codec(struct hdac_bus *bus, int addr)
715 load_codec_module(&hda_codec->codec); 724 load_codec_module(&hda_codec->codec);
716 } 725 }
717 return 0; 726 return 0;
727#else
728 hdev = devm_kzalloc(&skl->pci->dev, sizeof(*hdev), GFP_KERNEL);
729 if (!hdev)
730 return -ENOMEM;
731
732 return snd_hdac_ext_bus_device_init(bus, addr, hdev);
733#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
718} 734}
719 735
720/* Codec initialization */ 736/* Codec initialization */
@@ -815,6 +831,12 @@ static void skl_probe_work(struct work_struct *work)
815 } 831 }
816 } 832 }
817 833
834 /*
835 * we are done probing so decrement link counts
836 */
837 list_for_each_entry(hlink, &bus->hlink_list, list)
838 snd_hdac_ext_bus_link_put(bus, hlink);
839
818 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 840 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
819 err = snd_hdac_display_power(bus, false); 841 err = snd_hdac_display_power(bus, false);
820 if (err < 0) { 842 if (err < 0) {
@@ -824,12 +846,6 @@ static void skl_probe_work(struct work_struct *work)
824 } 846 }
825 } 847 }
826 848
827 /*
828 * we are done probing so decrement link counts
829 */
830 list_for_each_entry(hlink, &bus->hlink_list, list)
831 snd_hdac_ext_bus_link_put(bus, hlink);
832
833 /* configure PM */ 849 /* configure PM */
834 pm_runtime_put_noidle(bus->dev); 850 pm_runtime_put_noidle(bus->dev);
835 pm_runtime_allow(bus->dev); 851 pm_runtime_allow(bus->dev);
@@ -870,7 +886,7 @@ static int skl_create(struct pci_dev *pci,
870 hbus = skl_to_hbus(skl); 886 hbus = skl_to_hbus(skl);
871 bus = skl_to_bus(skl); 887 bus = skl_to_bus(skl);
872 888
873#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDA) 889#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
874 ext_ops = snd_soc_hdac_hda_get_ops(); 890 ext_ops = snd_soc_hdac_hda_get_ops();
875#endif 891#endif
876 snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, ext_ops); 892 snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, ext_ops);
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index d5ae9eb8c756..fed45b41f9d3 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -36,6 +36,8 @@
36#include "../codecs/twl6040.h" 36#include "../codecs/twl6040.h"
37 37
38struct abe_twl6040 { 38struct abe_twl6040 {
39 struct snd_soc_card card;
40 struct snd_soc_dai_link dai_links[2];
39 int jack_detection; /* board can detect jack events */ 41 int jack_detection; /* board can detect jack events */
40 int mclk_freq; /* MCLK frequency speed for twl6040 */ 42 int mclk_freq; /* MCLK frequency speed for twl6040 */
41}; 43};
@@ -208,40 +210,10 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
208 ARRAY_SIZE(dmic_audio_map)); 210 ARRAY_SIZE(dmic_audio_map));
209} 211}
210 212
211/* Digital audio interface glue - connects codec <--> CPU */
212static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
213 {
214 .name = "TWL6040",
215 .stream_name = "TWL6040",
216 .codec_dai_name = "twl6040-legacy",
217 .codec_name = "twl6040-codec",
218 .init = omap_abe_twl6040_init,
219 .ops = &omap_abe_ops,
220 },
221 {
222 .name = "DMIC",
223 .stream_name = "DMIC Capture",
224 .codec_dai_name = "dmic-hifi",
225 .codec_name = "dmic-codec",
226 .init = omap_abe_dmic_init,
227 .ops = &omap_abe_dmic_ops,
228 },
229};
230
231/* Audio machine driver */
232static struct snd_soc_card omap_abe_card = {
233 .owner = THIS_MODULE,
234
235 .dapm_widgets = twl6040_dapm_widgets,
236 .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
237 .dapm_routes = audio_map,
238 .num_dapm_routes = ARRAY_SIZE(audio_map),
239};
240
241static int omap_abe_probe(struct platform_device *pdev) 213static int omap_abe_probe(struct platform_device *pdev)
242{ 214{
243 struct device_node *node = pdev->dev.of_node; 215 struct device_node *node = pdev->dev.of_node;
244 struct snd_soc_card *card = &omap_abe_card; 216 struct snd_soc_card *card;
245 struct device_node *dai_node; 217 struct device_node *dai_node;
246 struct abe_twl6040 *priv; 218 struct abe_twl6040 *priv;
247 int num_links = 0; 219 int num_links = 0;
@@ -252,12 +224,18 @@ static int omap_abe_probe(struct platform_device *pdev)
252 return -ENODEV; 224 return -ENODEV;
253 } 225 }
254 226
255 card->dev = &pdev->dev;
256
257 priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL); 227 priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
258 if (priv == NULL) 228 if (priv == NULL)
259 return -ENOMEM; 229 return -ENOMEM;
260 230
231 card = &priv->card;
232 card->dev = &pdev->dev;
233 card->owner = THIS_MODULE;
234 card->dapm_widgets = twl6040_dapm_widgets;
235 card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
236 card->dapm_routes = audio_map;
237 card->num_dapm_routes = ARRAY_SIZE(audio_map);
238
261 if (snd_soc_of_parse_card_name(card, "ti,model")) { 239 if (snd_soc_of_parse_card_name(card, "ti,model")) {
262 dev_err(&pdev->dev, "Card name is not provided\n"); 240 dev_err(&pdev->dev, "Card name is not provided\n");
263 return -ENODEV; 241 return -ENODEV;
@@ -274,14 +252,27 @@ static int omap_abe_probe(struct platform_device *pdev)
274 dev_err(&pdev->dev, "McPDM node is not provided\n"); 252 dev_err(&pdev->dev, "McPDM node is not provided\n");
275 return -EINVAL; 253 return -EINVAL;
276 } 254 }
277 abe_twl6040_dai_links[0].cpu_of_node = dai_node; 255
278 abe_twl6040_dai_links[0].platform_of_node = dai_node; 256 priv->dai_links[0].name = "DMIC";
257 priv->dai_links[0].stream_name = "TWL6040";
258 priv->dai_links[0].cpu_of_node = dai_node;
259 priv->dai_links[0].platform_of_node = dai_node;
260 priv->dai_links[0].codec_dai_name = "twl6040-legacy";
261 priv->dai_links[0].codec_name = "twl6040-codec";
262 priv->dai_links[0].init = omap_abe_twl6040_init;
263 priv->dai_links[0].ops = &omap_abe_ops;
279 264
280 dai_node = of_parse_phandle(node, "ti,dmic", 0); 265 dai_node = of_parse_phandle(node, "ti,dmic", 0);
281 if (dai_node) { 266 if (dai_node) {
282 num_links = 2; 267 num_links = 2;
283 abe_twl6040_dai_links[1].cpu_of_node = dai_node; 268 priv->dai_links[1].name = "TWL6040";
284 abe_twl6040_dai_links[1].platform_of_node = dai_node; 269 priv->dai_links[1].stream_name = "DMIC Capture";
270 priv->dai_links[1].cpu_of_node = dai_node;
271 priv->dai_links[1].platform_of_node = dai_node;
272 priv->dai_links[1].codec_dai_name = "dmic-hifi";
273 priv->dai_links[1].codec_name = "dmic-codec";
274 priv->dai_links[1].init = omap_abe_dmic_init;
275 priv->dai_links[1].ops = &omap_abe_dmic_ops;
285 } else { 276 } else {
286 num_links = 1; 277 num_links = 1;
287 } 278 }
@@ -300,7 +291,7 @@ static int omap_abe_probe(struct platform_device *pdev)
300 return -ENODEV; 291 return -ENODEV;
301 } 292 }
302 293
303 card->dai_link = abe_twl6040_dai_links; 294 card->dai_link = priv->dai_links;
304 card->num_links = num_links; 295 card->num_links = num_links;
305 296
306 snd_soc_card_set_drvdata(card, priv); 297 snd_soc_card_set_drvdata(card, priv);
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index fe966272bd0c..cba9645b6487 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -48,6 +48,8 @@ struct omap_dmic {
48 struct device *dev; 48 struct device *dev;
49 void __iomem *io_base; 49 void __iomem *io_base;
50 struct clk *fclk; 50 struct clk *fclk;
51 struct pm_qos_request pm_qos_req;
52 int latency;
51 int fclk_freq; 53 int fclk_freq;
52 int out_freq; 54 int out_freq;
53 int clk_div; 55 int clk_div;
@@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
124 126
125 mutex_lock(&dmic->mutex); 127 mutex_lock(&dmic->mutex);
126 128
129 pm_qos_remove_request(&dmic->pm_qos_req);
130
127 if (!dai->active) 131 if (!dai->active)
128 dmic->active = 0; 132 dmic->active = 0;
129 133
@@ -228,6 +232,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
228 /* packet size is threshold * channels */ 232 /* packet size is threshold * channels */
229 dma_data = snd_soc_dai_get_dma_data(dai, substream); 233 dma_data = snd_soc_dai_get_dma_data(dai, substream);
230 dma_data->maxburst = dmic->threshold * channels; 234 dma_data->maxburst = dmic->threshold * channels;
235 dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
236 params_rate(params);
231 237
232 return 0; 238 return 0;
233} 239}
@@ -238,6 +244,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
238 struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai); 244 struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
239 u32 ctrl; 245 u32 ctrl;
240 246
247 if (pm_qos_request_active(&dmic->pm_qos_req))
248 pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
249
241 /* Configure uplink threshold */ 250 /* Configure uplink threshold */
242 omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold); 251 omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
243 252
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index d0ebb6b9bfac..2d6decbfc99e 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -308,9 +308,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
308 pkt_size = channels; 308 pkt_size = channels;
309 } 309 }
310 310
311 latency = ((((buffer_size - pkt_size) / channels) * 1000) 311 latency = (buffer_size - pkt_size) / channels;
312 / (params->rate_num / params->rate_den)); 312 latency = latency * USEC_PER_SEC /
313 313 (params->rate_num / params->rate_den);
314 mcbsp->latency[substream->stream] = latency; 314 mcbsp->latency[substream->stream] = latency;
315 315
316 omap_mcbsp_set_threshold(substream, pkt_size); 316 omap_mcbsp_set_threshold(substream, pkt_size);
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 4c1be36c2207..7d5bdc5a2890 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -54,6 +54,8 @@ struct omap_mcpdm {
54 unsigned long phys_base; 54 unsigned long phys_base;
55 void __iomem *io_base; 55 void __iomem *io_base;
56 int irq; 56 int irq;
57 struct pm_qos_request pm_qos_req;
58 int latency[2];
57 59
58 struct mutex mutex; 60 struct mutex mutex;
59 61
@@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
277 struct snd_soc_dai *dai) 279 struct snd_soc_dai *dai)
278{ 280{
279 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); 281 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
282 int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
283 int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
284 int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
280 285
281 mutex_lock(&mcpdm->mutex); 286 mutex_lock(&mcpdm->mutex);
282 287
@@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
289 } 294 }
290 } 295 }
291 296
297 if (mcpdm->latency[stream2])
298 pm_qos_update_request(&mcpdm->pm_qos_req,
299 mcpdm->latency[stream2]);
300 else if (mcpdm->latency[stream1])
301 pm_qos_remove_request(&mcpdm->pm_qos_req);
302
303 mcpdm->latency[stream1] = 0;
304
292 mutex_unlock(&mcpdm->mutex); 305 mutex_unlock(&mcpdm->mutex);
293} 306}
294 307
@@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
300 int stream = substream->stream; 313 int stream = substream->stream;
301 struct snd_dmaengine_dai_dma_data *dma_data; 314 struct snd_dmaengine_dai_dma_data *dma_data;
302 u32 threshold; 315 u32 threshold;
303 int channels; 316 int channels, latency;
304 int link_mask = 0; 317 int link_mask = 0;
305 318
306 channels = params_channels(params); 319 channels = params_channels(params);
@@ -344,14 +357,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
344 357
345 dma_data->maxburst = 358 dma_data->maxburst =
346 (MCPDM_DN_THRES_MAX - threshold) * channels; 359 (MCPDM_DN_THRES_MAX - threshold) * channels;
360 latency = threshold;
347 } else { 361 } else {
348 /* If playback is not running assume a stereo stream to come */ 362 /* If playback is not running assume a stereo stream to come */
349 if (!mcpdm->config[!stream].link_mask) 363 if (!mcpdm->config[!stream].link_mask)
350 mcpdm->config[!stream].link_mask = (0x3 << 3); 364 mcpdm->config[!stream].link_mask = (0x3 << 3);
351 365
352 dma_data->maxburst = threshold * channels; 366 dma_data->maxburst = threshold * channels;
367 latency = (MCPDM_DN_THRES_MAX - threshold);
353 } 368 }
354 369
370 /*
371 * The DMA must act to a DMA request within latency time (usec) to avoid
372 * under/overflow
373 */
374 mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
375
376 if (!mcpdm->latency[stream])
377 mcpdm->latency[stream] = 10;
378
355 /* Check if we need to restart McPDM with this stream */ 379 /* Check if we need to restart McPDM with this stream */
356 if (mcpdm->config[stream].link_mask && 380 if (mcpdm->config[stream].link_mask &&
357 mcpdm->config[stream].link_mask != link_mask) 381 mcpdm->config[stream].link_mask != link_mask)
@@ -366,6 +390,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
366 struct snd_soc_dai *dai) 390 struct snd_soc_dai *dai)
367{ 391{
368 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); 392 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
393 struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
394 int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
395 int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
396 int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
397 int latency = mcpdm->latency[stream2];
398
399 /* Prevent omap hardware from hitting off between FIFO fills */
400 if (!latency || mcpdm->latency[stream1] < latency)
401 latency = mcpdm->latency[stream1];
402
403 if (pm_qos_request_active(pm_qos_req))
404 pm_qos_update_request(pm_qos_req, latency);
405 else if (latency)
406 pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
369 407
370 if (!omap_mcpdm_active(mcpdm)) { 408 if (!omap_mcpdm_active(mcpdm)) {
371 omap_mcpdm_start(mcpdm); 409 omap_mcpdm_start(mcpdm);
@@ -427,6 +465,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
427 free_irq(mcpdm->irq, (void *)mcpdm); 465 free_irq(mcpdm->irq, (void *)mcpdm);
428 pm_runtime_disable(mcpdm->dev); 466 pm_runtime_disable(mcpdm->dev);
429 467
468 if (pm_qos_request_active(&mcpdm->pm_qos_req))
469 pm_qos_remove_request(&mcpdm->pm_qos_req);
470
430 return 0; 471 return 0;
431} 472}
432 473
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index eb1b9da05dd4..4715527054e5 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -13,6 +13,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
13 struct device_node *cpu = NULL; 13 struct device_node *cpu = NULL;
14 struct device *dev = card->dev; 14 struct device *dev = card->dev;
15 struct snd_soc_dai_link *link; 15 struct snd_soc_dai_link *link;
16 struct of_phandle_args args;
16 int ret, num_links; 17 int ret, num_links;
17 18
18 ret = snd_soc_of_parse_card_name(card, "model"); 19 ret = snd_soc_of_parse_card_name(card, "model");
@@ -47,12 +48,14 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
47 goto err; 48 goto err;
48 } 49 }
49 50
50 link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0); 51 ret = of_parse_phandle_with_args(cpu, "sound-dai",
51 if (!link->cpu_of_node) { 52 "#sound-dai-cells", 0, &args);
53 if (ret) {
52 dev_err(card->dev, "error getting cpu phandle\n"); 54 dev_err(card->dev, "error getting cpu phandle\n");
53 ret = -EINVAL;
54 goto err; 55 goto err;
55 } 56 }
57 link->cpu_of_node = args.np;
58 link->id = args.args[0];
56 59
57 ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name); 60 ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
58 if (ret) { 61 if (ret) {
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index 60ff4a2d3577..8f6c8fc073a9 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -1112,204 +1112,204 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
1112} 1112}
1113 1113
1114static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = { 1114static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
1115 SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0), 1115 SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
1116 SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0), 1116 SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
1117 SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0), 1117 SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
1118 SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0), 1118 SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
1119 SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0), 1119 SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
1120 SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0), 1120 SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
1121 SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0), 1121 SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
1122 SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0), 1122 SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
1123 SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0), 1123 SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
1124 SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0), 1124 SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
1125 SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0), 1125 SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
1126 SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0), 1126 SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
1127 SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0), 1127 SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
1128 SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0), 1128 SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
1129 SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0), 1129 SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
1130 SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback", 1130 SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
1131 0, 0, 0, 0), 1131 0, 0, 0, 0),
1132 SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture", 1132 SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
1133 0, 0, 0, 0), 1133 0, 0, 0, 0),
1134 SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback", 1134 SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
1135 0, 0, 0, 0), 1135 0, 0, 0, 0),
1136 SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture", 1136 SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
1137 0, 0, 0, 0), 1137 0, 0, 0, 0),
1138 SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback", 1138 SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
1139 0, 0, 0, 0), 1139 0, 0, 0, 0),
1140 SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture", 1140 SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
1141 0, 0, 0, 0), 1141 0, 0, 0, 0),
1142 SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1", 1142 SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
1143 "Secondary MI2S Playback SD1", 1143 "Secondary MI2S Playback SD1",
1144 0, 0, 0, 0), 1144 0, 0, 0, 0),
1145 SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback", 1145 SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
1146 0, 0, 0, 0), 1146 0, 0, 0, 0),
1147 SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture", 1147 SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
1148 0, 0, 0, 0), 1148 0, 0, 0, 0),
1149 1149
1150 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback", 1150 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
1151 0, 0, 0, 0), 1151 0, 0, 0, 0),
1152 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback", 1152 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
1153 0, 0, 0, 0), 1153 0, 0, 0, 0),
1154 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback", 1154 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
1155 0, 0, 0, 0), 1155 0, 0, 0, 0),
1156 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback", 1156 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
1157 0, 0, 0, 0), 1157 0, 0, 0, 0),
1158 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback", 1158 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
1159 0, 0, 0, 0), 1159 0, 0, 0, 0),
1160 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback", 1160 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
1161 0, 0, 0, 0), 1161 0, 0, 0, 0),
1162 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback", 1162 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
1163 0, 0, 0, 0), 1163 0, 0, 0, 0),
1164 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback", 1164 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
1165 0, 0, 0, 0), 1165 0, 0, 0, 0),
1166 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture", 1166 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
1167 0, 0, 0, 0), 1167 0, 0, 0, 0),
1168 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture", 1168 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
1169 0, 0, 0, 0), 1169 0, 0, 0, 0),
1170 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture", 1170 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
1171 0, 0, 0, 0), 1171 0, 0, 0, 0),
1172 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture", 1172 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
1173 0, 0, 0, 0), 1173 0, 0, 0, 0),
1174 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture", 1174 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
1175 0, 0, 0, 0), 1175 0, 0, 0, 0),
1176 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture", 1176 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
1177 0, 0, 0, 0), 1177 0, 0, 0, 0),
1178 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture", 1178 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
1179 0, 0, 0, 0), 1179 0, 0, 0, 0),
1180 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture", 1180 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
1181 0, 0, 0, 0), 1181 0, 0, 0, 0),
1182 1182
1183 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback", 1183 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
1184 0, 0, 0, 0), 1184 0, 0, 0, 0),
1185 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback", 1185 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
1186 0, 0, 0, 0), 1186 0, 0, 0, 0),
1187 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback", 1187 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
1188 0, 0, 0, 0), 1188 0, 0, 0, 0),
1189 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback", 1189 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
1190 0, 0, 0, 0), 1190 0, 0, 0, 0),
1191 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback", 1191 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
1192 0, 0, 0, 0), 1192 0, 0, 0, 0),
1193 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback", 1193 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
1194 0, 0, 0, 0), 1194 0, 0, 0, 0),
1195 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback", 1195 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
1196 0, 0, 0, 0), 1196 0, 0, 0, 0),
1197 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback", 1197 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
1198 0, 0, 0, 0), 1198 0, 0, 0, 0),
1199 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture", 1199 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
1200 0, 0, 0, 0), 1200 0, 0, 0, 0),
1201 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture", 1201 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
1202 0, 0, 0, 0), 1202 0, 0, 0, 0),
1203 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture", 1203 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
1204 0, 0, 0, 0), 1204 0, 0, 0, 0),
1205 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture", 1205 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
1206 0, 0, 0, 0), 1206 0, 0, 0, 0),
1207 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture", 1207 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
1208 0, 0, 0, 0), 1208 0, 0, 0, 0),
1209 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture", 1209 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
1210 0, 0, 0, 0), 1210 0, 0, 0, 0),
1211 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture", 1211 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
1212 0, 0, 0, 0), 1212 0, 0, 0, 0),
1213 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture", 1213 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
1214 0, 0, 0, 0), 1214 0, 0, 0, 0),
1215 1215
1216 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback", 1216 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
1217 0, 0, 0, 0), 1217 0, 0, 0, 0),
1218 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback", 1218 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
1219 0, 0, 0, 0), 1219 0, 0, 0, 0),
1220 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback", 1220 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
1221 0, 0, 0, 0), 1221 0, 0, 0, 0),
1222 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback", 1222 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
1223 0, 0, 0, 0), 1223 0, 0, 0, 0),
1224 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback", 1224 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
1225 0, 0, 0, 0), 1225 0, 0, 0, 0),
1226 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback", 1226 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
1227 0, 0, 0, 0), 1227 0, 0, 0, 0),
1228 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback", 1228 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
1229 0, 0, 0, 0), 1229 0, 0, 0, 0),
1230 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback", 1230 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
1231 0, 0, 0, 0), 1231 0, 0, 0, 0),
1232 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture", 1232 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
1233 0, 0, 0, 0), 1233 0, 0, 0, 0),
1234 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture", 1234 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
1235 0, 0, 0, 0), 1235 0, 0, 0, 0),
1236 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture", 1236 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
1237 0, 0, 0, 0), 1237 0, 0, 0, 0),
1238 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture", 1238 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
1239 0, 0, 0, 0), 1239 0, 0, 0, 0),
1240 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture", 1240 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
1241 0, 0, 0, 0), 1241 0, 0, 0, 0),
1242 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture", 1242 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
1243 0, 0, 0, 0), 1243 0, 0, 0, 0),
1244 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture", 1244 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
1245 0, 0, 0, 0), 1245 0, 0, 0, 0),
1246 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture", 1246 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
1247 0, 0, 0, 0), 1247 0, 0, 0, 0),
1248 1248
1249 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback", 1249 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
1250 0, 0, 0, 0), 1250 0, 0, 0, 0),
1251 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback", 1251 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
1252 0, 0, 0, 0), 1252 0, 0, 0, 0),
1253 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback", 1253 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
1254 0, 0, 0, 0), 1254 0, 0, 0, 0),
1255 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback", 1255 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
1256 0, 0, 0, 0), 1256 0, 0, 0, 0),
1257 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback", 1257 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
1258 0, 0, 0, 0), 1258 0, 0, 0, 0),
1259 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback", 1259 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
1260 0, 0, 0, 0), 1260 0, 0, 0, 0),
1261 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback", 1261 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
1262 0, 0, 0, 0), 1262 0, 0, 0, 0),
1263 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback", 1263 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
1264 0, 0, 0, 0), 1264 0, 0, 0, 0),
1265 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture", 1265 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
1266 0, 0, 0, 0), 1266 0, 0, 0, 0),
1267 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture", 1267 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
1268 0, 0, 0, 0), 1268 0, 0, 0, 0),
1269 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture", 1269 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
1270 0, 0, 0, 0), 1270 0, 0, 0, 0),
1271 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture", 1271 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
1272 0, 0, 0, 0), 1272 0, 0, 0, 0),
1273 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture", 1273 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
1274 0, 0, 0, 0), 1274 0, 0, 0, 0),
1275 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture", 1275 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
1276 0, 0, 0, 0), 1276 0, 0, 0, 0),
1277 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture", 1277 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
1278 0, 0, 0, 0), 1278 0, 0, 0, 0),
1279 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture", 1279 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
1280 0, 0, 0, 0), 1280 0, 0, 0, 0),
1281 1281
1282 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback", 1282 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
1283 0, 0, 0, 0), 1283 0, 0, 0, 0),
1284 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback", 1284 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
1285 0, 0, 0, 0), 1285 0, 0, 0, 0),
1286 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback", 1286 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
1287 0, 0, 0, 0), 1287 0, 0, 0, 0),
1288 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback", 1288 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
1289 0, 0, 0, 0), 1289 0, 0, 0, 0),
1290 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback", 1290 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
1291 0, 0, 0, 0), 1291 0, 0, 0, 0),
1292 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback", 1292 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
1293 0, 0, 0, 0), 1293 0, 0, 0, 0),
1294 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback", 1294 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
1295 0, 0, 0, 0), 1295 0, 0, 0, 0),
1296 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback", 1296 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
1297 0, 0, 0, 0), 1297 0, 0, 0, 0),
1298 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture", 1298 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
1299 0, 0, 0, 0), 1299 0, 0, 0, 0),
1300 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture", 1300 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
1301 0, 0, 0, 0), 1301 0, 0, 0, 0),
1302 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture", 1302 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
1303 0, 0, 0, 0), 1303 0, 0, 0, 0),
1304 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture", 1304 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
1305 0, 0, 0, 0), 1305 0, 0, 0, 0),
1306 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture", 1306 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
1307 0, 0, 0, 0), 1307 0, 0, 0, 0),
1308 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture", 1308 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
1309 0, 0, 0, 0), 1309 0, 0, 0, 0),
1310 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture", 1310 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
1311 0, 0, 0, 0), 1311 0, 0, 0, 0),
1312 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture", 1312 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
1313 0, 0, 0, 0), 1313 0, 0, 0, 0),
1314}; 1314};
1315 1315
diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index 000775b4bba8..829b5e987b2a 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -49,14 +49,14 @@
49#define AFE_PORT_I2S_SD1 0x2 49#define AFE_PORT_I2S_SD1 0x2
50#define AFE_PORT_I2S_SD2 0x3 50#define AFE_PORT_I2S_SD2 0x3
51#define AFE_PORT_I2S_SD3 0x4 51#define AFE_PORT_I2S_SD3 0x4
52#define AFE_PORT_I2S_SD0_MASK BIT(0x1) 52#define AFE_PORT_I2S_SD0_MASK BIT(0x0)
53#define AFE_PORT_I2S_SD1_MASK BIT(0x2) 53#define AFE_PORT_I2S_SD1_MASK BIT(0x1)
54#define AFE_PORT_I2S_SD2_MASK BIT(0x3) 54#define AFE_PORT_I2S_SD2_MASK BIT(0x2)
55#define AFE_PORT_I2S_SD3_MASK BIT(0x4) 55#define AFE_PORT_I2S_SD3_MASK BIT(0x3)
56#define AFE_PORT_I2S_SD0_1_MASK GENMASK(2, 1) 56#define AFE_PORT_I2S_SD0_1_MASK GENMASK(1, 0)
57#define AFE_PORT_I2S_SD2_3_MASK GENMASK(4, 3) 57#define AFE_PORT_I2S_SD2_3_MASK GENMASK(3, 2)
58#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(3, 1) 58#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(2, 0)
59#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(4, 1) 59#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
60#define AFE_PORT_I2S_QUAD01 0x5 60#define AFE_PORT_I2S_QUAD01 0x5
61#define AFE_PORT_I2S_QUAD23 0x6 61#define AFE_PORT_I2S_QUAD23 0x6
62#define AFE_PORT_I2S_6CHS 0x7 62#define AFE_PORT_I2S_6CHS 0x7
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index a16c71c03058..86115de5c1b2 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -122,7 +122,6 @@ static struct snd_pcm_hardware q6asm_dai_hardware_playback = {
122 .rate_max = 48000, \ 122 .rate_max = 48000, \
123 }, \ 123 }, \
124 .name = "MultiMedia"#num, \ 124 .name = "MultiMedia"#num, \
125 .probe = fe_dai_probe, \
126 .id = MSM_FRONTEND_DAI_MULTIMEDIA##num, \ 125 .id = MSM_FRONTEND_DAI_MULTIMEDIA##num, \
127 } 126 }
128 127
@@ -511,38 +510,6 @@ static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
511 } 510 }
512} 511}
513 512
514static const struct snd_soc_dapm_route afe_pcm_routes[] = {
515 {"MM_DL1", NULL, "MultiMedia1 Playback" },
516 {"MM_DL2", NULL, "MultiMedia2 Playback" },
517 {"MM_DL3", NULL, "MultiMedia3 Playback" },
518 {"MM_DL4", NULL, "MultiMedia4 Playback" },
519 {"MM_DL5", NULL, "MultiMedia5 Playback" },
520 {"MM_DL6", NULL, "MultiMedia6 Playback" },
521 {"MM_DL7", NULL, "MultiMedia7 Playback" },
522 {"MM_DL7", NULL, "MultiMedia8 Playback" },
523 {"MultiMedia1 Capture", NULL, "MM_UL1"},
524 {"MultiMedia2 Capture", NULL, "MM_UL2"},
525 {"MultiMedia3 Capture", NULL, "MM_UL3"},
526 {"MultiMedia4 Capture", NULL, "MM_UL4"},
527 {"MultiMedia5 Capture", NULL, "MM_UL5"},
528 {"MultiMedia6 Capture", NULL, "MM_UL6"},
529 {"MultiMedia7 Capture", NULL, "MM_UL7"},
530 {"MultiMedia8 Capture", NULL, "MM_UL8"},
531
532};
533
534static int fe_dai_probe(struct snd_soc_dai *dai)
535{
536 struct snd_soc_dapm_context *dapm;
537
538 dapm = snd_soc_component_get_dapm(dai->component);
539 snd_soc_dapm_add_routes(dapm, afe_pcm_routes,
540 ARRAY_SIZE(afe_pcm_routes));
541
542 return 0;
543}
544
545
546static const struct snd_soc_component_driver q6asm_fe_dai_component = { 513static const struct snd_soc_component_driver q6asm_fe_dai_component = {
547 .name = DRV_NAME, 514 .name = DRV_NAME,
548 .ops = &q6asm_dai_ops, 515 .ops = &q6asm_dai_ops,
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index c6b51571be94..d61b8404f7da 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -909,6 +909,25 @@ static const struct snd_soc_dapm_route intercon[] = {
909 {"MM_UL6", NULL, "MultiMedia6 Mixer"}, 909 {"MM_UL6", NULL, "MultiMedia6 Mixer"},
910 {"MM_UL7", NULL, "MultiMedia7 Mixer"}, 910 {"MM_UL7", NULL, "MultiMedia7 Mixer"},
911 {"MM_UL8", NULL, "MultiMedia8 Mixer"}, 911 {"MM_UL8", NULL, "MultiMedia8 Mixer"},
912
913 {"MM_DL1", NULL, "MultiMedia1 Playback" },
914 {"MM_DL2", NULL, "MultiMedia2 Playback" },
915 {"MM_DL3", NULL, "MultiMedia3 Playback" },
916 {"MM_DL4", NULL, "MultiMedia4 Playback" },
917 {"MM_DL5", NULL, "MultiMedia5 Playback" },
918 {"MM_DL6", NULL, "MultiMedia6 Playback" },
919 {"MM_DL7", NULL, "MultiMedia7 Playback" },
920 {"MM_DL8", NULL, "MultiMedia8 Playback" },
921
922 {"MultiMedia1 Capture", NULL, "MM_UL1"},
923 {"MultiMedia2 Capture", NULL, "MM_UL2"},
924 {"MultiMedia3 Capture", NULL, "MM_UL3"},
925 {"MultiMedia4 Capture", NULL, "MM_UL4"},
926 {"MultiMedia5 Capture", NULL, "MM_UL5"},
927 {"MultiMedia6 Capture", NULL, "MM_UL6"},
928 {"MultiMedia7 Capture", NULL, "MM_UL7"},
929 {"MultiMedia8 Capture", NULL, "MM_UL8"},
930
912}; 931};
913 932
914static int routing_hw_params(struct snd_pcm_substream *substream, 933static int routing_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
index 9e7b5fa4cf59..4ac78d7a4b2d 100644
--- a/sound/soc/rockchip/rockchip_pcm.c
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -33,6 +33,7 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
33 33
34static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = { 34static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = {
35 .pcm_hardware = &snd_rockchip_hardware, 35 .pcm_hardware = &snd_rockchip_hardware,
36 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
36 .prealloc_buffer_size = 32 * 1024, 37 .prealloc_buffer_size = 32 * 1024,
37}; 38};
38 39
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index fcb4df23248c..6ec78f3096dd 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -306,7 +306,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
306 if (rsnd_ssi_is_multi_slave(mod, io)) 306 if (rsnd_ssi_is_multi_slave(mod, io))
307 return 0; 307 return 0;
308 308
309 if (ssi->rate) { 309 if (ssi->usrcnt > 1) {
310 if (ssi->rate != rate) { 310 if (ssi->rate != rate) {
311 dev_err(dev, "SSI parent/child should use same rate\n"); 311 dev_err(dev, "SSI parent/child should use same rate\n");
312 return -EINVAL; 312 return -EINVAL;
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index b8e72b52db30..4fb29f0e561e 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -10,11 +10,17 @@ struct snd_soc_acpi_mach *
10snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines) 10snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
11{ 11{
12 struct snd_soc_acpi_mach *mach; 12 struct snd_soc_acpi_mach *mach;
13 struct snd_soc_acpi_mach *mach_alt;
13 14
14 for (mach = machines; mach->id[0]; mach++) { 15 for (mach = machines; mach->id[0]; mach++) {
15 if (acpi_dev_present(mach->id, NULL, -1)) { 16 if (acpi_dev_present(mach->id, NULL, -1)) {
16 if (mach->machine_quirk) 17 if (mach->machine_quirk) {
17 mach = mach->machine_quirk(mach); 18 mach_alt = mach->machine_quirk(mach);
19 if (!mach_alt)
20 continue; /* not full match, ignore */
21 mach = mach_alt;
22 }
23
18 return mach; 24 return mach;
19 } 25 }
20 } 26 }
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 6ddcf12bc030..b29d0f65611e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2131,6 +2131,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
2131 } 2131 }
2132 2132
2133 card->instantiated = 1; 2133 card->instantiated = 1;
2134 dapm_mark_endpoints_dirty(card);
2134 snd_soc_dapm_sync(&card->dapm); 2135 snd_soc_dapm_sync(&card->dapm);
2135 mutex_unlock(&card->mutex); 2136 mutex_unlock(&card->mutex);
2136 mutex_unlock(&client_mutex); 2137 mutex_unlock(&client_mutex);
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index ea05cc91aa05..211589b0b2ef 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -390,7 +390,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
390 char *mclk_name, *p, *s = (char *)pname; 390 char *mclk_name, *p, *s = (char *)pname;
391 int ret, i = 0; 391 int ret, i = 0;
392 392
393 mclk = devm_kzalloc(dev, sizeof(mclk), GFP_KERNEL); 393 mclk = devm_kzalloc(dev, sizeof(*mclk), GFP_KERNEL);
394 if (!mclk) 394 if (!mclk)
395 return -ENOMEM; 395 return -ENOMEM;
396 396
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index 66aad0d3f9c7..8134c3c94229 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -31,7 +31,7 @@ config SND_SUN8I_CODEC_ANALOG
31config SND_SUN50I_CODEC_ANALOG 31config SND_SUN50I_CODEC_ANALOG
32 tristate "Allwinner sun50i Codec Analog Controls Support" 32 tristate "Allwinner sun50i Codec Analog Controls Support"
33 depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST 33 depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
34 select SND_SUNXI_ADDA_PR_REGMAP 34 select SND_SUN8I_ADDA_PR_REGMAP
35 help 35 help
36 Say Y or M if you want to add support for the analog controls for 36 Say Y or M if you want to add support for the analog controls for
37 the codec embedded in Allwinner A64 SoC. 37 the codec embedded in Allwinner A64 SoC.
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index 522a72fde78d..92c5de026c43 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -481,7 +481,11 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
481 { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch", 481 { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
482 "AIF1 Slot 0 Right"}, 482 "AIF1 Slot 0 Right"},
483 483
484 /* ADC routes */ 484 /* ADC Routes */
485 { "AIF1 Slot 0 Right ADC", NULL, "ADC" },
486 { "AIF1 Slot 0 Left ADC", NULL, "ADC" },
487
488 /* ADC Mixer Routes */
485 { "Left Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch", 489 { "Left Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
486 "AIF1 Slot 0 Left ADC" }, 490 "AIF1 Slot 0 Left ADC" },
487 { "Right Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch", 491 { "Right Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
@@ -605,16 +609,10 @@ err_pm_disable:
605 609
606static int sun8i_codec_remove(struct platform_device *pdev) 610static int sun8i_codec_remove(struct platform_device *pdev)
607{ 611{
608 struct snd_soc_card *card = platform_get_drvdata(pdev);
609 struct sun8i_codec *scodec = snd_soc_card_get_drvdata(card);
610
611 pm_runtime_disable(&pdev->dev); 612 pm_runtime_disable(&pdev->dev);
612 if (!pm_runtime_status_suspended(&pdev->dev)) 613 if (!pm_runtime_status_suspended(&pdev->dev))
613 sun8i_codec_runtime_suspend(&pdev->dev); 614 sun8i_codec_runtime_suspend(&pdev->dev);
614 615
615 clk_disable_unprepare(scodec->clk_module);
616 clk_disable_unprepare(scodec->clk_bus);
617
618 return 0; 616 return 0;
619} 617}
620 618
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index e73c962590eb..079063d8038d 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
1146 runtime->hw = snd_cs4231_playback; 1146 runtime->hw = snd_cs4231_playback;
1147 1147
1148 err = snd_cs4231_open(chip, CS4231_MODE_PLAY); 1148 err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
1149 if (err < 0) { 1149 if (err < 0)
1150 snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1151 return err; 1150 return err;
1152 }
1153 chip->playback_substream = substream; 1151 chip->playback_substream = substream;
1154 chip->p_periods_sent = 0; 1152 chip->p_periods_sent = 0;
1155 snd_pcm_set_sync(substream); 1153 snd_pcm_set_sync(substream);
@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
1167 runtime->hw = snd_cs4231_capture; 1165 runtime->hw = snd_cs4231_capture;
1168 1166
1169 err = snd_cs4231_open(chip, CS4231_MODE_RECORD); 1167 err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
1170 if (err < 0) { 1168 if (err < 0)
1171 snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1172 return err; 1169 return err;
1173 }
1174 chip->capture_substream = substream; 1170 chip->capture_substream = substream;
1175 chip->c_periods_sent = 0; 1171 chip->c_periods_sent = 0;
1176 snd_pcm_set_sync(substream); 1172 snd_pcm_set_sync(substream);
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 849953e5775c..37fc0447c071 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3382,5 +3382,15 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
3382 .ifnum = QUIRK_NO_INTERFACE 3382 .ifnum = QUIRK_NO_INTERFACE
3383 } 3383 }
3384}, 3384},
3385/* Dell WD19 Dock */
3386{
3387 USB_DEVICE(0x0bda, 0x402e),
3388 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
3389 .vendor_name = "Dell",
3390 .product_name = "WD19 Dock",
3391 .profile_name = "Dell-WD15-Dock",
3392 .ifnum = QUIRK_NO_INTERFACE
3393 }
3394},
3385 3395
3386#undef USB_DEVICE_VENDOR_SPEC 3396#undef USB_DEVICE_VENDOR_SPEC
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index edbe81534c6d..d07ccf8a23f7 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -137,4 +137,10 @@ EXAMPLES
137 137
138SEE ALSO 138SEE ALSO
139======== 139========
140 **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8) 140 **bpf**\ (2),
141 **bpf-helpers**\ (7),
142 **bpftool**\ (8),
143 **bpftool-prog**\ (8),
144 **bpftool-map**\ (8),
145 **bpftool-net**\ (8),
146 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index f55a2daed59b..7bb787cfa971 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -171,4 +171,10 @@ The following three commands are equivalent:
171 171
172SEE ALSO 172SEE ALSO
173======== 173========
174 **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8) 174 **bpf**\ (2),
175 **bpf-helpers**\ (7),
176 **bpftool**\ (8),
177 **bpftool-prog**\ (8),
178 **bpftool-cgroup**\ (8),
179 **bpftool-net**\ (8),
180 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index 408ec30d8872..ed87c9b619ad 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -136,4 +136,10 @@ EXAMPLES
136 136
137SEE ALSO 137SEE ALSO
138======== 138========
139 **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8) 139 **bpf**\ (2),
140 **bpf-helpers**\ (7),
141 **bpftool**\ (8),
142 **bpftool-prog**\ (8),
143 **bpftool-map**\ (8),
144 **bpftool-cgroup**\ (8),
145 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index e3eb0eab7641..f4c5e5538bb8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -78,4 +78,10 @@ EXAMPLES
78 78
79SEE ALSO 79SEE ALSO
80======== 80========
81 **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8) 81 **bpf**\ (2),
82 **bpf-helpers**\ (7),
83 **bpftool**\ (8),
84 **bpftool-prog**\ (8),
85 **bpftool-map**\ (8),
86 **bpftool-cgroup**\ (8),
87 **bpftool-net**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index ac4e904b10fb..ecf618807125 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -124,7 +124,8 @@ OPTIONS
124 Generate human-readable JSON output. Implies **-j**. 124 Generate human-readable JSON output. Implies **-j**.
125 125
126 -f, --bpffs 126 -f, --bpffs
127 Show file names of pinned programs. 127 When showing BPF programs, show file names of pinned
128 programs.
128 129
129EXAMPLES 130EXAMPLES
130======== 131========
@@ -206,4 +207,10 @@ EXAMPLES
206 207
207SEE ALSO 208SEE ALSO
208======== 209========
209 **bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8) 210 **bpf**\ (2),
211 **bpf-helpers**\ (7),
212 **bpftool**\ (8),
213 **bpftool-map**\ (8),
214 **bpftool-cgroup**\ (8),
215 **bpftool-net**\ (8),
216 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 04cd4f92ab89..129b7a9c0f9b 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -63,5 +63,10 @@ OPTIONS
63 63
64SEE ALSO 64SEE ALSO
65======== 65========
66 **bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8) 66 **bpf**\ (2),
67 **bpftool-perf**\ (8), **bpftool-net**\ (8) 67 **bpf-helpers**\ (7),
68 **bpftool-prog**\ (8),
69 **bpftool-map**\ (8),
70 **bpftool-cgroup**\ (8),
71 **bpftool-net**\ (8),
72 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 25af85304ebe..70fd48d79f61 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -130,16 +130,17 @@ static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
130 return 0; 130 return 0;
131} 131}
132 132
133int open_obj_pinned(char *path) 133int open_obj_pinned(char *path, bool quiet)
134{ 134{
135 int fd; 135 int fd;
136 136
137 fd = bpf_obj_get(path); 137 fd = bpf_obj_get(path);
138 if (fd < 0) { 138 if (fd < 0) {
139 p_err("bpf obj get (%s): %s", path, 139 if (!quiet)
140 errno == EACCES && !is_bpffs(dirname(path)) ? 140 p_err("bpf obj get (%s): %s", path,
141 "directory not in bpf file system (bpffs)" : 141 errno == EACCES && !is_bpffs(dirname(path)) ?
142 strerror(errno)); 142 "directory not in bpf file system (bpffs)" :
143 strerror(errno));
143 return -1; 144 return -1;
144 } 145 }
145 146
@@ -151,7 +152,7 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
151 enum bpf_obj_type type; 152 enum bpf_obj_type type;
152 int fd; 153 int fd;
153 154
154 fd = open_obj_pinned(path); 155 fd = open_obj_pinned(path, false);
155 if (fd < 0) 156 if (fd < 0)
156 return -1; 157 return -1;
157 158
@@ -304,7 +305,7 @@ char *get_fdinfo(int fd, const char *key)
304 return NULL; 305 return NULL;
305 } 306 }
306 307
307 while ((n = getline(&line, &line_n, fdi))) { 308 while ((n = getline(&line, &line_n, fdi)) > 0) {
308 char *value; 309 char *value;
309 int len; 310 int len;
310 311
@@ -384,7 +385,7 @@ int build_pinned_obj_table(struct pinned_obj_table *tab,
384 while ((ftse = fts_read(fts))) { 385 while ((ftse = fts_read(fts))) {
385 if (!(ftse->fts_info & FTS_F)) 386 if (!(ftse->fts_info & FTS_F))
386 continue; 387 continue;
387 fd = open_obj_pinned(ftse->fts_path); 388 fd = open_obj_pinned(ftse->fts_path, true);
388 if (fd < 0) 389 if (fd < 0)
389 continue; 390 continue;
390 391
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 28322ace2856..a8bf1e2d9818 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -127,7 +127,7 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
127int get_fd_type(int fd); 127int get_fd_type(int fd);
128const char *get_fd_type_name(enum bpf_obj_type type); 128const char *get_fd_type_name(enum bpf_obj_type type);
129char *get_fdinfo(int fd, const char *key); 129char *get_fdinfo(int fd, const char *key);
130int open_obj_pinned(char *path); 130int open_obj_pinned(char *path, bool quiet);
131int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type); 131int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
132int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)); 132int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
133int do_pin_fd(int fd, const char *name); 133int do_pin_fd(int fd, const char *name);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 5302ee282409..ccee180dfb76 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -357,10 +357,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
357 if (!hash_empty(prog_table.table)) { 357 if (!hash_empty(prog_table.table)) {
358 struct pinned_obj *obj; 358 struct pinned_obj *obj;
359 359
360 printf("\n");
361 hash_for_each_possible(prog_table.table, obj, hash, info->id) { 360 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
362 if (obj->id == info->id) 361 if (obj->id == info->id)
363 printf("\tpinned %s\n", obj->path); 362 printf("\n\tpinned %s", obj->path);
364 } 363 }
365 } 364 }
366 365
@@ -845,6 +844,7 @@ static int do_load(int argc, char **argv)
845 } 844 }
846 NEXT_ARG(); 845 NEXT_ARG();
847 } else if (is_prefix(*argv, "map")) { 846 } else if (is_prefix(*argv, "map")) {
847 void *new_map_replace;
848 char *endptr, *name; 848 char *endptr, *name;
849 int fd; 849 int fd;
850 850
@@ -878,12 +878,15 @@ static int do_load(int argc, char **argv)
878 if (fd < 0) 878 if (fd < 0)
879 goto err_free_reuse_maps; 879 goto err_free_reuse_maps;
880 880
881 map_replace = reallocarray(map_replace, old_map_fds + 1, 881 new_map_replace = reallocarray(map_replace,
882 sizeof(*map_replace)); 882 old_map_fds + 1,
883 if (!map_replace) { 883 sizeof(*map_replace));
884 if (!new_map_replace) {
884 p_err("mem alloc failed"); 885 p_err("mem alloc failed");
885 goto err_free_reuse_maps; 886 goto err_free_reuse_maps;
886 } 887 }
888 map_replace = new_map_replace;
889
887 map_replace[old_map_fds].idx = idx; 890 map_replace[old_map_fds].idx = idx;
888 map_replace[old_map_fds].name = name; 891 map_replace[old_map_fds].name = name;
889 map_replace[old_map_fds].fd = fd; 892 map_replace[old_map_fds].fd = fd;
diff --git a/tools/include/uapi/linux/pkt_cls.h b/tools/include/uapi/linux/pkt_cls.h
new file mode 100644
index 000000000000..401d0c1e612d
--- /dev/null
+++ b/tools/include/uapi/linux/pkt_cls.h
@@ -0,0 +1,612 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __LINUX_PKT_CLS_H
3#define __LINUX_PKT_CLS_H
4
5#include <linux/types.h>
6#include <linux/pkt_sched.h>
7
8#define TC_COOKIE_MAX_SIZE 16
9
10/* Action attributes */
11enum {
12 TCA_ACT_UNSPEC,
13 TCA_ACT_KIND,
14 TCA_ACT_OPTIONS,
15 TCA_ACT_INDEX,
16 TCA_ACT_STATS,
17 TCA_ACT_PAD,
18 TCA_ACT_COOKIE,
19 __TCA_ACT_MAX
20};
21
22#define TCA_ACT_MAX __TCA_ACT_MAX
23#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
24#define TCA_ACT_MAX_PRIO 32
25#define TCA_ACT_BIND 1
26#define TCA_ACT_NOBIND 0
27#define TCA_ACT_UNBIND 1
28#define TCA_ACT_NOUNBIND 0
29#define TCA_ACT_REPLACE 1
30#define TCA_ACT_NOREPLACE 0
31
32#define TC_ACT_UNSPEC (-1)
33#define TC_ACT_OK 0
34#define TC_ACT_RECLASSIFY 1
35#define TC_ACT_SHOT 2
36#define TC_ACT_PIPE 3
37#define TC_ACT_STOLEN 4
38#define TC_ACT_QUEUED 5
39#define TC_ACT_REPEAT 6
40#define TC_ACT_REDIRECT 7
41#define TC_ACT_TRAP 8 /* For hw path, this means "trap to cpu"
42 * and don't further process the frame
43 * in hardware. For sw path, this is
44 * equivalent of TC_ACT_STOLEN - drop
45 * the skb and act like everything
46 * is alright.
47 */
48#define TC_ACT_VALUE_MAX TC_ACT_TRAP
49
50/* There is a special kind of actions called "extended actions",
51 * which need a value parameter. These have a local opcode located in
52 * the highest nibble, starting from 1. The rest of the bits
53 * are used to carry the value. These two parts together make
54 * a combined opcode.
55 */
56#define __TC_ACT_EXT_SHIFT 28
57#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
58#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
59#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
60#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
61
62#define TC_ACT_JUMP __TC_ACT_EXT(1)
63#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
64#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
65
66/* Action type identifiers*/
67enum {
68 TCA_ID_UNSPEC=0,
69 TCA_ID_POLICE=1,
70 /* other actions go here */
71 __TCA_ID_MAX=255
72};
73
74#define TCA_ID_MAX __TCA_ID_MAX
75
76struct tc_police {
77 __u32 index;
78 int action;
79#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
80#define TC_POLICE_OK TC_ACT_OK
81#define TC_POLICE_RECLASSIFY TC_ACT_RECLASSIFY
82#define TC_POLICE_SHOT TC_ACT_SHOT
83#define TC_POLICE_PIPE TC_ACT_PIPE
84
85 __u32 limit;
86 __u32 burst;
87 __u32 mtu;
88 struct tc_ratespec rate;
89 struct tc_ratespec peakrate;
90 int refcnt;
91 int bindcnt;
92 __u32 capab;
93};
94
95struct tcf_t {
96 __u64 install;
97 __u64 lastuse;
98 __u64 expires;
99 __u64 firstuse;
100};
101
102struct tc_cnt {
103 int refcnt;
104 int bindcnt;
105};
106
107#define tc_gen \
108 __u32 index; \
109 __u32 capab; \
110 int action; \
111 int refcnt; \
112 int bindcnt
113
114enum {
115 TCA_POLICE_UNSPEC,
116 TCA_POLICE_TBF,
117 TCA_POLICE_RATE,
118 TCA_POLICE_PEAKRATE,
119 TCA_POLICE_AVRATE,
120 TCA_POLICE_RESULT,
121 TCA_POLICE_TM,
122 TCA_POLICE_PAD,
123 __TCA_POLICE_MAX
124#define TCA_POLICE_RESULT TCA_POLICE_RESULT
125};
126
127#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
128
129/* tca flags definitions */
130#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) /* don't offload filter to HW */
131#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */
132#define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */
133#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
134#define TCA_CLS_FLAGS_VERBOSE (1 << 4) /* verbose logging */
135
136/* U32 filters */
137
138#define TC_U32_HTID(h) ((h)&0xFFF00000)
139#define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20)
140#define TC_U32_HASH(h) (((h)>>12)&0xFF)
141#define TC_U32_NODE(h) ((h)&0xFFF)
142#define TC_U32_KEY(h) ((h)&0xFFFFF)
143#define TC_U32_UNSPEC 0
144#define TC_U32_ROOT (0xFFF00000)
145
146enum {
147 TCA_U32_UNSPEC,
148 TCA_U32_CLASSID,
149 TCA_U32_HASH,
150 TCA_U32_LINK,
151 TCA_U32_DIVISOR,
152 TCA_U32_SEL,
153 TCA_U32_POLICE,
154 TCA_U32_ACT,
155 TCA_U32_INDEV,
156 TCA_U32_PCNT,
157 TCA_U32_MARK,
158 TCA_U32_FLAGS,
159 TCA_U32_PAD,
160 __TCA_U32_MAX
161};
162
163#define TCA_U32_MAX (__TCA_U32_MAX - 1)
164
165struct tc_u32_key {
166 __be32 mask;
167 __be32 val;
168 int off;
169 int offmask;
170};
171
172struct tc_u32_sel {
173 unsigned char flags;
174 unsigned char offshift;
175 unsigned char nkeys;
176
177 __be16 offmask;
178 __u16 off;
179 short offoff;
180
181 short hoff;
182 __be32 hmask;
183 struct tc_u32_key keys[0];
184};
185
186struct tc_u32_mark {
187 __u32 val;
188 __u32 mask;
189 __u32 success;
190};
191
192struct tc_u32_pcnt {
193 __u64 rcnt;
194 __u64 rhit;
195 __u64 kcnts[0];
196};
197
198/* Flags */
199
200#define TC_U32_TERMINAL 1
201#define TC_U32_OFFSET 2
202#define TC_U32_VAROFFSET 4
203#define TC_U32_EAT 8
204
205#define TC_U32_MAXDEPTH 8
206
207
208/* RSVP filter */
209
210enum {
211 TCA_RSVP_UNSPEC,
212 TCA_RSVP_CLASSID,
213 TCA_RSVP_DST,
214 TCA_RSVP_SRC,
215 TCA_RSVP_PINFO,
216 TCA_RSVP_POLICE,
217 TCA_RSVP_ACT,
218 __TCA_RSVP_MAX
219};
220
221#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
222
223struct tc_rsvp_gpi {
224 __u32 key;
225 __u32 mask;
226 int offset;
227};
228
229struct tc_rsvp_pinfo {
230 struct tc_rsvp_gpi dpi;
231 struct tc_rsvp_gpi spi;
232 __u8 protocol;
233 __u8 tunnelid;
234 __u8 tunnelhdr;
235 __u8 pad;
236};
237
238/* ROUTE filter */
239
240enum {
241 TCA_ROUTE4_UNSPEC,
242 TCA_ROUTE4_CLASSID,
243 TCA_ROUTE4_TO,
244 TCA_ROUTE4_FROM,
245 TCA_ROUTE4_IIF,
246 TCA_ROUTE4_POLICE,
247 TCA_ROUTE4_ACT,
248 __TCA_ROUTE4_MAX
249};
250
251#define TCA_ROUTE4_MAX (__TCA_ROUTE4_MAX - 1)
252
253
254/* FW filter */
255
256enum {
257 TCA_FW_UNSPEC,
258 TCA_FW_CLASSID,
259 TCA_FW_POLICE,
260 TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */
261 TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
262 TCA_FW_MASK,
263 __TCA_FW_MAX
264};
265
266#define TCA_FW_MAX (__TCA_FW_MAX - 1)
267
268/* TC index filter */
269
270enum {
271 TCA_TCINDEX_UNSPEC,
272 TCA_TCINDEX_HASH,
273 TCA_TCINDEX_MASK,
274 TCA_TCINDEX_SHIFT,
275 TCA_TCINDEX_FALL_THROUGH,
276 TCA_TCINDEX_CLASSID,
277 TCA_TCINDEX_POLICE,
278 TCA_TCINDEX_ACT,
279 __TCA_TCINDEX_MAX
280};
281
282#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)
283
284/* Flow filter */
285
286enum {
287 FLOW_KEY_SRC,
288 FLOW_KEY_DST,
289 FLOW_KEY_PROTO,
290 FLOW_KEY_PROTO_SRC,
291 FLOW_KEY_PROTO_DST,
292 FLOW_KEY_IIF,
293 FLOW_KEY_PRIORITY,
294 FLOW_KEY_MARK,
295 FLOW_KEY_NFCT,
296 FLOW_KEY_NFCT_SRC,
297 FLOW_KEY_NFCT_DST,
298 FLOW_KEY_NFCT_PROTO_SRC,
299 FLOW_KEY_NFCT_PROTO_DST,
300 FLOW_KEY_RTCLASSID,
301 FLOW_KEY_SKUID,
302 FLOW_KEY_SKGID,
303 FLOW_KEY_VLAN_TAG,
304 FLOW_KEY_RXHASH,
305 __FLOW_KEY_MAX,
306};
307
308#define FLOW_KEY_MAX (__FLOW_KEY_MAX - 1)
309
310enum {
311 FLOW_MODE_MAP,
312 FLOW_MODE_HASH,
313};
314
315enum {
316 TCA_FLOW_UNSPEC,
317 TCA_FLOW_KEYS,
318 TCA_FLOW_MODE,
319 TCA_FLOW_BASECLASS,
320 TCA_FLOW_RSHIFT,
321 TCA_FLOW_ADDEND,
322 TCA_FLOW_MASK,
323 TCA_FLOW_XOR,
324 TCA_FLOW_DIVISOR,
325 TCA_FLOW_ACT,
326 TCA_FLOW_POLICE,
327 TCA_FLOW_EMATCHES,
328 TCA_FLOW_PERTURB,
329 __TCA_FLOW_MAX
330};
331
332#define TCA_FLOW_MAX (__TCA_FLOW_MAX - 1)
333
334/* Basic filter */
335
336enum {
337 TCA_BASIC_UNSPEC,
338 TCA_BASIC_CLASSID,
339 TCA_BASIC_EMATCHES,
340 TCA_BASIC_ACT,
341 TCA_BASIC_POLICE,
342 __TCA_BASIC_MAX
343};
344
345#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
346
347
348/* Cgroup classifier */
349
350enum {
351 TCA_CGROUP_UNSPEC,
352 TCA_CGROUP_ACT,
353 TCA_CGROUP_POLICE,
354 TCA_CGROUP_EMATCHES,
355 __TCA_CGROUP_MAX,
356};
357
358#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
359
360/* BPF classifier */
361
362#define TCA_BPF_FLAG_ACT_DIRECT (1 << 0)
363
364enum {
365 TCA_BPF_UNSPEC,
366 TCA_BPF_ACT,
367 TCA_BPF_POLICE,
368 TCA_BPF_CLASSID,
369 TCA_BPF_OPS_LEN,
370 TCA_BPF_OPS,
371 TCA_BPF_FD,
372 TCA_BPF_NAME,
373 TCA_BPF_FLAGS,
374 TCA_BPF_FLAGS_GEN,
375 TCA_BPF_TAG,
376 TCA_BPF_ID,
377 __TCA_BPF_MAX,
378};
379
380#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
381
382/* Flower classifier */
383
384enum {
385 TCA_FLOWER_UNSPEC,
386 TCA_FLOWER_CLASSID,
387 TCA_FLOWER_INDEV,
388 TCA_FLOWER_ACT,
389 TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
390 TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
391 TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
392 TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
393 TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
394 TCA_FLOWER_KEY_IP_PROTO, /* u8 */
395 TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
396 TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
397 TCA_FLOWER_KEY_IPV4_DST, /* be32 */
398 TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
399 TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
400 TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
401 TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
402 TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
403 TCA_FLOWER_KEY_TCP_SRC, /* be16 */
404 TCA_FLOWER_KEY_TCP_DST, /* be16 */
405 TCA_FLOWER_KEY_UDP_SRC, /* be16 */
406 TCA_FLOWER_KEY_UDP_DST, /* be16 */
407
408 TCA_FLOWER_FLAGS,
409 TCA_FLOWER_KEY_VLAN_ID, /* be16 */
410 TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
411 TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
412
413 TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
414 TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
415 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
416 TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
417 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
418 TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
419 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
420 TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
421 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
422
423 TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
424 TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
425 TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
426 TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
427 TCA_FLOWER_KEY_SCTP_SRC_MASK, /* be16 */
428 TCA_FLOWER_KEY_SCTP_DST_MASK, /* be16 */
429
430 TCA_FLOWER_KEY_SCTP_SRC, /* be16 */
431 TCA_FLOWER_KEY_SCTP_DST, /* be16 */
432
433 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, /* be16 */
434 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, /* be16 */
435 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, /* be16 */
436 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, /* be16 */
437
438 TCA_FLOWER_KEY_FLAGS, /* be32 */
439 TCA_FLOWER_KEY_FLAGS_MASK, /* be32 */
440
441 TCA_FLOWER_KEY_ICMPV4_CODE, /* u8 */
442 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,/* u8 */
443 TCA_FLOWER_KEY_ICMPV4_TYPE, /* u8 */
444 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,/* u8 */
445 TCA_FLOWER_KEY_ICMPV6_CODE, /* u8 */
446 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,/* u8 */
447 TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */
448 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
449
450 TCA_FLOWER_KEY_ARP_SIP, /* be32 */
451 TCA_FLOWER_KEY_ARP_SIP_MASK, /* be32 */
452 TCA_FLOWER_KEY_ARP_TIP, /* be32 */
453 TCA_FLOWER_KEY_ARP_TIP_MASK, /* be32 */
454 TCA_FLOWER_KEY_ARP_OP, /* u8 */
455 TCA_FLOWER_KEY_ARP_OP_MASK, /* u8 */
456 TCA_FLOWER_KEY_ARP_SHA, /* ETH_ALEN */
457 TCA_FLOWER_KEY_ARP_SHA_MASK, /* ETH_ALEN */
458 TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */
459 TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */
460
461 TCA_FLOWER_KEY_MPLS_TTL, /* u8 - 8 bits */
462 TCA_FLOWER_KEY_MPLS_BOS, /* u8 - 1 bit */
463 TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */
464 TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */
465
466 TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */
467 TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */
468
469 TCA_FLOWER_KEY_IP_TOS, /* u8 */
470 TCA_FLOWER_KEY_IP_TOS_MASK, /* u8 */
471 TCA_FLOWER_KEY_IP_TTL, /* u8 */
472 TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
473
474 TCA_FLOWER_KEY_CVLAN_ID, /* be16 */
475 TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */
476 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */
477
478 TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */
479 TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
480 TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */
481 TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
482
483 TCA_FLOWER_KEY_ENC_OPTS,
484 TCA_FLOWER_KEY_ENC_OPTS_MASK,
485
486 TCA_FLOWER_IN_HW_COUNT,
487
488 __TCA_FLOWER_MAX,
489};
490
491#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
492
493enum {
494 TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
495 TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
496 * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
497 * attributes
498 */
499 __TCA_FLOWER_KEY_ENC_OPTS_MAX,
500};
501
502#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
503
504enum {
505 TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
506 TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */
507 TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
508 TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
509
510 __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
511};
512
513#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
514 (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
515
516enum {
517 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
518 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
519};
520
521/* Match-all classifier */
522
523enum {
524 TCA_MATCHALL_UNSPEC,
525 TCA_MATCHALL_CLASSID,
526 TCA_MATCHALL_ACT,
527 TCA_MATCHALL_FLAGS,
528 __TCA_MATCHALL_MAX,
529};
530
531#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1)
532
533/* Extended Matches */
534
535struct tcf_ematch_tree_hdr {
536 __u16 nmatches;
537 __u16 progid;
538};
539
540enum {
541 TCA_EMATCH_TREE_UNSPEC,
542 TCA_EMATCH_TREE_HDR,
543 TCA_EMATCH_TREE_LIST,
544 __TCA_EMATCH_TREE_MAX
545};
546#define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1)
547
548struct tcf_ematch_hdr {
549 __u16 matchid;
550 __u16 kind;
551 __u16 flags;
552 __u16 pad; /* currently unused */
553};
554
555/* 0 1
556 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
557 * +-----------------------+-+-+---+
558 * | Unused |S|I| R |
559 * +-----------------------+-+-+---+
560 *
561 * R(2) ::= relation to next ematch
562 * where: 0 0 END (last ematch)
563 * 0 1 AND
564 * 1 0 OR
565 * 1 1 Unused (invalid)
566 * I(1) ::= invert result
567 * S(1) ::= simple payload
568 */
569#define TCF_EM_REL_END 0
570#define TCF_EM_REL_AND (1<<0)
571#define TCF_EM_REL_OR (1<<1)
572#define TCF_EM_INVERT (1<<2)
573#define TCF_EM_SIMPLE (1<<3)
574
575#define TCF_EM_REL_MASK 3
576#define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK)
577
578enum {
579 TCF_LAYER_LINK,
580 TCF_LAYER_NETWORK,
581 TCF_LAYER_TRANSPORT,
582 __TCF_LAYER_MAX
583};
584#define TCF_LAYER_MAX (__TCF_LAYER_MAX - 1)
585
586/* Ematch type assignments
587 * 1..32767 Reserved for ematches inside kernel tree
588 * 32768..65535 Free to use, not reliable
589 */
590#define TCF_EM_CONTAINER 0
591#define TCF_EM_CMP 1
592#define TCF_EM_NBYTE 2
593#define TCF_EM_U32 3
594#define TCF_EM_META 4
595#define TCF_EM_TEXT 5
596#define TCF_EM_VLAN 6
597#define TCF_EM_CANID 7
598#define TCF_EM_IPSET 8
599#define TCF_EM_IPT 9
600#define TCF_EM_MAX 9
601
602enum {
603 TCF_EM_PROG_TC
604};
605
606enum {
607 TCF_EM_OPND_EQ,
608 TCF_EM_OPND_GT,
609 TCF_EM_OPND_LT
610};
611
612#endif
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b17201edfa09 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
212#define PR_SET_SPECULATION_CTRL 53 212#define PR_SET_SPECULATION_CTRL 53
213/* Speculation control variants */ 213/* Speculation control variants */
214# define PR_SPEC_STORE_BYPASS 0 214# define PR_SPEC_STORE_BYPASS 0
215# define PR_SPEC_INDIRECT_BRANCH 1
215/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ 216/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
216# define PR_SPEC_NOT_AFFECTED 0 217# define PR_SPEC_NOT_AFFECTED 0
217# define PR_SPEC_PRCTL (1UL << 0) 218# define PR_SPEC_PRCTL (1UL << 0)
diff --git a/tools/include/uapi/linux/tc_act/tc_bpf.h b/tools/include/uapi/linux/tc_act/tc_bpf.h
new file mode 100644
index 000000000000..6e89a5df49a4
--- /dev/null
+++ b/tools/include/uapi/linux/tc_act/tc_bpf.h
@@ -0,0 +1,37 @@
1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2/*
3 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef __LINUX_TC_BPF_H
12#define __LINUX_TC_BPF_H
13
14#include <linux/pkt_cls.h>
15
16#define TCA_ACT_BPF 13
17
18struct tc_act_bpf {
19 tc_gen;
20};
21
22enum {
23 TCA_ACT_BPF_UNSPEC,
24 TCA_ACT_BPF_TM,
25 TCA_ACT_BPF_PARMS,
26 TCA_ACT_BPF_OPS_LEN,
27 TCA_ACT_BPF_OPS,
28 TCA_ACT_BPF_FD,
29 TCA_ACT_BPF_NAME,
30 TCA_ACT_BPF_PAD,
31 TCA_ACT_BPF_TAG,
32 TCA_ACT_BPF_ID,
33 __TCA_ACT_BPF_MAX,
34};
35#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
36
37#endif
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 6dbb9fae0f9d..b8f3cca8e58b 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -31,6 +31,8 @@
31#include "elf.h" 31#include "elf.h"
32#include "warn.h" 32#include "warn.h"
33 33
34#define MAX_NAME_LEN 128
35
34struct section *find_section_by_name(struct elf *elf, const char *name) 36struct section *find_section_by_name(struct elf *elf, const char *name)
35{ 37{
36 struct section *sec; 38 struct section *sec;
@@ -298,6 +300,8 @@ static int read_symbols(struct elf *elf)
298 /* Create parent/child links for any cold subfunctions */ 300 /* Create parent/child links for any cold subfunctions */
299 list_for_each_entry(sec, &elf->sections, list) { 301 list_for_each_entry(sec, &elf->sections, list) {
300 list_for_each_entry(sym, &sec->symbol_list, list) { 302 list_for_each_entry(sym, &sec->symbol_list, list) {
303 char pname[MAX_NAME_LEN + 1];
304 size_t pnamelen;
301 if (sym->type != STT_FUNC) 305 if (sym->type != STT_FUNC)
302 continue; 306 continue;
303 sym->pfunc = sym->cfunc = sym; 307 sym->pfunc = sym->cfunc = sym;
@@ -305,14 +309,21 @@ static int read_symbols(struct elf *elf)
305 if (!coldstr) 309 if (!coldstr)
306 continue; 310 continue;
307 311
308 coldstr[0] = '\0'; 312 pnamelen = coldstr - sym->name;
309 pfunc = find_symbol_by_name(elf, sym->name); 313 if (pnamelen > MAX_NAME_LEN) {
310 coldstr[0] = '.'; 314 WARN("%s(): parent function name exceeds maximum length of %d characters",
315 sym->name, MAX_NAME_LEN);
316 return -1;
317 }
318
319 strncpy(pname, sym->name, pnamelen);
320 pname[pnamelen] = '\0';
321 pfunc = find_symbol_by_name(elf, pname);
311 322
312 if (!pfunc) { 323 if (!pfunc) {
313 WARN("%s(): can't find parent function", 324 WARN("%s(): can't find parent function",
314 sym->name); 325 sym->name);
315 goto err; 326 return -1;
316 } 327 }
317 328
318 sym->pfunc = pfunc; 329 sym->pfunc = pfunc;
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 1dd5f4fcffd5..db66a952c173 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -129,7 +129,7 @@ WARNINGS += $(call cc-supports,-Wno-pointer-sign)
129WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) 129WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
130WARNINGS += -Wshadow 130WARNINGS += -Wshadow
131 131
132CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \ 132override CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
133 -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE 133 -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE
134 134
135UTIL_OBJS = utils/helpers/amd.o utils/helpers/msr.o \ 135UTIL_OBJS = utils/helpers/amd.o utils/helpers/msr.o \
@@ -156,12 +156,12 @@ LIB_SRC = lib/cpufreq.c lib/cpupower.c lib/cpuidle.c
156LIB_OBJS = lib/cpufreq.o lib/cpupower.o lib/cpuidle.o 156LIB_OBJS = lib/cpufreq.o lib/cpupower.o lib/cpuidle.o
157LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS)) 157LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS))
158 158
159CFLAGS += -pipe 159override CFLAGS += -pipe
160 160
161ifeq ($(strip $(NLS)),true) 161ifeq ($(strip $(NLS)),true)
162 INSTALL_NLS += install-gmo 162 INSTALL_NLS += install-gmo
163 COMPILE_NLS += create-gmo 163 COMPILE_NLS += create-gmo
164 CFLAGS += -DNLS 164 override CFLAGS += -DNLS
165endif 165endif
166 166
167ifeq ($(strip $(CPUFREQ_BENCH)),true) 167ifeq ($(strip $(CPUFREQ_BENCH)),true)
@@ -175,7 +175,7 @@ ifeq ($(strip $(STATIC)),true)
175 UTIL_SRC += $(LIB_SRC) 175 UTIL_SRC += $(LIB_SRC)
176endif 176endif
177 177
178CFLAGS += $(WARNINGS) 178override CFLAGS += $(WARNINGS)
179 179
180ifeq ($(strip $(V)),false) 180ifeq ($(strip $(V)),false)
181 QUIET=@ 181 QUIET=@
@@ -188,10 +188,10 @@ export QUIET ECHO
188 188
189# if DEBUG is enabled, then we do not strip or optimize 189# if DEBUG is enabled, then we do not strip or optimize
190ifeq ($(strip $(DEBUG)),true) 190ifeq ($(strip $(DEBUG)),true)
191 CFLAGS += -O1 -g -DDEBUG 191 override CFLAGS += -O1 -g -DDEBUG
192 STRIPCMD = /bin/true -Since_we_are_debugging 192 STRIPCMD = /bin/true -Since_we_are_debugging
193else 193else
194 CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer 194 override CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
195 STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment 195 STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
196endif 196endif
197 197
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index d79ab161cc75..f68b4bc55273 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -9,7 +9,7 @@ endif
9ifeq ($(strip $(STATIC)),true) 9ifeq ($(strip $(STATIC)),true)
10LIBS = -L../ -L$(OUTPUT) -lm 10LIBS = -L../ -L$(OUTPUT) -lm
11OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \ 11OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \
12 $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o 12 $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o
13else 13else
14LIBS = -L../ -L$(OUTPUT) -lm -lcpupower 14LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
15OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o 15OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile
index 59af84b8ef45..b1b6c43644e7 100644
--- a/tools/power/cpupower/debug/x86_64/Makefile
+++ b/tools/power/cpupower/debug/x86_64/Makefile
@@ -13,10 +13,10 @@ INSTALL = /usr/bin/install
13default: all 13default: all
14 14
15$(OUTPUT)centrino-decode: ../i386/centrino-decode.c 15$(OUTPUT)centrino-decode: ../i386/centrino-decode.c
16 $(CC) $(CFLAGS) -o $@ $< 16 $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
17 17
18$(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c 18$(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c
19 $(CC) $(CFLAGS) -o $@ $< 19 $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
20 20
21all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode 21all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
22 22
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
index 1b993fe1ce23..0c0f3e3f0d80 100644
--- a/tools/power/cpupower/lib/cpufreq.c
+++ b/tools/power/cpupower/lib/cpufreq.c
@@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
28 28
29 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", 29 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
30 cpu, fname); 30 cpu, fname);
31 return sysfs_read_file(path, buf, buflen); 31 return cpupower_read_sysfs(path, buf, buflen);
32} 32}
33 33
34/* helper function to write a new value to a /sys file */ 34/* helper function to write a new value to a /sys file */
diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
index 9bd4c7655fdb..852d25462388 100644
--- a/tools/power/cpupower/lib/cpuidle.c
+++ b/tools/power/cpupower/lib/cpuidle.c
@@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
319 319
320 snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); 320 snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
321 321
322 return sysfs_read_file(path, buf, buflen); 322 return cpupower_read_sysfs(path, buf, buflen);
323} 323}
324 324
325 325
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
index 9c395ec924de..9711d628b0f4 100644
--- a/tools/power/cpupower/lib/cpupower.c
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -15,7 +15,7 @@
15#include "cpupower.h" 15#include "cpupower.h"
16#include "cpupower_intern.h" 16#include "cpupower_intern.h"
17 17
18unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) 18unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen)
19{ 19{
20 int fd; 20 int fd;
21 ssize_t numread; 21 ssize_t numread;
@@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re
95 95
96 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s", 96 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
97 cpu, fname); 97 cpu, fname);
98 if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) 98 if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0)
99 return -1; 99 return -1;
100 *result = strtol(linebuf, &endp, 0); 100 *result = strtol(linebuf, &endp, 0);
101 if (endp == linebuf || errno == ERANGE) 101 if (endp == linebuf || errno == ERANGE)
diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h
index 92affdfbe417..4887c76d23f8 100644
--- a/tools/power/cpupower/lib/cpupower_intern.h
+++ b/tools/power/cpupower/lib/cpupower_intern.h
@@ -3,4 +3,4 @@
3#define MAX_LINE_LEN 4096 3#define MAX_LINE_LEN 4096
4#define SYSFS_PATH_MAX 255 4#define SYSFS_PATH_MAX 255
5 5
6unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); 6unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index f1fe492c8e17..f0017c831e57 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -24,6 +24,7 @@ TARGETS += memory-hotplug
24TARGETS += mount 24TARGETS += mount
25TARGETS += mqueue 25TARGETS += mqueue
26TARGETS += net 26TARGETS += net
27TARGETS += netfilter
27TARGETS += nsfs 28TARGETS += nsfs
28TARGETS += powerpc 29TARGETS += powerpc
29TARGETS += proc 30TARGETS += proc
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
index 7887df693399..44ed7f29f8ab 100644
--- a/tools/testing/selftests/bpf/test_netcnt.c
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -81,7 +81,10 @@ int main(int argc, char **argv)
81 goto err; 81 goto err;
82 } 82 }
83 83
84 assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0); 84 if (system("which ping6 &>/dev/null") == 0)
85 assert(!system("ping6 localhost -c 10000 -f -q > /dev/null"));
86 else
87 assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null"));
85 88
86 if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL, 89 if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
87 &prog_cnt)) { 90 &prog_cnt)) {
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 6f61df62f690..550b7e46bf4a 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -13896,6 +13896,25 @@ static struct bpf_test tests[] = {
13896 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 13896 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13897 .result = ACCEPT, 13897 .result = ACCEPT,
13898 }, 13898 },
13899 {
13900 "calls: ctx read at start of subprog",
13901 .insns = {
13902 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13903 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13904 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
13905 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13906 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13908 BPF_EXIT_INSN(),
13909 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
13910 BPF_MOV64_IMM(BPF_REG_0, 0),
13911 BPF_EXIT_INSN(),
13912 },
13913 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
13914 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
13915 .result_unpriv = REJECT,
13916 .result = ACCEPT,
13917 },
13899}; 13918};
13900 13919
13901static int probe_filter_length(const struct bpf_insn *fp) 13920static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
new file mode 100644
index 000000000000..47ed6cef93fb
--- /dev/null
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -0,0 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0
2# Makefile for netfilter selftests
3
4TEST_PROGS := nft_trans_stress.sh
5
6include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
new file mode 100644
index 000000000000..1017313e41a8
--- /dev/null
+++ b/tools/testing/selftests/netfilter/config
@@ -0,0 +1,2 @@
1CONFIG_NET_NS=y
2NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
new file mode 100755
index 000000000000..f1affd12c4b1
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
@@ -0,0 +1,78 @@
1#!/bin/bash
2#
3# This test is for stress-testing the nf_tables config plane path vs.
4# packet path processing: Make sure we never release rules that are
5# still visible to other cpus.
6#
7# set -e
8
9# Kselftest framework requirement - SKIP code is 4.
10ksft_skip=4
11
12testns=testns1
13tables="foo bar baz quux"
14
15nft --version > /dev/null 2>&1
16if [ $? -ne 0 ];then
17 echo "SKIP: Could not run test without nft tool"
18 exit $ksft_skip
19fi
20
21ip -Version > /dev/null 2>&1
22if [ $? -ne 0 ];then
23 echo "SKIP: Could not run test without ip tool"
24 exit $ksft_skip
25fi
26
27tmp=$(mktemp)
28
29for table in $tables; do
30 echo add table inet "$table" >> "$tmp"
31 echo flush table inet "$table" >> "$tmp"
32
33 echo "add chain inet $table INPUT { type filter hook input priority 0; }" >> "$tmp"
34 echo "add chain inet $table OUTPUT { type filter hook output priority 0; }" >> "$tmp"
35 for c in $(seq 1 400); do
36 chain=$(printf "chain%03u" "$c")
37 echo "add chain inet $table $chain" >> "$tmp"
38 done
39
40 for c in $(seq 1 400); do
41 chain=$(printf "chain%03u" "$c")
42 for BASE in INPUT OUTPUT; do
43 echo "add rule inet $table $BASE counter jump $chain" >> "$tmp"
44 done
45 echo "add rule inet $table $chain counter return" >> "$tmp"
46 done
47done
48
49ip netns add "$testns"
50ip -netns "$testns" link set lo up
51
52lscpu | grep ^CPU\(s\): | ( read cpu cpunum ;
53cpunum=$((cpunum-1))
54for i in $(seq 0 $cpunum);do
55 mask=$(printf 0x%x $((1<<$i)))
56 ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
57 ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
58done)
59
60sleep 1
61
62for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
63
64for table in $tables;do
65 randsleep=$((RANDOM%10))
66 sleep $randsleep
67 ip netns exec "$testns" nft delete table inet $table 2>/dev/null
68done
69
70randsleep=$((RANDOM%10))
71sleep $randsleep
72
73pkill -9 ping
74
75wait
76
77rm -f "$tmp"
78ip netns del "$testns"
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 6f1f4a6e1ecb..85744425b08d 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -13,7 +13,7 @@
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16/* Test readlink /proc/self/map_files/... with address 0. */ 16/* Test readlink /proc/self/map_files/... with minimum address. */
17#include <errno.h> 17#include <errno.h>
18#include <sys/types.h> 18#include <sys/types.h>
19#include <sys/stat.h> 19#include <sys/stat.h>
@@ -47,6 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
47int main(void) 47int main(void)
48{ 48{
49 const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE); 49 const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
50#ifdef __arm__
51 unsigned long va = 2 * PAGE_SIZE;
52#else
53 unsigned long va = 0;
54#endif
50 void *p; 55 void *p;
51 int fd; 56 int fd;
52 unsigned long a, b; 57 unsigned long a, b;
@@ -55,7 +60,7 @@ int main(void)
55 if (fd == -1) 60 if (fd == -1)
56 return 1; 61 return 1;
57 62
58 p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0); 63 p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
59 if (p == MAP_FAILED) { 64 if (p == MAP_FAILED) {
60 if (errno == EPERM) 65 if (errno == EPERM)
61 return 2; 66 return 2;
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index 87a04a8a5945..7607ba3e3cbe 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -134,9 +134,9 @@ def exec_cmd(args, pm, stage, command):
134 (rawout, serr) = proc.communicate() 134 (rawout, serr) = proc.communicate()
135 135
136 if proc.returncode != 0 and len(serr) > 0: 136 if proc.returncode != 0 and len(serr) > 0:
137 foutput = serr.decode("utf-8") 137 foutput = serr.decode("utf-8", errors="ignore")
138 else: 138 else:
139 foutput = rawout.decode("utf-8") 139 foutput = rawout.decode("utf-8", errors="ignore")
140 140
141 proc.stdout.close() 141 proc.stdout.close()
142 proc.stderr.close() 142 proc.stderr.close()
@@ -169,6 +169,8 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
169 file=sys.stderr) 169 file=sys.stderr)
170 print("\n{} *** Error message: \"{}\"".format(prefix, foutput), 170 print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
171 file=sys.stderr) 171 file=sys.stderr)
172 print("returncode {}; expected {}".format(proc.returncode,
173 exit_codes))
172 print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr) 174 print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
173 print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr) 175 print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
174 print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr) 176 print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
@@ -195,12 +197,18 @@ def run_one_test(pm, args, index, tidx):
195 print('-----> execute stage') 197 print('-----> execute stage')
196 pm.call_pre_execute() 198 pm.call_pre_execute()
197 (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"]) 199 (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
198 exit_code = p.returncode 200 if p:
201 exit_code = p.returncode
202 else:
203 exit_code = None
204
199 pm.call_post_execute() 205 pm.call_post_execute()
200 206
201 if (exit_code != int(tidx["expExitCode"])): 207 if (exit_code is None or exit_code != int(tidx["expExitCode"])):
202 result = False 208 result = False
203 print("exit:", exit_code, int(tidx["expExitCode"])) 209 print("exit: {!r}".format(exit_code))
210 print("exit: {}".format(int(tidx["expExitCode"])))
211 #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
204 print(procout) 212 print(procout)
205 else: 213 else:
206 if args.verbose > 0: 214 if args.verbose > 0: