aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap1
-rw-r--r--CREDITS8
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-trigger-pattern4
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt65
-rw-r--r--Documentation/admin-guide/pm/cpufreq.rst2
-rw-r--r--Documentation/admin-guide/security-bugs.rst21
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/core-api/xarray.rst52
-rw-r--r--Documentation/cpu-freq/cpufreq-stats.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/shmobile.txt2
-rw-r--r--Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt65
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-omap.txt8
-rw-r--r--Documentation/devicetree/bindings/input/input-reset.txt2
-rw-r--r--Documentation/devicetree/bindings/media/rockchip-vpu.txt29
-rw-r--r--Documentation/devicetree/bindings/net/can/holt_hi311x.txt2
-rw-r--r--Documentation/devicetree/bindings/net/can/rcar_can.txt28
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt31
-rw-r--r--Documentation/devicetree/bindings/spi/spi-uniphier.txt14
-rw-r--r--Documentation/i2c/busses/i2c-nvidia-gpu18
-rw-r--r--Documentation/input/event-codes.rst11
-rw-r--r--Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst26
-rw-r--r--Documentation/media/uapi/mediactl/media-request-ioc-queue.rst26
-rw-r--r--Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst26
-rw-r--r--Documentation/media/uapi/mediactl/request-api.rst26
-rw-r--r--Documentation/media/uapi/mediactl/request-func-close.rst26
-rw-r--r--Documentation/media/uapi/mediactl/request-func-ioctl.rst26
-rw-r--r--Documentation/media/uapi/mediactl/request-func-poll.rst26
-rw-r--r--Documentation/media/uapi/v4l/dev-meta.rst2
-rw-r--r--Documentation/media/uapi/v4l/vidioc-g-fmt.rst5
-rw-r--r--Documentation/networking/rxrpc.txt17
-rw-r--r--Documentation/userspace-api/spec_ctrl.rst9
-rw-r--r--Documentation/x86/boot.txt32
-rw-r--r--Documentation/x86/x86_64/mm.txt34
-rw-r--r--Documentation/x86/zero-page.txt2
-rw-r--r--MAINTAINERS208
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/include/asm/termios.h8
-rw-r--r--arch/alpha/include/uapi/asm/ioctls.h5
-rw-r--r--arch/alpha/include/uapi/asm/termbits.h17
-rw-r--r--arch/arc/Kconfig13
-rw-r--r--arch/arc/Makefile2
-rw-r--r--arch/arc/boot/dts/hsdk.dts15
-rw-r--r--arch/arc/configs/axs101_defconfig2
-rw-r--r--arch/arc/configs/axs103_defconfig1
-rw-r--r--arch/arc/configs/axs103_smp_defconfig1
-rw-r--r--arch/arc/configs/hsdk_defconfig4
-rw-r--r--arch/arc/configs/nps_defconfig2
-rw-r--r--arch/arc/configs/nsim_700_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/tb10x_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig1
-rw-r--r--arch/arc/include/asm/cache.h2
-rw-r--r--arch/arc/include/asm/io.h72
-rw-r--r--arch/arc/kernel/setup.c9
-rw-r--r--arch/arc/mm/cache.c20
-rw-r--r--arch/arc/mm/fault.c2
-rw-r--r--arch/arm/boot/dts/am3517-evm.dts2
-rw-r--r--arch/arm/boot/dts/am3517-som.dtsi2
-rw-r--r--arch/arm/boot/dts/imx51-zii-rdu1.dts6
-rw-r--r--arch/arm/boot/dts/imx53-ppd.dts2
-rw-r--r--arch/arm/boot/dts/imx6sll.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi7
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi2
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts2
-rw-r--r--arch/arm/boot/dts/rk3288-veyron.dtsi6
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi2
-rw-r--r--arch/arm/boot/dts/vf610m4-colibri.dts4
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/include/asm/cputype.h1
-rw-r--r--arch/arm/include/asm/pgtable-2level.h2
-rw-r--r--arch/arm/include/asm/proc-fns.h61
-rw-r--r--arch/arm/kernel/bugs.c4
-rw-r--r--arch/arm/kernel/ftrace.c17
-rw-r--r--arch/arm/kernel/head-common.S6
-rw-r--r--arch/arm/kernel/setup.c40
-rw-r--r--arch/arm/kernel/smp.c31
-rw-r--r--arch/arm/mach-davinci/da830.c4
-rw-r--r--arch/arm/mach-davinci/da850.c4
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c40
-rw-r--r--arch/arm/mach-davinci/dm355.c32
-rw-r--r--arch/arm/mach-davinci/dm365.c37
-rw-r--r--arch/arm/mach-davinci/dm644x.c22
-rw-r--r--arch/arm/mach-davinci/dm646x.c12
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c3
-rw-r--r--arch/arm/mach-omap2/display.c111
-rw-r--r--arch/arm/mach-omap2/prm44xx.c2
-rw-r--r--arch/arm/mm/proc-v7-bugs.c17
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/vfp/vfpmodule.c2
-rw-r--r--arch/arm64/Kconfig25
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980-condor.dts47
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi2
-rw-r--r--arch/arm64/include/asm/ftrace.h13
-rw-r--r--arch/arm64/include/asm/processor.h8
-rw-r--r--arch/arm64/include/asm/sysreg.h4
-rw-r--r--arch/arm64/include/asm/tlbflush.h4
-rw-r--r--arch/arm64/kernel/cpu_errata.c20
-rw-r--r--arch/arm64/kernel/cpufeature.c2
-rw-r--r--arch/arm64/kernel/ftrace.c15
-rw-r--r--arch/arm64/kernel/setup.c1
-rw-r--r--arch/arm64/mm/init.c2
-rw-r--r--arch/arm64/mm/mmu.c2
-rw-r--r--arch/arm64/net/bpf_jit_comp.c26
-rw-r--r--arch/ia64/include/asm/numa.h4
-rw-r--r--arch/ia64/kernel/acpi.c6
-rw-r--r--arch/ia64/mm/numa.c6
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h4
-rw-r--r--arch/microblaze/include/asm/pgtable.h2
-rw-r--r--arch/microblaze/kernel/ftrace.c15
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c2
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig1
-rw-r--r--arch/mips/include/asm/syscall.h2
-rw-r--r--arch/mips/kernel/ftrace.c14
-rw-r--r--arch/mips/kernel/setup.c1
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/loongson64/loongson-3/numa.c12
-rw-r--r--arch/mips/mm/dma-noncoherent.c2
-rw-r--r--arch/mips/ralink/mt7620.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c11
-rw-r--r--arch/nds32/include/asm/pgtable.h2
-rw-r--r--arch/nds32/kernel/ftrace.c18
-rw-r--r--arch/parisc/Makefile7
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/spinlock.h4
-rw-r--r--arch/parisc/kernel/ftrace.c17
-rw-r--r--arch/parisc/kernel/syscall.S12
-rw-r--r--arch/powerpc/include/asm/io.h20
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h2
-rw-r--r--arch/powerpc/include/asm/ptrace.h1
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c15
-rw-r--r--arch/powerpc/kvm/book3s_hv.c1
-rw-r--r--arch/powerpc/kvm/trace.h8
-rw-r--r--arch/powerpc/kvm/trace_booke.h9
-rw-r--r--arch/powerpc/kvm/trace_hv.h9
-rw-r--r--arch/powerpc/kvm/trace_pr.h9
-rw-r--r--arch/powerpc/mm/numa.c2
-rw-r--r--arch/powerpc/mm/slb.c35
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c57
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c64
-rw-r--r--arch/riscv/Makefile19
-rw-r--r--arch/riscv/boot/.gitignore2
-rw-r--r--arch/riscv/boot/Makefile33
-rw-r--r--arch/riscv/boot/install.sh60
-rw-r--r--arch/riscv/configs/defconfig1
-rw-r--r--arch/riscv/include/asm/module.h1
-rw-r--r--arch/riscv/include/asm/ptrace.h4
-rw-r--r--arch/riscv/include/asm/uaccess.h4
-rw-r--r--arch/riscv/include/asm/unistd.h5
-rw-r--r--arch/riscv/include/uapi/asm/unistd.h (renamed from arch/riscv/include/uapi/asm/syscalls.h)26
-rw-r--r--arch/riscv/kernel/cpu.c9
-rw-r--r--arch/riscv/kernel/ftrace.c14
-rw-r--r--arch/riscv/kernel/head.S10
-rw-r--r--arch/riscv/kernel/module.c12
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S2
-rw-r--r--arch/riscv/lib/Makefile2
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/boot/compressed/Makefile16
-rw-r--r--arch/s390/configs/debug_defconfig14
-rw-r--r--arch/s390/configs/performance_defconfig13
-rw-r--r--arch/s390/defconfig79
-rw-r--r--arch/s390/include/asm/mmu_context.h5
-rw-r--r--arch/s390/include/asm/pgalloc.h6
-rw-r--r--arch/s390/include/asm/pgtable.h18
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/thread_info.h2
-rw-r--r--arch/s390/include/asm/tlb.h6
-rw-r--r--arch/s390/kernel/entry.S6
-rw-r--r--arch/s390/kernel/ftrace.c13
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c4
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c33
-rw-r--r--arch/s390/kernel/vdso32/Makefile6
-rw-r--r--arch/s390/kernel/vdso64/Makefile6
-rw-r--r--arch/s390/kernel/vmlinux.lds.S4
-rw-r--r--arch/s390/mm/pgalloc.c2
-rw-r--r--arch/s390/numa/numa.c1
-rw-r--r--arch/sh/kernel/ftrace.c16
-rw-r--r--arch/sparc/kernel/ftrace.c11
-rw-r--r--arch/sparc/kernel/iommu.c3
-rw-r--r--arch/sparc/kernel/signal32.c1
-rw-r--r--arch/sparc/kernel/signal_32.c1
-rw-r--r--arch/sparc/kernel/signal_64.c1
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c97
-rw-r--r--arch/um/drivers/ubd_kern.c12
-rw-r--r--arch/x86/Kconfig13
-rw-r--r--arch/x86/Makefile9
-rw-r--r--arch/x86/boot/header.S6
-rw-r--r--arch/x86/events/core.c20
-rw-r--r--arch/x86/events/intel/core.c68
-rw-r--r--arch/x86/events/intel/uncore.h33
-rw-r--r--arch/x86/events/intel/uncore_snb.c121
-rw-r--r--arch/x86/events/perf_event.h13
-rw-r--r--arch/x86/include/asm/fpu/internal.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/mce.h2
-rw-r--r--arch/x86/include/asm/mshyperv.h2
-rw-r--r--arch/x86/include/asm/msr-index.h5
-rw-r--r--arch/x86/include/asm/nospec-branch.h26
-rw-r--r--arch/x86/include/asm/page_64_types.h12
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h4
-rw-r--r--arch/x86/include/asm/qspinlock.h13
-rw-r--r--arch/x86/include/asm/spec-ctrl.h20
-rw-r--r--arch/x86/include/asm/switch_to.h3
-rw-r--r--arch/x86/include/asm/thread_info.h20
-rw-r--r--arch/x86/include/asm/tlbflush.h8
-rw-r--r--arch/x86/include/asm/x86_init.h2
-rw-r--r--arch/x86/include/asm/xen/page.h35
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h7
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/cpu/bugs.c525
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c6
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c19
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c11
-rw-r--r--arch/x86/kernel/cpu/vmware.c2
-rw-r--r--arch/x86/kernel/fpu/signal.c4
-rw-r--r--arch/x86/kernel/ftrace.c15
-rw-r--r--arch/x86/kernel/head32.c1
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/ldt.c59
-rw-r--r--arch/x86/kernel/process.c101
-rw-r--r--arch/x86/kernel/process.h39
-rw-r--r--arch/x86/kernel/process_32.c10
-rw-r--r--arch/x86/kernel/process_64.c10
-rw-r--r--arch/x86/kernel/setup.c17
-rw-r--r--arch/x86/kernel/vsmp_64.c84
-rw-r--r--arch/x86/kvm/lapic.c7
-rw-r--r--arch/x86/kvm/mmu.c27
-rw-r--r--arch/x86/kvm/svm.c44
-rw-r--r--arch/x86/kvm/vmx.c98
-rw-r--r--arch/x86/kvm/x86.c10
-rw-r--r--arch/x86/mm/tlb.c115
-rw-r--r--arch/x86/xen/enlighten.c78
-rw-r--r--arch/x86/xen/mmu_pv.c6
-rw-r--r--arch/x86/xen/multicalls.c35
-rw-r--r--arch/x86/xen/p2m.c3
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--arch/x86/xen/spinlock.c21
-rw-r--r--arch/xtensa/include/asm/processor.h6
-rw-r--r--arch/xtensa/kernel/asm-offsets.c16
-rw-r--r--arch/xtensa/kernel/head.S7
-rw-r--r--arch/xtensa/kernel/process.c5
-rw-r--r--arch/xtensa/kernel/ptrace.c42
-rw-r--r--block/bio.c2
-rw-r--r--block/blk-core.c5
-rw-r--r--block/blk-lib.c26
-rw-r--r--block/blk-merge.c7
-rw-r--r--block/blk-mq.c26
-rw-r--r--block/blk.h12
-rw-r--r--block/bounce.c1
-rw-r--r--crypto/crypto_user_base.c18
-rw-r--r--crypto/crypto_user_stat.c21
-rw-r--r--crypto/simd.c5
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_platform.c1
-rw-r--r--drivers/acpi/acpica/exserial.c21
-rw-r--r--drivers/acpi/arm64/iort.c2
-rw-r--r--drivers/acpi/nfit/core.c19
-rw-r--r--drivers/acpi/nfit/mce.c8
-rw-r--r--drivers/android/binder.c21
-rw-r--r--drivers/android/binder_alloc.c16
-rw-r--r--drivers/android/binder_alloc.h3
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/sata_rcar.c6
-rw-r--r--drivers/atm/firestream.c4
-rw-r--r--drivers/base/devres.c10
-rw-r--r--drivers/block/floppy.c3
-rw-r--r--drivers/block/xen-blkfront.c1
-rw-r--r--drivers/clk/clk-fixed-factor.c1
-rw-r--r--drivers/clk/meson/axg.c13
-rw-r--r--drivers/clk/meson/gxbb.c12
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c2
-rw-r--r--drivers/clocksource/i8253.c14
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c7
-rw-r--r--drivers/cpufreq/ti-cpufreq.c26
-rw-r--r--drivers/cpuidle/cpuidle-arm.c40
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c31
-rw-r--r--drivers/dma-buf/udmabuf.c1
-rw-r--r--drivers/dma/at_hdmac.c10
-rw-r--r--drivers/firmware/efi/arm-init.c4
-rw-r--r--drivers/firmware/efi/arm-runtime.c2
-rw-r--r--drivers/firmware/efi/efi.c51
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c3
-rw-r--r--drivers/firmware/efi/libstub/fdt.c4
-rw-r--r--drivers/firmware/efi/memmap.c3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c2
-rw-r--r--drivers/fsi/Kconfig1
-rw-r--r--drivers/fsi/fsi-scom.c1
-rw-r--r--drivers/gnss/serial.c3
-rw-r--r--drivers/gnss/sirf.c3
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-mockup.c6
-rw-r--r--drivers/gpio/gpio-pxa.c4
-rw-r--r--drivers/gpio/gpiolib.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c43
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c84
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c3
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h4
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c25
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c83
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c21
-rw-r--r--drivers/gpu/drm/ast/ast_main.c3
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c39
-rw-r--r--drivers/gpu/drm/drm_auth.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c3
-rw-r--r--drivers/gpu/drm/drm_fourcc.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c116
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h10
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h36
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h20
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c17
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c18
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c103
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c8
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c70
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c44
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c38
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c16
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c93
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c27
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c1
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c19
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c22
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c37
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c27
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c21
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c15
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/hid/hid-alps.c18
-rw-r--r--drivers/hid/hid-asus.c3
-rw-r--r--drivers/hid/hid-hyperv.c2
-rw-r--r--drivers/hid/hid-ids.h11
-rw-r--r--drivers/hid/hid-input.c47
-rw-r--r--drivers/hid/hid-logitech-hidpp.c309
-rw-r--r--drivers/hid/hid-multitouch.c6
-rw-r--r--drivers/hid/hid-quirks.c4
-rw-r--r--drivers/hid/hid-sensor-custom.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c13
-rw-r--r--drivers/hid/hid-steam.c154
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c21
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c8
-rw-r--r--drivers/hid/uhid.c25
-rw-r--r--drivers/hid/usbhid/hiddev.c18
-rw-r--r--drivers/hv/channel.c8
-rw-r--r--drivers/hv/hv_kvp.c26
-rw-r--r--drivers/hwmon/hwmon.c8
-rw-r--r--drivers/hwmon/ibmpowernv.c7
-rw-r--r--drivers/hwmon/ina2xx.c6
-rw-r--r--drivers/hwmon/mlxreg-fan.c2
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c6
-rw-r--r--drivers/hwmon/w83795.c2
-rw-r--r--drivers/i2c/busses/Kconfig11
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c368
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c15
-rw-r--r--drivers/ide/ide-proc.c15
-rw-r--r--drivers/ide/pmac.c1
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c5
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c5
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c3
-rw-r--r--drivers/iio/light/hid-sensor-als.c8
-rw-r--r--drivers/iio/light/hid-sensor-prox.c8
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c8
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c12
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c8
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c8
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c3
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c6
-rw-r--r--drivers/infiniband/core/umem_odp.c20
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c128
-rw-r--r--drivers/infiniband/hw/mlx5/main.c29
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c22
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c7
-rw-r--r--drivers/input/joystick/xpad.c16
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c3
-rw-r--r--drivers/input/keyboard/matrix_keypad.c23
-rw-r--r--drivers/input/keyboard/omap4-keypad.c18
-rw-r--r--drivers/input/mouse/elan_i2c_core.c3
-rw-r--r--drivers/input/mouse/synaptics.c2
-rw-r--r--drivers/input/serio/hyperv-keyboard.c2
-rw-r--r--drivers/input/touchscreen/migor_ts.c15
-rw-r--r--drivers/input/touchscreen/st1232.c12
-rw-r--r--drivers/iommu/amd_iommu_init.c3
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/intel-svm.c2
-rw-r--r--drivers/iommu/ipmmu-vmsa.c3
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c27
-rw-r--r--drivers/media/cec/cec-adap.c49
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c106
-rw-r--r--drivers/media/i2c/tc358743.c1
-rw-r--r--drivers/media/media-request.c3
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c6
-rw-r--r--drivers/media/platform/omap3isp/isp.c3
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c5
-rw-r--r--drivers/media/platform/vim2m.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c43
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c8
-rw-r--r--drivers/misc/atmel-ssc.c2
-rw-r--r--drivers/misc/mic/scif/scif_rma.c2
-rw-r--r--drivers/misc/sgi-gru/grukdump.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c86
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c10
-rw-r--r--drivers/mtd/nand/bbt.c3
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c11
-rw-r--r--drivers/mtd/nand/raw/nand_base.c1
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c32
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c21
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c165
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/can/dev.c48
-rw-r--r--drivers/net/can/flexcan.c108
-rw-r--r--drivers/net/can/rcar/rcar_can.c5
-rw-r--r--drivers/net/can/rx-offload.c51
-rw-r--r--drivers/net/can/spi/hi311x.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c10
-rw-r--r--drivers/net/can/usb/ucan.c7
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c23
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c61
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c21
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h1
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c70
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c18
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c4
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/cortina/gemini.c2
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c10
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c74
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c86
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c4
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c44
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c51
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c69
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/fddi/defza.c7
-rw-r--r--drivers/net/fddi/defza.h3
-rw-r--r--drivers/net/phy/broadcom.c18
-rw-r--r--drivers/net/phy/mdio-gpio.c10
-rw-r--r--drivers/net/phy/mscc.c14
-rw-r--r--drivers/net/phy/phy_device.c8
-rw-r--r--drivers/net/phy/realtek.c2
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/ipheth.c10
-rw-r--r--drivers/net/usb/smsc95xx.c9
-rw-r--r--drivers/net/virtio_net.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c17
-rw-r--r--drivers/nvme/host/core.c12
-rw-r--r--drivers/nvme/host/fc.c75
-rw-r--r--drivers/nvme/host/multipath.c1
-rw-r--r--drivers/nvme/host/nvme.h3
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/target/core.c2
-rw-r--r--drivers/nvme/target/rdma.c19
-rw-r--r--drivers/nvmem/core.c10
-rw-r--r--drivers/of/device.c4
-rw-r--r--drivers/of/of_numa.c9
-rw-r--r--drivers/opp/of.c6
-rw-r--r--drivers/opp/ti-opp-supply.c6
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c10
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c1
-rw-r--r--drivers/pci/pci-acpi.c5
-rw-r--r--drivers/pci/pci.c24
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c20
-rw-r--r--drivers/phy/socionext/Kconfig3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c2
-rw-r--r--drivers/rtc/hctosys.c4
-rw-r--r--drivers/rtc/rtc-cmos.c16
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c2
-rw-r--r--drivers/rtc/rtc-pcf2127.c3
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c10
-rw-r--r--drivers/s390/crypto/ap_bus.c8
-rw-r--r--drivers/s390/crypto/ap_bus.h1
-rw-r--r--drivers/s390/crypto/ap_queue.c15
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c1
-rw-r--r--drivers/s390/net/ism_drv.c2
-rw-r--r--drivers/s390/net/qeth_core.h27
-rw-r--r--drivers/s390/net/qeth_core_main.c199
-rw-r--r--drivers/s390/net/qeth_core_mpc.h4
-rw-r--r--drivers/s390/net/qeth_l2_main.c39
-rw-r--r--drivers/s390/net/qeth_l3_main.c207
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/sbus/char/envctrl.c2
-rw-r--r--drivers/scsi/scsi_lib.c8
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c3
-rw-r--r--drivers/slimbus/slimbus.h6
-rw-r--r--drivers/spi/spi-mt65xx.c4
-rw-r--r--drivers/spi/spi-omap2-mcspi.c37
-rw-r--r--drivers/staging/comedi/comedi.h39
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c1
-rw-r--r--drivers/staging/media/sunxi/cedrus/TODO5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c24
-rw-r--r--drivers/staging/most/core.c2
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c3
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c7
-rw-r--r--drivers/thunderbolt/switch.c40
-rw-r--r--drivers/tty/serial/sh-sci.c8
-rw-r--r--drivers/tty/serial/suncore.c1
-rw-r--r--drivers/tty/tty_baudrate.c4
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/uio/uio.c7
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/core/quirks.c17
-rw-r--r--drivers/usb/dwc2/pci.c1
-rw-r--r--drivers/usb/dwc3/core.c1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c13
-rw-r--r--drivers/usb/gadget/function/f_fs.c26
-rw-r--r--drivers/usb/gadget/function/u_ether.c11
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c88
-rw-r--r--drivers/usb/host/xhci-histb.c6
-rw-r--r--drivers/usb/host/xhci-hub.c66
-rw-r--r--drivers/usb/host/xhci-mtk.c6
-rw-r--r--drivers/usb/host/xhci-pci.c6
-rw-r--r--drivers/usb/host/xhci-plat.c6
-rw-r--r--drivers/usb/host/xhci-ring.c45
-rw-r--r--drivers/usb/host/xhci-tegra.c1
-rw-r--r--drivers/usb/host/xhci.c2
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/appledisplay.c1
-rw-r--r--drivers/usb/storage/unusual_realtek.h10
-rw-r--r--drivers/usb/typec/ucsi/Kconfig10
-rw-r--r--drivers/usb/typec/ucsi/Makefile2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c307
-rw-r--r--drivers/xen/balloon.c65
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/privcmd-buf.c22
-rw-r--r--drivers/xen/pvcalls-front.c4
-rw-r--r--drivers/xen/xlate_mmu.c1
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/fs_probe.c39
-rw-r--r--fs/afs/inode.c18
-rw-r--r--fs/afs/internal.h9
-rw-r--r--fs/afs/misc.c52
-rw-r--r--fs/afs/rotate.c53
-rw-r--r--fs/afs/rxrpc.c11
-rw-r--r--fs/afs/vl_probe.c45
-rw-r--r--fs/afs/vl_rotate.c50
-rw-r--r--fs/aio.c1
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/disk-io.c74
-rw-r--r--fs/btrfs/file.c24
-rw-r--r--fs/btrfs/free-space-cache.c22
-rw-r--r--fs/btrfs/inode.c37
-rw-r--r--fs/btrfs/ioctl.c14
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/relocation.c1
-rw-r--r--fs/btrfs/send.c11
-rw-r--r--fs/btrfs/super.c7
-rw-r--r--fs/btrfs/tree-checker.c10
-rw-r--r--fs/btrfs/tree-log.c17
-rw-r--r--fs/cachefiles/namei.c8
-rw-r--r--fs/cachefiles/rdwr.c9
-rw-r--r--fs/cachefiles/xattr.c3
-rw-r--r--fs/ceph/file.c11
-rw-r--r--fs/ceph/mds_client.c12
-rw-r--r--fs/ceph/quota.c3
-rw-r--r--fs/dax.c60
-rw-r--r--fs/direct-io.c4
-rw-r--r--fs/exportfs/expfs.c3
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext2/xattr.c2
-rw-r--r--fs/ext4/inode.c5
-rw-r--r--fs/ext4/namei.c5
-rw-r--r--fs/ext4/resize.c28
-rw-r--r--fs/ext4/super.c17
-rw-r--r--fs/ext4/xattr.c27
-rw-r--r--fs/fscache/object.c3
-rw-r--r--fs/fuse/dev.c16
-rw-r--r--fs/fuse/file.c4
-rw-r--r--fs/gfs2/bmap.c54
-rw-r--r--fs/gfs2/rgrp.c3
-rw-r--r--fs/hfs/btree.c3
-rw-r--r--fs/hfsplus/btree.c3
-rw-r--r--fs/inode.c7
-rw-r--r--fs/iomap.c53
-rw-r--r--fs/namespace.c28
-rw-r--r--fs/nfs/callback_proc.c26
-rw-r--r--fs/nfs/delegation.c11
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c21
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h4
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c19
-rw-r--r--fs/nfs/nfs42proc.c19
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4state.c26
-rw-r--r--fs/nfsd/nfs4proc.c3
-rw-r--r--fs/nilfs2/btnode.c4
-rw-r--r--fs/notify/fanotify/fanotify.c10
-rw-r--r--fs/notify/fsnotify.c7
-rw-r--r--fs/ocfs2/aops.c12
-rw-r--r--fs/ocfs2/cluster/masklog.h9
-rw-r--r--fs/ocfs2/export.c2
-rw-r--r--fs/ocfs2/move_extents.c47
-rw-r--r--fs/pstore/ram.c15
-rw-r--r--fs/read_write.c15
-rw-r--r--fs/sysv/inode.c2
-rw-r--r--fs/udf/super.c16
-rw-r--r--fs/udf/unicode.c14
-rw-r--r--fs/userfaultfd.c15
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c11
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c5
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c11
-rw-r--r--fs/xfs/xfs_bmap_util.c10
-rw-r--r--fs/xfs/xfs_bmap_util.h3
-rw-r--r--fs/xfs/xfs_buf_item.c28
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_ioctl.c2
-rw-r--r--fs/xfs/xfs_message.c2
-rw-r--r--fs/xfs/xfs_reflink.c18
-rw-r--r--fs/xfs/xfs_trace.h5
-rw-r--r--include/asm-generic/4level-fixup.h2
-rw-r--r--include/asm-generic/5level-fixup.h2
-rw-r--r--include/asm-generic/pgtable-nop4d-hack.h2
-rw-r--r--include/asm-generic/pgtable-nop4d.h2
-rw-r--r--include/asm-generic/pgtable-nopmd.h2
-rw-r--r--include/asm-generic/pgtable-nopud.h2
-rw-r--r--include/asm-generic/pgtable.h16
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/rx-offload.h7
-rw-r--r--include/linux/ceph/ceph_features.h8
-rw-r--r--include/linux/compiler-gcc.h12
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/compiler_attributes.h14
-rw-r--r--include/linux/compiler_types.h4
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/efi.h7
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/fscache-cache.h3
-rw-r--r--include/linux/ftrace.h4
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/hid.h32
-rw-r--r--include/linux/i8253.h1
-rw-r--r--include/linux/mempolicy.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h12
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mtd/nand.h7
-rw-r--r--include/linux/net_dim.h2
-rw-r--r--include/linux/netdevice.h20
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h13
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/linux/platform_data/gpio-davinci.h2
-rw-r--r--include/linux/psi.h3
-rw-r--r--include/linux/pstore.h5
-rw-r--r--include/linux/ptrace.h17
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sched/smt.h20
-rw-r--r--include/linux/skbuff.h18
-rw-r--r--include/linux/tcp.h1
-rw-r--r--include/linux/tracehook.h4
-rw-r--r--include/linux/tracepoint.h6
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/xarray.h267
-rw-r--r--include/media/media-request.h2
-rw-r--r--include/media/v4l2-mem2mem.h2
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/af_rxrpc.h3
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h39
-rw-r--r--include/net/sctp/sctp.h12
-rw-r--r--include/sound/soc.h2
-rw-r--r--include/trace/events/kyber.h8
-rw-r--r--include/trace/events/rxrpc.h2
-rw-r--r--include/trace/events/sched.h12
-rw-r--r--include/uapi/linux/input-event-codes.h10
-rw-r--r--include/uapi/linux/kfd_ioctl.h18
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h4
-rw-r--r--include/uapi/linux/netfilter_bridge.h4
-rw-r--r--include/uapi/linux/prctl.h1
-rw-r--r--include/uapi/linux/sctp.h3
-rw-r--r--include/uapi/linux/v4l2-controls.h5
-rw-r--r--include/xen/balloon.h5
-rw-r--r--include/xen/xen-ops.h12
-rw-r--r--init/Kconfig9
-rw-r--r--init/initramfs.c22
-rw-r--r--kernel/bpf/core.c38
-rw-r--r--kernel/bpf/local_storage.c3
-rw-r--r--kernel/bpf/queue_stack_maps.c16
-rw-r--r--kernel/bpf/syscall.c35
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cpu.c15
-rw-r--r--kernel/debug/kdb/kdb_bt.c4
-rw-r--r--kernel/debug/kdb/kdb_io.c15
-rw-r--r--kernel/debug/kdb/kdb_keyboard.c4
-rw-r--r--kernel/debug/kdb/kdb_main.c35
-rw-r--r--kernel/debug/kdb/kdb_private.h2
-rw-r--r--kernel/debug/kdb/kdb_support.c28
-rw-r--r--kernel/dma/swiotlb.c3
-rw-r--r--kernel/events/uprobes.c12
-rw-r--r--kernel/kcov.c4
-rw-r--r--kernel/ptrace.c10
-rw-r--r--kernel/resource.c19
-rw-r--r--kernel/sched/core.c24
-rw-r--r--kernel/sched/fair.c66
-rw-r--r--kernel/sched/psi.c71
-rw-r--r--kernel/sched/sched.h4
-rw-r--r--kernel/sched/stats.h8
-rw-r--r--kernel/stackleak.c4
-rw-r--r--kernel/time/posix-cpu-timers.c3
-rw-r--r--kernel/trace/bpf_trace.c8
-rw-r--r--kernel/trace/ftrace.c7
-rw-r--r--kernel/trace/trace.h57
-rw-r--r--kernel/trace/trace_functions_graph.c53
-rw-r--r--kernel/trace/trace_irqsoff.c2
-rw-r--r--kernel/trace/trace_probe.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/user_namespace.c12
-rw-r--r--lib/debugobjects.c5
-rw-r--r--lib/iov_iter.c38
-rw-r--r--lib/raid6/test/Makefile4
-rw-r--r--lib/test_firmware.c1
-rw-r--r--lib/test_hexdump.c2
-rw-r--r--lib/test_kmod.c1
-rw-r--r--lib/test_xarray.c50
-rw-r--r--lib/ubsan.c3
-rw-r--r--lib/xarray.c139
-rw-r--r--mm/gup.c13
-rw-r--r--mm/huge_memory.c85
-rw-r--r--mm/hugetlb.c25
-rw-r--r--mm/khugepaged.c140
-rw-r--r--mm/memblock.c2
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/page_alloc.c32
-rw-r--r--mm/rmap.c13
-rw-r--r--mm/shmem.c47
-rw-r--r--mm/swapfile.c6
-rw-r--r--mm/truncate.c8
-rw-r--r--mm/userfaultfd.c62
-rw-r--r--mm/vmstat.c7
-rw-r--r--mm/z3fold.c101
-rw-r--r--net/batman-adv/bat_v_elp.c6
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/bridge/br_private.h7
-rw-r--r--net/bridge/br_vlan.c3
-rw-r--r--net/can/raw.c15
-rw-r--r--net/ceph/messenger.c12
-rw-r--r--net/core/dev.c13
-rw-r--r--net/core/filter.c5
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/netpoll.c3
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/core/sock.c1
-rw-r--r--net/ipv4/inet_fragment.c29
-rw-r--r--net/ipv4/ip_fragment.c12
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_masquerade_ipv4.c38
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c4
-rw-r--r--net/ipv4/tcp_input.c31
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/tcp_timer.c12
-rw-r--r--net/ipv6/addrconf.c19
-rw-r--r--net/ipv6/af_inet6.c5
-rw-r--r--net/ipv6/anycast.c80
-rw-r--r--net/ipv6/ip6_fib.c4
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/netfilter.c3
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c13
-rw-r--r--net/ipv6/netfilter/nf_nat_masquerade_ipv6.c49
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c4
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/l2tp/l2tp_core.c9
-rw-r--r--net/netfilter/ipset/ip_set_core.c43
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c8
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/nf_conncount.c44
-rw-r--r--net/netfilter/nf_conntrack_core.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c14
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c15
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c11
-rw-r--r--net/netfilter/nf_tables_api.c46
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c58
-rw-r--r--net/netfilter/nft_compat.c24
-rw-r--r--net/netfilter/nft_flow_offload.c5
-rw-r--r--net/netfilter/nft_numgen.c127
-rw-r--r--net/netfilter/nft_osf.c2
-rw-r--r--net/netfilter/xt_IDLETIMER.c20
-rw-r--r--net/netfilter/xt_RATEEST.c10
-rw-r--r--net/netfilter/xt_hashlimit.c9
-rw-r--r--net/openvswitch/conntrack.c3
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/rxrpc/af_rxrpc.c27
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/call_event.c18
-rw-r--r--net/rxrpc/output.c35
-rw-r--r--net/sched/act_mirred.c3
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c36
-rw-r--r--net/sched/cls_flower.c14
-rw-r--r--net/sched/sch_fq.c31
-rw-r--r--net/sched/sch_netem.c9
-rw-r--r--net/sctp/output.c25
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/socket.c26
-rw-r--r--net/sctp/stream.c1
-rw-r--r--net/smc/af_smc.c11
-rw-r--r--net/smc/smc_cdc.c26
-rw-r--r--net/smc/smc_cdc.h60
-rw-r--r--net/smc/smc_core.c20
-rw-r--r--net/smc/smc_core.h5
-rw-r--r--net/smc/smc_ism.c43
-rw-r--r--net/smc/smc_ism.h1
-rw-r--r--net/smc/smc_wr.c4
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth_generic.c8
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c61
-rw-r--r--net/sunrpc/xdr.c7
-rw-r--r--net/tipc/discover.c19
-rw-r--r--net/tipc/link.c11
-rw-r--r--net/tipc/net.c45
-rw-r--r--net/tipc/net.h2
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/socket.c15
-rw-r--r--scripts/Makefile.build2
-rwxr-xr-xscripts/faddr2line2
-rwxr-xr-xscripts/kconfig/merge_config.sh7
-rwxr-xr-xscripts/package/builddeb6
-rwxr-xr-xscripts/package/mkdebian7
-rwxr-xr-xscripts/package/mkspec11
-rwxr-xr-xscripts/setlocalversion2
-rwxr-xr-xscripts/spdxcheck.py1
-rw-r--r--scripts/unifdef.c4
-rw-r--r--security/integrity/digsig_asymmetric.c1
-rw-r--r--security/selinux/hooks.c3
-rw-r--r--security/selinux/nlmsgtab.c13
-rw-r--r--security/selinux/ss/mls.c10
-rw-r--r--sound/core/control.c80
-rw-r--r--sound/core/oss/pcm_oss.c6
-rw-r--r--sound/core/oss/pcm_plugin.c6
-rw-r--r--sound/isa/wss/wss_lib.c2
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c5
-rw-r--r--sound/pci/hda/patch_realtek.c37
-rw-r--r--sound/pci/hda/thinkpad_helper.c4
-rw-r--r--sound/soc/codecs/hdac_hdmi.c11
-rw-r--r--sound/soc/codecs/pcm186x.h2
-rw-r--r--sound/soc/codecs/pcm3060.c12
-rw-r--r--sound/soc/codecs/wm_adsp.c37
-rw-r--r--sound/soc/intel/Kconfig26
-rw-r--r--sound/soc/intel/boards/Kconfig24
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c32
-rw-r--r--sound/soc/intel/skylake/skl.c32
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c67
-rw-r--r--sound/soc/omap/omap-dmic.c9
-rw-r--r--sound/soc/omap/omap-mcbsp.c6
-rw-r--r--sound/soc/omap/omap-mcpdm.c43
-rw-r--r--sound/soc/qcom/common.c9
-rw-r--r--sound/soc/qcom/qdsp6/q6afe-dai.c208
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c16
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c33
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c19
-rw-r--r--sound/soc/rockchip/rockchip_pcm.c1
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/soc-acpi.c10
-rw-r--r--sound/soc/soc-core.c1
-rw-r--r--sound/soc/stm/stm32_sai_sub.c2
-rw-r--r--sound/soc/sunxi/Kconfig2
-rw-r--r--sound/soc/sunxi/sun8i-codec.c12
-rw-r--r--sound/sparc/cs4231.c8
-rw-r--r--sound/usb/quirks-table.h10
-rw-r--r--tools/arch/arm64/include/asm/barrier.h133
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst11
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst9
-rw-r--r--tools/bpf/bpftool/common.c17
-rw-r--r--tools/bpf/bpftool/main.h2
-rw-r--r--tools/bpf/bpftool/prog.c13
-rw-r--r--tools/build/Makefile.feature1
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/build/feature/test-get_current_dir_name.c10
-rw-r--r--tools/include/uapi/asm-generic/ioctls.h2
-rw-r--r--tools/include/uapi/drm/i915_drm.h22
-rw-r--r--tools/include/uapi/linux/pkt_cls.h612
-rw-r--r--tools/include/uapi/linux/prctl.h1
-rw-r--r--tools/include/uapi/linux/tc_act/tc_bpf.h37
-rw-r--r--tools/objtool/elf.c19
-rw-r--r--tools/perf/Documentation/perf-list.txt1
-rw-r--r--tools/perf/Makefile.config5
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/builtin-record.c7
-rw-r--r--tools/perf/builtin-stat.c28
-rw-r--r--tools/perf/builtin-top.c3
-rw-r--r--tools/perf/builtin-trace.c34
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c131
-rw-r--r--tools/perf/jvmti/jvmti_agent.c49
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py493
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling1
-rw-r--r--tools/perf/trace/beauty/ioctl.c1
-rw-r--r--tools/perf/util/Build1
-rw-r--r--tools/perf/util/evlist.c27
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c3
-rw-r--r--tools/perf/util/get_current_dir_name.c18
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.c5
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.h1
-rw-r--r--tools/perf/util/intel-pt.c16
-rw-r--r--tools/perf/util/namespaces.c17
-rw-r--r--tools/perf/util/namespaces.h1
-rw-r--r--tools/perf/util/pmu.c2
-rw-r--r--tools/perf/util/util.h4
-rw-r--r--tools/power/cpupower/Makefile12
-rw-r--r--tools/power/cpupower/bench/Makefile2
-rw-r--r--tools/power/cpupower/debug/x86_64/Makefile4
-rw-r--r--tools/power/cpupower/lib/cpufreq.c2
-rw-r--r--tools/power/cpupower/lib/cpuidle.c2
-rw-r--r--tools/power/cpupower/lib/cpupower.c4
-rw-r--r--tools/power/cpupower/lib/cpupower_intern.h2
-rw-r--r--tools/testing/nvdimm/test/nfit.c8
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c5
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c19
-rw-r--r--tools/testing/selftests/netfilter/Makefile6
-rw-r--r--tools/testing/selftests/netfilter/config2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_trans_stress.sh78
-rw-r--r--tools/testing/selftests/powerpc/mm/wild_bctr.c21
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c9
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py18
1124 files changed, 13115 insertions, 6773 deletions
diff --git a/.mailmap b/.mailmap
index a76be45fef6c..28fecafa6506 100644
--- a/.mailmap
+++ b/.mailmap
@@ -159,6 +159,7 @@ Peter Oruba <peter@oruba.de>
159Peter Oruba <peter.oruba@amd.com> 159Peter Oruba <peter.oruba@amd.com>
160Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com> 160Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
161Praveen BP <praveenbp@ti.com> 161Praveen BP <praveenbp@ti.com>
162Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
162Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com> 163Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
163Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net> 164Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
164Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com> 165Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
diff --git a/CREDITS b/CREDITS
index 5befd2d714d0..c9273393fe14 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2138,6 +2138,10 @@ E: paul@laufernet.com
2138D: Soundblaster driver fixes, ISAPnP quirk 2138D: Soundblaster driver fixes, ISAPnP quirk
2139S: California, USA 2139S: California, USA
2140 2140
2141N: Jarkko Lavinen
2142E: jarkko.lavinen@nokia.com
2143D: OMAP MMC support
2144
2141N: Jonathan Layes 2145N: Jonathan Layes
2142D: ARPD support 2146D: ARPD support
2143 2147
@@ -2200,6 +2204,10 @@ S: Post Office Box 371
2200S: North Little Rock, Arkansas 72115 2204S: North Little Rock, Arkansas 72115
2201S: USA 2205S: USA
2202 2206
2207N: Christopher Li
2208E: sparse@chrisli.org
2209D: Sparse maintainer 2009 - 2018
2210
2203N: Stephan Linz 2211N: Stephan Linz
2204E: linz@mazet.de 2212E: linz@mazet.de
2205E: Stephan.Linz@gmx.de 2213E: Stephan.Linz@gmx.de
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
index fb3d1e03b881..1e5d172e0646 100644
--- a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
@@ -37,8 +37,8 @@ Description:
37 0-| / \/ \/ 37 0-| / \/ \/
38 +---0----1----2----3----4----5----6------------> time (s) 38 +---0----1----2----3----4----5----6------------> time (s)
39 39
40 2. To make the LED go instantly from one brigntess value to another, 40 2. To make the LED go instantly from one brightness value to another,
41 we should use use zero-time lengths (the brightness must be same as 41 we should use zero-time lengths (the brightness must be same as
42 the previous tuple's). So the format should be: 42 the previous tuple's). So the format should be:
43 "brightness_1 duration_1 brightness_1 0 brightness_2 duration_2 43 "brightness_1 duration_1 brightness_1 0 brightness_2 duration_2
44 brightness_2 0 ...". For example: 44 brightness_2 0 ...". For example:
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 81d1d5a74728..aefd358a5ca3 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -856,7 +856,8 @@
856 causing system reset or hang due to sending 856 causing system reset or hang due to sending
857 INIT from AP to BSP. 857 INIT from AP to BSP.
858 858
859 disable_counter_freezing [HW] 859 perf_v4_pmi= [X86,INTEL]
860 Format: <bool>
860 Disable Intel PMU counter freezing feature. 861 Disable Intel PMU counter freezing feature.
861 The feature only exists starting from 862 The feature only exists starting from
862 Arch Perfmon v4 (Skylake and newer). 863 Arch Perfmon v4 (Skylake and newer).
@@ -3504,6 +3505,10 @@
3504 before loading. 3505 before loading.
3505 See Documentation/blockdev/ramdisk.txt. 3506 See Documentation/blockdev/ramdisk.txt.
3506 3507
3508 psi= [KNL] Enable or disable pressure stall information
3509 tracking.
3510 Format: <bool>
3511
3507 psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to 3512 psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
3508 probe for; one of (bare|imps|exps|lifebook|any). 3513 probe for; one of (bare|imps|exps|lifebook|any).
3509 psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports 3514 psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports
@@ -4194,9 +4199,13 @@
4194 4199
4195 spectre_v2= [X86] Control mitigation of Spectre variant 2 4200 spectre_v2= [X86] Control mitigation of Spectre variant 2
4196 (indirect branch speculation) vulnerability. 4201 (indirect branch speculation) vulnerability.
4202 The default operation protects the kernel from
4203 user space attacks.
4197 4204
4198 on - unconditionally enable 4205 on - unconditionally enable, implies
4199 off - unconditionally disable 4206 spectre_v2_user=on
4207 off - unconditionally disable, implies
4208 spectre_v2_user=off
4200 auto - kernel detects whether your CPU model is 4209 auto - kernel detects whether your CPU model is
4201 vulnerable 4210 vulnerable
4202 4211
@@ -4206,6 +4215,12 @@
4206 CONFIG_RETPOLINE configuration option, and the 4215 CONFIG_RETPOLINE configuration option, and the
4207 compiler with which the kernel was built. 4216 compiler with which the kernel was built.
4208 4217
4218 Selecting 'on' will also enable the mitigation
4219 against user space to user space task attacks.
4220
4221 Selecting 'off' will disable both the kernel and
4222 the user space protections.
4223
4209 Specific mitigations can also be selected manually: 4224 Specific mitigations can also be selected manually:
4210 4225
4211 retpoline - replace indirect branches 4226 retpoline - replace indirect branches
@@ -4215,6 +4230,48 @@
4215 Not specifying this option is equivalent to 4230 Not specifying this option is equivalent to
4216 spectre_v2=auto. 4231 spectre_v2=auto.
4217 4232
4233 spectre_v2_user=
4234 [X86] Control mitigation of Spectre variant 2
4235 (indirect branch speculation) vulnerability between
4236 user space tasks
4237
4238 on - Unconditionally enable mitigations. Is
4239 enforced by spectre_v2=on
4240
4241 off - Unconditionally disable mitigations. Is
4242 enforced by spectre_v2=off
4243
4244 prctl - Indirect branch speculation is enabled,
4245 but mitigation can be enabled via prctl
4246 per thread. The mitigation control state
4247 is inherited on fork.
4248
4249 prctl,ibpb
4250 - Like "prctl" above, but only STIBP is
4251 controlled per thread. IBPB is issued
4252 always when switching between different user
4253 space processes.
4254
4255 seccomp
4256 - Same as "prctl" above, but all seccomp
4257 threads will enable the mitigation unless
4258 they explicitly opt out.
4259
4260 seccomp,ibpb
4261 - Like "seccomp" above, but only STIBP is
4262 controlled per thread. IBPB is issued
4263 always when switching between different
4264 user space processes.
4265
4266 auto - Kernel selects the mitigation depending on
4267 the available CPU features and vulnerability.
4268
4269 Default mitigation:
4270 If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
4271
4272 Not specifying this option is equivalent to
4273 spectre_v2_user=auto.
4274
4218 spec_store_bypass_disable= 4275 spec_store_bypass_disable=
4219 [HW] Control Speculative Store Bypass (SSB) Disable mitigation 4276 [HW] Control Speculative Store Bypass (SSB) Disable mitigation
4220 (Speculative Store Bypass vulnerability) 4277 (Speculative Store Bypass vulnerability)
@@ -4713,6 +4770,8 @@
4713 prevent spurious wakeup); 4770 prevent spurious wakeup);
4714 n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a 4771 n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a
4715 pause after every control message); 4772 pause after every control message);
4773 o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
4774 delay after resetting its port);
4716 Example: quirks=0781:5580:bk,0a5c:5834:gij 4775 Example: quirks=0781:5580:bk,0a5c:5834:gij
4717 4776
4718 usbhid.mousepoll= 4777 usbhid.mousepoll=
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 47153e64dfb5..7eca9026a9ed 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -150,7 +150,7 @@ data structures necessary to handle the given policy and, possibly, to add
150a governor ``sysfs`` interface to it. Next, the governor is started by 150a governor ``sysfs`` interface to it. Next, the governor is started by
151invoking its ``->start()`` callback. 151invoking its ``->start()`` callback.
152 152
153That callback it expected to register per-CPU utilization update callbacks for 153That callback is expected to register per-CPU utilization update callbacks for
154all of the online CPUs belonging to the given policy with the CPU scheduler. 154all of the online CPUs belonging to the given policy with the CPU scheduler.
155The utilization update callbacks will be invoked by the CPU scheduler on 155The utilization update callbacks will be invoked by the CPU scheduler on
156important events, like task enqueue and dequeue, on every iteration of the 156important events, like task enqueue and dequeue, on every iteration of the
diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst
index 164bf71149fd..30187d49dc2c 100644
--- a/Documentation/admin-guide/security-bugs.rst
+++ b/Documentation/admin-guide/security-bugs.rst
@@ -32,16 +32,17 @@ Disclosure and embargoed information
32The security list is not a disclosure channel. For that, see Coordination 32The security list is not a disclosure channel. For that, see Coordination
33below. 33below.
34 34
35Once a robust fix has been developed, our preference is to release the 35Once a robust fix has been developed, the release process starts. Fixes
36fix in a timely fashion, treating it no differently than any of the other 36for publicly known bugs are released immediately.
37thousands of changes and fixes the Linux kernel project releases every 37
38month. 38Although our preference is to release fixes for publicly undisclosed bugs
39 39as soon as they become available, this may be postponed at the request of
40However, at the request of the reporter, we will postpone releasing the 40the reporter or an affected party for up to 7 calendar days from the start
41fix for up to 5 business days after the date of the report or after the 41of the release process, with an exceptional extension to 14 calendar days
42embargo has lifted; whichever comes first. The only exception to that 42if it is agreed that the criticality of the bug requires more time. The
43rule is if the bug is publicly known, in which case the preference is to 43only valid reason for deferring the publication of a fix is to accommodate
44release the fix as soon as it's available. 44the logistics of QA and large scale rollouts which require release
45coordination.
45 46
46Whilst embargoed information may be shared with trusted individuals in 47Whilst embargoed information may be shared with trusted individuals in
47order to develop a fix, such information will not be published alongside 48order to develop a fix, such information will not be published alongside
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 76ccded8b74c..8f9577621144 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -57,6 +57,7 @@ stable kernels.
57| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | 57| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
58| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 | 58| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
59| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 | 59| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
60| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
60| ARM | MMU-500 | #841119,#826419 | N/A | 61| ARM | MMU-500 | #841119,#826419 | N/A |
61| | | | | 62| | | | |
62| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | 63| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index a4e705108f42..dbe96cb5558e 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -74,7 +74,8 @@ using :c:func:`xa_load`. xa_store will overwrite any entry with the
74new entry and return the previous entry stored at that index. You can 74new entry and return the previous entry stored at that index. You can
75use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a 75use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a
76``NULL`` entry. There is no difference between an entry that has never 76``NULL`` entry. There is no difference between an entry that has never
77been stored to and one that has most recently had ``NULL`` stored to it. 77been stored to, one that has been erased and one that has most recently
78had ``NULL`` stored to it.
78 79
79You can conditionally replace an entry at an index by using 80You can conditionally replace an entry at an index by using
80:c:func:`xa_cmpxchg`. Like :c:func:`cmpxchg`, it will only succeed if 81:c:func:`xa_cmpxchg`. Like :c:func:`cmpxchg`, it will only succeed if
@@ -105,23 +106,44 @@ may result in the entry being marked at some, but not all of the other
105indices. Storing into one index may result in the entry retrieved by 106indices. Storing into one index may result in the entry retrieved by
106some, but not all of the other indices changing. 107some, but not all of the other indices changing.
107 108
109Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
110will not need to allocate memory. The :c:func:`xa_reserve` function
111will store a reserved entry at the indicated index. Users of the normal
112API will see this entry as containing ``NULL``. If you do not need to
113use the reserved entry, you can call :c:func:`xa_release` to remove the
114unused entry. If another user has stored to the entry in the meantime,
115:c:func:`xa_release` will do nothing; if instead you want the entry to
116become ``NULL``, you should use :c:func:`xa_erase`.
117
118If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
119will return ``true``.
120
108Finally, you can remove all entries from an XArray by calling 121Finally, you can remove all entries from an XArray by calling
109:c:func:`xa_destroy`. If the XArray entries are pointers, you may wish 122:c:func:`xa_destroy`. If the XArray entries are pointers, you may wish
110to free the entries first. You can do this by iterating over all present 123to free the entries first. You can do this by iterating over all present
111entries in the XArray using the :c:func:`xa_for_each` iterator. 124entries in the XArray using the :c:func:`xa_for_each` iterator.
112 125
113ID assignment 126Allocating XArrays
114------------- 127------------------
128
129If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or
130initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
131the XArray changes to track whether entries are in use or not.
115 132
116You can call :c:func:`xa_alloc` to store the entry at any unused index 133You can call :c:func:`xa_alloc` to store the entry at any unused index
117in the XArray. If you need to modify the array from interrupt context, 134in the XArray. If you need to modify the array from interrupt context,
118you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable 135you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable
119interrupts while allocating the ID. Unlike :c:func:`xa_store`, allocating 136interrupts while allocating the ID.
120a ``NULL`` pointer does not delete an entry. Instead it reserves an 137
121entry like :c:func:`xa_reserve` and you can release it using either 138Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert`
122:c:func:`xa_erase` or :c:func:`xa_release`. To use ID assignment, the 139will mark the entry as being allocated. Unlike a normal XArray, storing
123XArray must be defined with :c:func:`DEFINE_XARRAY_ALLOC`, or initialised 140``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`.
124by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`, 141To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if
142you only want to free the entry if it's ``NULL``).
143
144You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
145is used to track whether an entry is free or not. The other marks are
146available for your use.
125 147
126Memory allocation 148Memory allocation
127----------------- 149-----------------
@@ -158,6 +180,8 @@ Takes RCU read lock:
158 180
159Takes xa_lock internally: 181Takes xa_lock internally:
160 * :c:func:`xa_store` 182 * :c:func:`xa_store`
183 * :c:func:`xa_store_bh`
184 * :c:func:`xa_store_irq`
161 * :c:func:`xa_insert` 185 * :c:func:`xa_insert`
162 * :c:func:`xa_erase` 186 * :c:func:`xa_erase`
163 * :c:func:`xa_erase_bh` 187 * :c:func:`xa_erase_bh`
@@ -167,6 +191,9 @@ Takes xa_lock internally:
167 * :c:func:`xa_alloc` 191 * :c:func:`xa_alloc`
168 * :c:func:`xa_alloc_bh` 192 * :c:func:`xa_alloc_bh`
169 * :c:func:`xa_alloc_irq` 193 * :c:func:`xa_alloc_irq`
194 * :c:func:`xa_reserve`
195 * :c:func:`xa_reserve_bh`
196 * :c:func:`xa_reserve_irq`
170 * :c:func:`xa_destroy` 197 * :c:func:`xa_destroy`
171 * :c:func:`xa_set_mark` 198 * :c:func:`xa_set_mark`
172 * :c:func:`xa_clear_mark` 199 * :c:func:`xa_clear_mark`
@@ -177,6 +204,7 @@ Assumes xa_lock held on entry:
177 * :c:func:`__xa_erase` 204 * :c:func:`__xa_erase`
178 * :c:func:`__xa_cmpxchg` 205 * :c:func:`__xa_cmpxchg`
179 * :c:func:`__xa_alloc` 206 * :c:func:`__xa_alloc`
207 * :c:func:`__xa_reserve`
180 * :c:func:`__xa_set_mark` 208 * :c:func:`__xa_set_mark`
181 * :c:func:`__xa_clear_mark` 209 * :c:func:`__xa_clear_mark`
182 210
@@ -234,7 +262,8 @@ Sharing the XArray with interrupt context is also possible, either
234using :c:func:`xa_lock_irqsave` in both the interrupt handler and process 262using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
235context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock` 263context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
236in the interrupt handler. Some of the more common patterns have helper 264in the interrupt handler. Some of the more common patterns have helper
237functions such as :c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`. 265functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
266:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
238 267
239Sometimes you need to protect access to the XArray with a mutex because 268Sometimes you need to protect access to the XArray with a mutex because
240that lock sits above another mutex in the locking hierarchy. That does 269that lock sits above another mutex in the locking hierarchy. That does
@@ -322,7 +351,8 @@ to :c:func:`xas_retry`, and retry the operation if it returns ``true``.
322 - :c:func:`xa_is_zero` 351 - :c:func:`xa_is_zero`
323 - Zero entries appear as ``NULL`` through the Normal API, but occupy 352 - Zero entries appear as ``NULL`` through the Normal API, but occupy
324 an entry in the XArray which can be used to reserve the index for 353 an entry in the XArray which can be used to reserve the index for
325 future use. 354 future use. This is used by allocating XArrays for allocated entries
355 which are ``NULL``.
326 356
327Other internal entries may be added in the future. As far as possible, they 357Other internal entries may be added in the future. As far as possible, they
328will be handled by :c:func:`xas_retry`. 358will be handled by :c:func:`xas_retry`.
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt
index a873855c811d..14378cecb172 100644
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -86,9 +86,11 @@ transitions.
86This will give a fine grained information about all the CPU frequency 86This will give a fine grained information about all the CPU frequency
87transitions. The cat output here is a two dimensional matrix, where an entry 87transitions. The cat output here is a two dimensional matrix, where an entry
88<i,j> (row i, column j) represents the count of number of transitions from 88<i,j> (row i, column j) represents the count of number of transitions from
89Freq_i to Freq_j. Freq_i is in descending order with increasing rows and 89Freq_i to Freq_j. Freq_i rows and Freq_j columns follow the sorting order in
90Freq_j is in descending order with increasing columns. The output here also 90which the driver has provided the frequency table initially to the cpufreq core
91contains the actual freq values for each row and column for better readability. 91and so can be sorted (ascending or descending) or unsorted. The output here
92also contains the actual freq values for each row and column for better
93readability.
92 94
93If the transition table is bigger than PAGE_SIZE, reading this will 95If the transition table is bigger than PAGE_SIZE, reading this will
94return an -EFBIG error. 96return an -EFBIG error.
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index f5e0f82fd503..58c4256d37a3 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -27,7 +27,7 @@ SoCs:
27 compatible = "renesas,r8a77470" 27 compatible = "renesas,r8a77470"
28 - RZ/G2M (R8A774A1) 28 - RZ/G2M (R8A774A1)
29 compatible = "renesas,r8a774a1" 29 compatible = "renesas,r8a774a1"
30 - RZ/G2E (RA8774C0) 30 - RZ/G2E (R8A774C0)
31 compatible = "renesas,r8a774c0" 31 compatible = "renesas,r8a774c0"
32 - R-Car M1A (R8A77781) 32 - R-Car M1A (R8A77781)
33 compatible = "renesas,r8a7778" 33 compatible = "renesas,r8a7778"
diff --git a/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt b/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt
deleted file mode 100644
index 2aa06ac0fac5..000000000000
--- a/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt
+++ /dev/null
@@ -1,65 +0,0 @@
1Generic ARM big LITTLE cpufreq driver's DT glue
2-----------------------------------------------
3
4This is DT specific glue layer for generic cpufreq driver for big LITTLE
5systems.
6
7Both required and optional properties listed below must be defined
8under node /cpus/cpu@x. Where x is the first cpu inside a cluster.
9
10FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster
11must be present contiguously. Generic DT driver will check only node 'x' for
12cpu:x.
13
14Required properties:
15- operating-points: Refer to Documentation/devicetree/bindings/opp/opp.txt
16 for details
17
18Optional properties:
19- clock-latency: Specify the possible maximum transition latency for clock,
20 in unit of nanoseconds.
21
22Examples:
23
24cpus {
25 #address-cells = <1>;
26 #size-cells = <0>;
27
28 cpu@0 {
29 compatible = "arm,cortex-a15";
30 reg = <0>;
31 next-level-cache = <&L2>;
32 operating-points = <
33 /* kHz uV */
34 792000 1100000
35 396000 950000
36 198000 850000
37 >;
38 clock-latency = <61036>; /* two CLK32 periods */
39 };
40
41 cpu@1 {
42 compatible = "arm,cortex-a15";
43 reg = <1>;
44 next-level-cache = <&L2>;
45 };
46
47 cpu@100 {
48 compatible = "arm,cortex-a7";
49 reg = <100>;
50 next-level-cache = <&L2>;
51 operating-points = <
52 /* kHz uV */
53 792000 950000
54 396000 750000
55 198000 450000
56 >;
57 clock-latency = <61036>; /* two CLK32 periods */
58 };
59
60 cpu@101 {
61 compatible = "arm,cortex-a7";
62 reg = <101>;
63 next-level-cache = <&L2>;
64 };
65};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-omap.txt b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
index 7e49839d4124..4b90ba9f31b7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-omap.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
@@ -1,8 +1,12 @@
1I2C for OMAP platforms 1I2C for OMAP platforms
2 2
3Required properties : 3Required properties :
4- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c" 4- compatible : Must be
5 or "ti,omap4-i2c" 5 "ti,omap2420-i2c" for OMAP2420 SoCs
6 "ti,omap2430-i2c" for OMAP2430 SoCs
7 "ti,omap3-i2c" for OMAP3 SoCs
8 "ti,omap4-i2c" for OMAP4+ SoCs
9 "ti,am654-i2c", "ti,omap4-i2c" for AM654 SoCs
6- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based) 10- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
7- #address-cells = <1>; 11- #address-cells = <1>;
8- #size-cells = <0>; 12- #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/input/input-reset.txt b/Documentation/devicetree/bindings/input/input-reset.txt
index 2bb2626fdb78..1ca6cc5ebf8e 100644
--- a/Documentation/devicetree/bindings/input/input-reset.txt
+++ b/Documentation/devicetree/bindings/input/input-reset.txt
@@ -12,7 +12,7 @@ The /chosen node should contain a 'linux,sysrq-reset-seq' child node to define
12a set of keys. 12a set of keys.
13 13
14Required property: 14Required property:
15sysrq-reset-seq: array of Linux keycodes, one keycode per cell. 15keyset: array of Linux keycodes, one keycode per cell.
16 16
17Optional property: 17Optional property:
18timeout-ms: duration keys must be pressed together in milliseconds before 18timeout-ms: duration keys must be pressed together in milliseconds before
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.txt b/Documentation/devicetree/bindings/media/rockchip-vpu.txt
deleted file mode 100644
index 35dc464ad7c8..000000000000
--- a/Documentation/devicetree/bindings/media/rockchip-vpu.txt
+++ /dev/null
@@ -1,29 +0,0 @@
1device-tree bindings for rockchip VPU codec
2
3Rockchip (Video Processing Unit) present in various Rockchip platforms,
4such as RK3288 and RK3399.
5
6Required properties:
7- compatible: value should be one of the following
8 "rockchip,rk3288-vpu";
9 "rockchip,rk3399-vpu";
10- interrupts: encoding and decoding interrupt specifiers
11- interrupt-names: should be "vepu" and "vdpu"
12- clocks: phandle to VPU aclk, hclk clocks
13- clock-names: should be "aclk" and "hclk"
14- power-domains: phandle to power domain node
15- iommus: phandle to a iommu node
16
17Example:
18SoC-specific DT entry:
19 vpu: video-codec@ff9a0000 {
20 compatible = "rockchip,rk3288-vpu";
21 reg = <0x0 0xff9a0000 0x0 0x800>;
22 interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
23 <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
24 interrupt-names = "vepu", "vdpu";
25 clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
26 clock-names = "aclk", "hclk";
27 power-domains = <&power RK3288_PD_VIDEO>;
28 iommus = <&vpu_mmu>;
29 };
diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
index 903a78da65be..3a9926f99937 100644
--- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
+++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
@@ -17,7 +17,7 @@ Example:
17 reg = <1>; 17 reg = <1>;
18 clocks = <&clk32m>; 18 clocks = <&clk32m>;
19 interrupt-parent = <&gpio4>; 19 interrupt-parent = <&gpio4>;
20 interrupts = <13 IRQ_TYPE_EDGE_RISING>; 20 interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
21 vdd-supply = <&reg5v0>; 21 vdd-supply = <&reg5v0>;
22 xceiver-supply = <&reg5v0>; 22 xceiver-supply = <&reg5v0>;
23 }; 23 };
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index cc4372842bf3..9936b9ee67c3 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -5,6 +5,7 @@ Required properties:
5- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC. 5- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
6 "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC. 6 "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
7 "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC. 7 "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
8 "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
8 "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC. 9 "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
9 "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC. 10 "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
10 "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC. 11 "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
@@ -14,26 +15,32 @@ Required properties:
14 "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC. 15 "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
15 "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC. 16 "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
16 "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC. 17 "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
18 "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC.
17 "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device. 19 "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
18 "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1 20 "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
19 compatible device. 21 compatible device.
20 "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device. 22 "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2
23 compatible device.
21 When compatible with the generic version, nodes must list the 24 When compatible with the generic version, nodes must list the
22 SoC-specific version corresponding to the platform first 25 SoC-specific version corresponding to the platform first
23 followed by the generic version. 26 followed by the generic version.
24 27
25- reg: physical base address and size of the R-Car CAN register map. 28- reg: physical base address and size of the R-Car CAN register map.
26- interrupts: interrupt specifier for the sole interrupt. 29- interrupts: interrupt specifier for the sole interrupt.
27- clocks: phandles and clock specifiers for 3 CAN clock inputs. 30- clocks: phandles and clock specifiers for 2 CAN clock inputs for RZ/G2
28- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk". 31 devices.
32 phandles and clock specifiers for 3 CAN clock inputs for every other
33 SoC.
34- clock-names: 2 clock input name strings for RZ/G2: "clkp1", "can_clk".
35 3 clock input name strings for every other SoC: "clkp1", "clkp2",
36 "can_clk".
29- pinctrl-0: pin control group to be used for this controller. 37- pinctrl-0: pin control group to be used for this controller.
30- pinctrl-names: must be "default". 38- pinctrl-names: must be "default".
31 39
32Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796" 40Required properties for R8A7795, R8A7796 and R8A77965:
33compatible: 41For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
34In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock 42be used by both CAN and CAN FD controller at the same time. It needs to be
35and can be used by both CAN and CAN FD controller at the same time. It needs to 43scaled to maximum frequency if any of these controllers use it. This is done
36be scaled to maximum frequency if any of these controllers use it. This is done
37using the below properties: 44using the below properties:
38 45
39- assigned-clocks: phandle of clkp2(CANFD) clock. 46- assigned-clocks: phandle of clkp2(CANFD) clock.
@@ -42,8 +49,9 @@ using the below properties:
42Optional properties: 49Optional properties:
43- renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are: 50- renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
44 <0x0> (default) : Peripheral clock (clkp1) 51 <0x0> (default) : Peripheral clock (clkp1)
45 <0x1> : Peripheral clock (clkp2) 52 <0x1> : Peripheral clock (clkp2) (not supported by
46 <0x3> : Externally input clock 53 RZ/G2 devices)
54 <0x3> : External input clock
47 55
48Example 56Example
49------- 57-------
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index 3ceeb8de1196..35694c0c376b 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -7,7 +7,7 @@ limitations.
7Current Binding 7Current Binding
8--------------- 8---------------
9 9
10Switches are true Linux devices and can be probes by any means. Once 10Switches are true Linux devices and can be probed by any means. Once
11probed, they register to the DSA framework, passing a node 11probed, they register to the DSA framework, passing a node
12pointer. This node is expected to fulfil the following binding, and 12pointer. This node is expected to fulfil the following binding, and
13may contain additional properties as required by the device it is 13may contain additional properties as required by the device it is
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index adf20b2bdf71..fbc198d5dd39 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -40,24 +40,36 @@ Required properties:
40 "ref" for 19.2 MHz ref clk, 40 "ref" for 19.2 MHz ref clk,
41 "com_aux" for phy common block aux clock, 41 "com_aux" for phy common block aux clock,
42 "ref_aux" for phy reference aux clock, 42 "ref_aux" for phy reference aux clock,
43
44 For "qcom,ipq8074-qmp-pcie-phy": no clocks are listed.
43 For "qcom,msm8996-qmp-pcie-phy" must contain: 45 For "qcom,msm8996-qmp-pcie-phy" must contain:
44 "aux", "cfg_ahb", "ref". 46 "aux", "cfg_ahb", "ref".
45 For "qcom,msm8996-qmp-usb3-phy" must contain: 47 For "qcom,msm8996-qmp-usb3-phy" must contain:
46 "aux", "cfg_ahb", "ref". 48 "aux", "cfg_ahb", "ref".
47 For "qcom,qmp-v3-usb3-phy" must contain: 49 For "qcom,sdm845-qmp-usb3-phy" must contain:
50 "aux", "cfg_ahb", "ref", "com_aux".
51 For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
48 "aux", "cfg_ahb", "ref", "com_aux". 52 "aux", "cfg_ahb", "ref", "com_aux".
53 For "qcom,sdm845-qmp-ufs-phy" must contain:
54 "ref", "ref_aux".
49 55
50 - resets: a list of phandles and reset controller specifier pairs, 56 - resets: a list of phandles and reset controller specifier pairs,
51 one for each entry in reset-names. 57 one for each entry in reset-names.
52 - reset-names: "phy" for reset of phy block, 58 - reset-names: "phy" for reset of phy block,
53 "common" for phy common block reset, 59 "common" for phy common block reset,
54 "cfg" for phy's ahb cfg block reset (Optional). 60 "cfg" for phy's ahb cfg block reset.
61
62 For "qcom,ipq8074-qmp-pcie-phy" must contain:
63 "phy", "common".
55 For "qcom,msm8996-qmp-pcie-phy" must contain: 64 For "qcom,msm8996-qmp-pcie-phy" must contain:
56 "phy", "common", "cfg". 65 "phy", "common", "cfg".
57 For "qcom,msm8996-qmp-usb3-phy" must contain 66 For "qcom,msm8996-qmp-usb3-phy" must contain
58 "phy", "common". 67 "phy", "common".
59 For "qcom,ipq8074-qmp-pcie-phy" must contain: 68 For "qcom,sdm845-qmp-usb3-phy" must contain:
60 "phy", "common". 69 "phy", "common".
70 For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
71 "phy", "common".
72 For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
61 73
62 - vdda-phy-supply: Phandle to a regulator supply to PHY core block. 74 - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
63 - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block. 75 - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
@@ -79,9 +91,10 @@ Required properties for child node:
79 91
80 - #phy-cells: must be 0 92 - #phy-cells: must be 0
81 93
94Required properties child node of pcie and usb3 qmp phys:
82 - clocks: a list of phandles and clock-specifier pairs, 95 - clocks: a list of phandles and clock-specifier pairs,
83 one for each entry in clock-names. 96 one for each entry in clock-names.
84 - clock-names: Must contain following for pcie and usb qmp phys: 97 - clock-names: Must contain following:
85 "pipe<lane-number>" for pipe clock specific to each lane. 98 "pipe<lane-number>" for pipe clock specific to each lane.
86 - clock-output-names: Name of the PHY clock that will be the parent for 99 - clock-output-names: Name of the PHY clock that will be the parent for
87 the above pipe clock. 100 the above pipe clock.
@@ -91,9 +104,11 @@ Required properties for child node:
91 (or) 104 (or)
92 "pcie20_phy1_pipe_clk" 105 "pcie20_phy1_pipe_clk"
93 106
107Required properties for child node of PHYs with lane reset, AKA:
108 "qcom,msm8996-qmp-pcie-phy"
94 - resets: a list of phandles and reset controller specifier pairs, 109 - resets: a list of phandles and reset controller specifier pairs,
95 one for each entry in reset-names. 110 one for each entry in reset-names.
96 - reset-names: Must contain following for pcie qmp phys: 111 - reset-names: Must contain following:
97 "lane<lane-number>" for reset specific to each lane. 112 "lane<lane-number>" for reset specific to each lane.
98 113
99Example: 114Example:
diff --git a/Documentation/devicetree/bindings/spi/spi-uniphier.txt b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
index 504a4ecfc7b1..b04e66a52de5 100644
--- a/Documentation/devicetree/bindings/spi/spi-uniphier.txt
+++ b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
@@ -5,18 +5,20 @@ UniPhier SoCs have SCSSI which supports SPI single channel.
5Required properties: 5Required properties:
6 - compatible: should be "socionext,uniphier-scssi" 6 - compatible: should be "socionext,uniphier-scssi"
7 - reg: address and length of the spi master registers 7 - reg: address and length of the spi master registers
8 - #address-cells: must be <1>, see spi-bus.txt 8 - interrupts: a single interrupt specifier
9 - #size-cells: must be <0>, see spi-bus.txt 9 - pinctrl-names: should be "default"
10 - clocks: A phandle to the clock for the device. 10 - pinctrl-0: pin control state for the default mode
11 - resets: A phandle to the reset control for the device. 11 - clocks: a phandle to the clock for the device
12 - resets: a phandle to the reset control for the device
12 13
13Example: 14Example:
14 15
15spi0: spi@54006000 { 16spi0: spi@54006000 {
16 compatible = "socionext,uniphier-scssi"; 17 compatible = "socionext,uniphier-scssi";
17 reg = <0x54006000 0x100>; 18 reg = <0x54006000 0x100>;
18 #address-cells = <1>; 19 interrupts = <0 39 4>;
19 #size-cells = <0>; 20 pinctrl-names = "default";
21 pinctrl-0 = <&pinctrl_spi0>;
20 clocks = <&peri_clk 11>; 22 clocks = <&peri_clk 11>;
21 resets = <&peri_rst 11>; 23 resets = <&peri_rst 11>;
22}; 24};
diff --git a/Documentation/i2c/busses/i2c-nvidia-gpu b/Documentation/i2c/busses/i2c-nvidia-gpu
new file mode 100644
index 000000000000..31884d2b2eb5
--- /dev/null
+++ b/Documentation/i2c/busses/i2c-nvidia-gpu
@@ -0,0 +1,18 @@
1Kernel driver i2c-nvidia-gpu
2
3Datasheet: not publicly available.
4
5Authors:
6 Ajay Gupta <ajayg@nvidia.com>
7
8Description
9-----------
10
11i2c-nvidia-gpu is a driver for I2C controller included in NVIDIA Turing
12and later GPUs and it is used to communicate with Type-C controller on GPUs.
13
14If your 'lspci -v' listing shows something like the following,
15
1601:00.3 Serial bus controller [0c80]: NVIDIA Corporation Device 1ad9 (rev a1)
17
18then this driver should support the I2C controller of your GPU.
diff --git a/Documentation/input/event-codes.rst b/Documentation/input/event-codes.rst
index cef220c176a4..a8c0873beb95 100644
--- a/Documentation/input/event-codes.rst
+++ b/Documentation/input/event-codes.rst
@@ -190,16 +190,7 @@ A few EV_REL codes have special meanings:
190* REL_WHEEL, REL_HWHEEL: 190* REL_WHEEL, REL_HWHEEL:
191 191
192 - These codes are used for vertical and horizontal scroll wheels, 192 - These codes are used for vertical and horizontal scroll wheels,
193 respectively. The value is the number of "notches" moved on the wheel, the 193 respectively.
194 physical size of which varies by device. For high-resolution wheels (which
195 report multiple events for each notch of movement, or do not have notches)
196 this may be an approximation based on the high-resolution scroll events.
197
198* REL_WHEEL_HI_RES:
199
200 - If a vertical scroll wheel supports high-resolution scrolling, this code
201 will be emitted in addition to REL_WHEEL. The value is the (approximate)
202 distance travelled by the user's finger, in microns.
203 194
204EV_ABS 195EV_ABS
205------ 196------
diff --git a/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst b/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
index 0f8b31874002..de131f00c249 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
@@ -1,4 +1,28 @@
1.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 1.. This file is dual-licensed: you can use it either under the terms
2.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
3.. dual licensing only applies to this file, and not this project as a
4.. whole.
5..
6.. a) This file is free software; you can redistribute it and/or
7.. modify it under the terms of the GNU General Public License as
8.. published by the Free Software Foundation; either version 2 of
9.. the License, or (at your option) any later version.
10..
11.. This file is distributed in the hope that it will be useful,
12.. but WITHOUT ANY WARRANTY; without even the implied warranty of
13.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14.. GNU General Public License for more details.
15..
16.. Or, alternatively,
17..
18.. b) Permission is granted to copy, distribute and/or modify this
19.. document under the terms of the GNU Free Documentation License,
20.. Version 1.1 or any later version published by the Free Software
21.. Foundation, with no Invariant Sections, no Front-Cover Texts
22.. and no Back-Cover Texts. A copy of the license is included at
23.. Documentation/media/uapi/fdl-appendix.rst.
24..
25.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
2 26
3.. _media_ioc_request_alloc: 27.. _media_ioc_request_alloc:
4 28
diff --git a/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst b/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
index 6dd2d7fea714..5d2604345e19 100644
--- a/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
+++ b/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
@@ -1,4 +1,28 @@
1.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 1.. This file is dual-licensed: you can use it either under the terms
2.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
3.. dual licensing only applies to this file, and not this project as a
4.. whole.
5..
6.. a) This file is free software; you can redistribute it and/or
7.. modify it under the terms of the GNU General Public License as
8.. published by the Free Software Foundation; either version 2 of
9.. the License, or (at your option) any later version.
10..
11.. This file is distributed in the hope that it will be useful,
12.. but WITHOUT ANY WARRANTY; without even the implied warranty of
13.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14.. GNU General Public License for more details.
15..
16.. Or, alternatively,
17..
18.. b) Permission is granted to copy, distribute and/or modify this
19.. document under the terms of the GNU Free Documentation License,
20.. Version 1.1 or any later version published by the Free Software
21.. Foundation, with no Invariant Sections, no Front-Cover Texts
22.. and no Back-Cover Texts. A copy of the license is included at
23.. Documentation/media/uapi/fdl-appendix.rst.
24..
25.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
2 26
3.. _media_request_ioc_queue: 27.. _media_request_ioc_queue:
4 28
diff --git a/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst b/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
index febe888494c8..ec61960c81ce 100644
--- a/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
+++ b/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
@@ -1,4 +1,28 @@
1.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 1.. This file is dual-licensed: you can use it either under the terms
2.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
3.. dual licensing only applies to this file, and not this project as a
4.. whole.
5..
6.. a) This file is free software; you can redistribute it and/or
7.. modify it under the terms of the GNU General Public License as
8.. published by the Free Software Foundation; either version 2 of
9.. the License, or (at your option) any later version.
10..
11.. This file is distributed in the hope that it will be useful,
12.. but WITHOUT ANY WARRANTY; without even the implied warranty of
13.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14.. GNU General Public License for more details.
15..
16.. Or, alternatively,
17..
18.. b) Permission is granted to copy, distribute and/or modify this
19.. document under the terms of the GNU Free Documentation License,
20.. Version 1.1 or any later version published by the Free Software
21.. Foundation, with no Invariant Sections, no Front-Cover Texts
22.. and no Back-Cover Texts. A copy of the license is included at
23.. Documentation/media/uapi/fdl-appendix.rst.
24..
25.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
2 26
3.. _media_request_ioc_reinit: 27.. _media_request_ioc_reinit:
4 28
diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst
index 5f4a23029c48..945113dcb218 100644
--- a/Documentation/media/uapi/mediactl/request-api.rst
+++ b/Documentation/media/uapi/mediactl/request-api.rst
@@ -1,4 +1,28 @@
1.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 1.. This file is dual-licensed: you can use it either under the terms
2.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
3.. dual licensing only applies to this file, and not this project as a
4.. whole.
5..
6.. a) This file is free software; you can redistribute it and/or
7.. modify it under the terms of the GNU General Public License as
8.. published by the Free Software Foundation; either version 2 of
9.. the License, or (at your option) any later version.
10..
11.. This file is distributed in the hope that it will be useful,
12.. but WITHOUT ANY WARRANTY; without even the implied warranty of
13.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14.. GNU General Public License for more details.
15..
16.. Or, alternatively,
17..
18.. b) Permission is granted to copy, distribute and/or modify this
19.. document under the terms of the GNU Free Documentation License,
20.. Version 1.1 or any later version published by the Free Software
21.. Foundation, with no Invariant Sections, no Front-Cover Texts
22.. and no Back-Cover Texts. A copy of the license is included at
23.. Documentation/media/uapi/fdl-appendix.rst.
24..
25.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
2 26
3.. _media-request-api: 27.. _media-request-api:
4 28
diff --git a/Documentation/media/uapi/mediactl/request-func-close.rst b/Documentation/media/uapi/mediactl/request-func-close.rst
index 098d7f2b9548..dcf3f35bcf17 100644
--- a/Documentation/media/uapi/mediactl/request-func-close.rst
+++ b/Documentation/media/uapi/mediactl/request-func-close.rst
@@ -1,4 +1,28 @@
1.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 1.. This file is dual-licensed: you can use it either under the terms
2.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
3.. dual licensing only applies to this file, and not this project as a
4.. whole.
5..
6.. a) This file is free software; you can redistribute it and/or
7.. modify it under the terms of the GNU General Public License as
8.. published by the Free Software Foundation; either version 2 of
9.. the License, or (at your option) any later version.
10..
11.. This file is distributed in the hope that it will be useful,
12.. but WITHOUT ANY WARRANTY; without even the implied warranty of
13.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14.. GNU General Public License for more details.
15..
16.. Or, alternatively,
17..
18.. b) Permission is granted to copy, distribute and/or modify this
19.. document under the terms of the GNU Free Documentation License,
20.. Version 1.1 or any later version published by the Free Software
21.. Foundation, with no Invariant Sections, no Front-Cover Texts
22.. and no Back-Cover Texts. A copy of the license is included at
23.. Documentation/media/uapi/fdl-appendix.rst.
24..
25.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
2 26
3.. _request-func-close: 27.. _request-func-close:
4 28
diff --git a/Documentation/media/uapi/mediactl/request-func-ioctl.rst b/Documentation/media/uapi/mediactl/request-func-ioctl.rst
index ff7b072a6999..11a22f887843 100644
--- a/Documentation/media/uapi/mediactl/request-func-ioctl.rst
+++ b/Documentation/media/uapi/mediactl/request-func-ioctl.rst
@@ -1,4 +1,28 @@
1.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 1.. This file is dual-licensed: you can use it either under the terms
2.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
3.. dual licensing only applies to this file, and not this project as a
4.. whole.
5..
6.. a) This file is free software; you can redistribute it and/or
7.. modify it under the terms of the GNU General Public License as
8.. published by the Free Software Foundation; either version 2 of
9.. the License, or (at your option) any later version.
10..
11.. This file is distributed in the hope that it will be useful,
12.. but WITHOUT ANY WARRANTY; without even the implied warranty of
13.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14.. GNU General Public License for more details.
15..
16.. Or, alternatively,
17..
18.. b) Permission is granted to copy, distribute and/or modify this
19.. document under the terms of the GNU Free Documentation License,
20.. Version 1.1 or any later version published by the Free Software
21.. Foundation, with no Invariant Sections, no Front-Cover Texts
22.. and no Back-Cover Texts. A copy of the license is included at
23.. Documentation/media/uapi/fdl-appendix.rst.
24..
25.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
2 26
3.. _request-func-ioctl: 27.. _request-func-ioctl:
4 28
diff --git a/Documentation/media/uapi/mediactl/request-func-poll.rst b/Documentation/media/uapi/mediactl/request-func-poll.rst
index 85191254f381..2609fd54d519 100644
--- a/Documentation/media/uapi/mediactl/request-func-poll.rst
+++ b/Documentation/media/uapi/mediactl/request-func-poll.rst
@@ -1,4 +1,28 @@
1.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 1.. This file is dual-licensed: you can use it either under the terms
2.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
3.. dual licensing only applies to this file, and not this project as a
4.. whole.
5..
6.. a) This file is free software; you can redistribute it and/or
7.. modify it under the terms of the GNU General Public License as
8.. published by the Free Software Foundation; either version 2 of
9.. the License, or (at your option) any later version.
10..
11.. This file is distributed in the hope that it will be useful,
12.. but WITHOUT ANY WARRANTY; without even the implied warranty of
13.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14.. GNU General Public License for more details.
15..
16.. Or, alternatively,
17..
18.. b) Permission is granted to copy, distribute and/or modify this
19.. document under the terms of the GNU Free Documentation License,
20.. Version 1.1 or any later version published by the Free Software
21.. Foundation, with no Invariant Sections, no Front-Cover Texts
22.. and no Back-Cover Texts. A copy of the license is included at
23.. Documentation/media/uapi/fdl-appendix.rst.
24..
25.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
2 26
3.. _request-func-poll: 27.. _request-func-poll:
4 28
diff --git a/Documentation/media/uapi/v4l/dev-meta.rst b/Documentation/media/uapi/v4l/dev-meta.rst
index f7ac8d0d3af1..b65dc078abeb 100644
--- a/Documentation/media/uapi/v4l/dev-meta.rst
+++ b/Documentation/media/uapi/v4l/dev-meta.rst
@@ -40,7 +40,7 @@ To use the :ref:`format` ioctls applications set the ``type`` field of the
40the desired operation. Both drivers and applications must set the remainder of 40the desired operation. Both drivers and applications must set the remainder of
41the :c:type:`v4l2_format` structure to 0. 41the :c:type:`v4l2_format` structure to 0.
42 42
43.. _v4l2-meta-format: 43.. c:type:: v4l2_meta_format
44 44
45.. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}| 45.. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}|
46 46
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
index 3ead350e099f..9ea494a8faca 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
@@ -133,6 +133,11 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical
133 - Definition of a data format, see :ref:`pixfmt`, used by SDR 133 - Definition of a data format, see :ref:`pixfmt`, used by SDR
134 capture and output devices. 134 capture and output devices.
135 * - 135 * -
136 - struct :c:type:`v4l2_meta_format`
137 - ``meta``
138 - Definition of a metadata format, see :ref:`meta-formats`, used by
139 metadata capture devices.
140 * -
136 - __u8 141 - __u8
137 - ``raw_data``\ [200] 142 - ``raw_data``\ [200]
138 - Place holder for future extensions. 143 - Place holder for future extensions.
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 605e00cdd6be..89f1302d593a 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1056,18 +1056,23 @@ The kernel interface functions are as follows:
1056 1056
1057 u32 rxrpc_kernel_check_life(struct socket *sock, 1057 u32 rxrpc_kernel_check_life(struct socket *sock,
1058 struct rxrpc_call *call); 1058 struct rxrpc_call *call);
1059 void rxrpc_kernel_probe_life(struct socket *sock,
1060 struct rxrpc_call *call);
1059 1061
1060 This returns a number that is updated when ACKs are received from the peer 1062 The first function returns a number that is updated when ACKs are received
1061 (notably including PING RESPONSE ACKs which we can elicit by sending PING 1063 from the peer (notably including PING RESPONSE ACKs which we can elicit by
1062 ACKs to see if the call still exists on the server). The caller should 1064 sending PING ACKs to see if the call still exists on the server). The
1063 compare the numbers of two calls to see if the call is still alive after 1065 caller should compare the numbers of two calls to see if the call is still
1064 waiting for a suitable interval. 1066 alive after waiting for a suitable interval.
1065 1067
1066 This allows the caller to work out if the server is still contactable and 1068 This allows the caller to work out if the server is still contactable and
1067 if the call is still alive on the server whilst waiting for the server to 1069 if the call is still alive on the server whilst waiting for the server to
1068 process a client operation. 1070 process a client operation.
1069 1071
1070 This function may transmit a PING ACK. 1072 The second function causes a ping ACK to be transmitted to try to provoke
1073 the peer into responding, which would then cause the value returned by the
1074 first function to change. Note that this must be called in TASK_RUNNING
1075 state.
1071 1076
1072 (*) Get reply timestamp. 1077 (*) Get reply timestamp.
1073 1078
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
index 32f3d55c54b7..c4dbe6f7cdae 100644
--- a/Documentation/userspace-api/spec_ctrl.rst
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -92,3 +92,12 @@ Speculation misfeature controls
92 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); 92 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
93 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); 93 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
94 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); 94 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
95
96- PR_SPEC_INDIRECT_BRANCH: Indirect Branch Speculation in User Processes
97 (Mitigate Spectre V2 style attacks against user processes)
98
99 Invocations:
100 * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
101 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
102 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
103 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 7727db8f94bc..5e9b826b5f62 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -61,18 +61,6 @@ Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
61 to struct boot_params for loading bzImage and ramdisk 61 to struct boot_params for loading bzImage and ramdisk
62 above 4G in 64bit. 62 above 4G in 64bit.
63 63
64Protocol 2.13: (Kernel 3.14) Support 32- and 64-bit flags being set in
65 xloadflags to support booting a 64-bit kernel from 32-bit
66 EFI
67
68Protocol 2.14: (Kernel 4.20) Added acpi_rsdp_addr holding the physical
69 address of the ACPI RSDP table.
70 The bootloader updates version with:
71 0x8000 | min(kernel-version, bootloader-version)
72 kernel-version being the protocol version supported by
73 the kernel and bootloader-version the protocol version
74 supported by the bootloader.
75
76**** MEMORY LAYOUT 64**** MEMORY LAYOUT
77 65
78The traditional memory map for the kernel loader, used for Image or 66The traditional memory map for the kernel loader, used for Image or
@@ -209,7 +197,6 @@ Offset Proto Name Meaning
2090258/8 2.10+ pref_address Preferred loading address 1970258/8 2.10+ pref_address Preferred loading address
2100260/4 2.10+ init_size Linear memory required during initialization 1980260/4 2.10+ init_size Linear memory required during initialization
2110264/4 2.11+ handover_offset Offset of handover entry point 1990264/4 2.11+ handover_offset Offset of handover entry point
2120268/8 2.14+ acpi_rsdp_addr Physical address of RSDP table
213 200
214(1) For backwards compatibility, if the setup_sects field contains 0, the 201(1) For backwards compatibility, if the setup_sects field contains 0, the
215 real value is 4. 202 real value is 4.
@@ -322,7 +309,7 @@ Protocol: 2.00+
322 Contains the magic number "HdrS" (0x53726448). 309 Contains the magic number "HdrS" (0x53726448).
323 310
324Field name: version 311Field name: version
325Type: modify 312Type: read
326Offset/size: 0x206/2 313Offset/size: 0x206/2
327Protocol: 2.00+ 314Protocol: 2.00+
328 315
@@ -330,12 +317,6 @@ Protocol: 2.00+
330 e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version 317 e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
331 10.17. 318 10.17.
332 319
333 Up to protocol version 2.13 this information is only read by the
334 bootloader. From protocol version 2.14 onwards the bootloader will
335 write the used protocol version or-ed with 0x8000 to the field. The
336 used protocol version will be the minimum of the supported protocol
337 versions of the bootloader and the kernel.
338
339Field name: realmode_swtch 320Field name: realmode_swtch
340Type: modify (optional) 321Type: modify (optional)
341Offset/size: 0x208/4 322Offset/size: 0x208/4
@@ -763,17 +744,6 @@ Offset/size: 0x264/4
763 744
764 See EFI HANDOVER PROTOCOL below for more details. 745 See EFI HANDOVER PROTOCOL below for more details.
765 746
766Field name: acpi_rsdp_addr
767Type: write
768Offset/size: 0x268/8
769Protocol: 2.14+
770
771 This field can be set by the boot loader to tell the kernel the
772 physical address of the ACPI RSDP table.
773
774 A value of 0 indicates the kernel should fall back to the standard
775 methods to locate the RSDP.
776
777 747
778**** THE IMAGE CHECKSUM 748**** THE IMAGE CHECKSUM
779 749
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 73aaaa3da436..804f9426ed17 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -34,23 +34,24 @@ __________________|____________|__________________|_________|___________________
34____________________________________________________________|___________________________________________________________ 34____________________________________________________________|___________________________________________________________
35 | | | | 35 | | | |
36 ffff800000000000 | -128 TB | ffff87ffffffffff | 8 TB | ... guard hole, also reserved for hypervisor 36 ffff800000000000 | -128 TB | ffff87ffffffffff | 8 TB | ... guard hole, also reserved for hypervisor
37 ffff880000000000 | -120 TB | ffffc7ffffffffff | 64 TB | direct mapping of all physical memory (page_offset_base) 37 ffff880000000000 | -120 TB | ffff887fffffffff | 0.5 TB | LDT remap for PTI
38 ffffc80000000000 | -56 TB | ffffc8ffffffffff | 1 TB | ... unused hole 38 ffff888000000000 | -119.5 TB | ffffc87fffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
39 ffffc88000000000 | -55.5 TB | ffffc8ffffffffff | 0.5 TB | ... unused hole
39 ffffc90000000000 | -55 TB | ffffe8ffffffffff | 32 TB | vmalloc/ioremap space (vmalloc_base) 40 ffffc90000000000 | -55 TB | ffffe8ffffffffff | 32 TB | vmalloc/ioremap space (vmalloc_base)
40 ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole 41 ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
41 ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base) 42 ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
42 ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole 43 ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
43 ffffec0000000000 | -20 TB | fffffbffffffffff | 16 TB | KASAN shadow memory 44 ffffec0000000000 | -20 TB | fffffbffffffffff | 16 TB | KASAN shadow memory
44 fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
45 | | | | vaddr_end for KASLR
46 fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
47 fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | LDT remap for PTI
48 ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
49__________________|____________|__________________|_________|____________________________________________________________ 45__________________|____________|__________________|_________|____________________________________________________________
50 | 46 |
51 | Identical layout to the 47-bit one from here on: 47 | Identical layout to the 56-bit one from here on:
52____________________________________________________________|____________________________________________________________ 48____________________________________________________________|____________________________________________________________
53 | | | | 49 | | | |
50 fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
51 | | | | vaddr_end for KASLR
52 fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
53 fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
54 ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
54 ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole 55 ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
55 ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space 56 ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
56 ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole 57 ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
@@ -83,7 +84,7 @@ Notes:
83__________________|____________|__________________|_________|___________________________________________________________ 84__________________|____________|__________________|_________|___________________________________________________________
84 | | | | 85 | | | |
85 0000800000000000 | +64 PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical 86 0000800000000000 | +64 PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
86 | | | | virtual memory addresses up to the -128 TB 87 | | | | virtual memory addresses up to the -64 PB
87 | | | | starting offset of kernel mappings. 88 | | | | starting offset of kernel mappings.
88__________________|____________|__________________|_________|___________________________________________________________ 89__________________|____________|__________________|_________|___________________________________________________________
89 | 90 |
@@ -91,23 +92,24 @@ __________________|____________|__________________|_________|___________________
91____________________________________________________________|___________________________________________________________ 92____________________________________________________________|___________________________________________________________
92 | | | | 93 | | | |
93 ff00000000000000 | -64 PB | ff0fffffffffffff | 4 PB | ... guard hole, also reserved for hypervisor 94 ff00000000000000 | -64 PB | ff0fffffffffffff | 4 PB | ... guard hole, also reserved for hypervisor
94 ff10000000000000 | -60 PB | ff8fffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base) 95 ff10000000000000 | -60 PB | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
95 ff90000000000000 | -28 PB | ff9fffffffffffff | 4 PB | LDT remap for PTI 96 ff11000000000000 | -59.75 PB | ff90ffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
97 ff91000000000000 | -27.75 PB | ff9fffffffffffff | 3.75 PB | ... unused hole
96 ffa0000000000000 | -24 PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base) 98 ffa0000000000000 | -24 PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
97 ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole 99 ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole
98 ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base) 100 ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base)
99 ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole 101 ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole
100 ffdf000000000000 | -8.25 PB | fffffdffffffffff | ~8 PB | KASAN shadow memory 102 ffdf000000000000 | -8.25 PB | fffffdffffffffff | ~8 PB | KASAN shadow memory
101 fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
102 | | | | vaddr_end for KASLR
103 fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
104 fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
105 ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
106__________________|____________|__________________|_________|____________________________________________________________ 103__________________|____________|__________________|_________|____________________________________________________________
107 | 104 |
108 | Identical layout to the 47-bit one from here on: 105 | Identical layout to the 47-bit one from here on:
109____________________________________________________________|____________________________________________________________ 106____________________________________________________________|____________________________________________________________
110 | | | | 107 | | | |
108 fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
109 | | | | vaddr_end for KASLR
110 fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
111 fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
112 ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
111 ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole 113 ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
112 ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space 114 ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
113 ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole 115 ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
index 97b7adbceda4..68aed077f7b6 100644
--- a/Documentation/x86/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
@@ -25,7 +25,7 @@ Offset Proto Name Meaning
250C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits 250C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits
26140/080 ALL edid_info Video mode setup (struct edid_info) 26140/080 ALL edid_info Video mode setup (struct edid_info)
271C0/020 ALL efi_info EFI 32 information (struct efi_info) 271C0/020 ALL efi_info EFI 32 information (struct efi_info)
281E0/004 ALL alk_mem_k Alternative mem check, in KB 281E0/004 ALL alt_mem_k Alternative mem check, in KB
291E4/004 ALL scratch Scratch field for the kernel setup code 291E4/004 ALL scratch Scratch field for the kernel setup code
301E8/001 ALL e820_entries Number of entries in e820_table (below) 301E8/001 ALL e820_entries Number of entries in e820_table (below)
311E9/001 ALL eddbuf_entries Number of entries in eddbuf (below) 311E9/001 ALL eddbuf_entries Number of entries in eddbuf (below)
diff --git a/MAINTAINERS b/MAINTAINERS
index f4855974f325..6682420421c1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -180,6 +180,7 @@ F: drivers/net/hamradio/6pack.c
180 180
1818169 10/100/1000 GIGABIT ETHERNET DRIVER 1818169 10/100/1000 GIGABIT ETHERNET DRIVER
182M: Realtek linux nic maintainers <nic_swsd@realtek.com> 182M: Realtek linux nic maintainers <nic_swsd@realtek.com>
183M: Heiner Kallweit <hkallweit1@gmail.com>
183L: netdev@vger.kernel.org 184L: netdev@vger.kernel.org
184S: Maintained 185S: Maintained
185F: drivers/net/ethernet/realtek/r8169.c 186F: drivers/net/ethernet/realtek/r8169.c
@@ -717,7 +718,7 @@ F: include/linux/mfd/altera-a10sr.h
717F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h 718F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
718 719
719ALTERA TRIPLE SPEED ETHERNET DRIVER 720ALTERA TRIPLE SPEED ETHERNET DRIVER
720M: Vince Bridgers <vbridger@opensource.altera.com> 721M: Thor Thayer <thor.thayer@linux.intel.com>
721L: netdev@vger.kernel.org 722L: netdev@vger.kernel.org
722L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) 723L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
723S: Maintained 724S: Maintained
@@ -1922,7 +1923,6 @@ ARM/QUALCOMM SUPPORT
1922M: Andy Gross <andy.gross@linaro.org> 1923M: Andy Gross <andy.gross@linaro.org>
1923M: David Brown <david.brown@linaro.org> 1924M: David Brown <david.brown@linaro.org>
1924L: linux-arm-msm@vger.kernel.org 1925L: linux-arm-msm@vger.kernel.org
1925L: linux-soc@vger.kernel.org
1926S: Maintained 1926S: Maintained
1927F: Documentation/devicetree/bindings/soc/qcom/ 1927F: Documentation/devicetree/bindings/soc/qcom/
1928F: arch/arm/boot/dts/qcom-*.dts 1928F: arch/arm/boot/dts/qcom-*.dts
@@ -2490,7 +2490,7 @@ F: drivers/net/wireless/ath/*
2490ATHEROS ATH5K WIRELESS DRIVER 2490ATHEROS ATH5K WIRELESS DRIVER
2491M: Jiri Slaby <jirislaby@gmail.com> 2491M: Jiri Slaby <jirislaby@gmail.com>
2492M: Nick Kossifidis <mickflemm@gmail.com> 2492M: Nick Kossifidis <mickflemm@gmail.com>
2493M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com> 2493M: Luis Chamberlain <mcgrof@kernel.org>
2494L: linux-wireless@vger.kernel.org 2494L: linux-wireless@vger.kernel.org
2495W: http://wireless.kernel.org/en/users/Drivers/ath5k 2495W: http://wireless.kernel.org/en/users/Drivers/ath5k
2496S: Maintained 2496S: Maintained
@@ -2800,7 +2800,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
2800T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git 2800T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
2801Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 2801Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
2802S: Supported 2802S: Supported
2803F: arch/x86/net/bpf_jit* 2803F: arch/*/net/*
2804F: Documentation/networking/filter.txt 2804F: Documentation/networking/filter.txt
2805F: Documentation/bpf/ 2805F: Documentation/bpf/
2806F: include/linux/bpf* 2806F: include/linux/bpf*
@@ -2820,6 +2820,67 @@ F: tools/bpf/
2820F: tools/lib/bpf/ 2820F: tools/lib/bpf/
2821F: tools/testing/selftests/bpf/ 2821F: tools/testing/selftests/bpf/
2822 2822
2823BPF JIT for ARM
2824M: Shubham Bansal <illusionist.neo@gmail.com>
2825L: netdev@vger.kernel.org
2826S: Maintained
2827F: arch/arm/net/
2828
2829BPF JIT for ARM64
2830M: Daniel Borkmann <daniel@iogearbox.net>
2831M: Alexei Starovoitov <ast@kernel.org>
2832M: Zi Shen Lim <zlim.lnx@gmail.com>
2833L: netdev@vger.kernel.org
2834S: Supported
2835F: arch/arm64/net/
2836
2837BPF JIT for MIPS (32-BIT AND 64-BIT)
2838M: Paul Burton <paul.burton@mips.com>
2839L: netdev@vger.kernel.org
2840S: Maintained
2841F: arch/mips/net/
2842
2843BPF JIT for NFP NICs
2844M: Jakub Kicinski <jakub.kicinski@netronome.com>
2845L: netdev@vger.kernel.org
2846S: Supported
2847F: drivers/net/ethernet/netronome/nfp/bpf/
2848
2849BPF JIT for POWERPC (32-BIT AND 64-BIT)
2850M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
2851M: Sandipan Das <sandipan@linux.ibm.com>
2852L: netdev@vger.kernel.org
2853S: Maintained
2854F: arch/powerpc/net/
2855
2856BPF JIT for S390
2857M: Martin Schwidefsky <schwidefsky@de.ibm.com>
2858M: Heiko Carstens <heiko.carstens@de.ibm.com>
2859L: netdev@vger.kernel.org
2860S: Maintained
2861F: arch/s390/net/
2862X: arch/s390/net/pnet.c
2863
2864BPF JIT for SPARC (32-BIT AND 64-BIT)
2865M: David S. Miller <davem@davemloft.net>
2866L: netdev@vger.kernel.org
2867S: Maintained
2868F: arch/sparc/net/
2869
2870BPF JIT for X86 32-BIT
2871M: Wang YanQing <udknight@gmail.com>
2872L: netdev@vger.kernel.org
2873S: Maintained
2874F: arch/x86/net/bpf_jit_comp32.c
2875
2876BPF JIT for X86 64-BIT
2877M: Alexei Starovoitov <ast@kernel.org>
2878M: Daniel Borkmann <daniel@iogearbox.net>
2879L: netdev@vger.kernel.org
2880S: Supported
2881F: arch/x86/net/
2882X: arch/x86/net/bpf_jit_comp32.c
2883
2823BROADCOM B44 10/100 ETHERNET DRIVER 2884BROADCOM B44 10/100 ETHERNET DRIVER
2824M: Michael Chan <michael.chan@broadcom.com> 2885M: Michael Chan <michael.chan@broadcom.com>
2825L: netdev@vger.kernel.org 2886L: netdev@vger.kernel.org
@@ -2860,7 +2921,7 @@ F: drivers/staging/vc04_services
2860BROADCOM BCM47XX MIPS ARCHITECTURE 2921BROADCOM BCM47XX MIPS ARCHITECTURE
2861M: Hauke Mehrtens <hauke@hauke-m.de> 2922M: Hauke Mehrtens <hauke@hauke-m.de>
2862M: Rafał Miłecki <zajec5@gmail.com> 2923M: Rafał Miłecki <zajec5@gmail.com>
2863L: linux-mips@linux-mips.org 2924L: linux-mips@vger.kernel.org
2864S: Maintained 2925S: Maintained
2865F: Documentation/devicetree/bindings/mips/brcm/ 2926F: Documentation/devicetree/bindings/mips/brcm/
2866F: arch/mips/bcm47xx/* 2927F: arch/mips/bcm47xx/*
@@ -2869,7 +2930,6 @@ F: arch/mips/include/asm/mach-bcm47xx/*
2869BROADCOM BCM5301X ARM ARCHITECTURE 2930BROADCOM BCM5301X ARM ARCHITECTURE
2870M: Hauke Mehrtens <hauke@hauke-m.de> 2931M: Hauke Mehrtens <hauke@hauke-m.de>
2871M: Rafał Miłecki <zajec5@gmail.com> 2932M: Rafał Miłecki <zajec5@gmail.com>
2872M: Jon Mason <jonmason@broadcom.com>
2873M: bcm-kernel-feedback-list@broadcom.com 2933M: bcm-kernel-feedback-list@broadcom.com
2874L: linux-arm-kernel@lists.infradead.org 2934L: linux-arm-kernel@lists.infradead.org
2875S: Maintained 2935S: Maintained
@@ -2924,7 +2984,7 @@ F: drivers/cpufreq/bmips-cpufreq.c
2924BROADCOM BMIPS MIPS ARCHITECTURE 2984BROADCOM BMIPS MIPS ARCHITECTURE
2925M: Kevin Cernekee <cernekee@gmail.com> 2985M: Kevin Cernekee <cernekee@gmail.com>
2926M: Florian Fainelli <f.fainelli@gmail.com> 2986M: Florian Fainelli <f.fainelli@gmail.com>
2927L: linux-mips@linux-mips.org 2987L: linux-mips@vger.kernel.org
2928T: git git://github.com/broadcom/stblinux.git 2988T: git git://github.com/broadcom/stblinux.git
2929S: Maintained 2989S: Maintained
2930F: arch/mips/bmips/* 2990F: arch/mips/bmips/*
@@ -3015,7 +3075,6 @@ F: drivers/net/ethernet/broadcom/genet/
3015BROADCOM IPROC ARM ARCHITECTURE 3075BROADCOM IPROC ARM ARCHITECTURE
3016M: Ray Jui <rjui@broadcom.com> 3076M: Ray Jui <rjui@broadcom.com>
3017M: Scott Branden <sbranden@broadcom.com> 3077M: Scott Branden <sbranden@broadcom.com>
3018M: Jon Mason <jonmason@broadcom.com>
3019M: bcm-kernel-feedback-list@broadcom.com 3078M: bcm-kernel-feedback-list@broadcom.com
3020L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 3079L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
3021T: git git://github.com/broadcom/cygnus-linux.git 3080T: git git://github.com/broadcom/cygnus-linux.git
@@ -3062,7 +3121,7 @@ F: include/uapi/rdma/bnxt_re-abi.h
3062 3121
3063BROADCOM NVRAM DRIVER 3122BROADCOM NVRAM DRIVER
3064M: Rafał Miłecki <zajec5@gmail.com> 3123M: Rafał Miłecki <zajec5@gmail.com>
3065L: linux-mips@linux-mips.org 3124L: linux-mips@vger.kernel.org
3066S: Maintained 3125S: Maintained
3067F: drivers/firmware/broadcom/* 3126F: drivers/firmware/broadcom/*
3068 3127
@@ -3276,6 +3335,12 @@ F: include/uapi/linux/caif/
3276F: include/net/caif/ 3335F: include/net/caif/
3277F: net/caif/ 3336F: net/caif/
3278 3337
3338CAKE QDISC
3339M: Toke Høiland-Jørgensen <toke@toke.dk>
3340L: cake@lists.bufferbloat.net (moderated for non-subscribers)
3341S: Maintained
3342F: net/sched/sch_cake.c
3343
3279CALGARY x86-64 IOMMU 3344CALGARY x86-64 IOMMU
3280M: Muli Ben-Yehuda <mulix@mulix.org> 3345M: Muli Ben-Yehuda <mulix@mulix.org>
3281M: Jon Mason <jdmason@kudzu.us> 3346M: Jon Mason <jdmason@kudzu.us>
@@ -4158,7 +4223,7 @@ F: net/decnet/
4158 4223
4159DECSTATION PLATFORM SUPPORT 4224DECSTATION PLATFORM SUPPORT
4160M: "Maciej W. Rozycki" <macro@linux-mips.org> 4225M: "Maciej W. Rozycki" <macro@linux-mips.org>
4161L: linux-mips@linux-mips.org 4226L: linux-mips@vger.kernel.org
4162W: http://www.linux-mips.org/wiki/DECstation 4227W: http://www.linux-mips.org/wiki/DECstation
4163S: Maintained 4228S: Maintained
4164F: arch/mips/dec/ 4229F: arch/mips/dec/
@@ -5249,7 +5314,7 @@ EDAC-CAVIUM OCTEON
5249M: Ralf Baechle <ralf@linux-mips.org> 5314M: Ralf Baechle <ralf@linux-mips.org>
5250M: David Daney <david.daney@cavium.com> 5315M: David Daney <david.daney@cavium.com>
5251L: linux-edac@vger.kernel.org 5316L: linux-edac@vger.kernel.org
5252L: linux-mips@linux-mips.org 5317L: linux-mips@vger.kernel.org
5253S: Supported 5318S: Supported
5254F: drivers/edac/octeon_edac* 5319F: drivers/edac/octeon_edac*
5255 5320
@@ -5528,6 +5593,7 @@ F: net/bridge/
5528ETHERNET PHY LIBRARY 5593ETHERNET PHY LIBRARY
5529M: Andrew Lunn <andrew@lunn.ch> 5594M: Andrew Lunn <andrew@lunn.ch>
5530M: Florian Fainelli <f.fainelli@gmail.com> 5595M: Florian Fainelli <f.fainelli@gmail.com>
5596M: Heiner Kallweit <hkallweit1@gmail.com>
5531L: netdev@vger.kernel.org 5597L: netdev@vger.kernel.org
5532S: Maintained 5598S: Maintained
5533F: Documentation/ABI/testing/sysfs-bus-mdio 5599F: Documentation/ABI/testing/sysfs-bus-mdio
@@ -5766,7 +5832,7 @@ F: include/uapi/linux/firewire*.h
5766F: tools/firewire/ 5832F: tools/firewire/
5767 5833
5768FIRMWARE LOADER (request_firmware) 5834FIRMWARE LOADER (request_firmware)
5769M: Luis R. Rodriguez <mcgrof@kernel.org> 5835M: Luis Chamberlain <mcgrof@kernel.org>
5770L: linux-kernel@vger.kernel.org 5836L: linux-kernel@vger.kernel.org
5771S: Maintained 5837S: Maintained
5772F: Documentation/firmware_class/ 5838F: Documentation/firmware_class/
@@ -6299,6 +6365,7 @@ F: tools/testing/selftests/gpio/
6299 6365
6300GPIO SUBSYSTEM 6366GPIO SUBSYSTEM
6301M: Linus Walleij <linus.walleij@linaro.org> 6367M: Linus Walleij <linus.walleij@linaro.org>
6368M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
6302L: linux-gpio@vger.kernel.org 6369L: linux-gpio@vger.kernel.org
6303T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git 6370T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
6304S: Maintained 6371S: Maintained
@@ -6607,9 +6674,9 @@ F: arch/*/include/asm/suspend*.h
6607 6674
6608HID CORE LAYER 6675HID CORE LAYER
6609M: Jiri Kosina <jikos@kernel.org> 6676M: Jiri Kosina <jikos@kernel.org>
6610R: Benjamin Tissoires <benjamin.tissoires@redhat.com> 6677M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
6611L: linux-input@vger.kernel.org 6678L: linux-input@vger.kernel.org
6612T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git 6679T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
6613S: Maintained 6680S: Maintained
6614F: drivers/hid/ 6681F: drivers/hid/
6615F: include/linux/hid* 6682F: include/linux/hid*
@@ -6861,6 +6928,13 @@ L: linux-acpi@vger.kernel.org
6861S: Maintained 6928S: Maintained
6862F: drivers/i2c/i2c-core-acpi.c 6929F: drivers/i2c/i2c-core-acpi.c
6863 6930
6931I2C CONTROLLER DRIVER FOR NVIDIA GPU
6932M: Ajay Gupta <ajayg@nvidia.com>
6933L: linux-i2c@vger.kernel.org
6934S: Maintained
6935F: Documentation/i2c/busses/i2c-nvidia-gpu
6936F: drivers/i2c/busses/i2c-nvidia-gpu.c
6937
6864I2C MUXES 6938I2C MUXES
6865M: Peter Rosin <peda@axentia.se> 6939M: Peter Rosin <peda@axentia.se>
6866L: linux-i2c@vger.kernel.org 6940L: linux-i2c@vger.kernel.org
@@ -7429,6 +7503,20 @@ S: Maintained
7429F: Documentation/fb/intelfb.txt 7503F: Documentation/fb/intelfb.txt
7430F: drivers/video/fbdev/intelfb/ 7504F: drivers/video/fbdev/intelfb/
7431 7505
7506INTEL GPIO DRIVERS
7507M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7508L: linux-gpio@vger.kernel.org
7509S: Maintained
7510T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
7511F: drivers/gpio/gpio-ich.c
7512F: drivers/gpio/gpio-intel-mid.c
7513F: drivers/gpio/gpio-lynxpoint.c
7514F: drivers/gpio/gpio-merrifield.c
7515F: drivers/gpio/gpio-ml-ioh.c
7516F: drivers/gpio/gpio-pch.c
7517F: drivers/gpio/gpio-sch.c
7518F: drivers/gpio/gpio-sodaville.c
7519
7432INTEL GVT-g DRIVERS (Intel GPU Virtualization) 7520INTEL GVT-g DRIVERS (Intel GPU Virtualization)
7433M: Zhenyu Wang <zhenyuw@linux.intel.com> 7521M: Zhenyu Wang <zhenyuw@linux.intel.com>
7434M: Zhi Wang <zhi.a.wang@intel.com> 7522M: Zhi Wang <zhi.a.wang@intel.com>
@@ -7439,12 +7527,6 @@ T: git https://github.com/intel/gvt-linux.git
7439S: Supported 7527S: Supported
7440F: drivers/gpu/drm/i915/gvt/ 7528F: drivers/gpu/drm/i915/gvt/
7441 7529
7442INTEL PMIC GPIO DRIVER
7443R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7444S: Maintained
7445F: drivers/gpio/gpio-*cove.c
7446F: drivers/gpio/gpio-msic.c
7447
7448INTEL HID EVENT DRIVER 7530INTEL HID EVENT DRIVER
7449M: Alex Hung <alex.hung@canonical.com> 7531M: Alex Hung <alex.hung@canonical.com>
7450L: platform-driver-x86@vger.kernel.org 7532L: platform-driver-x86@vger.kernel.org
@@ -7532,12 +7614,6 @@ W: https://01.org/linux-acpi
7532S: Supported 7614S: Supported
7533F: drivers/platform/x86/intel_menlow.c 7615F: drivers/platform/x86/intel_menlow.c
7534 7616
7535INTEL MERRIFIELD GPIO DRIVER
7536M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7537L: linux-gpio@vger.kernel.org
7538S: Maintained
7539F: drivers/gpio/gpio-merrifield.c
7540
7541INTEL MIC DRIVERS (mic) 7617INTEL MIC DRIVERS (mic)
7542M: Sudeep Dutt <sudeep.dutt@intel.com> 7618M: Sudeep Dutt <sudeep.dutt@intel.com>
7543M: Ashutosh Dixit <ashutosh.dixit@intel.com> 7619M: Ashutosh Dixit <ashutosh.dixit@intel.com>
@@ -7570,6 +7646,13 @@ F: drivers/platform/x86/intel_punit_ipc.c
7570F: arch/x86/include/asm/intel_pmc_ipc.h 7646F: arch/x86/include/asm/intel_pmc_ipc.h
7571F: arch/x86/include/asm/intel_punit_ipc.h 7647F: arch/x86/include/asm/intel_punit_ipc.h
7572 7648
7649INTEL PMIC GPIO DRIVERS
7650M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7651S: Maintained
7652T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
7653F: drivers/gpio/gpio-*cove.c
7654F: drivers/gpio/gpio-msic.c
7655
7573INTEL MULTIFUNCTION PMIC DEVICE DRIVERS 7656INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
7574R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7657R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7575S: Maintained 7658S: Maintained
@@ -7678,7 +7761,7 @@ F: Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
7678 7761
7679IOC3 ETHERNET DRIVER 7762IOC3 ETHERNET DRIVER
7680M: Ralf Baechle <ralf@linux-mips.org> 7763M: Ralf Baechle <ralf@linux-mips.org>
7681L: linux-mips@linux-mips.org 7764L: linux-mips@vger.kernel.org
7682S: Maintained 7765S: Maintained
7683F: drivers/net/ethernet/sgi/ioc3-eth.c 7766F: drivers/net/ethernet/sgi/ioc3-eth.c
7684 7767
@@ -8049,7 +8132,7 @@ F: tools/testing/selftests/
8049F: Documentation/dev-tools/kselftest* 8132F: Documentation/dev-tools/kselftest*
8050 8133
8051KERNEL USERMODE HELPER 8134KERNEL USERMODE HELPER
8052M: "Luis R. Rodriguez" <mcgrof@kernel.org> 8135M: Luis Chamberlain <mcgrof@kernel.org>
8053L: linux-kernel@vger.kernel.org 8136L: linux-kernel@vger.kernel.org
8054S: Maintained 8137S: Maintained
8055F: kernel/umh.c 8138F: kernel/umh.c
@@ -8106,7 +8189,7 @@ F: arch/arm64/kvm/
8106 8189
8107KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) 8190KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
8108M: James Hogan <jhogan@kernel.org> 8191M: James Hogan <jhogan@kernel.org>
8109L: linux-mips@linux-mips.org 8192L: linux-mips@vger.kernel.org
8110S: Supported 8193S: Supported
8111F: arch/mips/include/uapi/asm/kvm* 8194F: arch/mips/include/uapi/asm/kvm*
8112F: arch/mips/include/asm/kvm* 8195F: arch/mips/include/asm/kvm*
@@ -8225,7 +8308,7 @@ F: mm/kmemleak.c
8225F: mm/kmemleak-test.c 8308F: mm/kmemleak-test.c
8226 8309
8227KMOD KERNEL MODULE LOADER - USERMODE HELPER 8310KMOD KERNEL MODULE LOADER - USERMODE HELPER
8228M: "Luis R. Rodriguez" <mcgrof@kernel.org> 8311M: Luis Chamberlain <mcgrof@kernel.org>
8229L: linux-kernel@vger.kernel.org 8312L: linux-kernel@vger.kernel.org
8230S: Maintained 8313S: Maintained
8231F: kernel/kmod.c 8314F: kernel/kmod.c
@@ -8279,7 +8362,7 @@ F: drivers/net/dsa/lantiq_gswip.c
8279 8362
8280LANTIQ MIPS ARCHITECTURE 8363LANTIQ MIPS ARCHITECTURE
8281M: John Crispin <john@phrozen.org> 8364M: John Crispin <john@phrozen.org>
8282L: linux-mips@linux-mips.org 8365L: linux-mips@vger.kernel.org
8283S: Maintained 8366S: Maintained
8284F: arch/mips/lantiq 8367F: arch/mips/lantiq
8285F: drivers/soc/lantiq 8368F: drivers/soc/lantiq
@@ -8367,7 +8450,7 @@ F: drivers/media/dvb-frontends/lgdt3305.*
8367LIBATA PATA ARASAN COMPACT FLASH CONTROLLER 8450LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
8368M: Viresh Kumar <vireshk@kernel.org> 8451M: Viresh Kumar <vireshk@kernel.org>
8369L: linux-ide@vger.kernel.org 8452L: linux-ide@vger.kernel.org
8370T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git 8453T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
8371S: Maintained 8454S: Maintained
8372F: include/linux/pata_arasan_cf_data.h 8455F: include/linux/pata_arasan_cf_data.h
8373F: drivers/ata/pata_arasan_cf.c 8456F: drivers/ata/pata_arasan_cf.c
@@ -8384,7 +8467,7 @@ F: drivers/ata/ata_generic.c
8384LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS 8467LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
8385M: Linus Walleij <linus.walleij@linaro.org> 8468M: Linus Walleij <linus.walleij@linaro.org>
8386L: linux-ide@vger.kernel.org 8469L: linux-ide@vger.kernel.org
8387T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git 8470T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
8388S: Maintained 8471S: Maintained
8389F: drivers/ata/pata_ftide010.c 8472F: drivers/ata/pata_ftide010.c
8390F: drivers/ata/sata_gemini.c 8473F: drivers/ata/sata_gemini.c
@@ -8403,7 +8486,7 @@ F: include/linux/ahci_platform.h
8403LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER 8486LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
8404M: Mikael Pettersson <mikpelinux@gmail.com> 8487M: Mikael Pettersson <mikpelinux@gmail.com>
8405L: linux-ide@vger.kernel.org 8488L: linux-ide@vger.kernel.org
8406T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git 8489T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
8407S: Maintained 8490S: Maintained
8408F: drivers/ata/sata_promise.* 8491F: drivers/ata/sata_promise.*
8409 8492
@@ -8842,7 +8925,7 @@ S: Maintained
8842 8925
8843MARDUK (CREATOR CI40) DEVICE TREE SUPPORT 8926MARDUK (CREATOR CI40) DEVICE TREE SUPPORT
8844M: Rahul Bedarkar <rahulbedarkar89@gmail.com> 8927M: Rahul Bedarkar <rahulbedarkar89@gmail.com>
8845L: linux-mips@linux-mips.org 8928L: linux-mips@vger.kernel.org
8846S: Maintained 8929S: Maintained
8847F: arch/mips/boot/dts/img/pistachio_marduk.dts 8930F: arch/mips/boot/dts/img/pistachio_marduk.dts
8848 8931
@@ -9801,7 +9884,7 @@ F: drivers/dma/at_xdmac.c
9801 9884
9802MICROSEMI MIPS SOCS 9885MICROSEMI MIPS SOCS
9803M: Alexandre Belloni <alexandre.belloni@bootlin.com> 9886M: Alexandre Belloni <alexandre.belloni@bootlin.com>
9804L: linux-mips@linux-mips.org 9887L: linux-mips@vger.kernel.org
9805S: Maintained 9888S: Maintained
9806F: arch/mips/generic/board-ocelot.c 9889F: arch/mips/generic/board-ocelot.c
9807F: arch/mips/configs/generic/board-ocelot.config 9890F: arch/mips/configs/generic/board-ocelot.config
@@ -9841,7 +9924,7 @@ MIPS
9841M: Ralf Baechle <ralf@linux-mips.org> 9924M: Ralf Baechle <ralf@linux-mips.org>
9842M: Paul Burton <paul.burton@mips.com> 9925M: Paul Burton <paul.burton@mips.com>
9843M: James Hogan <jhogan@kernel.org> 9926M: James Hogan <jhogan@kernel.org>
9844L: linux-mips@linux-mips.org 9927L: linux-mips@vger.kernel.org
9845W: http://www.linux-mips.org/ 9928W: http://www.linux-mips.org/
9846T: git git://git.linux-mips.org/pub/scm/ralf/linux.git 9929T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
9847T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git 9930T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
@@ -9854,7 +9937,7 @@ F: drivers/platform/mips/
9854 9937
9855MIPS BOSTON DEVELOPMENT BOARD 9938MIPS BOSTON DEVELOPMENT BOARD
9856M: Paul Burton <paul.burton@mips.com> 9939M: Paul Burton <paul.burton@mips.com>
9857L: linux-mips@linux-mips.org 9940L: linux-mips@vger.kernel.org
9858S: Maintained 9941S: Maintained
9859F: Documentation/devicetree/bindings/clock/img,boston-clock.txt 9942F: Documentation/devicetree/bindings/clock/img,boston-clock.txt
9860F: arch/mips/boot/dts/img/boston.dts 9943F: arch/mips/boot/dts/img/boston.dts
@@ -9864,7 +9947,7 @@ F: include/dt-bindings/clock/boston-clock.h
9864 9947
9865MIPS GENERIC PLATFORM 9948MIPS GENERIC PLATFORM
9866M: Paul Burton <paul.burton@mips.com> 9949M: Paul Burton <paul.burton@mips.com>
9867L: linux-mips@linux-mips.org 9950L: linux-mips@vger.kernel.org
9868S: Supported 9951S: Supported
9869F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt 9952F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt
9870F: arch/mips/generic/ 9953F: arch/mips/generic/
@@ -9872,7 +9955,7 @@ F: arch/mips/tools/generic-board-config.sh
9872 9955
9873MIPS/LOONGSON1 ARCHITECTURE 9956MIPS/LOONGSON1 ARCHITECTURE
9874M: Keguang Zhang <keguang.zhang@gmail.com> 9957M: Keguang Zhang <keguang.zhang@gmail.com>
9875L: linux-mips@linux-mips.org 9958L: linux-mips@vger.kernel.org
9876S: Maintained 9959S: Maintained
9877F: arch/mips/loongson32/ 9960F: arch/mips/loongson32/
9878F: arch/mips/include/asm/mach-loongson32/ 9961F: arch/mips/include/asm/mach-loongson32/
@@ -9881,7 +9964,7 @@ F: drivers/*/*/*loongson1*
9881 9964
9882MIPS/LOONGSON2 ARCHITECTURE 9965MIPS/LOONGSON2 ARCHITECTURE
9883M: Jiaxun Yang <jiaxun.yang@flygoat.com> 9966M: Jiaxun Yang <jiaxun.yang@flygoat.com>
9884L: linux-mips@linux-mips.org 9967L: linux-mips@vger.kernel.org
9885S: Maintained 9968S: Maintained
9886F: arch/mips/loongson64/fuloong-2e/ 9969F: arch/mips/loongson64/fuloong-2e/
9887F: arch/mips/loongson64/lemote-2f/ 9970F: arch/mips/loongson64/lemote-2f/
@@ -9891,7 +9974,7 @@ F: drivers/*/*/*loongson2*
9891 9974
9892MIPS/LOONGSON3 ARCHITECTURE 9975MIPS/LOONGSON3 ARCHITECTURE
9893M: Huacai Chen <chenhc@lemote.com> 9976M: Huacai Chen <chenhc@lemote.com>
9894L: linux-mips@linux-mips.org 9977L: linux-mips@vger.kernel.org
9895S: Maintained 9978S: Maintained
9896F: arch/mips/loongson64/ 9979F: arch/mips/loongson64/
9897F: arch/mips/include/asm/mach-loongson64/ 9980F: arch/mips/include/asm/mach-loongson64/
@@ -9901,7 +9984,7 @@ F: drivers/*/*/*loongson3*
9901 9984
9902MIPS RINT INSTRUCTION EMULATION 9985MIPS RINT INSTRUCTION EMULATION
9903M: Aleksandar Markovic <aleksandar.markovic@mips.com> 9986M: Aleksandar Markovic <aleksandar.markovic@mips.com>
9904L: linux-mips@linux-mips.org 9987L: linux-mips@vger.kernel.org
9905S: Supported 9988S: Supported
9906F: arch/mips/math-emu/sp_rint.c 9989F: arch/mips/math-emu/sp_rint.c
9907F: arch/mips/math-emu/dp_rint.c 9990F: arch/mips/math-emu/dp_rint.c
@@ -10784,6 +10867,14 @@ L: linux-omap@vger.kernel.org
10784S: Maintained 10867S: Maintained
10785F: arch/arm/mach-omap2/omap_hwmod.* 10868F: arch/arm/mach-omap2/omap_hwmod.*
10786 10869
10870OMAP I2C DRIVER
10871M: Vignesh R <vigneshr@ti.com>
10872L: linux-omap@vger.kernel.org
10873L: linux-i2c@vger.kernel.org
10874S: Maintained
10875F: Documentation/devicetree/bindings/i2c/i2c-omap.txt
10876F: drivers/i2c/busses/i2c-omap.c
10877
10787OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS) 10878OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
10788M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 10879M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
10789L: linux-media@vger.kernel.org 10880L: linux-media@vger.kernel.org
@@ -10793,9 +10884,9 @@ F: drivers/media/platform/omap3isp/
10793F: drivers/staging/media/omap4iss/ 10884F: drivers/staging/media/omap4iss/
10794 10885
10795OMAP MMC SUPPORT 10886OMAP MMC SUPPORT
10796M: Jarkko Lavinen <jarkko.lavinen@nokia.com> 10887M: Aaro Koskinen <aaro.koskinen@iki.fi>
10797L: linux-omap@vger.kernel.org 10888L: linux-omap@vger.kernel.org
10798S: Maintained 10889S: Odd Fixes
10799F: drivers/mmc/host/omap.c 10890F: drivers/mmc/host/omap.c
10800 10891
10801OMAP POWER MANAGEMENT SUPPORT 10892OMAP POWER MANAGEMENT SUPPORT
@@ -10878,7 +10969,7 @@ F: include/linux/platform_data/i2c-omap.h
10878 10969
10879ONION OMEGA2+ BOARD 10970ONION OMEGA2+ BOARD
10880M: Harvey Hunt <harveyhuntnexus@gmail.com> 10971M: Harvey Hunt <harveyhuntnexus@gmail.com>
10881L: linux-mips@linux-mips.org 10972L: linux-mips@vger.kernel.org
10882S: Maintained 10973S: Maintained
10883F: arch/mips/boot/dts/ralink/omega2p.dts 10974F: arch/mips/boot/dts/ralink/omega2p.dts
10884 10975
@@ -11730,6 +11821,7 @@ F: Documentation/devicetree/bindings/pinctrl/fsl,*
11730PIN CONTROLLER - INTEL 11821PIN CONTROLLER - INTEL
11731M: Mika Westerberg <mika.westerberg@linux.intel.com> 11822M: Mika Westerberg <mika.westerberg@linux.intel.com>
11732M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 11823M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
11824T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
11733S: Maintained 11825S: Maintained
11734F: drivers/pinctrl/intel/ 11826F: drivers/pinctrl/intel/
11735 11827
@@ -11786,7 +11878,7 @@ F: drivers/pinctrl/spear/
11786 11878
11787PISTACHIO SOC SUPPORT 11879PISTACHIO SOC SUPPORT
11788M: James Hartley <james.hartley@sondrel.com> 11880M: James Hartley <james.hartley@sondrel.com>
11789L: linux-mips@linux-mips.org 11881L: linux-mips@vger.kernel.org
11790S: Odd Fixes 11882S: Odd Fixes
11791F: arch/mips/pistachio/ 11883F: arch/mips/pistachio/
11792F: arch/mips/include/asm/mach-pistachio/ 11884F: arch/mips/include/asm/mach-pistachio/
@@ -11966,7 +12058,7 @@ F: kernel/printk/
11966F: include/linux/printk.h 12058F: include/linux/printk.h
11967 12059
11968PRISM54 WIRELESS DRIVER 12060PRISM54 WIRELESS DRIVER
11969M: "Luis R. Rodriguez" <mcgrof@gmail.com> 12061M: Luis Chamberlain <mcgrof@kernel.org>
11970L: linux-wireless@vger.kernel.org 12062L: linux-wireless@vger.kernel.org
11971W: http://wireless.kernel.org/en/users/Drivers/p54 12063W: http://wireless.kernel.org/en/users/Drivers/p54
11972S: Obsolete 12064S: Obsolete
@@ -11980,9 +12072,10 @@ S: Maintained
11980F: fs/proc/ 12072F: fs/proc/
11981F: include/linux/proc_fs.h 12073F: include/linux/proc_fs.h
11982F: tools/testing/selftests/proc/ 12074F: tools/testing/selftests/proc/
12075F: Documentation/filesystems/proc.txt
11983 12076
11984PROC SYSCTL 12077PROC SYSCTL
11985M: "Luis R. Rodriguez" <mcgrof@kernel.org> 12078M: Luis Chamberlain <mcgrof@kernel.org>
11986M: Kees Cook <keescook@chromium.org> 12079M: Kees Cook <keescook@chromium.org>
11987L: linux-kernel@vger.kernel.org 12080L: linux-kernel@vger.kernel.org
11988L: linux-fsdevel@vger.kernel.org 12081L: linux-fsdevel@vger.kernel.org
@@ -12445,7 +12538,7 @@ F: drivers/media/usb/rainshadow-cec/*
12445 12538
12446RALINK MIPS ARCHITECTURE 12539RALINK MIPS ARCHITECTURE
12447M: John Crispin <john@phrozen.org> 12540M: John Crispin <john@phrozen.org>
12448L: linux-mips@linux-mips.org 12541L: linux-mips@vger.kernel.org
12449S: Maintained 12542S: Maintained
12450F: arch/mips/ralink 12543F: arch/mips/ralink
12451 12544
@@ -12465,7 +12558,7 @@ F: drivers/block/brd.c
12465 12558
12466RANCHU VIRTUAL BOARD FOR MIPS 12559RANCHU VIRTUAL BOARD FOR MIPS
12467M: Miodrag Dinic <miodrag.dinic@mips.com> 12560M: Miodrag Dinic <miodrag.dinic@mips.com>
12468L: linux-mips@linux-mips.org 12561L: linux-mips@vger.kernel.org
12469S: Supported 12562S: Supported
12470F: arch/mips/generic/board-ranchu.c 12563F: arch/mips/generic/board-ranchu.c
12471F: arch/mips/configs/generic/board-ranchu.config 12564F: arch/mips/configs/generic/board-ranchu.config
@@ -13915,6 +14008,7 @@ S: Supported
13915F: Documentation/devicetree/bindings/sound/ 14008F: Documentation/devicetree/bindings/sound/
13916F: Documentation/sound/soc/ 14009F: Documentation/sound/soc/
13917F: sound/soc/ 14010F: sound/soc/
14011F: include/dt-bindings/sound/
13918F: include/sound/soc* 14012F: include/sound/soc*
13919 14013
13920SOUNDWIRE SUBSYSTEM 14014SOUNDWIRE SUBSYSTEM
@@ -13962,11 +14056,10 @@ F: drivers/tty/serial/sunzilog.h
13962F: drivers/tty/vcc.c 14056F: drivers/tty/vcc.c
13963 14057
13964SPARSE CHECKER 14058SPARSE CHECKER
13965M: "Christopher Li" <sparse@chrisli.org> 14059M: "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
13966L: linux-sparse@vger.kernel.org 14060L: linux-sparse@vger.kernel.org
13967W: https://sparse.wiki.kernel.org/ 14061W: https://sparse.wiki.kernel.org/
13968T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git 14062T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
13969T: git git://git.kernel.org/pub/scm/devel/sparse/chrisl/sparse.git
13970S: Maintained 14063S: Maintained
13971F: include/linux/compiler.h 14064F: include/linux/compiler.h
13972 14065
@@ -14063,6 +14156,7 @@ F: Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt
14063 14156
14064STABLE BRANCH 14157STABLE BRANCH
14065M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 14158M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
14159M: Sasha Levin <sashal@kernel.org>
14066L: stable@vger.kernel.org 14160L: stable@vger.kernel.org
14067S: Supported 14161S: Supported
14068F: Documentation/process/stable-kernel-rules.rst 14162F: Documentation/process/stable-kernel-rules.rst
@@ -15200,7 +15294,7 @@ F: arch/um/os-Linux/drivers/
15200TURBOCHANNEL SUBSYSTEM 15294TURBOCHANNEL SUBSYSTEM
15201M: "Maciej W. Rozycki" <macro@linux-mips.org> 15295M: "Maciej W. Rozycki" <macro@linux-mips.org>
15202M: Ralf Baechle <ralf@linux-mips.org> 15296M: Ralf Baechle <ralf@linux-mips.org>
15203L: linux-mips@linux-mips.org 15297L: linux-mips@vger.kernel.org
15204Q: http://patchwork.linux-mips.org/project/linux-mips/list/ 15298Q: http://patchwork.linux-mips.org/project/linux-mips/list/
15205S: Maintained 15299S: Maintained
15206F: drivers/tc/ 15300F: drivers/tc/
@@ -15436,9 +15530,9 @@ F: include/linux/usb/gadget*
15436 15530
15437USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...) 15531USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
15438M: Jiri Kosina <jikos@kernel.org> 15532M: Jiri Kosina <jikos@kernel.org>
15439R: Benjamin Tissoires <benjamin.tissoires@redhat.com> 15533M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
15440L: linux-usb@vger.kernel.org 15534L: linux-usb@vger.kernel.org
15441T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git 15535T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
15442S: Maintained 15536S: Maintained
15443F: Documentation/hid/hiddev.txt 15537F: Documentation/hid/hiddev.txt
15444F: drivers/hid/usbhid/ 15538F: drivers/hid/usbhid/
@@ -16021,7 +16115,7 @@ F: drivers/net/vmxnet3/
16021 16115
16022VOCORE VOCORE2 BOARD 16116VOCORE VOCORE2 BOARD
16023M: Harvey Hunt <harveyhuntnexus@gmail.com> 16117M: Harvey Hunt <harveyhuntnexus@gmail.com>
16024L: linux-mips@linux-mips.org 16118L: linux-mips@vger.kernel.org
16025S: Maintained 16119S: Maintained
16026F: arch/mips/boot/dts/ralink/vocore2.dts 16120F: arch/mips/boot/dts/ralink/vocore2.dts
16027 16121
diff --git a/Makefile b/Makefile
index 9fce8b91c15f..e9fd22c8445e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 20 3PATCHLEVEL = 20
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc1 5EXTRAVERSION = -rc5
6NAME = "People's Front" 6NAME = Shy Crocodile
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
9# To see a list of typical targets execute "make help" 9# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
index 6a8c53dec57e..b7c77bb1bfd2 100644
--- a/arch/alpha/include/asm/termios.h
+++ b/arch/alpha/include/asm/termios.h
@@ -73,9 +73,15 @@
73}) 73})
74 74
75#define user_termios_to_kernel_termios(k, u) \ 75#define user_termios_to_kernel_termios(k, u) \
76 copy_from_user(k, u, sizeof(struct termios)) 76 copy_from_user(k, u, sizeof(struct termios2))
77 77
78#define kernel_termios_to_user_termios(u, k) \ 78#define kernel_termios_to_user_termios(u, k) \
79 copy_to_user(u, k, sizeof(struct termios2))
80
81#define user_termios_to_kernel_termios_1(k, u) \
82 copy_from_user(k, u, sizeof(struct termios))
83
84#define kernel_termios_to_user_termios_1(u, k) \
79 copy_to_user(u, k, sizeof(struct termios)) 85 copy_to_user(u, k, sizeof(struct termios))
80 86
81#endif /* _ALPHA_TERMIOS_H */ 87#endif /* _ALPHA_TERMIOS_H */
diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
index 1e9121c9b3c7..971311605288 100644
--- a/arch/alpha/include/uapi/asm/ioctls.h
+++ b/arch/alpha/include/uapi/asm/ioctls.h
@@ -32,6 +32,11 @@
32#define TCXONC _IO('t', 30) 32#define TCXONC _IO('t', 30)
33#define TCFLSH _IO('t', 31) 33#define TCFLSH _IO('t', 31)
34 34
35#define TCGETS2 _IOR('T', 42, struct termios2)
36#define TCSETS2 _IOW('T', 43, struct termios2)
37#define TCSETSW2 _IOW('T', 44, struct termios2)
38#define TCSETSF2 _IOW('T', 45, struct termios2)
39
35#define TIOCSWINSZ _IOW('t', 103, struct winsize) 40#define TIOCSWINSZ _IOW('t', 103, struct winsize)
36#define TIOCGWINSZ _IOR('t', 104, struct winsize) 41#define TIOCGWINSZ _IOR('t', 104, struct winsize)
37#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ 42#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
index de6c8360fbe3..4575ba34a0ea 100644
--- a/arch/alpha/include/uapi/asm/termbits.h
+++ b/arch/alpha/include/uapi/asm/termbits.h
@@ -26,6 +26,19 @@ struct termios {
26 speed_t c_ospeed; /* output speed */ 26 speed_t c_ospeed; /* output speed */
27}; 27};
28 28
29/* Alpha has identical termios and termios2 */
30
31struct termios2 {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_cc[NCCS]; /* control characters */
37 cc_t c_line; /* line discipline (== c_cc[19]) */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
29/* Alpha has matching termios and ktermios */ 42/* Alpha has matching termios and ktermios */
30 43
31struct ktermios { 44struct ktermios {
@@ -152,6 +165,7 @@ struct ktermios {
152#define B3000000 00034 165#define B3000000 00034
153#define B3500000 00035 166#define B3500000 00035
154#define B4000000 00036 167#define B4000000 00036
168#define BOTHER 00037
155 169
156#define CSIZE 00001400 170#define CSIZE 00001400
157#define CS5 00000000 171#define CS5 00000000
@@ -169,6 +183,9 @@ struct ktermios {
169#define CMSPAR 010000000000 /* mark or space (stick) parity */ 183#define CMSPAR 010000000000 /* mark or space (stick) parity */
170#define CRTSCTS 020000000000 /* flow control */ 184#define CRTSCTS 020000000000 /* flow control */
171 185
186#define CIBAUD 07600000
187#define IBSHIFT 16
188
172/* c_lflag bits */ 189/* c_lflag bits */
173#define ISIG 0x00000080 190#define ISIG 0x00000080
174#define ICANON 0x00000100 191#define ICANON 0x00000100
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c9e2a1323536..6dd783557330 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -109,7 +109,7 @@ endmenu
109 109
110choice 110choice
111 prompt "ARC Instruction Set" 111 prompt "ARC Instruction Set"
112 default ISA_ARCOMPACT 112 default ISA_ARCV2
113 113
114config ISA_ARCOMPACT 114config ISA_ARCOMPACT
115 bool "ARCompact ISA" 115 bool "ARCompact ISA"
@@ -176,13 +176,11 @@ endchoice
176 176
177config CPU_BIG_ENDIAN 177config CPU_BIG_ENDIAN
178 bool "Enable Big Endian Mode" 178 bool "Enable Big Endian Mode"
179 default n
180 help 179 help
181 Build kernel for Big Endian Mode of ARC CPU 180 Build kernel for Big Endian Mode of ARC CPU
182 181
183config SMP 182config SMP
184 bool "Symmetric Multi-Processing" 183 bool "Symmetric Multi-Processing"
185 default n
186 select ARC_MCIP if ISA_ARCV2 184 select ARC_MCIP if ISA_ARCV2
187 help 185 help
188 This enables support for systems with more than one CPU. 186 This enables support for systems with more than one CPU.
@@ -254,7 +252,6 @@ config ARC_CACHE_PAGES
254config ARC_CACHE_VIPT_ALIASING 252config ARC_CACHE_VIPT_ALIASING
255 bool "Support VIPT Aliasing D$" 253 bool "Support VIPT Aliasing D$"
256 depends on ARC_HAS_DCACHE && ISA_ARCOMPACT 254 depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
257 default n
258 255
259endif #ARC_CACHE 256endif #ARC_CACHE
260 257
@@ -262,7 +259,6 @@ config ARC_HAS_ICCM
262 bool "Use ICCM" 259 bool "Use ICCM"
263 help 260 help
264 Single Cycle RAMS to store Fast Path Code 261 Single Cycle RAMS to store Fast Path Code
265 default n
266 262
267config ARC_ICCM_SZ 263config ARC_ICCM_SZ
268 int "ICCM Size in KB" 264 int "ICCM Size in KB"
@@ -273,7 +269,6 @@ config ARC_HAS_DCCM
273 bool "Use DCCM" 269 bool "Use DCCM"
274 help 270 help
275 Single Cycle RAMS to store Fast Path Data 271 Single Cycle RAMS to store Fast Path Data
276 default n
277 272
278config ARC_DCCM_SZ 273config ARC_DCCM_SZ
279 int "DCCM Size in KB" 274 int "DCCM Size in KB"
@@ -366,13 +361,11 @@ if ISA_ARCOMPACT
366 361
367config ARC_COMPACT_IRQ_LEVELS 362config ARC_COMPACT_IRQ_LEVELS
368 bool "Setup Timer IRQ as high Priority" 363 bool "Setup Timer IRQ as high Priority"
369 default n
370 # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy 364 # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
371 depends on !SMP 365 depends on !SMP
372 366
373config ARC_FPU_SAVE_RESTORE 367config ARC_FPU_SAVE_RESTORE
374 bool "Enable FPU state persistence across context switch" 368 bool "Enable FPU state persistence across context switch"
375 default n
376 help 369 help
377 Double Precision Floating Point unit had dedicated regs which 370 Double Precision Floating Point unit had dedicated regs which
378 need to be saved/restored across context-switch. 371 need to be saved/restored across context-switch.
@@ -453,7 +446,6 @@ config HIGHMEM
453 446
454config ARC_HAS_PAE40 447config ARC_HAS_PAE40
455 bool "Support for the 40-bit Physical Address Extension" 448 bool "Support for the 40-bit Physical Address Extension"
456 default n
457 depends on ISA_ARCV2 449 depends on ISA_ARCV2
458 select HIGHMEM 450 select HIGHMEM
459 select PHYS_ADDR_T_64BIT 451 select PHYS_ADDR_T_64BIT
@@ -496,7 +488,6 @@ config HZ
496 488
497config ARC_METAWARE_HLINK 489config ARC_METAWARE_HLINK
498 bool "Support for Metaware debugger assisted Host access" 490 bool "Support for Metaware debugger assisted Host access"
499 default n
500 help 491 help
501 This options allows a Linux userland apps to directly access 492 This options allows a Linux userland apps to directly access
502 host file system (open/creat/read/write etc) with help from 493 host file system (open/creat/read/write etc) with help from
@@ -524,13 +515,11 @@ config ARC_DW2_UNWIND
524 515
525config ARC_DBG_TLB_PARANOIA 516config ARC_DBG_TLB_PARANOIA
526 bool "Paranoia Checks in Low Level TLB Handlers" 517 bool "Paranoia Checks in Low Level TLB Handlers"
527 default n
528 518
529endif 519endif
530 520
531config ARC_UBOOT_SUPPORT 521config ARC_UBOOT_SUPPORT
532 bool "Support uboot arg Handling" 522 bool "Support uboot arg Handling"
533 default n
534 help 523 help
535 ARC Linux by default checks for uboot provided args as pointers to 524 ARC Linux by default checks for uboot provided args as pointers to
536 external cmdline or DTB. This however breaks in absence of uboot, 525 external cmdline or DTB. This however breaks in absence of uboot,
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index c64c505d966c..df00578c279d 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,7 +6,7 @@
6# published by the Free Software Foundation. 6# published by the Free Software Foundation.
7# 7#
8 8
9KBUILD_DEFCONFIG := nsim_700_defconfig 9KBUILD_DEFCONFIG := nsim_hs_defconfig
10 10
11cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ 11cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
12cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 12cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index ef149f59929a..43f17b51ee89 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -222,6 +222,21 @@
222 bus-width = <4>; 222 bus-width = <4>;
223 dma-coherent; 223 dma-coherent;
224 }; 224 };
225
226 gpio: gpio@3000 {
227 compatible = "snps,dw-apb-gpio";
228 reg = <0x3000 0x20>;
229 #address-cells = <1>;
230 #size-cells = <0>;
231
232 gpio_port_a: gpio-controller@0 {
233 compatible = "snps,dw-apb-gpio-port";
234 gpio-controller;
235 #gpio-cells = <2>;
236 snps,nr-gpios = <24>;
237 reg = <0>;
238 };
239 };
225 }; 240 };
226 241
227 memory@80000000 { 242 memory@80000000 {
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 41bc08be6a3b..020d4493edfd 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -14,6 +14,7 @@ CONFIG_PERF_EVENTS=y
14# CONFIG_VM_EVENT_COUNTERS is not set 14# CONFIG_VM_EVENT_COUNTERS is not set
15# CONFIG_SLUB_DEBUG is not set 15# CONFIG_SLUB_DEBUG is not set
16# CONFIG_COMPAT_BRK is not set 16# CONFIG_COMPAT_BRK is not set
17CONFIG_ISA_ARCOMPACT=y
17CONFIG_MODULES=y 18CONFIG_MODULES=y
18CONFIG_MODULE_FORCE_LOAD=y 19CONFIG_MODULE_FORCE_LOAD=y
19CONFIG_MODULE_UNLOAD=y 20CONFIG_MODULE_UNLOAD=y
@@ -95,6 +96,7 @@ CONFIG_VFAT_FS=y
95CONFIG_NTFS_FS=y 96CONFIG_NTFS_FS=y
96CONFIG_TMPFS=y 97CONFIG_TMPFS=y
97CONFIG_NFS_FS=y 98CONFIG_NFS_FS=y
99CONFIG_NFS_V3_ACL=y
98CONFIG_NLS_CODEPAGE_437=y 100CONFIG_NLS_CODEPAGE_437=y
99CONFIG_NLS_ISO8859_1=y 101CONFIG_NLS_ISO8859_1=y
100# CONFIG_ENABLE_WARN_DEPRECATED is not set 102# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 1e1c4a8011b5..666314fffc60 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -94,6 +94,7 @@ CONFIG_VFAT_FS=y
94CONFIG_NTFS_FS=y 94CONFIG_NTFS_FS=y
95CONFIG_TMPFS=y 95CONFIG_TMPFS=y
96CONFIG_NFS_FS=y 96CONFIG_NFS_FS=y
97CONFIG_NFS_V3_ACL=y
97CONFIG_NLS_CODEPAGE_437=y 98CONFIG_NLS_CODEPAGE_437=y
98CONFIG_NLS_ISO8859_1=y 99CONFIG_NLS_ISO8859_1=y
99# CONFIG_ENABLE_WARN_DEPRECATED is not set 100# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 6b0c0cfd5c30..429832b8560b 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y
97CONFIG_NTFS_FS=y 97CONFIG_NTFS_FS=y
98CONFIG_TMPFS=y 98CONFIG_TMPFS=y
99CONFIG_NFS_FS=y 99CONFIG_NFS_FS=y
100CONFIG_NFS_V3_ACL=y
100CONFIG_NLS_CODEPAGE_437=y 101CONFIG_NLS_CODEPAGE_437=y
101CONFIG_NLS_ISO8859_1=y 102CONFIG_NLS_ISO8859_1=y
102# CONFIG_ENABLE_WARN_DEPRECATED is not set 103# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 1dec2b4bc5e6..87b23b7fb781 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -45,6 +45,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
45CONFIG_SERIAL_8250_DW=y 45CONFIG_SERIAL_8250_DW=y
46CONFIG_SERIAL_OF_PLATFORM=y 46CONFIG_SERIAL_OF_PLATFORM=y
47# CONFIG_HW_RANDOM is not set 47# CONFIG_HW_RANDOM is not set
48CONFIG_GPIOLIB=y
49CONFIG_GPIO_SYSFS=y
50CONFIG_GPIO_DWAPB=y
48# CONFIG_HWMON is not set 51# CONFIG_HWMON is not set
49CONFIG_DRM=y 52CONFIG_DRM=y
50# CONFIG_DRM_FBDEV_EMULATION is not set 53# CONFIG_DRM_FBDEV_EMULATION is not set
@@ -65,6 +68,7 @@ CONFIG_EXT3_FS=y
65CONFIG_VFAT_FS=y 68CONFIG_VFAT_FS=y
66CONFIG_TMPFS=y 69CONFIG_TMPFS=y
67CONFIG_NFS_FS=y 70CONFIG_NFS_FS=y
71CONFIG_NFS_V3_ACL=y
68CONFIG_NLS_CODEPAGE_437=y 72CONFIG_NLS_CODEPAGE_437=y
69CONFIG_NLS_ISO8859_1=y 73CONFIG_NLS_ISO8859_1=y
70# CONFIG_ENABLE_WARN_DEPRECATED is not set 74# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 31ba224bbfb4..6e84060e7c90 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y
15CONFIG_EMBEDDED=y 15CONFIG_EMBEDDED=y
16CONFIG_PERF_EVENTS=y 16CONFIG_PERF_EVENTS=y
17# CONFIG_COMPAT_BRK is not set 17# CONFIG_COMPAT_BRK is not set
18CONFIG_ISA_ARCOMPACT=y
18CONFIG_KPROBES=y 19CONFIG_KPROBES=y
19CONFIG_MODULES=y 20CONFIG_MODULES=y
20CONFIG_MODULE_FORCE_LOAD=y 21CONFIG_MODULE_FORCE_LOAD=y
@@ -73,6 +74,7 @@ CONFIG_PROC_KCORE=y
73CONFIG_TMPFS=y 74CONFIG_TMPFS=y
74# CONFIG_MISC_FILESYSTEMS is not set 75# CONFIG_MISC_FILESYSTEMS is not set
75CONFIG_NFS_FS=y 76CONFIG_NFS_FS=y
77CONFIG_NFS_V3_ACL=y
76CONFIG_ROOT_NFS=y 78CONFIG_ROOT_NFS=y
77CONFIG_DEBUG_INFO=y 79CONFIG_DEBUG_INFO=y
78# CONFIG_ENABLE_WARN_DEPRECATED is not set 80# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 8e0b8b134cd9..219c2a65294b 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
15CONFIG_PERF_EVENTS=y 15CONFIG_PERF_EVENTS=y
16# CONFIG_SLUB_DEBUG is not set 16# CONFIG_SLUB_DEBUG is not set
17# CONFIG_COMPAT_BRK is not set 17# CONFIG_COMPAT_BRK is not set
18CONFIG_ISA_ARCOMPACT=y
18CONFIG_KPROBES=y 19CONFIG_KPROBES=y
19CONFIG_MODULES=y 20CONFIG_MODULES=y
20# CONFIG_LBDAF is not set 21# CONFIG_LBDAF is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index f14eeff7d308..35dfc6491a09 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
15CONFIG_PERF_EVENTS=y 15CONFIG_PERF_EVENTS=y
16# CONFIG_SLUB_DEBUG is not set 16# CONFIG_SLUB_DEBUG is not set
17# CONFIG_COMPAT_BRK is not set 17# CONFIG_COMPAT_BRK is not set
18CONFIG_ISA_ARCOMPACT=y
18CONFIG_KPROBES=y 19CONFIG_KPROBES=y
19CONFIG_MODULES=y 20CONFIG_MODULES=y
20# CONFIG_LBDAF is not set 21# CONFIG_LBDAF is not set
@@ -66,5 +67,6 @@ CONFIG_EXT2_FS_XATTR=y
66CONFIG_TMPFS=y 67CONFIG_TMPFS=y
67# CONFIG_MISC_FILESYSTEMS is not set 68# CONFIG_MISC_FILESYSTEMS is not set
68CONFIG_NFS_FS=y 69CONFIG_NFS_FS=y
70CONFIG_NFS_V3_ACL=y
69# CONFIG_ENABLE_WARN_DEPRECATED is not set 71# CONFIG_ENABLE_WARN_DEPRECATED is not set
70# CONFIG_ENABLE_MUST_CHECK is not set 72# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 025298a48305..1638e5bc9672 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -65,5 +65,6 @@ CONFIG_EXT2_FS_XATTR=y
65CONFIG_TMPFS=y 65CONFIG_TMPFS=y
66# CONFIG_MISC_FILESYSTEMS is not set 66# CONFIG_MISC_FILESYSTEMS is not set
67CONFIG_NFS_FS=y 67CONFIG_NFS_FS=y
68CONFIG_NFS_V3_ACL=y
68# CONFIG_ENABLE_WARN_DEPRECATED is not set 69# CONFIG_ENABLE_WARN_DEPRECATED is not set
69# CONFIG_ENABLE_MUST_CHECK is not set 70# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index df7b77b13b82..11cfbdb0f441 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -76,6 +76,7 @@ CONFIG_EXT2_FS_XATTR=y
76CONFIG_TMPFS=y 76CONFIG_TMPFS=y
77# CONFIG_MISC_FILESYSTEMS is not set 77# CONFIG_MISC_FILESYSTEMS is not set
78CONFIG_NFS_FS=y 78CONFIG_NFS_FS=y
79CONFIG_NFS_V3_ACL=y
79# CONFIG_ENABLE_WARN_DEPRECATED is not set 80# CONFIG_ENABLE_WARN_DEPRECATED is not set
80# CONFIG_ENABLE_MUST_CHECK is not set 81# CONFIG_ENABLE_MUST_CHECK is not set
81CONFIG_FTRACE=y 82CONFIG_FTRACE=y
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index a7f65313f84a..e71ade3cf9c8 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y
19# CONFIG_AIO is not set 19# CONFIG_AIO is not set
20CONFIG_EMBEDDED=y 20CONFIG_EMBEDDED=y
21# CONFIG_COMPAT_BRK is not set 21# CONFIG_COMPAT_BRK is not set
22CONFIG_ISA_ARCOMPACT=y
22CONFIG_SLAB=y 23CONFIG_SLAB=y
23CONFIG_MODULES=y 24CONFIG_MODULES=y
24CONFIG_MODULE_FORCE_LOAD=y 25CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index db47c3541f15..1e59a2e9c602 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -85,6 +85,7 @@ CONFIG_NTFS_FS=y
85CONFIG_TMPFS=y 85CONFIG_TMPFS=y
86CONFIG_JFFS2_FS=y 86CONFIG_JFFS2_FS=y
87CONFIG_NFS_FS=y 87CONFIG_NFS_FS=y
88CONFIG_NFS_V3_ACL=y
88CONFIG_NLS_CODEPAGE_437=y 89CONFIG_NLS_CODEPAGE_437=y
89CONFIG_NLS_ISO8859_1=y 90CONFIG_NLS_ISO8859_1=y
90# CONFIG_ENABLE_WARN_DEPRECATED is not set 91# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index a8ac5e917d9a..b5c3f6c54b03 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -90,6 +90,7 @@ CONFIG_NTFS_FS=y
90CONFIG_TMPFS=y 90CONFIG_TMPFS=y
91CONFIG_JFFS2_FS=y 91CONFIG_JFFS2_FS=y
92CONFIG_NFS_FS=y 92CONFIG_NFS_FS=y
93CONFIG_NFS_V3_ACL=y
93CONFIG_NLS_CODEPAGE_437=y 94CONFIG_NLS_CODEPAGE_437=y
94CONFIG_NLS_ISO8859_1=y 95CONFIG_NLS_ISO8859_1=y
95# CONFIG_ENABLE_WARN_DEPRECATED is not set 96# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index ff7d3232764a..f393b663413e 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -113,7 +113,9 @@ extern unsigned long perip_base, perip_end;
113 113
114/* IO coherency related Auxiliary registers */ 114/* IO coherency related Auxiliary registers */
115#define ARC_REG_IO_COH_ENABLE 0x500 115#define ARC_REG_IO_COH_ENABLE 0x500
116#define ARC_IO_COH_ENABLE_BIT BIT(0)
116#define ARC_REG_IO_COH_PARTIAL 0x501 117#define ARC_REG_IO_COH_PARTIAL 0x501
118#define ARC_IO_COH_PARTIAL_BIT BIT(0)
117#define ARC_REG_IO_COH_AP0_BASE 0x508 119#define ARC_REG_IO_COH_AP0_BASE 0x508
118#define ARC_REG_IO_COH_AP0_SIZE 0x509 120#define ARC_REG_IO_COH_AP0_SIZE 0x509
119 121
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index c22b181e8206..2f39d9b3886e 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <asm/byteorder.h> 13#include <asm/byteorder.h>
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/unaligned.h>
15 16
16#ifdef CONFIG_ISA_ARCV2 17#ifdef CONFIG_ISA_ARCV2
17#include <asm/barrier.h> 18#include <asm/barrier.h>
@@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
94 return w; 95 return w;
95} 96}
96 97
98/*
99 * {read,write}s{b,w,l}() repeatedly access the same IO address in
100 * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
101 * @count times
102 */
103#define __raw_readsx(t,f) \
104static inline void __raw_reads##f(const volatile void __iomem *addr, \
105 void *ptr, unsigned int count) \
106{ \
107 bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
108 u##t *buf = ptr; \
109 \
110 if (!count) \
111 return; \
112 \
113 /* Some ARC CPU's don't support unaligned accesses */ \
114 if (is_aligned) { \
115 do { \
116 u##t x = __raw_read##f(addr); \
117 *buf++ = x; \
118 } while (--count); \
119 } else { \
120 do { \
121 u##t x = __raw_read##f(addr); \
122 put_unaligned(x, buf++); \
123 } while (--count); \
124 } \
125}
126
127#define __raw_readsb __raw_readsb
128__raw_readsx(8, b)
129#define __raw_readsw __raw_readsw
130__raw_readsx(16, w)
131#define __raw_readsl __raw_readsl
132__raw_readsx(32, l)
133
97#define __raw_writeb __raw_writeb 134#define __raw_writeb __raw_writeb
98static inline void __raw_writeb(u8 b, volatile void __iomem *addr) 135static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
99{ 136{
@@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
126 163
127} 164}
128 165
166#define __raw_writesx(t,f) \
167static inline void __raw_writes##f(volatile void __iomem *addr, \
168 const void *ptr, unsigned int count) \
169{ \
170 bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
171 const u##t *buf = ptr; \
172 \
173 if (!count) \
174 return; \
175 \
176 /* Some ARC CPU's don't support unaligned accesses */ \
177 if (is_aligned) { \
178 do { \
179 __raw_write##f(*buf++, addr); \
180 } while (--count); \
181 } else { \
182 do { \
183 __raw_write##f(get_unaligned(buf++), addr); \
184 } while (--count); \
185 } \
186}
187
188#define __raw_writesb __raw_writesb
189__raw_writesx(8, b)
190#define __raw_writesw __raw_writesw
191__raw_writesx(16, w)
192#define __raw_writesl __raw_writesl
193__raw_writesx(32, l)
194
129/* 195/*
130 * MMIO can also get buffered/optimized in micro-arch, so barriers needed 196 * MMIO can also get buffered/optimized in micro-arch, so barriers needed
131 * Based on ARM model for the typical use case 197 * Based on ARM model for the typical use case
@@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
141#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) 207#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
142#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) 208#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
143#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) 209#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
210#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
211#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
212#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
144 213
145#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) 214#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
146#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) 215#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
147#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) 216#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
217#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
218#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
219#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
148 220
149/* 221/*
150 * Relaxed API for drivers which can handle barrier ordering themselves 222 * Relaxed API for drivers which can handle barrier ordering themselves
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2cae79a25d7..eea8c5ce6335 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -243,7 +243,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
243{ 243{
244 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; 244 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
245 struct bcr_identity *core = &cpu->core; 245 struct bcr_identity *core = &cpu->core;
246 int i, n = 0; 246 int i, n = 0, ua = 0;
247 247
248 FIX_PTR(cpu); 248 FIX_PTR(cpu);
249 249
@@ -263,10 +263,13 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
263 IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT), 263 IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
264 IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT)); 264 IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
265 265
266 n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", 266#ifdef __ARC_UNALIGNED__
267 ua = 1;
268#endif
269 n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
267 IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC), 270 IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
268 IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64), 271 IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
269 IS_AVAIL1(cpu->isa.unalign, "unalign (not used)")); 272 IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
270 273
271 if (i) 274 if (i)
272 n += scnprintf(buf + n, len - n, "\n\t\t: "); 275 n += scnprintf(buf + n, len - n, "\n\t\t: ");
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index f2701c13a66b..cf9619d4efb4 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1145,6 +1145,20 @@ noinline void __init arc_ioc_setup(void)
1145 unsigned int ioc_base, mem_sz; 1145 unsigned int ioc_base, mem_sz;
1146 1146
1147 /* 1147 /*
1148 * If IOC was already enabled (due to bootloader) it technically needs to
1149 * be reconfigured with aperture base,size corresponding to Linux memory map
1150 * which will certainly be different than uboot's. But disabling and
1151 * reenabling IOC when DMA might be potentially active is tricky business.
1152 * To avoid random memory issues later, just panic here and ask user to
1153 * upgrade bootloader to one which doesn't enable IOC
1154 */
1155 if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
1156 panic("IOC already enabled, please upgrade bootloader!\n");
1157
1158 if (!ioc_enable)
1159 return;
1160
1161 /*
1148 * As for today we don't support both IOC and ZONE_HIGHMEM enabled 1162 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
1149 * simultaneously. This happens because as of today IOC aperture covers 1163 * simultaneously. This happens because as of today IOC aperture covers
1150 * only ZONE_NORMAL (low mem) and any dma transactions outside this 1164 * only ZONE_NORMAL (low mem) and any dma transactions outside this
@@ -1187,8 +1201,8 @@ noinline void __init arc_ioc_setup(void)
1187 panic("IOC Aperture start must be aligned to the size of the aperture"); 1201 panic("IOC Aperture start must be aligned to the size of the aperture");
1188 1202
1189 write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12); 1203 write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
1190 write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1); 1204 write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
1191 write_aux_reg(ARC_REG_IO_COH_ENABLE, 1); 1205 write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
1192 1206
1193 /* Re-enable L1 dcache */ 1207 /* Re-enable L1 dcache */
1194 __dc_enable(); 1208 __dc_enable();
@@ -1265,7 +1279,7 @@ void __init arc_cache_init_master(void)
1265 if (is_isa_arcv2() && l2_line_sz && !slc_enable) 1279 if (is_isa_arcv2() && l2_line_sz && !slc_enable)
1266 arc_slc_disable(); 1280 arc_slc_disable();
1267 1281
1268 if (is_isa_arcv2() && ioc_enable) 1282 if (is_isa_arcv2() && ioc_exists)
1269 arc_ioc_setup(); 1283 arc_ioc_setup();
1270 1284
1271 if (is_isa_arcv2() && l2_line_sz && slc_enable) { 1285 if (is_isa_arcv2() && l2_line_sz && slc_enable) {
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index c9da6102eb4f..e2d9fc3fea01 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -66,7 +66,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
66 struct vm_area_struct *vma = NULL; 66 struct vm_area_struct *vma = NULL;
67 struct task_struct *tsk = current; 67 struct task_struct *tsk = current;
68 struct mm_struct *mm = tsk->mm; 68 struct mm_struct *mm = tsk->mm;
69 int si_code; 69 int si_code = 0;
70 int ret; 70 int ret;
71 vm_fault_t fault; 71 vm_fault_t fault;
72 int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ 72 int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index d4d33cd7adad..1e2bb68231ad 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -228,7 +228,7 @@
228 vmmc-supply = <&vmmc_fixed>; 228 vmmc-supply = <&vmmc_fixed>;
229 bus-width = <4>; 229 bus-width = <4>;
230 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ 230 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
231 cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */ 231 cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */
232}; 232};
233 233
234&mmc3 { 234&mmc3 {
diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi
index dae6e458e59f..b1c988eed87c 100644
--- a/arch/arm/boot/dts/am3517-som.dtsi
+++ b/arch/arm/boot/dts/am3517-som.dtsi
@@ -163,7 +163,7 @@
163 compatible = "ti,wl1271"; 163 compatible = "ti,wl1271";
164 reg = <2>; 164 reg = <2>;
165 interrupt-parent = <&gpio6>; 165 interrupt-parent = <&gpio6>;
166 interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */ 166 interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */
167 ref-clock-frequency = <26000000>; 167 ref-clock-frequency = <26000000>;
168 tcxo-clock-frequency = <26000000>; 168 tcxo-clock-frequency = <26000000>;
169 }; 169 };
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index e45a15ceb94b..69d753cac89a 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -492,12 +492,6 @@
492 pinctrl-0 = <&pinctrl_i2c2>; 492 pinctrl-0 = <&pinctrl_i2c2>;
493 status = "okay"; 493 status = "okay";
494 494
495 eeprom@50 {
496 compatible = "atmel,24c04";
497 pagesize = <16>;
498 reg = <0x50>;
499 };
500
501 hpa1: amp@60 { 495 hpa1: amp@60 {
502 compatible = "ti,tpa6130a2"; 496 compatible = "ti,tpa6130a2";
503 reg = <0x60>; 497 reg = <0x60>;
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index b560ff88459b..5ff9a179c83c 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -55,7 +55,7 @@
55 }; 55 };
56 56
57 chosen { 57 chosen {
58 stdout-path = "&uart1:115200n8"; 58 stdout-path = "serial0:115200n8";
59 }; 59 };
60 60
61 memory@70000000 { 61 memory@70000000 {
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index ed9a980bce85..beefa1b2049d 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -740,7 +740,7 @@
740 i2c1: i2c@21a0000 { 740 i2c1: i2c@21a0000 {
741 #address-cells = <1>; 741 #address-cells = <1>;
742 #size-cells = <0>; 742 #size-cells = <0>;
743 compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c"; 743 compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c";
744 reg = <0x021a0000 0x4000>; 744 reg = <0x021a0000 0x4000>;
745 interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>; 745 interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
746 clocks = <&clks IMX6SLL_CLK_I2C1>; 746 clocks = <&clks IMX6SLL_CLK_I2C1>;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 53b3408b5fab..7d7d679945d2 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -117,7 +117,9 @@
117 regulator-name = "enet_3v3"; 117 regulator-name = "enet_3v3";
118 regulator-min-microvolt = <3300000>; 118 regulator-min-microvolt = <3300000>;
119 regulator-max-microvolt = <3300000>; 119 regulator-max-microvolt = <3300000>;
120 gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; 120 gpio = <&gpio2 6 GPIO_ACTIVE_LOW>;
121 regulator-boot-on;
122 regulator-always-on;
121 }; 123 };
122 124
123 reg_pcie_gpio: regulator-pcie-gpio { 125 reg_pcie_gpio: regulator-pcie-gpio {
@@ -180,6 +182,7 @@
180 phy-supply = <&reg_enet_3v3>; 182 phy-supply = <&reg_enet_3v3>;
181 phy-mode = "rgmii"; 183 phy-mode = "rgmii";
182 phy-handle = <&ethphy1>; 184 phy-handle = <&ethphy1>;
185 phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
183 status = "okay"; 186 status = "okay";
184 187
185 mdio { 188 mdio {
@@ -373,6 +376,8 @@
373 MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081 376 MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081
374 MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081 377 MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081
375 MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91 378 MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91
379 /* phy reset */
380 MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x10b0
376 >; 381 >;
377 }; 382 };
378 383
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index ac343330d0c8..98b682a8080c 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -129,7 +129,7 @@
129}; 129};
130 130
131&mmc3 { 131&mmc3 {
132 interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; 132 interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
133 pinctrl-0 = <&mmc3_pins &wl127x_gpio>; 133 pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
134 pinctrl-names = "default"; 134 pinctrl-names = "default";
135 vmmc-supply = <&wl12xx_vmmc>; 135 vmmc-supply = <&wl12xx_vmmc>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 9d5d53fbe9c0..c39cf2ca54da 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -35,7 +35,7 @@
35 * jumpering combinations for the long run. 35 * jumpering combinations for the long run.
36 */ 36 */
37&mmc3 { 37&mmc3 {
38 interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; 38 interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
39 pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>; 39 pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>;
40 pinctrl-names = "default"; 40 pinctrl-names = "default";
41 vmmc-supply = <&wl12xx_vmmc>; 41 vmmc-supply = <&wl12xx_vmmc>;
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 2075120cfc4d..d8bf939a3aff 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -10,7 +10,11 @@
10#include "rk3288.dtsi" 10#include "rk3288.dtsi"
11 11
12/ { 12/ {
13 memory@0 { 13 /*
14 * The default coreboot on veyron devices ignores memory@0 nodes
15 * and would instead create another memory node.
16 */
17 memory {
14 device_type = "memory"; 18 device_type = "memory";
15 reg = <0x0 0x0 0x0 0x80000000>; 19 reg = <0x0 0x0 0x0 0x80000000>;
16 }; 20 };
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 843052f14f1c..dd0dda6ed44b 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -314,7 +314,7 @@
314 0x1 0x0 0x60000000 0x10000000 314 0x1 0x0 0x60000000 0x10000000
315 0x2 0x0 0x70000000 0x10000000 315 0x2 0x0 0x70000000 0x10000000
316 0x3 0x0 0x80000000 0x10000000>; 316 0x3 0x0 0x80000000 0x10000000>;
317 clocks = <&mck>; 317 clocks = <&h32ck>;
318 status = "disabled"; 318 status = "disabled";
319 319
320 nand_controller: nand-controller { 320 nand_controller: nand-controller {
diff --git a/arch/arm/boot/dts/vf610m4-colibri.dts b/arch/arm/boot/dts/vf610m4-colibri.dts
index 41ec66a96990..ca6249558760 100644
--- a/arch/arm/boot/dts/vf610m4-colibri.dts
+++ b/arch/arm/boot/dts/vf610m4-colibri.dts
@@ -50,8 +50,8 @@
50 compatible = "fsl,vf610m4"; 50 compatible = "fsl,vf610m4";
51 51
52 chosen { 52 chosen {
53 bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw"; 53 bootargs = "clk_ignore_unused init=/linuxrc rw";
54 stdout-path = "&uart2"; 54 stdout-path = "serial2:115200";
55 }; 55 };
56 56
57 memory@8c000000 { 57 memory@8c000000 {
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 1c7616815a86..63af6234c1b6 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1,7 +1,6 @@
1CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
2CONFIG_NO_HZ=y 2CONFIG_NO_HZ=y
3CONFIG_HIGH_RES_TIMERS=y 3CONFIG_HIGH_RES_TIMERS=y
4CONFIG_PREEMPT=y
5CONFIG_CGROUPS=y 4CONFIG_CGROUPS=y
6CONFIG_BLK_DEV_INITRD=y 5CONFIG_BLK_DEV_INITRD=y
7CONFIG_EMBEDDED=y 6CONFIG_EMBEDDED=y
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 0d289240b6ca..775cac3c02bb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -111,6 +111,7 @@
111#include <linux/kernel.h> 111#include <linux/kernel.h>
112 112
113extern unsigned int processor_id; 113extern unsigned int processor_id;
114struct proc_info_list *lookup_processor(u32 midr);
114 115
115#ifdef CONFIG_CPU_CP15 116#ifdef CONFIG_CPU_CP15
116#define read_cpuid(reg) \ 117#define read_cpuid(reg) \
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 92fd2c8a9af0..12659ce5c1f3 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -10,7 +10,7 @@
10#ifndef _ASM_PGTABLE_2LEVEL_H 10#ifndef _ASM_PGTABLE_2LEVEL_H
11#define _ASM_PGTABLE_2LEVEL_H 11#define _ASM_PGTABLE_2LEVEL_H
12 12
13#define __PAGETABLE_PMD_FOLDED 13#define __PAGETABLE_PMD_FOLDED 1
14 14
15/* 15/*
16 * Hardware-wise, we have a two level page table structure, where the first 16 * Hardware-wise, we have a two level page table structure, where the first
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index e25f4392e1b2..e1b6f280ab08 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -23,7 +23,7 @@ struct mm_struct;
23/* 23/*
24 * Don't change this structure - ASM code relies on it. 24 * Don't change this structure - ASM code relies on it.
25 */ 25 */
26extern struct processor { 26struct processor {
27 /* MISC 27 /* MISC
28 * get data abort address/flags 28 * get data abort address/flags
29 */ 29 */
@@ -79,9 +79,13 @@ extern struct processor {
79 unsigned int suspend_size; 79 unsigned int suspend_size;
80 void (*do_suspend)(void *); 80 void (*do_suspend)(void *);
81 void (*do_resume)(void *); 81 void (*do_resume)(void *);
82} processor; 82};
83 83
84#ifndef MULTI_CPU 84#ifndef MULTI_CPU
85static inline void init_proc_vtable(const struct processor *p)
86{
87}
88
85extern void cpu_proc_init(void); 89extern void cpu_proc_init(void);
86extern void cpu_proc_fin(void); 90extern void cpu_proc_fin(void);
87extern int cpu_do_idle(void); 91extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
98extern void cpu_do_suspend(void *); 102extern void cpu_do_suspend(void *);
99extern void cpu_do_resume(void *); 103extern void cpu_do_resume(void *);
100#else 104#else
101#define cpu_proc_init processor._proc_init
102#define cpu_proc_fin processor._proc_fin
103#define cpu_reset processor.reset
104#define cpu_do_idle processor._do_idle
105#define cpu_dcache_clean_area processor.dcache_clean_area
106#define cpu_set_pte_ext processor.set_pte_ext
107#define cpu_do_switch_mm processor.switch_mm
108 105
109/* These three are private to arch/arm/kernel/suspend.c */ 106extern struct processor processor;
110#define cpu_do_suspend processor.do_suspend 107#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
111#define cpu_do_resume processor.do_resume 108#include <linux/smp.h>
109/*
110 * This can't be a per-cpu variable because we need to access it before
111 * per-cpu has been initialised. We have a couple of functions that are
112 * called in a pre-emptible context, and so can't use smp_processor_id()
113 * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
114 * function pointers for these are identical across all CPUs.
115 */
116extern struct processor *cpu_vtable[];
117#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
118#define PROC_TABLE(f) cpu_vtable[0]->f
119static inline void init_proc_vtable(const struct processor *p)
120{
121 unsigned int cpu = smp_processor_id();
122 *cpu_vtable[cpu] = *p;
123 WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
124 cpu_vtable[0]->dcache_clean_area);
125 WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
126 cpu_vtable[0]->set_pte_ext);
127}
128#else
129#define PROC_VTABLE(f) processor.f
130#define PROC_TABLE(f) processor.f
131static inline void init_proc_vtable(const struct processor *p)
132{
133 processor = *p;
134}
135#endif
136
137#define cpu_proc_init PROC_VTABLE(_proc_init)
138#define cpu_check_bugs PROC_VTABLE(check_bugs)
139#define cpu_proc_fin PROC_VTABLE(_proc_fin)
140#define cpu_reset PROC_VTABLE(reset)
141#define cpu_do_idle PROC_VTABLE(_do_idle)
142#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
143#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
144#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
145
146/* These two are private to arch/arm/kernel/suspend.c */
147#define cpu_do_suspend PROC_VTABLE(do_suspend)
148#define cpu_do_resume PROC_VTABLE(do_resume)
112#endif 149#endif
113 150
114extern void cpu_resume(void); 151extern void cpu_resume(void);
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
index 7be511310191..d41d3598e5e5 100644
--- a/arch/arm/kernel/bugs.c
+++ b/arch/arm/kernel/bugs.c
@@ -6,8 +6,8 @@
6void check_other_bugs(void) 6void check_other_bugs(void)
7{ 7{
8#ifdef MULTI_CPU 8#ifdef MULTI_CPU
9 if (processor.check_bugs) 9 if (cpu_check_bugs)
10 processor.check_bugs(); 10 cpu_check_bugs();
11#endif 11#endif
12} 12}
13 13
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 0142fcfcc3d3..bda949fd84e8 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -183,9 +183,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
183 unsigned long frame_pointer) 183 unsigned long frame_pointer)
184{ 184{
185 unsigned long return_hooker = (unsigned long) &return_to_handler; 185 unsigned long return_hooker = (unsigned long) &return_to_handler;
186 struct ftrace_graph_ent trace;
187 unsigned long old; 186 unsigned long old;
188 int err;
189 187
190 if (unlikely(atomic_read(&current->tracing_graph_pause))) 188 if (unlikely(atomic_read(&current->tracing_graph_pause)))
191 return; 189 return;
@@ -193,21 +191,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
193 old = *parent; 191 old = *parent;
194 *parent = return_hooker; 192 *parent = return_hooker;
195 193
196 trace.func = self_addr; 194 if (function_graph_enter(old, self_addr, frame_pointer, NULL))
197 trace.depth = current->curr_ret_stack + 1;
198
199 /* Only trace if the calling function expects to */
200 if (!ftrace_graph_entry(&trace)) {
201 *parent = old; 195 *parent = old;
202 return;
203 }
204
205 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
206 frame_pointer, NULL);
207 if (err == -EBUSY) {
208 *parent = old;
209 return;
210 }
211} 196}
212 197
213#ifdef CONFIG_DYNAMIC_FTRACE 198#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 6e0375e7db05..997b02302c31 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -145,6 +145,9 @@ __mmap_switched_data:
145#endif 145#endif
146 .size __mmap_switched_data, . - __mmap_switched_data 146 .size __mmap_switched_data, . - __mmap_switched_data
147 147
148 __FINIT
149 .text
150
148/* 151/*
149 * This provides a C-API version of __lookup_processor_type 152 * This provides a C-API version of __lookup_processor_type
150 */ 153 */
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
156 ldmfd sp!, {r4 - r6, r9, pc} 159 ldmfd sp!, {r4 - r6, r9, pc}
157ENDPROC(lookup_processor_type) 160ENDPROC(lookup_processor_type)
158 161
159 __FINIT
160 .text
161
162/* 162/*
163 * Read processor ID register (CP#15, CR0), and look up in the linker-built 163 * Read processor ID register (CP#15, CR0), and look up in the linker-built
164 * supported processor list. Note that we can't use the absolute addresses 164 * supported processor list. Note that we can't use the absolute addresses
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ac7e08886863..375b13f7e780 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2);
114 114
115#ifdef MULTI_CPU 115#ifdef MULTI_CPU
116struct processor processor __ro_after_init; 116struct processor processor __ro_after_init;
117#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
118struct processor *cpu_vtable[NR_CPUS] = {
119 [0] = &processor,
120};
121#endif
117#endif 122#endif
118#ifdef MULTI_TLB 123#ifdef MULTI_TLB
119struct cpu_tlb_fns cpu_tlb __ro_after_init; 124struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void)
666} 671}
667#endif 672#endif
668 673
669static void __init setup_processor(void) 674/*
675 * locate processor in the list of supported processor types. The linker
676 * builds this table for us from the entries in arch/arm/mm/proc-*.S
677 */
678struct proc_info_list *lookup_processor(u32 midr)
670{ 679{
671 struct proc_info_list *list; 680 struct proc_info_list *list = lookup_processor_type(midr);
672 681
673 /*
674 * locate processor in the list of supported processor
675 * types. The linker builds this table for us from the
676 * entries in arch/arm/mm/proc-*.S
677 */
678 list = lookup_processor_type(read_cpuid_id());
679 if (!list) { 682 if (!list) {
680 pr_err("CPU configuration botched (ID %08x), unable to continue.\n", 683 pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
681 read_cpuid_id()); 684 smp_processor_id(), midr);
682 while (1); 685 while (1)
686 /* can't use cpu_relax() here as it may require MMU setup */;
683 } 687 }
684 688
689 return list;
690}
691
692static void __init setup_processor(void)
693{
694 unsigned int midr = read_cpuid_id();
695 struct proc_info_list *list = lookup_processor(midr);
696
685 cpu_name = list->cpu_name; 697 cpu_name = list->cpu_name;
686 __cpu_architecture = __get_cpu_architecture(); 698 __cpu_architecture = __get_cpu_architecture();
687 699
688#ifdef MULTI_CPU 700 init_proc_vtable(list->proc);
689 processor = *list->proc;
690#endif
691#ifdef MULTI_TLB 701#ifdef MULTI_TLB
692 cpu_tlb = *list->tlb; 702 cpu_tlb = *list->tlb;
693#endif 703#endif
@@ -699,7 +709,7 @@ static void __init setup_processor(void)
699#endif 709#endif
700 710
701 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", 711 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
702 cpu_name, read_cpuid_id(), read_cpuid_id() & 15, 712 list->cpu_name, midr, midr & 15,
703 proc_arch[cpu_architecture()], get_cr()); 713 proc_arch[cpu_architecture()], get_cr());
704 714
705 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", 715 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 0978282d5fc2..12a6172263c0 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -42,6 +42,7 @@
42#include <asm/mmu_context.h> 42#include <asm/mmu_context.h>
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/pgalloc.h> 44#include <asm/pgalloc.h>
45#include <asm/procinfo.h>
45#include <asm/processor.h> 46#include <asm/processor.h>
46#include <asm/sections.h> 47#include <asm/sections.h>
47#include <asm/tlbflush.h> 48#include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
102#endif 103#endif
103} 104}
104 105
106#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
107static int secondary_biglittle_prepare(unsigned int cpu)
108{
109 if (!cpu_vtable[cpu])
110 cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
111
112 return cpu_vtable[cpu] ? 0 : -ENOMEM;
113}
114
115static void secondary_biglittle_init(void)
116{
117 init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
118}
119#else
120static int secondary_biglittle_prepare(unsigned int cpu)
121{
122 return 0;
123}
124
125static void secondary_biglittle_init(void)
126{
127}
128#endif
129
105int __cpu_up(unsigned int cpu, struct task_struct *idle) 130int __cpu_up(unsigned int cpu, struct task_struct *idle)
106{ 131{
107 int ret; 132 int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
109 if (!smp_ops.smp_boot_secondary) 134 if (!smp_ops.smp_boot_secondary)
110 return -ENOSYS; 135 return -ENOSYS;
111 136
137 ret = secondary_biglittle_prepare(cpu);
138 if (ret)
139 return ret;
140
112 /* 141 /*
113 * We need to tell the secondary core where to find 142 * We need to tell the secondary core where to find
114 * its stack and the page tables. 143 * its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
359 struct mm_struct *mm = &init_mm; 388 struct mm_struct *mm = &init_mm;
360 unsigned int cpu; 389 unsigned int cpu;
361 390
391 secondary_biglittle_init();
392
362 /* 393 /*
363 * The identity mapping is uncached (strongly ordered), so 394 * The identity mapping is uncached (strongly ordered), so
364 * switch away from it before attempting any exclusive accesses. 395 * switch away from it before attempting any exclusive accesses.
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 0bc5bd2665df..2cc9fe4c3a91 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -759,7 +759,9 @@ static struct davinci_id da830_ids[] = {
759}; 759};
760 760
761static struct davinci_gpio_platform_data da830_gpio_platform_data = { 761static struct davinci_gpio_platform_data da830_gpio_platform_data = {
762 .ngpio = 128, 762 .no_auto_base = true,
763 .base = 0,
764 .ngpio = 128,
763}; 765};
764 766
765int __init da830_register_gpio(void) 767int __init da830_register_gpio(void)
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 4528bbf0c861..e7b78df2bfef 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -719,7 +719,9 @@ int __init da850_register_vpif_capture(struct vpif_capture_config
719} 719}
720 720
721static struct davinci_gpio_platform_data da850_gpio_platform_data = { 721static struct davinci_gpio_platform_data da850_gpio_platform_data = {
722 .ngpio = 144, 722 .no_auto_base = true,
723 .base = 0,
724 .ngpio = 144,
723}; 725};
724 726
725int __init da850_register_gpio(void) 727int __init da850_register_gpio(void)
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 1fd3619f6a09..cf78da5ab054 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -701,6 +701,46 @@ static struct resource da8xx_gpio_resources[] = {
701 }, 701 },
702 { /* interrupt */ 702 { /* interrupt */
703 .start = IRQ_DA8XX_GPIO0, 703 .start = IRQ_DA8XX_GPIO0,
704 .end = IRQ_DA8XX_GPIO0,
705 .flags = IORESOURCE_IRQ,
706 },
707 {
708 .start = IRQ_DA8XX_GPIO1,
709 .end = IRQ_DA8XX_GPIO1,
710 .flags = IORESOURCE_IRQ,
711 },
712 {
713 .start = IRQ_DA8XX_GPIO2,
714 .end = IRQ_DA8XX_GPIO2,
715 .flags = IORESOURCE_IRQ,
716 },
717 {
718 .start = IRQ_DA8XX_GPIO3,
719 .end = IRQ_DA8XX_GPIO3,
720 .flags = IORESOURCE_IRQ,
721 },
722 {
723 .start = IRQ_DA8XX_GPIO4,
724 .end = IRQ_DA8XX_GPIO4,
725 .flags = IORESOURCE_IRQ,
726 },
727 {
728 .start = IRQ_DA8XX_GPIO5,
729 .end = IRQ_DA8XX_GPIO5,
730 .flags = IORESOURCE_IRQ,
731 },
732 {
733 .start = IRQ_DA8XX_GPIO6,
734 .end = IRQ_DA8XX_GPIO6,
735 .flags = IORESOURCE_IRQ,
736 },
737 {
738 .start = IRQ_DA8XX_GPIO7,
739 .end = IRQ_DA8XX_GPIO7,
740 .flags = IORESOURCE_IRQ,
741 },
742 {
743 .start = IRQ_DA8XX_GPIO8,
704 .end = IRQ_DA8XX_GPIO8, 744 .end = IRQ_DA8XX_GPIO8,
705 .flags = IORESOURCE_IRQ, 745 .flags = IORESOURCE_IRQ,
706 }, 746 },
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9f7d38d12c88..4c6e0bef4509 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -548,12 +548,44 @@ static struct resource dm355_gpio_resources[] = {
548 }, 548 },
549 { /* interrupt */ 549 { /* interrupt */
550 .start = IRQ_DM355_GPIOBNK0, 550 .start = IRQ_DM355_GPIOBNK0,
551 .end = IRQ_DM355_GPIOBNK0,
552 .flags = IORESOURCE_IRQ,
553 },
554 {
555 .start = IRQ_DM355_GPIOBNK1,
556 .end = IRQ_DM355_GPIOBNK1,
557 .flags = IORESOURCE_IRQ,
558 },
559 {
560 .start = IRQ_DM355_GPIOBNK2,
561 .end = IRQ_DM355_GPIOBNK2,
562 .flags = IORESOURCE_IRQ,
563 },
564 {
565 .start = IRQ_DM355_GPIOBNK3,
566 .end = IRQ_DM355_GPIOBNK3,
567 .flags = IORESOURCE_IRQ,
568 },
569 {
570 .start = IRQ_DM355_GPIOBNK4,
571 .end = IRQ_DM355_GPIOBNK4,
572 .flags = IORESOURCE_IRQ,
573 },
574 {
575 .start = IRQ_DM355_GPIOBNK5,
576 .end = IRQ_DM355_GPIOBNK5,
577 .flags = IORESOURCE_IRQ,
578 },
579 {
580 .start = IRQ_DM355_GPIOBNK6,
551 .end = IRQ_DM355_GPIOBNK6, 581 .end = IRQ_DM355_GPIOBNK6,
552 .flags = IORESOURCE_IRQ, 582 .flags = IORESOURCE_IRQ,
553 }, 583 },
554}; 584};
555 585
556static struct davinci_gpio_platform_data dm355_gpio_platform_data = { 586static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
587 .no_auto_base = true,
588 .base = 0,
557 .ngpio = 104, 589 .ngpio = 104,
558}; 590};
559 591
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index abcf2a5ed89b..01fb2b0c82de 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -267,12 +267,49 @@ static struct resource dm365_gpio_resources[] = {
267 }, 267 },
268 { /* interrupt */ 268 { /* interrupt */
269 .start = IRQ_DM365_GPIO0, 269 .start = IRQ_DM365_GPIO0,
270 .end = IRQ_DM365_GPIO0,
271 .flags = IORESOURCE_IRQ,
272 },
273 {
274 .start = IRQ_DM365_GPIO1,
275 .end = IRQ_DM365_GPIO1,
276 .flags = IORESOURCE_IRQ,
277 },
278 {
279 .start = IRQ_DM365_GPIO2,
280 .end = IRQ_DM365_GPIO2,
281 .flags = IORESOURCE_IRQ,
282 },
283 {
284 .start = IRQ_DM365_GPIO3,
285 .end = IRQ_DM365_GPIO3,
286 .flags = IORESOURCE_IRQ,
287 },
288 {
289 .start = IRQ_DM365_GPIO4,
290 .end = IRQ_DM365_GPIO4,
291 .flags = IORESOURCE_IRQ,
292 },
293 {
294 .start = IRQ_DM365_GPIO5,
295 .end = IRQ_DM365_GPIO5,
296 .flags = IORESOURCE_IRQ,
297 },
298 {
299 .start = IRQ_DM365_GPIO6,
300 .end = IRQ_DM365_GPIO6,
301 .flags = IORESOURCE_IRQ,
302 },
303 {
304 .start = IRQ_DM365_GPIO7,
270 .end = IRQ_DM365_GPIO7, 305 .end = IRQ_DM365_GPIO7,
271 .flags = IORESOURCE_IRQ, 306 .flags = IORESOURCE_IRQ,
272 }, 307 },
273}; 308};
274 309
275static struct davinci_gpio_platform_data dm365_gpio_platform_data = { 310static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
311 .no_auto_base = true,
312 .base = 0,
276 .ngpio = 104, 313 .ngpio = 104,
277 .gpio_unbanked = 8, 314 .gpio_unbanked = 8,
278}; 315};
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 0720da7809a6..38f92b7d413e 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -492,12 +492,34 @@ static struct resource dm644_gpio_resources[] = {
492 }, 492 },
493 { /* interrupt */ 493 { /* interrupt */
494 .start = IRQ_GPIOBNK0, 494 .start = IRQ_GPIOBNK0,
495 .end = IRQ_GPIOBNK0,
496 .flags = IORESOURCE_IRQ,
497 },
498 {
499 .start = IRQ_GPIOBNK1,
500 .end = IRQ_GPIOBNK1,
501 .flags = IORESOURCE_IRQ,
502 },
503 {
504 .start = IRQ_GPIOBNK2,
505 .end = IRQ_GPIOBNK2,
506 .flags = IORESOURCE_IRQ,
507 },
508 {
509 .start = IRQ_GPIOBNK3,
510 .end = IRQ_GPIOBNK3,
511 .flags = IORESOURCE_IRQ,
512 },
513 {
514 .start = IRQ_GPIOBNK4,
495 .end = IRQ_GPIOBNK4, 515 .end = IRQ_GPIOBNK4,
496 .flags = IORESOURCE_IRQ, 516 .flags = IORESOURCE_IRQ,
497 }, 517 },
498}; 518};
499 519
500static struct davinci_gpio_platform_data dm644_gpio_platform_data = { 520static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
521 .no_auto_base = true,
522 .base = 0,
501 .ngpio = 71, 523 .ngpio = 71,
502}; 524};
503 525
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 6bd2ed069d0d..7dc54b2a610f 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -442,12 +442,24 @@ static struct resource dm646x_gpio_resources[] = {
442 }, 442 },
443 { /* interrupt */ 443 { /* interrupt */
444 .start = IRQ_DM646X_GPIOBNK0, 444 .start = IRQ_DM646X_GPIOBNK0,
445 .end = IRQ_DM646X_GPIOBNK0,
446 .flags = IORESOURCE_IRQ,
447 },
448 {
449 .start = IRQ_DM646X_GPIOBNK1,
450 .end = IRQ_DM646X_GPIOBNK1,
451 .flags = IORESOURCE_IRQ,
452 },
453 {
454 .start = IRQ_DM646X_GPIOBNK2,
445 .end = IRQ_DM646X_GPIOBNK2, 455 .end = IRQ_DM646X_GPIOBNK2,
446 .flags = IORESOURCE_IRQ, 456 .flags = IORESOURCE_IRQ,
447 }, 457 },
448}; 458};
449 459
450static struct davinci_gpio_platform_data dm646x_gpio_platform_data = { 460static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
461 .no_auto_base = true,
462 .base = 0,
451 .ngpio = 43, 463 .ngpio = 43,
452}; 464};
453 465
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 3d191fd52910..17886744dbe6 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -750,6 +750,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
750 struct modem_private_data *priv = port->private_data; 750 struct modem_private_data *priv = port->private_data;
751 int ret; 751 int ret;
752 752
753 if (!priv)
754 return;
755
753 if (IS_ERR(priv->regulator)) 756 if (IS_ERR(priv->regulator))
754 return; 757 return;
755 758
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 9500b6e27380..f86b72d1d59e 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void)
209 209
210 return 0; 210 return 0;
211} 211}
212#else 212
213static inline int omapdss_init_fbdev(void) 213static const char * const omapdss_compat_names[] __initconst = {
214 "ti,omap2-dss",
215 "ti,omap3-dss",
216 "ti,omap4-dss",
217 "ti,omap5-dss",
218 "ti,dra7-dss",
219};
220
221static struct device_node * __init omapdss_find_dss_of_node(void)
214{ 222{
215 return 0; 223 struct device_node *node;
224 int i;
225
226 for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
227 node = of_find_compatible_node(NULL, NULL,
228 omapdss_compat_names[i]);
229 if (node)
230 return node;
231 }
232
233 return NULL;
216} 234}
235
236static int __init omapdss_init_of(void)
237{
238 int r;
239 struct device_node *node;
240 struct platform_device *pdev;
241
242 /* only create dss helper devices if dss is enabled in the .dts */
243
244 node = omapdss_find_dss_of_node();
245 if (!node)
246 return 0;
247
248 if (!of_device_is_available(node))
249 return 0;
250
251 pdev = of_find_device_by_node(node);
252
253 if (!pdev) {
254 pr_err("Unable to find DSS platform device\n");
255 return -ENODEV;
256 }
257
258 r = of_platform_populate(node, NULL, NULL, &pdev->dev);
259 if (r) {
260 pr_err("Unable to populate DSS submodule devices\n");
261 return r;
262 }
263
264 return omapdss_init_fbdev();
265}
266omap_device_initcall(omapdss_init_of);
217#endif /* CONFIG_FB_OMAP2 */ 267#endif /* CONFIG_FB_OMAP2 */
218 268
219static void dispc_disable_outputs(void) 269static void dispc_disable_outputs(void)
@@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh)
361 411
362 return r; 412 return r;
363} 413}
364
365static const char * const omapdss_compat_names[] __initconst = {
366 "ti,omap2-dss",
367 "ti,omap3-dss",
368 "ti,omap4-dss",
369 "ti,omap5-dss",
370 "ti,dra7-dss",
371};
372
373static struct device_node * __init omapdss_find_dss_of_node(void)
374{
375 struct device_node *node;
376 int i;
377
378 for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
379 node = of_find_compatible_node(NULL, NULL,
380 omapdss_compat_names[i]);
381 if (node)
382 return node;
383 }
384
385 return NULL;
386}
387
388static int __init omapdss_init_of(void)
389{
390 int r;
391 struct device_node *node;
392 struct platform_device *pdev;
393
394 /* only create dss helper devices if dss is enabled in the .dts */
395
396 node = omapdss_find_dss_of_node();
397 if (!node)
398 return 0;
399
400 if (!of_device_is_available(node))
401 return 0;
402
403 pdev = of_find_device_by_node(node);
404
405 if (!pdev) {
406 pr_err("Unable to find DSS platform device\n");
407 return -ENODEV;
408 }
409
410 r = of_platform_populate(node, NULL, NULL, &pdev->dev);
411 if (r) {
412 pr_err("Unable to populate DSS submodule devices\n");
413 return r;
414 }
415
416 return omapdss_init_fbdev();
417}
418omap_device_initcall(omapdss_init_of);
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 7b95729e8359..38a1be6c3694 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
351 * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and 351 * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
352 * omap44xx_prm_reconfigure_io_chain() must be called. No return value. 352 * omap44xx_prm_reconfigure_io_chain() must be called. No return value.
353 */ 353 */
354static void __init omap44xx_prm_enable_io_wakeup(void) 354static void omap44xx_prm_enable_io_wakeup(void)
355{ 355{
356 s32 inst = omap4_prmst_get_prm_dev_inst(); 356 s32 inst = omap4_prmst_get_prm_dev_inst();
357 357
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 5544b82a2e7a..9a07916af8dd 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
52 case ARM_CPU_PART_CORTEX_A17: 52 case ARM_CPU_PART_CORTEX_A17:
53 case ARM_CPU_PART_CORTEX_A73: 53 case ARM_CPU_PART_CORTEX_A73:
54 case ARM_CPU_PART_CORTEX_A75: 54 case ARM_CPU_PART_CORTEX_A75:
55 if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
56 goto bl_error;
57 per_cpu(harden_branch_predictor_fn, cpu) = 55 per_cpu(harden_branch_predictor_fn, cpu) =
58 harden_branch_predictor_bpiall; 56 harden_branch_predictor_bpiall;
59 spectre_v2_method = "BPIALL"; 57 spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
61 59
62 case ARM_CPU_PART_CORTEX_A15: 60 case ARM_CPU_PART_CORTEX_A15:
63 case ARM_CPU_PART_BRAHMA_B15: 61 case ARM_CPU_PART_BRAHMA_B15:
64 if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
65 goto bl_error;
66 per_cpu(harden_branch_predictor_fn, cpu) = 62 per_cpu(harden_branch_predictor_fn, cpu) =
67 harden_branch_predictor_iciallu; 63 harden_branch_predictor_iciallu;
68 spectre_v2_method = "ICIALLU"; 64 spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
88 ARM_SMCCC_ARCH_WORKAROUND_1, &res); 84 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
89 if ((int)res.a0 != 0) 85 if ((int)res.a0 != 0)
90 break; 86 break;
91 if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
92 goto bl_error;
93 per_cpu(harden_branch_predictor_fn, cpu) = 87 per_cpu(harden_branch_predictor_fn, cpu) =
94 call_hvc_arch_workaround_1; 88 call_hvc_arch_workaround_1;
95 processor.switch_mm = cpu_v7_hvc_switch_mm; 89 cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
96 spectre_v2_method = "hypervisor"; 90 spectre_v2_method = "hypervisor";
97 break; 91 break;
98 92
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
101 ARM_SMCCC_ARCH_WORKAROUND_1, &res); 95 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
102 if ((int)res.a0 != 0) 96 if ((int)res.a0 != 0)
103 break; 97 break;
104 if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
105 goto bl_error;
106 per_cpu(harden_branch_predictor_fn, cpu) = 98 per_cpu(harden_branch_predictor_fn, cpu) =
107 call_smc_arch_workaround_1; 99 call_smc_arch_workaround_1;
108 processor.switch_mm = cpu_v7_smc_switch_mm; 100 cpu_do_switch_mm = cpu_v7_smc_switch_mm;
109 spectre_v2_method = "firmware"; 101 spectre_v2_method = "firmware";
110 break; 102 break;
111 103
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
119 if (spectre_v2_method) 111 if (spectre_v2_method)
120 pr_info("CPU%u: Spectre v2: using %s workaround\n", 112 pr_info("CPU%u: Spectre v2: using %s workaround\n",
121 smp_processor_id(), spectre_v2_method); 113 smp_processor_id(), spectre_v2_method);
122 return;
123
124bl_error:
125 pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
126 cpu);
127} 114}
128#else 115#else
129static void cpu_v7_spectre_init(void) 116static void cpu_v7_spectre_init(void)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6fe52819e014..339eb17c9808 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm)
112 hvc #0 112 hvc #0
113 ldmfd sp!, {r0 - r3} 113 ldmfd sp!, {r0 - r3}
114 b cpu_v7_switch_mm 114 b cpu_v7_switch_mm
115ENDPROC(cpu_v7_smc_switch_mm) 115ENDPROC(cpu_v7_hvc_switch_mm)
116#endif 116#endif
117ENTRY(cpu_v7_iciallu_switch_mm) 117ENTRY(cpu_v7_iciallu_switch_mm)
118 mov r3, #0 118 mov r3, #0
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index aff6e6eadc70..ee7b07938dd5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -573,7 +573,7 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
573 */ 573 */
574 ufp_exc->fpexc = hwstate->fpexc; 574 ufp_exc->fpexc = hwstate->fpexc;
575 ufp_exc->fpinst = hwstate->fpinst; 575 ufp_exc->fpinst = hwstate->fpinst;
576 ufp_exc->fpinst2 = ufp_exc->fpinst2; 576 ufp_exc->fpinst2 = hwstate->fpinst2;
577 577
578 /* Ensure that VFP is disabled. */ 578 /* Ensure that VFP is disabled. */
579 vfp_flush_hwstate(thread); 579 vfp_flush_hwstate(thread);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 787d7850e064..ea2ab0330e3a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -497,6 +497,24 @@ config ARM64_ERRATUM_1188873
497 497
498 If unsure, say Y. 498 If unsure, say Y.
499 499
500config ARM64_ERRATUM_1286807
501 bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
502 default y
503 select ARM64_WORKAROUND_REPEAT_TLBI
504 help
505 This option adds workaround for ARM Cortex-A76 erratum 1286807
506
507 On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
508 address for a cacheable mapping of a location is being
509 accessed by a core while another core is remapping the virtual
510 address to a new physical page using the recommended
511 break-before-make sequence, then under very rare circumstances
512 TLBI+DSB completes before a read using the translation being
513 invalidated has been observed by other observers. The
514 workaround repeats the TLBI+DSB operation.
515
516 If unsure, say Y.
517
500config CAVIUM_ERRATUM_22375 518config CAVIUM_ERRATUM_22375
501 bool "Cavium erratum 22375, 24313" 519 bool "Cavium erratum 22375, 24313"
502 default y 520 default y
@@ -566,9 +584,16 @@ config QCOM_FALKOR_ERRATUM_1003
566 is unchanged. Work around the erratum by invalidating the walk cache 584 is unchanged. Work around the erratum by invalidating the walk cache
567 entries for the trampoline before entering the kernel proper. 585 entries for the trampoline before entering the kernel proper.
568 586
587config ARM64_WORKAROUND_REPEAT_TLBI
588 bool
589 help
590 Enable the repeat TLBI workaround for Falkor erratum 1009 and
591 Cortex-A76 erratum 1286807.
592
569config QCOM_FALKOR_ERRATUM_1009 593config QCOM_FALKOR_ERRATUM_1009
570 bool "Falkor E1009: Prematurely complete a DSB after a TLBI" 594 bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
571 default y 595 default y
596 select ARM64_WORKAROUND_REPEAT_TLBI
572 help 597 help
573 On Falkor v1, the CPU may prematurely complete a DSB following a 598 On Falkor v1, the CPU may prematurely complete a DSB following a
574 TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation 599 TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 8253a1a9e985..fef7351e9f67 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -139,6 +139,7 @@
139 clock-names = "stmmaceth"; 139 clock-names = "stmmaceth";
140 tx-fifo-depth = <16384>; 140 tx-fifo-depth = <16384>;
141 rx-fifo-depth = <16384>; 141 rx-fifo-depth = <16384>;
142 snps,multicast-filter-bins = <256>;
142 status = "disabled"; 143 status = "disabled";
143 }; 144 };
144 145
@@ -154,6 +155,7 @@
154 clock-names = "stmmaceth"; 155 clock-names = "stmmaceth";
155 tx-fifo-depth = <16384>; 156 tx-fifo-depth = <16384>;
156 rx-fifo-depth = <16384>; 157 rx-fifo-depth = <16384>;
158 snps,multicast-filter-bins = <256>;
157 status = "disabled"; 159 status = "disabled";
158 }; 160 };
159 161
@@ -169,6 +171,7 @@
169 clock-names = "stmmaceth"; 171 clock-names = "stmmaceth";
170 tx-fifo-depth = <16384>; 172 tx-fifo-depth = <16384>;
171 rx-fifo-depth = <16384>; 173 rx-fifo-depth = <16384>;
174 snps,multicast-filter-bins = <256>;
172 status = "disabled"; 175 status = "disabled";
173 }; 176 };
174 177
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index b4276da1fb0d..11fd1fe8bdb5 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -241,3 +241,7 @@
241 }; 241 };
242 }; 242 };
243}; 243};
244
245&tlmm {
246 gpio-reserved-ranges = <0 4>, <81 4>;
247};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index eedfaf8922e2..d667eee4e6d0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -352,6 +352,10 @@
352 status = "okay"; 352 status = "okay";
353}; 353};
354 354
355&tlmm {
356 gpio-reserved-ranges = <0 4>, <81 4>;
357};
358
355&uart9 { 359&uart9 {
356 status = "okay"; 360 status = "okay";
357}; 361};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index b5f2273caca4..a79c8d369e0b 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -652,7 +652,7 @@
652 clock-names = "fck", "brg_int", "scif_clk"; 652 clock-names = "fck", "brg_int", "scif_clk";
653 dmas = <&dmac1 0x35>, <&dmac1 0x34>, 653 dmas = <&dmac1 0x35>, <&dmac1 0x34>,
654 <&dmac2 0x35>, <&dmac2 0x34>; 654 <&dmac2 0x35>, <&dmac2 0x34>;
655 dma-names = "tx", "rx"; 655 dma-names = "tx", "rx", "tx", "rx";
656 power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; 656 power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
657 resets = <&cpg 518>; 657 resets = <&cpg 518>;
658 status = "disabled"; 658 status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
index fe2e2c051cc9..5a7012be0d6a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
@@ -15,7 +15,7 @@
15 15
16 aliases { 16 aliases {
17 serial0 = &scif0; 17 serial0 = &scif0;
18 ethernet0 = &avb; 18 ethernet0 = &gether;
19 }; 19 };
20 20
21 chosen { 21 chosen {
@@ -97,23 +97,6 @@
97 }; 97 };
98}; 98};
99 99
100&avb {
101 pinctrl-0 = <&avb_pins>;
102 pinctrl-names = "default";
103
104 phy-mode = "rgmii-id";
105 phy-handle = <&phy0>;
106 renesas,no-ether-link;
107 status = "okay";
108
109 phy0: ethernet-phy@0 {
110 rxc-skew-ps = <1500>;
111 reg = <0>;
112 interrupt-parent = <&gpio1>;
113 interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
114 };
115};
116
117&canfd { 100&canfd {
118 pinctrl-0 = <&canfd0_pins>; 101 pinctrl-0 = <&canfd0_pins>;
119 pinctrl-names = "default"; 102 pinctrl-names = "default";
@@ -139,6 +122,23 @@
139 clock-frequency = <32768>; 122 clock-frequency = <32768>;
140}; 123};
141 124
125&gether {
126 pinctrl-0 = <&gether_pins>;
127 pinctrl-names = "default";
128
129 phy-mode = "rgmii-id";
130 phy-handle = <&phy0>;
131 renesas,no-ether-link;
132 status = "okay";
133
134 phy0: ethernet-phy@0 {
135 rxc-skew-ps = <1500>;
136 reg = <0>;
137 interrupt-parent = <&gpio4>;
138 interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
139 };
140};
141
142&i2c0 { 142&i2c0 {
143 pinctrl-0 = <&i2c0_pins>; 143 pinctrl-0 = <&i2c0_pins>;
144 pinctrl-names = "default"; 144 pinctrl-names = "default";
@@ -236,16 +236,17 @@
236}; 236};
237 237
238&pfc { 238&pfc {
239 avb_pins: avb {
240 groups = "avb_mdio", "avb_rgmii";
241 function = "avb";
242 };
243
244 canfd0_pins: canfd0 { 239 canfd0_pins: canfd0 {
245 groups = "canfd0_data_a"; 240 groups = "canfd0_data_a";
246 function = "canfd0"; 241 function = "canfd0";
247 }; 242 };
248 243
244 gether_pins: gether {
245 groups = "gether_mdio_a", "gether_rgmii",
246 "gether_txcrefclk", "gether_txcrefclk_mega";
247 function = "gether";
248 };
249
249 i2c0_pins: i2c0 { 250 i2c0_pins: i2c0 {
250 groups = "i2c0"; 251 groups = "i2c0";
251 function = "i2c0"; 252 function = "i2c0";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 2dceeea29b83..1e6a71066c16 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -153,7 +153,7 @@
153}; 153};
154 154
155&pcie0 { 155&pcie0 {
156 ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>; 156 ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>;
157 num-lanes = <4>; 157 num-lanes = <4>;
158 pinctrl-names = "default"; 158 pinctrl-names = "default";
159 pinctrl-0 = <&pcie_clkreqn_cpm>; 159 pinctrl-0 = <&pcie_clkreqn_cpm>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index 6c8c4ab044aa..56abbb08c133 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -57,18 +57,6 @@
57 regulator-always-on; 57 regulator-always-on;
58 vin-supply = <&vcc_sys>; 58 vin-supply = <&vcc_sys>;
59 }; 59 };
60
61 vdd_log: vdd-log {
62 compatible = "pwm-regulator";
63 pwms = <&pwm2 0 25000 0>;
64 regulator-name = "vdd_log";
65 regulator-min-microvolt = <800000>;
66 regulator-max-microvolt = <1400000>;
67 regulator-always-on;
68 regulator-boot-on;
69 vin-supply = <&vcc_sys>;
70 };
71
72}; 60};
73 61
74&cpu_l0 { 62&cpu_l0 {
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index affc3c309353..8d7b47f9dfbf 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -36,7 +36,7 @@
36 36
37 wkup_uart0: serial@42300000 { 37 wkup_uart0: serial@42300000 {
38 compatible = "ti,am654-uart"; 38 compatible = "ti,am654-uart";
39 reg = <0x00 0x42300000 0x00 0x100>; 39 reg = <0x42300000 0x100>;
40 reg-shift = <2>; 40 reg-shift = <2>;
41 reg-io-width = <4>; 41 reg-io-width = <4>;
42 interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>; 42 interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index caa955f10e19..fac54fb050d0 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -56,6 +56,19 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
56{ 56{
57 return is_compat_task(); 57 return is_compat_task();
58} 58}
59
60#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
61
62static inline bool arch_syscall_match_sym_name(const char *sym,
63 const char *name)
64{
65 /*
66 * Since all syscall functions have __arm64_ prefix, we must skip it.
67 * However, as we described above, we decided to ignore compat
68 * syscalls, so we don't care about __arm64_compat_ prefix here.
69 */
70 return !strcmp(sym + 8, name);
71}
59#endif /* ifndef __ASSEMBLY__ */ 72#endif /* ifndef __ASSEMBLY__ */
60 73
61#endif /* __ASM_FTRACE_H */ 74#endif /* __ASM_FTRACE_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3e2091708b8e..6b0d4dff5012 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -24,6 +24,14 @@
24#define KERNEL_DS UL(-1) 24#define KERNEL_DS UL(-1)
25#define USER_DS (TASK_SIZE_64 - 1) 25#define USER_DS (TASK_SIZE_64 - 1)
26 26
27/*
28 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
29 * no point in shifting all network buffers by 2 bytes just to make some IP
30 * header fields appear aligned in memory, potentially sacrificing some DMA
31 * performance on some platforms.
32 */
33#define NET_IP_ALIGN 0
34
27#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
28#ifdef __KERNEL__ 36#ifdef __KERNEL__
29 37
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 0c909c4a932f..842fb9572661 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -468,7 +468,7 @@
468 SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ 468 SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
469 SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) 469 SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
470 470
471#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff 471#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
472#error "Inconsistent SCTLR_EL2 set/clear bits" 472#error "Inconsistent SCTLR_EL2 set/clear bits"
473#endif 473#endif
474 474
@@ -509,7 +509,7 @@
509 SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ 509 SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
510 SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) 510 SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
511 511
512#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff 512#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
513#error "Inconsistent SCTLR_EL1 set/clear bits" 513#error "Inconsistent SCTLR_EL1 set/clear bits"
514#endif 514#endif
515 515
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c3c0387aee18..5dfd23897dea 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -41,14 +41,14 @@
41 ALTERNATIVE("nop\n nop", \ 41 ALTERNATIVE("nop\n nop", \
42 "dsb ish\n tlbi " #op, \ 42 "dsb ish\n tlbi " #op, \
43 ARM64_WORKAROUND_REPEAT_TLBI, \ 43 ARM64_WORKAROUND_REPEAT_TLBI, \
44 CONFIG_QCOM_FALKOR_ERRATUM_1009) \ 44 CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
45 : : ) 45 : : )
46 46
47#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \ 47#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \
48 ALTERNATIVE("nop\n nop", \ 48 ALTERNATIVE("nop\n nop", \
49 "dsb ish\n tlbi " #op ", %0", \ 49 "dsb ish\n tlbi " #op ", %0", \
50 ARM64_WORKAROUND_REPEAT_TLBI, \ 50 ARM64_WORKAROUND_REPEAT_TLBI, \
51 CONFIG_QCOM_FALKOR_ERRATUM_1009) \ 51 CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
52 : : "r" (arg)) 52 : : "r" (arg))
53 53
54#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg) 54#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a509e35132d2..6ad715d67df8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -570,6 +570,20 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
570 570
571#endif 571#endif
572 572
573#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
574
575static const struct midr_range arm64_repeat_tlbi_cpus[] = {
576#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
577 MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
578#endif
579#ifdef CONFIG_ARM64_ERRATUM_1286807
580 MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
581#endif
582 {},
583};
584
585#endif
586
573const struct arm64_cpu_capabilities arm64_errata[] = { 587const struct arm64_cpu_capabilities arm64_errata[] = {
574#if defined(CONFIG_ARM64_ERRATUM_826319) || \ 588#if defined(CONFIG_ARM64_ERRATUM_826319) || \
575 defined(CONFIG_ARM64_ERRATUM_827319) || \ 589 defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -695,11 +709,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
695 .matches = is_kryo_midr, 709 .matches = is_kryo_midr,
696 }, 710 },
697#endif 711#endif
698#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 712#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
699 { 713 {
700 .desc = "Qualcomm Technologies Falkor erratum 1009", 714 .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
701 .capability = ARM64_WORKAROUND_REPEAT_TLBI, 715 .capability = ARM64_WORKAROUND_REPEAT_TLBI,
702 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), 716 ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
703 }, 717 },
704#endif 718#endif
705#ifdef CONFIG_ARM64_ERRATUM_858921 719#ifdef CONFIG_ARM64_ERRATUM_858921
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index af50064dea51..aec5ecb85737 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1333,7 +1333,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
1333 .cpu_enable = cpu_enable_hw_dbm, 1333 .cpu_enable = cpu_enable_hw_dbm,
1334 }, 1334 },
1335#endif 1335#endif
1336#ifdef CONFIG_ARM64_SSBD
1337 { 1336 {
1338 .desc = "CRC32 instructions", 1337 .desc = "CRC32 instructions",
1339 .capability = ARM64_HAS_CRC32, 1338 .capability = ARM64_HAS_CRC32,
@@ -1343,6 +1342,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
1343 .field_pos = ID_AA64ISAR0_CRC32_SHIFT, 1342 .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
1344 .min_field_value = 1, 1343 .min_field_value = 1,
1345 }, 1344 },
1345#ifdef CONFIG_ARM64_SSBD
1346 { 1346 {
1347 .desc = "Speculative Store Bypassing Safe (SSBS)", 1347 .desc = "Speculative Store Bypassing Safe (SSBS)",
1348 .capability = ARM64_SSBS, 1348 .capability = ARM64_SSBS,
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 50986e388d2b..57e962290df3 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -216,8 +216,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
216{ 216{
217 unsigned long return_hooker = (unsigned long)&return_to_handler; 217 unsigned long return_hooker = (unsigned long)&return_to_handler;
218 unsigned long old; 218 unsigned long old;
219 struct ftrace_graph_ent trace;
220 int err;
221 219
222 if (unlikely(atomic_read(&current->tracing_graph_pause))) 220 if (unlikely(atomic_read(&current->tracing_graph_pause)))
223 return; 221 return;
@@ -229,18 +227,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
229 */ 227 */
230 old = *parent; 228 old = *parent;
231 229
232 trace.func = self_addr; 230 if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
233 trace.depth = current->curr_ret_stack + 1;
234
235 /* Only trace if the calling function expects to */
236 if (!ftrace_graph_entry(&trace))
237 return;
238
239 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
240 frame_pointer, NULL);
241 if (err == -EBUSY)
242 return;
243 else
244 *parent = return_hooker; 231 *parent = return_hooker;
245} 232}
246 233
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 953e316521fc..f4fc1e0544b7 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -313,6 +313,7 @@ void __init setup_arch(char **cmdline_p)
313 arm64_memblock_init(); 313 arm64_memblock_init();
314 314
315 paging_init(); 315 paging_init();
316 efi_apply_persistent_mem_reservations();
316 317
317 acpi_table_upgrade(); 318 acpi_table_upgrade();
318 319
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9d9582cac6c4..9b432d9fcada 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -483,8 +483,6 @@ void __init arm64_memblock_init(void)
483 high_memory = __va(memblock_end_of_DRAM() - 1) + 1; 483 high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
484 484
485 dma_contiguous_reserve(arm64_dma_phys_limit); 485 dma_contiguous_reserve(arm64_dma_phys_limit);
486
487 memblock_allow_resize();
488} 486}
489 487
490void __init bootmem_init(void) 488void __init bootmem_init(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 394b8d554def..d1d6601b385d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -659,6 +659,8 @@ void __init paging_init(void)
659 659
660 memblock_free(__pa_symbol(init_pg_dir), 660 memblock_free(__pa_symbol(init_pg_dir),
661 __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir)); 661 __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
662
663 memblock_allow_resize();
662} 664}
663 665
664/* 666/*
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a6fdaea07c63..89198017e8e6 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -351,7 +351,8 @@ static void build_epilogue(struct jit_ctx *ctx)
351 * >0 - successfully JITed a 16-byte eBPF instruction. 351 * >0 - successfully JITed a 16-byte eBPF instruction.
352 * <0 - failed to JIT. 352 * <0 - failed to JIT.
353 */ 353 */
354static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) 354static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
355 bool extra_pass)
355{ 356{
356 const u8 code = insn->code; 357 const u8 code = insn->code;
357 const u8 dst = bpf2a64[insn->dst_reg]; 358 const u8 dst = bpf2a64[insn->dst_reg];
@@ -625,12 +626,19 @@ emit_cond_jmp:
625 case BPF_JMP | BPF_CALL: 626 case BPF_JMP | BPF_CALL:
626 { 627 {
627 const u8 r0 = bpf2a64[BPF_REG_0]; 628 const u8 r0 = bpf2a64[BPF_REG_0];
628 const u64 func = (u64)__bpf_call_base + imm; 629 bool func_addr_fixed;
630 u64 func_addr;
631 int ret;
629 632
630 if (ctx->prog->is_func) 633 ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
631 emit_addr_mov_i64(tmp, func, ctx); 634 &func_addr, &func_addr_fixed);
635 if (ret < 0)
636 return ret;
637 if (func_addr_fixed)
638 /* We can use optimized emission here. */
639 emit_a64_mov_i64(tmp, func_addr, ctx);
632 else 640 else
633 emit_a64_mov_i64(tmp, func, ctx); 641 emit_addr_mov_i64(tmp, func_addr, ctx);
634 emit(A64_BLR(tmp), ctx); 642 emit(A64_BLR(tmp), ctx);
635 emit(A64_MOV(1, r0, A64_R(0)), ctx); 643 emit(A64_MOV(1, r0, A64_R(0)), ctx);
636 break; 644 break;
@@ -753,7 +761,7 @@ emit_cond_jmp:
753 return 0; 761 return 0;
754} 762}
755 763
756static int build_body(struct jit_ctx *ctx) 764static int build_body(struct jit_ctx *ctx, bool extra_pass)
757{ 765{
758 const struct bpf_prog *prog = ctx->prog; 766 const struct bpf_prog *prog = ctx->prog;
759 int i; 767 int i;
@@ -762,7 +770,7 @@ static int build_body(struct jit_ctx *ctx)
762 const struct bpf_insn *insn = &prog->insnsi[i]; 770 const struct bpf_insn *insn = &prog->insnsi[i];
763 int ret; 771 int ret;
764 772
765 ret = build_insn(insn, ctx); 773 ret = build_insn(insn, ctx, extra_pass);
766 if (ret > 0) { 774 if (ret > 0) {
767 i++; 775 i++;
768 if (ctx->image == NULL) 776 if (ctx->image == NULL)
@@ -858,7 +866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
858 /* 1. Initial fake pass to compute ctx->idx. */ 866 /* 1. Initial fake pass to compute ctx->idx. */
859 867
860 /* Fake pass to fill in ctx->offset. */ 868 /* Fake pass to fill in ctx->offset. */
861 if (build_body(&ctx)) { 869 if (build_body(&ctx, extra_pass)) {
862 prog = orig_prog; 870 prog = orig_prog;
863 goto out_off; 871 goto out_off;
864 } 872 }
@@ -888,7 +896,7 @@ skip_init_ctx:
888 896
889 build_prologue(&ctx, was_classic); 897 build_prologue(&ctx, was_classic);
890 898
891 if (build_body(&ctx)) { 899 if (build_body(&ctx, extra_pass)) {
892 bpf_jit_binary_free(header); 900 bpf_jit_binary_free(header);
893 prog = orig_prog; 901 prog = orig_prog;
894 goto out_off; 902 goto out_off;
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index ebef7f40aabb..c5c253cb9bd6 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -59,7 +59,9 @@ extern struct node_cpuid_s node_cpuid[NR_CPUS];
59 */ 59 */
60 60
61extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; 61extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
62#define node_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)]) 62#define slit_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)])
63extern int __node_distance(int from, int to);
64#define node_distance(from,to) __node_distance(from, to)
63 65
64extern int paddr_to_nid(unsigned long paddr); 66extern int paddr_to_nid(unsigned long paddr);
65 67
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 1dacbf5e9e09..41eb281709da 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -578,8 +578,8 @@ void __init acpi_numa_fixup(void)
578 if (!slit_table) { 578 if (!slit_table) {
579 for (i = 0; i < MAX_NUMNODES; i++) 579 for (i = 0; i < MAX_NUMNODES; i++)
580 for (j = 0; j < MAX_NUMNODES; j++) 580 for (j = 0; j < MAX_NUMNODES; j++)
581 node_distance(i, j) = i == j ? LOCAL_DISTANCE : 581 slit_distance(i, j) = i == j ?
582 REMOTE_DISTANCE; 582 LOCAL_DISTANCE : REMOTE_DISTANCE;
583 return; 583 return;
584 } 584 }
585 585
@@ -592,7 +592,7 @@ void __init acpi_numa_fixup(void)
592 if (!pxm_bit_test(j)) 592 if (!pxm_bit_test(j))
593 continue; 593 continue;
594 node_to = pxm_to_node(j); 594 node_to = pxm_to_node(j);
595 node_distance(node_from, node_to) = 595 slit_distance(node_from, node_to) =
596 slit_table->entry[i * slit_table->locality_count + j]; 596 slit_table->entry[i * slit_table->locality_count + j];
597 } 597 }
598 } 598 }
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 3861d6e32d5f..a03803506b0c 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -36,6 +36,12 @@ struct node_cpuid_s node_cpuid[NR_CPUS] =
36 */ 36 */
37u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; 37u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
38 38
39int __node_distance(int from, int to)
40{
41 return slit_distance(from, to);
42}
43EXPORT_SYMBOL(__node_distance);
44
39/* Identify which cnode a physical address resides on */ 45/* Identify which cnode a physical address resides on */
40int 46int
41paddr_to_nid(unsigned long paddr) 47paddr_to_nid(unsigned long paddr)
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 6181e4134483..fe3ddd73a0cc 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -55,12 +55,12 @@
55 */ 55 */
56#ifdef CONFIG_SUN3 56#ifdef CONFIG_SUN3
57#define PTRS_PER_PTE 16 57#define PTRS_PER_PTE 16
58#define __PAGETABLE_PMD_FOLDED 58#define __PAGETABLE_PMD_FOLDED 1
59#define PTRS_PER_PMD 1 59#define PTRS_PER_PMD 1
60#define PTRS_PER_PGD 2048 60#define PTRS_PER_PGD 2048
61#elif defined(CONFIG_COLDFIRE) 61#elif defined(CONFIG_COLDFIRE)
62#define PTRS_PER_PTE 512 62#define PTRS_PER_PTE 512
63#define __PAGETABLE_PMD_FOLDED 63#define __PAGETABLE_PMD_FOLDED 1
64#define PTRS_PER_PMD 1 64#define PTRS_PER_PMD 1
65#define PTRS_PER_PGD 1024 65#define PTRS_PER_PGD 1024
66#else 66#else
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index f64ebb9c9a41..e14b6621c933 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -63,7 +63,7 @@ extern int mem_init_done;
63 63
64#include <asm-generic/4level-fixup.h> 64#include <asm-generic/4level-fixup.h>
65 65
66#define __PAGETABLE_PMD_FOLDED 66#define __PAGETABLE_PMD_FOLDED 1
67 67
68#ifdef __KERNEL__ 68#ifdef __KERNEL__
69#ifndef __ASSEMBLY__ 69#ifndef __ASSEMBLY__
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index d57563c58a26..224eea40e1ee 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -22,8 +22,7 @@
22void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) 22void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
23{ 23{
24 unsigned long old; 24 unsigned long old;
25 int faulted, err; 25 int faulted;
26 struct ftrace_graph_ent trace;
27 unsigned long return_hooker = (unsigned long) 26 unsigned long return_hooker = (unsigned long)
28 &return_to_handler; 27 &return_to_handler;
29 28
@@ -63,18 +62,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
63 return; 62 return;
64 } 63 }
65 64
66 err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); 65 if (function_graph_enter(old, self_addr, 0, NULL))
67 if (err == -EBUSY) {
68 *parent = old; 66 *parent = old;
69 return;
70 }
71
72 trace.func = self_addr;
73 /* Only trace if the calling function expects to */
74 if (!ftrace_graph_entry(&trace)) {
75 current->curr_ret_stack--;
76 *parent = old;
77 }
78} 67}
79#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 68#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
80 69
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 75108ec669eb..6c79e8a16a26 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
67void (*cvmx_override_ipd_port_setup) (int ipd_port); 67void (*cvmx_override_ipd_port_setup) (int ipd_port);
68 68
69/* Port count per interface */ 69/* Port count per interface */
70static int interface_port_count[5]; 70static int interface_port_count[9];
71 71
72/** 72/**
73 * Return the number of interfaces the chip has. Each interface 73 * Return the number of interfaces the chip has. Each interface
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index 490b12af103c..c52d0efacd14 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
140CONFIG_RTC_DRV_DS1307=y 140CONFIG_RTC_DRV_DS1307=y
141CONFIG_STAGING=y 141CONFIG_STAGING=y
142CONFIG_OCTEON_ETHERNET=y 142CONFIG_OCTEON_ETHERNET=y
143CONFIG_OCTEON_USB=y
143# CONFIG_IOMMU_SUPPORT is not set 144# CONFIG_IOMMU_SUPPORT is not set
144CONFIG_RAS=y 145CONFIG_RAS=y
145CONFIG_EXT4_FS=y 146CONFIG_EXT4_FS=y
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 0170602a1e4e..6cf8ffb5367e 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -73,7 +73,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
73#ifdef CONFIG_64BIT 73#ifdef CONFIG_64BIT
74 case 4: case 5: case 6: case 7: 74 case 4: case 5: case 6: case 7:
75#ifdef CONFIG_MIPS32_O32 75#ifdef CONFIG_MIPS32_O32
76 if (test_thread_flag(TIF_32BIT_REGS)) 76 if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
77 return get_user(*arg, (int *)usp + n); 77 return get_user(*arg, (int *)usp + n);
78 else 78 else
79#endif 79#endif
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 7f3dfdbc3657..b122cbb4aad1 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -322,7 +322,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
322 unsigned long fp) 322 unsigned long fp)
323{ 323{
324 unsigned long old_parent_ra; 324 unsigned long old_parent_ra;
325 struct ftrace_graph_ent trace;
326 unsigned long return_hooker = (unsigned long) 325 unsigned long return_hooker = (unsigned long)
327 &return_to_handler; 326 &return_to_handler;
328 int faulted, insns; 327 int faulted, insns;
@@ -369,12 +368,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
369 if (unlikely(faulted)) 368 if (unlikely(faulted))
370 goto out; 369 goto out;
371 370
372 if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
373 NULL) == -EBUSY) {
374 *parent_ra_addr = old_parent_ra;
375 return;
376 }
377
378 /* 371 /*
379 * Get the recorded ip of the current mcount calling site in the 372 * Get the recorded ip of the current mcount calling site in the
380 * __mcount_loc section, which will be used to filter the function 373 * __mcount_loc section, which will be used to filter the function
@@ -382,13 +375,10 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
382 */ 375 */
383 376
384 insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; 377 insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
385 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); 378 self_ra -= (MCOUNT_INSN_SIZE * insns);
386 379
387 /* Only trace if the calling function expects to */ 380 if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
388 if (!ftrace_graph_entry(&trace)) {
389 current->curr_ret_stack--;
390 *parent_ra_addr = old_parent_ra; 381 *parent_ra_addr = old_parent_ra;
391 }
392 return; 382 return;
393out: 383out:
394 ftrace_graph_stop(); 384 ftrace_graph_stop();
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index ea09ed6a80a9..8c6c48ed786a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p)
794 794
795 /* call board setup routine */ 795 /* call board setup routine */
796 plat_mem_setup(); 796 plat_mem_setup();
797 memblock_set_bottom_up(true);
797 798
798 /* 799 /*
799 * Make sure all kernel memory is in the maps. The "UP" and 800 * Make sure all kernel memory is in the maps. The "UP" and
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0f852e1b5891..15e103c6d799 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2260,10 +2260,8 @@ void __init trap_init(void)
2260 unsigned long size = 0x200 + VECTORSPACING*64; 2260 unsigned long size = 0x200 + VECTORSPACING*64;
2261 phys_addr_t ebase_pa; 2261 phys_addr_t ebase_pa;
2262 2262
2263 memblock_set_bottom_up(true);
2264 ebase = (unsigned long) 2263 ebase = (unsigned long)
2265 memblock_alloc_from(size, 1 << fls(size), 0); 2264 memblock_alloc_from(size, 1 << fls(size), 0);
2266 memblock_set_bottom_up(false);
2267 2265
2268 /* 2266 /*
2269 * Try to ensure ebase resides in KSeg0 if possible. 2267 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2307,6 +2305,7 @@ void __init trap_init(void)
2307 if (board_ebase_setup) 2305 if (board_ebase_setup)
2308 board_ebase_setup(); 2306 board_ebase_setup();
2309 per_cpu_trap_init(true); 2307 per_cpu_trap_init(true);
2308 memblock_set_bottom_up(false);
2310 2309
2311 /* 2310 /*
2312 * Copy the generic exception handlers to their final destination. 2311 * Copy the generic exception handlers to their final destination.
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 622761878cd1..60bf0a1cb757 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -231,6 +231,8 @@ static __init void prom_meminit(void)
231 cpumask_clear(&__node_data[(node)]->cpumask); 231 cpumask_clear(&__node_data[(node)]->cpumask);
232 } 232 }
233 } 233 }
234 max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
235
234 for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { 236 for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
235 node = cpu / loongson_sysconf.cores_per_node; 237 node = cpu / loongson_sysconf.cores_per_node;
236 if (node >= num_online_nodes()) 238 if (node >= num_online_nodes())
@@ -248,19 +250,9 @@ static __init void prom_meminit(void)
248 250
249void __init paging_init(void) 251void __init paging_init(void)
250{ 252{
251 unsigned node;
252 unsigned long zones_size[MAX_NR_ZONES] = {0, }; 253 unsigned long zones_size[MAX_NR_ZONES] = {0, };
253 254
254 pagetable_init(); 255 pagetable_init();
255
256 for_each_online_node(node) {
257 unsigned long start_pfn, end_pfn;
258
259 get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
260
261 if (end_pfn > max_low_pfn)
262 max_low_pfn = end_pfn;
263 }
264#ifdef CONFIG_ZONE_DMA32 256#ifdef CONFIG_ZONE_DMA32
265 zones_size[ZONE_DMA32] = MAX_DMA32_PFN; 257 zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
266#endif 258#endif
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index e6c9485cadcf..cb38461391cb 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -50,7 +50,7 @@ void *arch_dma_alloc(struct device *dev, size_t size,
50 void *ret; 50 void *ret;
51 51
52 ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); 52 ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
53 if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) { 53 if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
54 dma_cache_wback_inv((unsigned long) ret, size); 54 dma_cache_wback_inv((unsigned long) ret, size);
55 ret = (void *)UNCAC_ADDR(ret); 55 ret = (void *)UNCAC_ADDR(ret);
56 } 56 }
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 41b71c4352c2..c1ce6f43642b 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -84,7 +84,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = {
84}; 84};
85static struct rt2880_pmx_func nd_sd_grp[] = { 85static struct rt2880_pmx_func nd_sd_grp[] = {
86 FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), 86 FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
87 FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15) 87 FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
88}; 88};
89 89
90static struct rt2880_pmx_group mt7620a_pinmux_data[] = { 90static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index d8b8444d6795..813d13f92957 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -435,6 +435,7 @@ void __init prom_meminit(void)
435 435
436 mlreset(); 436 mlreset();
437 szmem(); 437 szmem();
438 max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
438 439
439 for (node = 0; node < MAX_COMPACT_NODES; node++) { 440 for (node = 0; node < MAX_COMPACT_NODES; node++) {
440 if (node_online(node)) { 441 if (node_online(node)) {
@@ -455,18 +456,8 @@ extern void setup_zero_pages(void);
455void __init paging_init(void) 456void __init paging_init(void)
456{ 457{
457 unsigned long zones_size[MAX_NR_ZONES] = {0, }; 458 unsigned long zones_size[MAX_NR_ZONES] = {0, };
458 unsigned node;
459 459
460 pagetable_init(); 460 pagetable_init();
461
462 for_each_online_node(node) {
463 unsigned long start_pfn, end_pfn;
464
465 get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
466
467 if (end_pfn > max_low_pfn)
468 max_low_pfn = end_pfn;
469 }
470 zones_size[ZONE_NORMAL] = max_low_pfn; 461 zones_size[ZONE_NORMAL] = max_low_pfn;
471 free_area_init_nodes(zones_size); 462 free_area_init_nodes(zones_size);
472} 463}
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index d3e19a55cf53..9f52db930c00 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -4,7 +4,7 @@
4#ifndef _ASMNDS32_PGTABLE_H 4#ifndef _ASMNDS32_PGTABLE_H
5#define _ASMNDS32_PGTABLE_H 5#define _ASMNDS32_PGTABLE_H
6 6
7#define __PAGETABLE_PMD_FOLDED 7#define __PAGETABLE_PMD_FOLDED 1
8#include <asm-generic/4level-fixup.h> 8#include <asm-generic/4level-fixup.h>
9#include <asm-generic/sizes.h> 9#include <asm-generic/sizes.h>
10 10
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index a0a9679ad5de..8a41372551ff 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -211,29 +211,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
211 unsigned long frame_pointer) 211 unsigned long frame_pointer)
212{ 212{
213 unsigned long return_hooker = (unsigned long)&return_to_handler; 213 unsigned long return_hooker = (unsigned long)&return_to_handler;
214 struct ftrace_graph_ent trace;
215 unsigned long old; 214 unsigned long old;
216 int err;
217 215
218 if (unlikely(atomic_read(&current->tracing_graph_pause))) 216 if (unlikely(atomic_read(&current->tracing_graph_pause)))
219 return; 217 return;
220 218
221 old = *parent; 219 old = *parent;
222 220
223 trace.func = self_addr; 221 if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
224 trace.depth = current->curr_ret_stack + 1; 222 *parent = return_hooker;
225
226 /* Only trace if the calling function expects to */
227 if (!ftrace_graph_entry(&trace))
228 return;
229
230 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
231 frame_pointer, NULL);
232
233 if (err == -EBUSY)
234 return;
235
236 *parent = return_hooker;
237} 223}
238 224
239noinline void ftrace_graph_caller(void) 225noinline void ftrace_graph_caller(void)
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index d047a09d660f..1085385e1f06 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS
71KBUILD_CFLAGS_KERNEL += -mlong-calls 71KBUILD_CFLAGS_KERNEL += -mlong-calls
72endif 72endif
73 73
74# Without this, "ld -r" results in .text sections that are too big (> 0x40000)
75# for branches to reach stubs. And multiple .text sections trigger a warning
76# when creating the sysfs module information section.
77ifndef CONFIG_64BIT
78KBUILD_CFLAGS_MODULE += -ffunction-sections
79endif
80
74# select which processor to optimise for 81# select which processor to optimise for
75cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100 82cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100
76cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200 83cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index b941ac7d4e70..c7bb74e22436 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -111,7 +111,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
111#if CONFIG_PGTABLE_LEVELS == 3 111#if CONFIG_PGTABLE_LEVELS == 3
112#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) 112#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
113#else 113#else
114#define __PAGETABLE_PMD_FOLDED 114#define __PAGETABLE_PMD_FOLDED 1
115#define BITS_PER_PMD 0 115#define BITS_PER_PMD 0
116#endif 116#endif
117#define PTRS_PER_PMD (1UL << BITS_PER_PMD) 117#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 16aec9ba2580..8a63515f03bf 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
37 volatile unsigned int *a; 37 volatile unsigned int *a;
38 38
39 a = __ldcw_align(x); 39 a = __ldcw_align(x);
40 /* Release with ordered store. */ 40 mb();
41 __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory"); 41 *a = 1;
42} 42}
43 43
44static inline int arch_spin_trylock(arch_spinlock_t *x) 44static inline int arch_spin_trylock(arch_spinlock_t *x)
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 6fa8535d3cce..e46a4157a894 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -30,7 +30,6 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
30 unsigned long self_addr) 30 unsigned long self_addr)
31{ 31{
32 unsigned long old; 32 unsigned long old;
33 struct ftrace_graph_ent trace;
34 extern int parisc_return_to_handler; 33 extern int parisc_return_to_handler;
35 34
36 if (unlikely(ftrace_graph_is_dead())) 35 if (unlikely(ftrace_graph_is_dead()))
@@ -41,19 +40,9 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
41 40
42 old = *parent; 41 old = *parent;
43 42
44 trace.func = self_addr; 43 if (!function_graph_enter(old, self_addr, 0, NULL))
45 trace.depth = current->curr_ret_stack + 1; 44 /* activate parisc_return_to_handler() as return point */
46 45 *parent = (unsigned long) &parisc_return_to_handler;
47 /* Only trace if the calling function expects to */
48 if (!ftrace_graph_entry(&trace))
49 return;
50
51 if (ftrace_push_return_trace(old, self_addr, &trace.depth,
52 0, NULL) == -EBUSY)
53 return;
54
55 /* activate parisc_return_to_handler() as return point */
56 *parent = (unsigned long) &parisc_return_to_handler;
57} 46}
58#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 47#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
59 48
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 9505c317818d..a9bc90dc4ae7 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -640,7 +640,8 @@ cas_action:
640 sub,<> %r28, %r25, %r0 640 sub,<> %r28, %r25, %r0
6412: stw %r24, 0(%r26) 6412: stw %r24, 0(%r26)
642 /* Free lock */ 642 /* Free lock */
643 stw,ma %r20, 0(%sr2,%r20) 643 sync
644 stw %r20, 0(%sr2,%r20)
644#if ENABLE_LWS_DEBUG 645#if ENABLE_LWS_DEBUG
645 /* Clear thread register indicator */ 646 /* Clear thread register indicator */
646 stw %r0, 4(%sr2,%r20) 647 stw %r0, 4(%sr2,%r20)
@@ -654,7 +655,8 @@ cas_action:
6543: 6553:
655 /* Error occurred on load or store */ 656 /* Error occurred on load or store */
656 /* Free lock */ 657 /* Free lock */
657 stw,ma %r20, 0(%sr2,%r20) 658 sync
659 stw %r20, 0(%sr2,%r20)
658#if ENABLE_LWS_DEBUG 660#if ENABLE_LWS_DEBUG
659 stw %r0, 4(%sr2,%r20) 661 stw %r0, 4(%sr2,%r20)
660#endif 662#endif
@@ -855,7 +857,8 @@ cas2_action:
855 857
856cas2_end: 858cas2_end:
857 /* Free lock */ 859 /* Free lock */
858 stw,ma %r20, 0(%sr2,%r20) 860 sync
861 stw %r20, 0(%sr2,%r20)
859 /* Enable interrupts */ 862 /* Enable interrupts */
860 ssm PSW_SM_I, %r0 863 ssm PSW_SM_I, %r0
861 /* Return to userspace, set no error */ 864 /* Return to userspace, set no error */
@@ -865,7 +868,8 @@ cas2_end:
86522: 86822:
866 /* Error occurred on load or store */ 869 /* Error occurred on load or store */
867 /* Free lock */ 870 /* Free lock */
868 stw,ma %r20, 0(%sr2,%r20) 871 sync
872 stw %r20, 0(%sr2,%r20)
869 ssm PSW_SM_I, %r0 873 ssm PSW_SM_I, %r0
870 ldo 1(%r0),%r28 874 ldo 1(%r0),%r28
871 b lws_exit 875 b lws_exit
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 3ef40b703c4a..e746becd9d6f 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
268 * their hooks, a bitfield is reserved for use by the platform near the 268 * their hooks, a bitfield is reserved for use by the platform near the
269 * top of MMIO addresses (not PIO, those have to cope the hard way). 269 * top of MMIO addresses (not PIO, those have to cope the hard way).
270 * 270 *
271 * This bit field is 12 bits and is at the top of the IO virtual 271 * The highest address in the kernel virtual space are:
272 * addresses PCI_IO_INDIRECT_TOKEN_MASK.
273 * 272 *
274 * The kernel virtual space is thus: 273 * d0003fffffffffff # with Hash MMU
274 * c00fffffffffffff # with Radix MMU
275 * 275 *
276 * 0xD000000000000000 : vmalloc 276 * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
277 * 0xD000080000000000 : PCI PHB IO space 277 * that can be used for the field.
278 * 0xD000080080000000 : ioremap
279 * 0xD0000fffffffffff : end of ioremap region
280 *
281 * Since the top 4 bits are reserved as the region ID, we use thus
282 * the next 12 bits and keep 4 bits available for the future if the
283 * virtual address space is ever to be extended.
284 * 278 *
285 * The direct IO mapping operations will then mask off those bits 279 * The direct IO mapping operations will then mask off those bits
286 * before doing the actual access, though that only happen when 280 * before doing the actual access, though that only happen when
@@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
292 */ 286 */
293 287
294#ifdef CONFIG_PPC_INDIRECT_MMIO 288#ifdef CONFIG_PPC_INDIRECT_MMIO
295#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul 289#define PCI_IO_IND_TOKEN_SHIFT 52
296#define PCI_IO_IND_TOKEN_SHIFT 48 290#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT)
297#define PCI_FIX_ADDR(addr) \ 291#define PCI_FIX_ADDR(addr) \
298 ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) 292 ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
299#define PCI_GET_ADDR_TOKEN(addr) \ 293#define PCI_GET_ADDR_TOKEN(addr) \
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 6093bc8f74e5..a6e9e314c707 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -493,6 +493,8 @@
493 __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) 493 __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
494#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ 494#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
495 __PPC_RT(t) | __PPC_RB(b)) 495 __PPC_RT(t) | __PPC_RB(b))
496#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
497 ___PPC_RT(t) | ___PPC_RB(b))
496#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \ 498#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \
497 __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) 499 __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
498/* PASemi instructions */ 500/* PASemi instructions */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index f73886a1a7f5..0b8a735b6d85 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -54,6 +54,7 @@ struct pt_regs
54 54
55#ifdef CONFIG_PPC64 55#ifdef CONFIG_PPC64
56 unsigned long ppr; 56 unsigned long ppr;
57 unsigned long __pad; /* Maintain 16 byte interrupt stack alignment */
57#endif 58#endif
58}; 59};
59#endif 60#endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2a51e4cc8246..236c1151a3a7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
636{ 636{
637 unsigned long pa; 637 unsigned long pa;
638 638
639 BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
640
639 pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit, 641 pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
640 early_cpu_to_node(cpu), MEMBLOCK_NONE); 642 early_cpu_to_node(cpu), MEMBLOCK_NONE);
641 if (!pa) { 643 if (!pa) {
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 4bf051d3e21e..b65c8a34ad6e 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -950,7 +950,6 @@ int ftrace_disable_ftrace_graph_caller(void)
950 */ 950 */
951unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) 951unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
952{ 952{
953 struct ftrace_graph_ent trace;
954 unsigned long return_hooker; 953 unsigned long return_hooker;
955 954
956 if (unlikely(ftrace_graph_is_dead())) 955 if (unlikely(ftrace_graph_is_dead()))
@@ -961,18 +960,8 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
961 960
962 return_hooker = ppc_function_entry(return_to_handler); 961 return_hooker = ppc_function_entry(return_to_handler);
963 962
964 trace.func = ip; 963 if (!function_graph_enter(parent, ip, 0, NULL))
965 trace.depth = current->curr_ret_stack + 1; 964 parent = return_hooker;
966
967 /* Only trace if the calling function expects to */
968 if (!ftrace_graph_entry(&trace))
969 goto out;
970
971 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
972 NULL) == -EBUSY)
973 goto out;
974
975 parent = return_hooker;
976out: 965out:
977 return parent; 966 return parent;
978} 967}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index d65b961661fb..a56f8413758a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -983,6 +983,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
983 ret = kvmhv_enter_nested_guest(vcpu); 983 ret = kvmhv_enter_nested_guest(vcpu);
984 if (ret == H_INTERRUPT) { 984 if (ret == H_INTERRUPT) {
985 kvmppc_set_gpr(vcpu, 3, 0); 985 kvmppc_set_gpr(vcpu, 3, 0);
986 vcpu->arch.hcall_needed = 0;
986 return -EINTR; 987 return -EINTR;
987 } 988 }
988 break; 989 break;
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 491b0f715d6b..ea1d7c808319 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -6,8 +6,6 @@
6 6
7#undef TRACE_SYSTEM 7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kvm 8#define TRACE_SYSTEM kvm
9#define TRACE_INCLUDE_PATH .
10#define TRACE_INCLUDE_FILE trace
11 9
12/* 10/*
13 * Tracepoint for guest mode entry. 11 * Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
120#endif /* _TRACE_KVM_H */ 118#endif /* _TRACE_KVM_H */
121 119
122/* This part must be outside protection */ 120/* This part must be outside protection */
121#undef TRACE_INCLUDE_PATH
122#undef TRACE_INCLUDE_FILE
123
124#define TRACE_INCLUDE_PATH .
125#define TRACE_INCLUDE_FILE trace
126
123#include <trace/define_trace.h> 127#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index ac640e81fdc5..3837842986aa 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -6,8 +6,6 @@
6 6
7#undef TRACE_SYSTEM 7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kvm_booke 8#define TRACE_SYSTEM kvm_booke
9#define TRACE_INCLUDE_PATH .
10#define TRACE_INCLUDE_FILE trace_booke
11 9
12#define kvm_trace_symbol_exit \ 10#define kvm_trace_symbol_exit \
13 {0, "CRITICAL"}, \ 11 {0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
218#endif 216#endif
219 217
220/* This part must be outside protection */ 218/* This part must be outside protection */
219
220#undef TRACE_INCLUDE_PATH
221#undef TRACE_INCLUDE_FILE
222
223#define TRACE_INCLUDE_PATH .
224#define TRACE_INCLUDE_FILE trace_booke
225
221#include <trace/define_trace.h> 226#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index bcfe8a987f6a..8a1e3b0047f1 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -9,8 +9,6 @@
9 9
10#undef TRACE_SYSTEM 10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM kvm_hv 11#define TRACE_SYSTEM kvm_hv
12#define TRACE_INCLUDE_PATH .
13#define TRACE_INCLUDE_FILE trace_hv
14 12
15#define kvm_trace_symbol_hcall \ 13#define kvm_trace_symbol_hcall \
16 {H_REMOVE, "H_REMOVE"}, \ 14 {H_REMOVE, "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
497#endif /* _TRACE_KVM_HV_H */ 495#endif /* _TRACE_KVM_HV_H */
498 496
499/* This part must be outside protection */ 497/* This part must be outside protection */
498
499#undef TRACE_INCLUDE_PATH
500#undef TRACE_INCLUDE_FILE
501
502#define TRACE_INCLUDE_PATH .
503#define TRACE_INCLUDE_FILE trace_hv
504
500#include <trace/define_trace.h> 505#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 2f9a8829552b..46a46d328fbf 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -8,8 +8,6 @@
8 8
9#undef TRACE_SYSTEM 9#undef TRACE_SYSTEM
10#define TRACE_SYSTEM kvm_pr 10#define TRACE_SYSTEM kvm_pr
11#define TRACE_INCLUDE_PATH .
12#define TRACE_INCLUDE_FILE trace_pr
13 11
14TRACE_EVENT(kvm_book3s_reenter, 12TRACE_EVENT(kvm_book3s_reenter,
15 TP_PROTO(int r, struct kvm_vcpu *vcpu), 13 TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
257#endif /* _TRACE_KVM_H */ 255#endif /* _TRACE_KVM_H */
258 256
259/* This part must be outside protection */ 257/* This part must be outside protection */
258
259#undef TRACE_INCLUDE_PATH
260#undef TRACE_INCLUDE_FILE
261
262#define TRACE_INCLUDE_PATH .
263#define TRACE_INCLUDE_FILE trace_pr
264
260#include <trace/define_trace.h> 265#include <trace/define_trace.h>
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3a048e98a132..ce28ae5ca080 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,
1178 1178
1179 switch (rc) { 1179 switch (rc) {
1180 case H_FUNCTION: 1180 case H_FUNCTION:
1181 printk(KERN_INFO 1181 printk_once(KERN_INFO
1182 "VPHN is not supported. Disabling polling...\n"); 1182 "VPHN is not supported. Disabling polling...\n");
1183 stop_topology_update(); 1183 stop_topology_update();
1184 break; 1184 break;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index c3fdf2969d9f..bc3914d54e26 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -19,6 +19,7 @@
19#include <asm/mmu.h> 19#include <asm/mmu.h>
20#include <asm/mmu_context.h> 20#include <asm/mmu_context.h>
21#include <asm/paca.h> 21#include <asm/paca.h>
22#include <asm/ppc-opcode.h>
22#include <asm/cputable.h> 23#include <asm/cputable.h>
23#include <asm/cacheflush.h> 24#include <asm/cacheflush.h>
24#include <asm/smp.h> 25#include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
58 return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); 59 return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
59} 60}
60 61
61static void assert_slb_exists(unsigned long ea) 62static void assert_slb_presence(bool present, unsigned long ea)
62{ 63{
63#ifdef CONFIG_DEBUG_VM 64#ifdef CONFIG_DEBUG_VM
64 unsigned long tmp; 65 unsigned long tmp;
65 66
66 WARN_ON_ONCE(mfmsr() & MSR_EE); 67 WARN_ON_ONCE(mfmsr() & MSR_EE);
67 68
68 asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0"); 69 if (!cpu_has_feature(CPU_FTR_ARCH_206))
69 WARN_ON(tmp == 0); 70 return;
70#endif
71}
72
73static void assert_slb_notexists(unsigned long ea)
74{
75#ifdef CONFIG_DEBUG_VM
76 unsigned long tmp;
77 71
78 WARN_ON_ONCE(mfmsr() & MSR_EE); 72 asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
79 73
80 asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0"); 74 WARN_ON(present == (tmp == 0));
81 WARN_ON(tmp != 0);
82#endif 75#endif
83} 76}
84 77
@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
114 */ 107 */
115 slb_shadow_update(ea, ssize, flags, index); 108 slb_shadow_update(ea, ssize, flags, index);
116 109
117 assert_slb_notexists(ea); 110 assert_slb_presence(false, ea);
118 asm volatile("slbmte %0,%1" : 111 asm volatile("slbmte %0,%1" :
119 : "r" (mk_vsid_data(ea, ssize, flags)), 112 : "r" (mk_vsid_data(ea, ssize, flags)),
120 "r" (mk_esid_data(ea, ssize, index)) 113 "r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
137 "r" (be64_to_cpu(p->save_area[index].esid))); 130 "r" (be64_to_cpu(p->save_area[index].esid)));
138 } 131 }
139 132
140 assert_slb_exists(local_paca->kstack); 133 assert_slb_presence(true, local_paca->kstack);
141} 134}
142 135
143/* 136/*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
185 :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)), 178 :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
186 "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid)) 179 "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
187 : "memory"); 180 : "memory");
188 assert_slb_exists(get_paca()->kstack); 181 assert_slb_presence(true, get_paca()->kstack);
189 182
190 get_paca()->slb_cache_ptr = 0; 183 get_paca()->slb_cache_ptr = 0;
191 184
@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
443 ea = (unsigned long) 436 ea = (unsigned long)
444 get_paca()->slb_cache[i] << SID_SHIFT; 437 get_paca()->slb_cache[i] << SID_SHIFT;
445 /* 438 /*
446 * Could assert_slb_exists here, but hypervisor 439 * Could assert_slb_presence(true) here, but
447 * or machine check could have come in and 440 * hypervisor or machine check could have come
448 * removed the entry at this point. 441 * in and removed the entry at this point.
449 */ 442 */
450 443
451 slbie_data = ea; 444 slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
676 * User preloads should add isync afterwards in case the kernel 669 * User preloads should add isync afterwards in case the kernel
677 * accesses user memory before it returns to userspace with rfid. 670 * accesses user memory before it returns to userspace with rfid.
678 */ 671 */
679 assert_slb_notexists(ea); 672 assert_slb_presence(false, ea);
680 asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)); 673 asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
681 674
682 barrier(); 675 barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
715 return -EFAULT; 708 return -EFAULT;
716 709
717 if (ea < H_VMALLOC_END) 710 if (ea < H_VMALLOC_END)
718 flags = get_paca()->vmalloc_sllp; 711 flags = local_paca->vmalloc_sllp;
719 else 712 else
720 flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp; 713 flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
721 } else { 714 } else {
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 50b129785aee..17482f5de3e2 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
166 PPC_BLR(); 166 PPC_BLR();
167} 167}
168 168
169static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func) 169static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
170 u64 func)
171{
172#ifdef PPC64_ELF_ABI_v1
173 /* func points to the function descriptor */
174 PPC_LI64(b2p[TMP_REG_2], func);
175 /* Load actual entry point from function descriptor */
176 PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
177 /* ... and move it to LR */
178 PPC_MTLR(b2p[TMP_REG_1]);
179 /*
180 * Load TOC from function descriptor at offset 8.
181 * We can clobber r2 since we get called through a
182 * function pointer (so caller will save/restore r2)
183 * and since we don't use a TOC ourself.
184 */
185 PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
186#else
187 /* We can clobber r12 */
188 PPC_FUNC_ADDR(12, func);
189 PPC_MTLR(12);
190#endif
191 PPC_BLRL();
192}
193
194static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
195 u64 func)
170{ 196{
171 unsigned int i, ctx_idx = ctx->idx; 197 unsigned int i, ctx_idx = ctx->idx;
172 198
@@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
273{ 299{
274 const struct bpf_insn *insn = fp->insnsi; 300 const struct bpf_insn *insn = fp->insnsi;
275 int flen = fp->len; 301 int flen = fp->len;
276 int i; 302 int i, ret;
277 303
278 /* Start of epilogue code - will only be valid 2nd pass onwards */ 304 /* Start of epilogue code - will only be valid 2nd pass onwards */
279 u32 exit_addr = addrs[flen]; 305 u32 exit_addr = addrs[flen];
@@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
284 u32 src_reg = b2p[insn[i].src_reg]; 310 u32 src_reg = b2p[insn[i].src_reg];
285 s16 off = insn[i].off; 311 s16 off = insn[i].off;
286 s32 imm = insn[i].imm; 312 s32 imm = insn[i].imm;
313 bool func_addr_fixed;
314 u64 func_addr;
287 u64 imm64; 315 u64 imm64;
288 u8 *func;
289 u32 true_cond; 316 u32 true_cond;
290 u32 tmp_idx; 317 u32 tmp_idx;
291 318
@@ -711,23 +738,15 @@ emit_clear:
711 case BPF_JMP | BPF_CALL: 738 case BPF_JMP | BPF_CALL:
712 ctx->seen |= SEEN_FUNC; 739 ctx->seen |= SEEN_FUNC;
713 740
714 /* bpf function call */ 741 ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
715 if (insn[i].src_reg == BPF_PSEUDO_CALL) 742 &func_addr, &func_addr_fixed);
716 if (!extra_pass) 743 if (ret < 0)
717 func = NULL; 744 return ret;
718 else if (fp->aux->func && off < fp->aux->func_cnt)
719 /* use the subprog id from the off
720 * field to lookup the callee address
721 */
722 func = (u8 *) fp->aux->func[off]->bpf_func;
723 else
724 return -EINVAL;
725 /* kernel helper call */
726 else
727 func = (u8 *) __bpf_call_base + imm;
728
729 bpf_jit_emit_func_call(image, ctx, (u64)func);
730 745
746 if (func_addr_fixed)
747 bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
748 else
749 bpf_jit_emit_func_call_rel(image, ctx, func_addr);
731 /* move return value from r3 to BPF_REG_0 */ 750 /* move return value from r3 to BPF_REG_0 */
732 PPC_MR(b2p[BPF_REG_0], 3); 751 PPC_MR(b2p[BPF_REG_0], 3);
733 break; 752 break;
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 6f60e0931922..75b935252981 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
102} 102}
103EXPORT_SYMBOL(pnv_pci_get_npu_dev); 103EXPORT_SYMBOL(pnv_pci_get_npu_dev);
104 104
105#define NPU_DMA_OP_UNSUPPORTED() \
106 dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
107 __func__)
108
109static void *dma_npu_alloc(struct device *dev, size_t size,
110 dma_addr_t *dma_handle, gfp_t flag,
111 unsigned long attrs)
112{
113 NPU_DMA_OP_UNSUPPORTED();
114 return NULL;
115}
116
117static void dma_npu_free(struct device *dev, size_t size,
118 void *vaddr, dma_addr_t dma_handle,
119 unsigned long attrs)
120{
121 NPU_DMA_OP_UNSUPPORTED();
122}
123
124static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
125 unsigned long offset, size_t size,
126 enum dma_data_direction direction,
127 unsigned long attrs)
128{
129 NPU_DMA_OP_UNSUPPORTED();
130 return 0;
131}
132
133static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
134 int nelems, enum dma_data_direction direction,
135 unsigned long attrs)
136{
137 NPU_DMA_OP_UNSUPPORTED();
138 return 0;
139}
140
141static int dma_npu_dma_supported(struct device *dev, u64 mask)
142{
143 NPU_DMA_OP_UNSUPPORTED();
144 return 0;
145}
146
147static u64 dma_npu_get_required_mask(struct device *dev)
148{
149 NPU_DMA_OP_UNSUPPORTED();
150 return 0;
151}
152
153static const struct dma_map_ops dma_npu_ops = {
154 .map_page = dma_npu_map_page,
155 .map_sg = dma_npu_map_sg,
156 .alloc = dma_npu_alloc,
157 .free = dma_npu_free,
158 .dma_supported = dma_npu_dma_supported,
159 .get_required_mask = dma_npu_get_required_mask,
160};
161
162/* 105/*
163 * Returns the PE assoicated with the PCI device of the given 106 * Returns the PE assoicated with the PCI device of the given
164 * NPU. Returns the linked pci device if pci_dev != NULL. 107 * NPU. Returns the linked pci device if pci_dev != NULL.
@@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
270 rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]); 213 rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
271 214
272 /* 215 /*
273 * We don't initialise npu_pe->tce32_table as we always use 216 * NVLink devices use the same TCE table configuration as
274 * dma_npu_ops which are nops. 217 * their parent device so drivers shouldn't be doing DMA
218 * operations directly on these devices.
275 */ 219 */
276 set_dma_ops(&npe->pdev->dev, &dma_npu_ops); 220 set_dma_ops(&npe->pdev->dev, NULL);
277} 221}
278 222
279/* 223/*
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index d10146197533..4b594f2e4f7e 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -71,10 +71,27 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
71# arch specific predefines for sparse 71# arch specific predefines for sparse
72CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) 72CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
73 73
74# Default target when executing plain make
75boot := arch/riscv/boot
76KBUILD_IMAGE := $(boot)/Image.gz
77
74head-y := arch/riscv/kernel/head.o 78head-y := arch/riscv/kernel/head.o
75 79
76core-y += arch/riscv/kernel/ arch/riscv/mm/ 80core-y += arch/riscv/kernel/ arch/riscv/mm/
77 81
78libs-y += arch/riscv/lib/ 82libs-y += arch/riscv/lib/
79 83
80all: vmlinux 84PHONY += vdso_install
85vdso_install:
86 $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
87
88all: Image.gz
89
90Image: vmlinux
91 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
92
93Image.%: Image
94 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
95
96zinstall install:
97 $(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
new file mode 100644
index 000000000000..8dab0bb6ae66
--- /dev/null
+++ b/arch/riscv/boot/.gitignore
@@ -0,0 +1,2 @@
1Image
2Image.gz
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
new file mode 100644
index 000000000000..0990a9fdbe5d
--- /dev/null
+++ b/arch/riscv/boot/Makefile
@@ -0,0 +1,33 @@
1#
2# arch/riscv/boot/Makefile
3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
7# This file is subject to the terms and conditions of the GNU General Public
8# License. See the file "COPYING" in the main directory of this archive
9# for more details.
10#
11# Copyright (C) 2018, Anup Patel.
12# Author: Anup Patel <anup@brainfault.org>
13#
14# Based on the ia64 and arm64 boot/Makefile.
15#
16
17OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
18
19targets := Image
20
21$(obj)/Image: vmlinux FORCE
22 $(call if_changed,objcopy)
23
24$(obj)/Image.gz: $(obj)/Image FORCE
25 $(call if_changed,gzip)
26
27install:
28 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
29 $(obj)/Image System.map "$(INSTALL_PATH)"
30
31zinstall:
32 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
33 $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh
new file mode 100644
index 000000000000..18c39159c0ff
--- /dev/null
+++ b/arch/riscv/boot/install.sh
@@ -0,0 +1,60 @@
1#!/bin/sh
2#
3# arch/riscv/boot/install.sh
4#
5# This file is subject to the terms and conditions of the GNU General Public
6# License. See the file "COPYING" in the main directory of this archive
7# for more details.
8#
9# Copyright (C) 1995 by Linus Torvalds
10#
11# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
12# Adapted from code in arch/i386/boot/install.sh by Russell King
13#
14# "make install" script for the RISC-V Linux port
15#
16# Arguments:
17# $1 - kernel version
18# $2 - kernel image file
19# $3 - kernel map file
20# $4 - default install path (blank if root directory)
21#
22
23verify () {
24 if [ ! -f "$1" ]; then
25 echo "" 1>&2
26 echo " *** Missing file: $1" 1>&2
27 echo ' *** You need to run "make" before "make install".' 1>&2
28 echo "" 1>&2
29 exit 1
30 fi
31}
32
33# Make sure the files actually exist
34verify "$2"
35verify "$3"
36
37# User may have a custom install script
38if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
39if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
40
41if [ "$(basename $2)" = "Image.gz" ]; then
42# Compressed install
43 echo "Installing compressed kernel"
44 base=vmlinuz
45else
46# Normal install
47 echo "Installing normal kernel"
48 base=vmlinux
49fi
50
51if [ -f $4/$base-$1 ]; then
52 mv $4/$base-$1 $4/$base-$1.old
53fi
54cat $2 > $4/$base-$1
55
56# Install system map file
57if [ -f $4/System.map-$1 ]; then
58 mv $4/System.map-$1 $4/System.map-$1.old
59fi
60cp $3 $4/System.map-$1
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 07fa9ea75fea..ef4f15df9adf 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -76,4 +76,5 @@ CONFIG_NFS_V4_1=y
76CONFIG_NFS_V4_2=y 76CONFIG_NFS_V4_2=y
77CONFIG_ROOT_NFS=y 77CONFIG_ROOT_NFS=y
78CONFIG_CRYPTO_USER_API_HASH=y 78CONFIG_CRYPTO_USER_API_HASH=y
79CONFIG_PRINTK_TIME=y
79# CONFIG_RCU_TRACE is not set 80# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
index 349df33808c4..cd2af4b013e3 100644
--- a/arch/riscv/include/asm/module.h
+++ b/arch/riscv/include/asm/module.h
@@ -8,6 +8,7 @@
8 8
9#define MODULE_ARCH_VERMAGIC "riscv" 9#define MODULE_ARCH_VERMAGIC "riscv"
10 10
11struct module;
11u64 module_emit_got_entry(struct module *mod, u64 val); 12u64 module_emit_got_entry(struct module *mod, u64 val);
12u64 module_emit_plt_entry(struct module *mod, u64 val); 13u64 module_emit_plt_entry(struct module *mod, u64 val);
13 14
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index 2c5df945d43c..bbe1862e8f80 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -56,8 +56,8 @@ struct pt_regs {
56 unsigned long sstatus; 56 unsigned long sstatus;
57 unsigned long sbadaddr; 57 unsigned long sbadaddr;
58 unsigned long scause; 58 unsigned long scause;
59 /* a0 value before the syscall */ 59 /* a0 value before the syscall */
60 unsigned long orig_a0; 60 unsigned long orig_a0;
61}; 61};
62 62
63#ifdef CONFIG_64BIT 63#ifdef CONFIG_64BIT
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 473cfc84e412..8c3e3e3c8be1 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to,
400static inline unsigned long 400static inline unsigned long
401raw_copy_from_user(void *to, const void __user *from, unsigned long n) 401raw_copy_from_user(void *to, const void __user *from, unsigned long n)
402{ 402{
403 return __asm_copy_to_user(to, from, n); 403 return __asm_copy_from_user(to, from, n);
404} 404}
405 405
406static inline unsigned long 406static inline unsigned long
407raw_copy_to_user(void __user *to, const void *from, unsigned long n) 407raw_copy_to_user(void __user *to, const void *from, unsigned long n)
408{ 408{
409 return __asm_copy_from_user(to, from, n); 409 return __asm_copy_to_user(to, from, n);
410} 410}
411 411
412extern long strncpy_from_user(char *dest, const char __user *src, long count); 412extern long strncpy_from_user(char *dest, const char __user *src, long count);
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index eff7aa9aa163..fef96f117b4d 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -13,10 +13,9 @@
13 13
14/* 14/*
15 * There is explicitly no include guard here because this file is expected to 15 * There is explicitly no include guard here because this file is expected to
16 * be included multiple times. See uapi/asm/syscalls.h for more info. 16 * be included multiple times.
17 */ 17 */
18 18
19#define __ARCH_WANT_NEW_STAT
20#define __ARCH_WANT_SYS_CLONE 19#define __ARCH_WANT_SYS_CLONE
20
21#include <uapi/asm/unistd.h> 21#include <uapi/asm/unistd.h>
22#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/unistd.h
index 206dc4b0f6ea..1f3bd3ebbb0d 100644
--- a/arch/riscv/include/uapi/asm/syscalls.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -1,13 +1,25 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (C) 2017-2018 SiFive 3 * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
4 */ 16 */
5 17
6/* 18#ifdef __LP64__
7 * There is explicitly no include guard here because this file is expected to 19#define __ARCH_WANT_NEW_STAT
8 * be included multiple times in order to define the syscall macros via 20#endif /* __LP64__ */
9 * __SYSCALL. 21
10 */ 22#include <asm-generic/unistd.h>
11 23
12/* 24/*
13 * Allows the instruction cache to be flushed from userspace. Despite RISC-V 25 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 3a5a2ee31547..b4a7d4427fbb 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node)
64 64
65static void print_isa(struct seq_file *f, const char *orig_isa) 65static void print_isa(struct seq_file *f, const char *orig_isa)
66{ 66{
67 static const char *ext = "mafdc"; 67 static const char *ext = "mafdcsu";
68 const char *isa = orig_isa; 68 const char *isa = orig_isa;
69 const char *e; 69 const char *e;
70 70
@@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa)
88 /* 88 /*
89 * Check the rest of the ISA string for valid extensions, printing those 89 * Check the rest of the ISA string for valid extensions, printing those
90 * we find. RISC-V ISA strings define an order, so we only print the 90 * we find. RISC-V ISA strings define an order, so we only print the
91 * extension bits when they're in order. 91 * extension bits when they're in order. Hide the supervisor (S)
92 * extension from userspace as it's not accessible from there.
92 */ 93 */
93 for (e = ext; *e != '\0'; ++e) { 94 for (e = ext; *e != '\0'; ++e) {
94 if (isa[0] == e[0]) { 95 if (isa[0] == e[0]) {
95 seq_write(f, isa, 1); 96 if (isa[0] != 's')
97 seq_write(f, isa, 1);
98
96 isa++; 99 isa++;
97 } 100 }
98 } 101 }
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 1157b6b52d25..c433f6d3dd64 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
132{ 132{
133 unsigned long return_hooker = (unsigned long)&return_to_handler; 133 unsigned long return_hooker = (unsigned long)&return_to_handler;
134 unsigned long old; 134 unsigned long old;
135 struct ftrace_graph_ent trace;
136 int err; 135 int err;
137 136
138 if (unlikely(atomic_read(&current->tracing_graph_pause))) 137 if (unlikely(atomic_read(&current->tracing_graph_pause)))
@@ -144,17 +143,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
144 */ 143 */
145 old = *parent; 144 old = *parent;
146 145
147 trace.func = self_addr; 146 if (function_graph_enter(old, self_addr, frame_pointer, parent))
148 trace.depth = current->curr_ret_stack + 1; 147 *parent = return_hooker;
149
150 if (!ftrace_graph_entry(&trace))
151 return;
152
153 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
154 frame_pointer, parent);
155 if (err == -EBUSY)
156 return;
157 *parent = return_hooker;
158} 148}
159 149
160#ifdef CONFIG_DYNAMIC_FTRACE 150#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 711190d473d4..fe884cd69abd 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -44,6 +44,16 @@ ENTRY(_start)
44 amoadd.w a3, a2, (a3) 44 amoadd.w a3, a2, (a3)
45 bnez a3, .Lsecondary_start 45 bnez a3, .Lsecondary_start
46 46
47 /* Clear BSS for flat non-ELF images */
48 la a3, __bss_start
49 la a4, __bss_stop
50 ble a4, a3, clear_bss_done
51clear_bss:
52 REG_S zero, (a3)
53 add a3, a3, RISCV_SZPTR
54 blt a3, a4, clear_bss
55clear_bss_done:
56
47 /* Save hart ID and DTB physical address */ 57 /* Save hart ID and DTB physical address */
48 mv s0, a0 58 mv s0, a0
49 mv s1, a1 59 mv s1, a1
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 3303ed2cd419..7dd308129b40 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
21{ 21{
22 if (v != (u32)v) { 22 if (v != (u32)v) {
23 pr_err("%s: value %016llx out of range for 32-bit field\n", 23 pr_err("%s: value %016llx out of range for 32-bit field\n",
24 me->name, v); 24 me->name, (long long)v);
25 return -EINVAL; 25 return -EINVAL;
26 } 26 }
27 *location = v; 27 *location = v;
@@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
102 if (offset != (s32)offset) { 102 if (offset != (s32)offset) {
103 pr_err( 103 pr_err(
104 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", 104 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
105 me->name, v, location); 105 me->name, (long long)v, location);
106 return -EINVAL; 106 return -EINVAL;
107 } 107 }
108 108
@@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
144 if (IS_ENABLED(CMODEL_MEDLOW)) { 144 if (IS_ENABLED(CMODEL_MEDLOW)) {
145 pr_err( 145 pr_err(
146 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", 146 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
147 me->name, v, location); 147 me->name, (long long)v, location);
148 return -EINVAL; 148 return -EINVAL;
149 } 149 }
150 150
@@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
188 } else { 188 } else {
189 pr_err( 189 pr_err(
190 "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", 190 "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
191 me->name, v, location); 191 me->name, (long long)v, location);
192 return -EINVAL; 192 return -EINVAL;
193 } 193 }
194 194
@@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
212 } else { 212 } else {
213 pr_err( 213 pr_err(
214 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", 214 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
215 me->name, v, location); 215 me->name, (long long)v, location);
216 return -EINVAL; 216 return -EINVAL;
217 } 217 }
218 } 218 }
@@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
234 if (offset != fill_v) { 234 if (offset != fill_v) {
235 pr_err( 235 pr_err(
236 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", 236 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
237 me->name, v, location); 237 me->name, (long long)v, location);
238 return -EINVAL; 238 return -EINVAL;
239 } 239 }
240 240
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index ece84991609c..65df1dfdc303 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
74 *(.sbss*) 74 *(.sbss*)
75 } 75 }
76 76
77 BSS_SECTION(0, 0, 0) 77 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
78 78
79 EXCEPTION_TABLE(0x10) 79 EXCEPTION_TABLE(0x10)
80 NOTES 80 NOTES
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 5739bd05d289..4e2e600f7d53 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -3,6 +3,6 @@ lib-y += memcpy.o
3lib-y += memset.o 3lib-y += memset.o
4lib-y += uaccess.o 4lib-y += uaccess.o
5 5
6lib-(CONFIG_64BIT) += tishift.o 6lib-$(CONFIG_64BIT) += tishift.o
7 7
8lib-$(CONFIG_32BIT) += udivdi3.o 8lib-$(CONFIG_32BIT) += udivdi3.o
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0b33577932c3..e21053e5e0da 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
27KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) 27KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
28KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) 28KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
29UTS_MACHINE := s390x 29UTS_MACHINE := s390x
30STACK_SIZE := $(if $(CONFIG_KASAN),32768,16384) 30STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
31CHECKFLAGS += -D__s390__ -D__s390x__ 31CHECKFLAGS += -D__s390__ -D__s390x__
32 32
33export LD_BFD 33export LD_BFD
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 593039620487..b1bdd15e3429 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -22,10 +22,10 @@ OBJCOPYFLAGS :=
22OBJECTS := $(addprefix $(obj)/,$(obj-y)) 22OBJECTS := $(addprefix $(obj)/,$(obj-y))
23 23
24LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T 24LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
25$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) 25$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
26 $(call if_changed,ld) 26 $(call if_changed,ld)
27 27
28OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info 28OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
29$(obj)/info.bin: vmlinux FORCE 29$(obj)/info.bin: vmlinux FORCE
30 $(call if_changed,objcopy) 30 $(call if_changed,objcopy)
31 31
@@ -46,17 +46,17 @@ suffix-$(CONFIG_KERNEL_LZMA) := .lzma
46suffix-$(CONFIG_KERNEL_LZO) := .lzo 46suffix-$(CONFIG_KERNEL_LZO) := .lzo
47suffix-$(CONFIG_KERNEL_XZ) := .xz 47suffix-$(CONFIG_KERNEL_XZ) := .xz
48 48
49$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) 49$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
50 $(call if_changed,gzip) 50 $(call if_changed,gzip)
51$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) 51$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
52 $(call if_changed,bzip2) 52 $(call if_changed,bzip2)
53$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) 53$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
54 $(call if_changed,lz4) 54 $(call if_changed,lz4)
55$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) 55$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
56 $(call if_changed,lzma) 56 $(call if_changed,lzma)
57$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) 57$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
58 $(call if_changed,lzo) 58 $(call if_changed,lzo)
59$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) 59$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
60 $(call if_changed,xzkern) 60 $(call if_changed,xzkern)
61 61
62OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed 62OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 259d1698ac50..c69cb04b7a59 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -64,6 +64,8 @@ CONFIG_NUMA=y
64CONFIG_PREEMPT=y 64CONFIG_PREEMPT=y
65CONFIG_HZ_100=y 65CONFIG_HZ_100=y
66CONFIG_KEXEC_FILE=y 66CONFIG_KEXEC_FILE=y
67CONFIG_EXPOLINE=y
68CONFIG_EXPOLINE_AUTO=y
67CONFIG_MEMORY_HOTPLUG=y 69CONFIG_MEMORY_HOTPLUG=y
68CONFIG_MEMORY_HOTREMOVE=y 70CONFIG_MEMORY_HOTREMOVE=y
69CONFIG_KSM=y 71CONFIG_KSM=y
@@ -84,9 +86,11 @@ CONFIG_PCI_DEBUG=y
84CONFIG_HOTPLUG_PCI=y 86CONFIG_HOTPLUG_PCI=y
85CONFIG_HOTPLUG_PCI_S390=y 87CONFIG_HOTPLUG_PCI_S390=y
86CONFIG_CHSC_SCH=y 88CONFIG_CHSC_SCH=y
89CONFIG_VFIO_AP=m
87CONFIG_CRASH_DUMP=y 90CONFIG_CRASH_DUMP=y
88CONFIG_BINFMT_MISC=m 91CONFIG_BINFMT_MISC=m
89CONFIG_HIBERNATION=y 92CONFIG_HIBERNATION=y
93CONFIG_PM_DEBUG=y
90CONFIG_NET=y 94CONFIG_NET=y
91CONFIG_PACKET=y 95CONFIG_PACKET=y
92CONFIG_PACKET_DIAG=m 96CONFIG_PACKET_DIAG=m
@@ -161,8 +165,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
161CONFIG_NF_CT_NETLINK=m 165CONFIG_NF_CT_NETLINK=m
162CONFIG_NF_CT_NETLINK_TIMEOUT=m 166CONFIG_NF_CT_NETLINK_TIMEOUT=m
163CONFIG_NF_TABLES=m 167CONFIG_NF_TABLES=m
164CONFIG_NFT_EXTHDR=m
165CONFIG_NFT_META=m
166CONFIG_NFT_CT=m 168CONFIG_NFT_CT=m
167CONFIG_NFT_COUNTER=m 169CONFIG_NFT_COUNTER=m
168CONFIG_NFT_LOG=m 170CONFIG_NFT_LOG=m
@@ -365,6 +367,8 @@ CONFIG_NET_ACT_SKBEDIT=m
365CONFIG_NET_ACT_CSUM=m 367CONFIG_NET_ACT_CSUM=m
366CONFIG_DNS_RESOLVER=y 368CONFIG_DNS_RESOLVER=y
367CONFIG_OPENVSWITCH=m 369CONFIG_OPENVSWITCH=m
370CONFIG_VSOCKETS=m
371CONFIG_VIRTIO_VSOCKETS=m
368CONFIG_NETLINK_DIAG=m 372CONFIG_NETLINK_DIAG=m
369CONFIG_CGROUP_NET_PRIO=y 373CONFIG_CGROUP_NET_PRIO=y
370CONFIG_BPF_JIT=y 374CONFIG_BPF_JIT=y
@@ -461,6 +465,7 @@ CONFIG_PPTP=m
461CONFIG_PPPOL2TP=m 465CONFIG_PPPOL2TP=m
462CONFIG_PPP_ASYNC=m 466CONFIG_PPP_ASYNC=m
463CONFIG_PPP_SYNC_TTY=m 467CONFIG_PPP_SYNC_TTY=m
468CONFIG_ISM=m
464CONFIG_INPUT_EVDEV=y 469CONFIG_INPUT_EVDEV=y
465# CONFIG_INPUT_KEYBOARD is not set 470# CONFIG_INPUT_KEYBOARD is not set
466# CONFIG_INPUT_MOUSE is not set 471# CONFIG_INPUT_MOUSE is not set
@@ -486,9 +491,12 @@ CONFIG_MLX4_INFINIBAND=m
486CONFIG_MLX5_INFINIBAND=m 491CONFIG_MLX5_INFINIBAND=m
487CONFIG_VFIO=m 492CONFIG_VFIO=m
488CONFIG_VFIO_PCI=m 493CONFIG_VFIO_PCI=m
494CONFIG_VFIO_MDEV=m
495CONFIG_VFIO_MDEV_DEVICE=m
489CONFIG_VIRTIO_PCI=m 496CONFIG_VIRTIO_PCI=m
490CONFIG_VIRTIO_BALLOON=m 497CONFIG_VIRTIO_BALLOON=m
491CONFIG_VIRTIO_INPUT=y 498CONFIG_VIRTIO_INPUT=y
499CONFIG_S390_AP_IOMMU=y
492CONFIG_EXT4_FS=y 500CONFIG_EXT4_FS=y
493CONFIG_EXT4_FS_POSIX_ACL=y 501CONFIG_EXT4_FS_POSIX_ACL=y
494CONFIG_EXT4_FS_SECURITY=y 502CONFIG_EXT4_FS_SECURITY=y
@@ -615,7 +623,6 @@ CONFIG_DEBUG_CREDENTIALS=y
615CONFIG_RCU_TORTURE_TEST=m 623CONFIG_RCU_TORTURE_TEST=m
616CONFIG_RCU_CPU_STALL_TIMEOUT=300 624CONFIG_RCU_CPU_STALL_TIMEOUT=300
617CONFIG_NOTIFIER_ERROR_INJECTION=m 625CONFIG_NOTIFIER_ERROR_INJECTION=m
618CONFIG_PM_NOTIFIER_ERROR_INJECT=m
619CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m 626CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
620CONFIG_FAULT_INJECTION=y 627CONFIG_FAULT_INJECTION=y
621CONFIG_FAILSLAB=y 628CONFIG_FAILSLAB=y
@@ -727,3 +734,4 @@ CONFIG_APPLDATA_BASE=y
727CONFIG_KVM=m 734CONFIG_KVM=m
728CONFIG_KVM_S390_UCONTROL=y 735CONFIG_KVM_S390_UCONTROL=y
729CONFIG_VHOST_NET=m 736CONFIG_VHOST_NET=m
737CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 37fd60c20e22..32f539dc9c19 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,8 @@ CONFIG_NR_CPUS=512
65CONFIG_NUMA=y 65CONFIG_NUMA=y
66CONFIG_HZ_100=y 66CONFIG_HZ_100=y
67CONFIG_KEXEC_FILE=y 67CONFIG_KEXEC_FILE=y
68CONFIG_EXPOLINE=y
69CONFIG_EXPOLINE_AUTO=y
68CONFIG_MEMORY_HOTPLUG=y 70CONFIG_MEMORY_HOTPLUG=y
69CONFIG_MEMORY_HOTREMOVE=y 71CONFIG_MEMORY_HOTREMOVE=y
70CONFIG_KSM=y 72CONFIG_KSM=y
@@ -82,9 +84,11 @@ CONFIG_PCI=y
82CONFIG_HOTPLUG_PCI=y 84CONFIG_HOTPLUG_PCI=y
83CONFIG_HOTPLUG_PCI_S390=y 85CONFIG_HOTPLUG_PCI_S390=y
84CONFIG_CHSC_SCH=y 86CONFIG_CHSC_SCH=y
87CONFIG_VFIO_AP=m
85CONFIG_CRASH_DUMP=y 88CONFIG_CRASH_DUMP=y
86CONFIG_BINFMT_MISC=m 89CONFIG_BINFMT_MISC=m
87CONFIG_HIBERNATION=y 90CONFIG_HIBERNATION=y
91CONFIG_PM_DEBUG=y
88CONFIG_NET=y 92CONFIG_NET=y
89CONFIG_PACKET=y 93CONFIG_PACKET=y
90CONFIG_PACKET_DIAG=m 94CONFIG_PACKET_DIAG=m
@@ -159,8 +163,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
159CONFIG_NF_CT_NETLINK=m 163CONFIG_NF_CT_NETLINK=m
160CONFIG_NF_CT_NETLINK_TIMEOUT=m 164CONFIG_NF_CT_NETLINK_TIMEOUT=m
161CONFIG_NF_TABLES=m 165CONFIG_NF_TABLES=m
162CONFIG_NFT_EXTHDR=m
163CONFIG_NFT_META=m
164CONFIG_NFT_CT=m 166CONFIG_NFT_CT=m
165CONFIG_NFT_COUNTER=m 167CONFIG_NFT_COUNTER=m
166CONFIG_NFT_LOG=m 168CONFIG_NFT_LOG=m
@@ -362,6 +364,8 @@ CONFIG_NET_ACT_SKBEDIT=m
362CONFIG_NET_ACT_CSUM=m 364CONFIG_NET_ACT_CSUM=m
363CONFIG_DNS_RESOLVER=y 365CONFIG_DNS_RESOLVER=y
364CONFIG_OPENVSWITCH=m 366CONFIG_OPENVSWITCH=m
367CONFIG_VSOCKETS=m
368CONFIG_VIRTIO_VSOCKETS=m
365CONFIG_NETLINK_DIAG=m 369CONFIG_NETLINK_DIAG=m
366CONFIG_CGROUP_NET_PRIO=y 370CONFIG_CGROUP_NET_PRIO=y
367CONFIG_BPF_JIT=y 371CONFIG_BPF_JIT=y
@@ -458,6 +462,7 @@ CONFIG_PPTP=m
458CONFIG_PPPOL2TP=m 462CONFIG_PPPOL2TP=m
459CONFIG_PPP_ASYNC=m 463CONFIG_PPP_ASYNC=m
460CONFIG_PPP_SYNC_TTY=m 464CONFIG_PPP_SYNC_TTY=m
465CONFIG_ISM=m
461CONFIG_INPUT_EVDEV=y 466CONFIG_INPUT_EVDEV=y
462# CONFIG_INPUT_KEYBOARD is not set 467# CONFIG_INPUT_KEYBOARD is not set
463# CONFIG_INPUT_MOUSE is not set 468# CONFIG_INPUT_MOUSE is not set
@@ -483,9 +488,12 @@ CONFIG_MLX4_INFINIBAND=m
483CONFIG_MLX5_INFINIBAND=m 488CONFIG_MLX5_INFINIBAND=m
484CONFIG_VFIO=m 489CONFIG_VFIO=m
485CONFIG_VFIO_PCI=m 490CONFIG_VFIO_PCI=m
491CONFIG_VFIO_MDEV=m
492CONFIG_VFIO_MDEV_DEVICE=m
486CONFIG_VIRTIO_PCI=m 493CONFIG_VIRTIO_PCI=m
487CONFIG_VIRTIO_BALLOON=m 494CONFIG_VIRTIO_BALLOON=m
488CONFIG_VIRTIO_INPUT=y 495CONFIG_VIRTIO_INPUT=y
496CONFIG_S390_AP_IOMMU=y
489CONFIG_EXT4_FS=y 497CONFIG_EXT4_FS=y
490CONFIG_EXT4_FS_POSIX_ACL=y 498CONFIG_EXT4_FS_POSIX_ACL=y
491CONFIG_EXT4_FS_SECURITY=y 499CONFIG_EXT4_FS_SECURITY=y
@@ -666,3 +674,4 @@ CONFIG_APPLDATA_BASE=y
666CONFIG_KVM=m 674CONFIG_KVM=m
667CONFIG_KVM_S390_UCONTROL=y 675CONFIG_KVM_S390_UCONTROL=y
668CONFIG_VHOST_NET=m 676CONFIG_VHOST_NET=m
677CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 7cb6a52f727d..4d58a92b5d97 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -26,14 +26,23 @@ CONFIG_CGROUP_CPUACCT=y
26CONFIG_CGROUP_PERF=y 26CONFIG_CGROUP_PERF=y
27CONFIG_NAMESPACES=y 27CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 28CONFIG_USER_NS=y
29CONFIG_CHECKPOINT_RESTORE=y
29CONFIG_BLK_DEV_INITRD=y 30CONFIG_BLK_DEV_INITRD=y
30CONFIG_EXPERT=y 31CONFIG_EXPERT=y
31# CONFIG_SYSFS_SYSCALL is not set 32# CONFIG_SYSFS_SYSCALL is not set
32CONFIG_CHECKPOINT_RESTORE=y
33CONFIG_BPF_SYSCALL=y 33CONFIG_BPF_SYSCALL=y
34CONFIG_USERFAULTFD=y 34CONFIG_USERFAULTFD=y
35# CONFIG_COMPAT_BRK is not set 35# CONFIG_COMPAT_BRK is not set
36CONFIG_PROFILING=y 36CONFIG_PROFILING=y
37CONFIG_LIVEPATCH=y
38CONFIG_NR_CPUS=256
39CONFIG_NUMA=y
40CONFIG_HZ_100=y
41CONFIG_KEXEC_FILE=y
42CONFIG_CRASH_DUMP=y
43CONFIG_HIBERNATION=y
44CONFIG_PM_DEBUG=y
45CONFIG_CMM=m
37CONFIG_OPROFILE=y 46CONFIG_OPROFILE=y
38CONFIG_KPROBES=y 47CONFIG_KPROBES=y
39CONFIG_JUMP_LABEL=y 48CONFIG_JUMP_LABEL=y
@@ -44,11 +53,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
44CONFIG_PARTITION_ADVANCED=y 53CONFIG_PARTITION_ADVANCED=y
45CONFIG_IBM_PARTITION=y 54CONFIG_IBM_PARTITION=y
46CONFIG_DEFAULT_DEADLINE=y 55CONFIG_DEFAULT_DEADLINE=y
47CONFIG_LIVEPATCH=y 56CONFIG_BINFMT_MISC=m
48CONFIG_NR_CPUS=256
49CONFIG_NUMA=y
50CONFIG_HZ_100=y
51CONFIG_KEXEC_FILE=y
52CONFIG_MEMORY_HOTPLUG=y 57CONFIG_MEMORY_HOTPLUG=y
53CONFIG_MEMORY_HOTREMOVE=y 58CONFIG_MEMORY_HOTREMOVE=y
54CONFIG_KSM=y 59CONFIG_KSM=y
@@ -60,9 +65,6 @@ CONFIG_ZBUD=m
60CONFIG_ZSMALLOC=m 65CONFIG_ZSMALLOC=m
61CONFIG_ZSMALLOC_STAT=y 66CONFIG_ZSMALLOC_STAT=y
62CONFIG_IDLE_PAGE_TRACKING=y 67CONFIG_IDLE_PAGE_TRACKING=y
63CONFIG_CRASH_DUMP=y
64CONFIG_BINFMT_MISC=m
65CONFIG_HIBERNATION=y
66CONFIG_NET=y 68CONFIG_NET=y
67CONFIG_PACKET=y 69CONFIG_PACKET=y
68CONFIG_UNIX=y 70CONFIG_UNIX=y
@@ -98,6 +100,7 @@ CONFIG_BLK_DEV_NBD=m
98CONFIG_BLK_DEV_RAM=y 100CONFIG_BLK_DEV_RAM=y
99CONFIG_VIRTIO_BLK=y 101CONFIG_VIRTIO_BLK=y
100CONFIG_SCSI=y 102CONFIG_SCSI=y
103# CONFIG_SCSI_MQ_DEFAULT is not set
101CONFIG_BLK_DEV_SD=y 104CONFIG_BLK_DEV_SD=y
102CONFIG_CHR_DEV_ST=y 105CONFIG_CHR_DEV_ST=y
103CONFIG_BLK_DEV_SR=y 106CONFIG_BLK_DEV_SR=y
@@ -131,6 +134,7 @@ CONFIG_EQUALIZER=m
131CONFIG_TUN=m 134CONFIG_TUN=m
132CONFIG_VIRTIO_NET=y 135CONFIG_VIRTIO_NET=y
133# CONFIG_NET_VENDOR_ALACRITECH is not set 136# CONFIG_NET_VENDOR_ALACRITECH is not set
137# CONFIG_NET_VENDOR_AURORA is not set
134# CONFIG_NET_VENDOR_CORTINA is not set 138# CONFIG_NET_VENDOR_CORTINA is not set
135# CONFIG_NET_VENDOR_SOLARFLARE is not set 139# CONFIG_NET_VENDOR_SOLARFLARE is not set
136# CONFIG_NET_VENDOR_SOCIONEXT is not set 140# CONFIG_NET_VENDOR_SOCIONEXT is not set
@@ -157,33 +161,6 @@ CONFIG_TMPFS=y
157CONFIG_TMPFS_POSIX_ACL=y 161CONFIG_TMPFS_POSIX_ACL=y
158CONFIG_HUGETLBFS=y 162CONFIG_HUGETLBFS=y
159# CONFIG_NETWORK_FILESYSTEMS is not set 163# CONFIG_NETWORK_FILESYSTEMS is not set
160CONFIG_DEBUG_INFO=y
161CONFIG_DEBUG_INFO_DWARF4=y
162CONFIG_GDB_SCRIPTS=y
163CONFIG_UNUSED_SYMBOLS=y
164CONFIG_DEBUG_SECTION_MISMATCH=y
165CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
166CONFIG_MAGIC_SYSRQ=y
167CONFIG_DEBUG_PAGEALLOC=y
168CONFIG_DETECT_HUNG_TASK=y
169CONFIG_PANIC_ON_OOPS=y
170CONFIG_PROVE_LOCKING=y
171CONFIG_LOCK_STAT=y
172CONFIG_DEBUG_LOCKDEP=y
173CONFIG_DEBUG_ATOMIC_SLEEP=y
174CONFIG_DEBUG_LIST=y
175CONFIG_DEBUG_SG=y
176CONFIG_DEBUG_NOTIFIERS=y
177CONFIG_RCU_CPU_STALL_TIMEOUT=60
178CONFIG_LATENCYTOP=y
179CONFIG_SCHED_TRACER=y
180CONFIG_FTRACE_SYSCALLS=y
181CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
182CONFIG_STACK_TRACER=y
183CONFIG_BLK_DEV_IO_TRACE=y
184CONFIG_FUNCTION_PROFILER=y
185# CONFIG_RUNTIME_TESTING_MENU is not set
186CONFIG_S390_PTDUMP=y
187CONFIG_CRYPTO_CRYPTD=m 164CONFIG_CRYPTO_CRYPTD=m
188CONFIG_CRYPTO_AUTHENC=m 165CONFIG_CRYPTO_AUTHENC=m
189CONFIG_CRYPTO_TEST=m 166CONFIG_CRYPTO_TEST=m
@@ -193,6 +170,7 @@ CONFIG_CRYPTO_CBC=y
193CONFIG_CRYPTO_CFB=m 170CONFIG_CRYPTO_CFB=m
194CONFIG_CRYPTO_CTS=m 171CONFIG_CRYPTO_CTS=m
195CONFIG_CRYPTO_LRW=m 172CONFIG_CRYPTO_LRW=m
173CONFIG_CRYPTO_OFB=m
196CONFIG_CRYPTO_PCBC=m 174CONFIG_CRYPTO_PCBC=m
197CONFIG_CRYPTO_XTS=m 175CONFIG_CRYPTO_XTS=m
198CONFIG_CRYPTO_CMAC=m 176CONFIG_CRYPTO_CMAC=m
@@ -231,7 +209,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
231CONFIG_CRYPTO_USER_API_SKCIPHER=m 209CONFIG_CRYPTO_USER_API_SKCIPHER=m
232CONFIG_CRYPTO_USER_API_RNG=m 210CONFIG_CRYPTO_USER_API_RNG=m
233CONFIG_ZCRYPT=m 211CONFIG_ZCRYPT=m
234CONFIG_ZCRYPT_MULTIDEVNODES=y
235CONFIG_PKEY=m 212CONFIG_PKEY=m
236CONFIG_CRYPTO_PAES_S390=m 213CONFIG_CRYPTO_PAES_S390=m
237CONFIG_CRYPTO_SHA1_S390=m 214CONFIG_CRYPTO_SHA1_S390=m
@@ -247,4 +224,30 @@ CONFIG_CRC7=m
247# CONFIG_XZ_DEC_ARM is not set 224# CONFIG_XZ_DEC_ARM is not set
248# CONFIG_XZ_DEC_ARMTHUMB is not set 225# CONFIG_XZ_DEC_ARMTHUMB is not set
249# CONFIG_XZ_DEC_SPARC is not set 226# CONFIG_XZ_DEC_SPARC is not set
250CONFIG_CMM=m 227CONFIG_DEBUG_INFO=y
228CONFIG_DEBUG_INFO_DWARF4=y
229CONFIG_GDB_SCRIPTS=y
230CONFIG_UNUSED_SYMBOLS=y
231CONFIG_DEBUG_SECTION_MISMATCH=y
232CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
233CONFIG_MAGIC_SYSRQ=y
234CONFIG_DEBUG_PAGEALLOC=y
235CONFIG_DETECT_HUNG_TASK=y
236CONFIG_PANIC_ON_OOPS=y
237CONFIG_PROVE_LOCKING=y
238CONFIG_LOCK_STAT=y
239CONFIG_DEBUG_LOCKDEP=y
240CONFIG_DEBUG_ATOMIC_SLEEP=y
241CONFIG_DEBUG_LIST=y
242CONFIG_DEBUG_SG=y
243CONFIG_DEBUG_NOTIFIERS=y
244CONFIG_RCU_CPU_STALL_TIMEOUT=60
245CONFIG_LATENCYTOP=y
246CONFIG_SCHED_TRACER=y
247CONFIG_FTRACE_SYSCALLS=y
248CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
249CONFIG_STACK_TRACER=y
250CONFIG_BLK_DEV_IO_TRACE=y
251CONFIG_FUNCTION_PROFILER=y
252# CONFIG_RUNTIME_TESTING_MENU is not set
253CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index dbd689d556ce..ccbb53e22024 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -46,8 +46,6 @@ static inline int init_new_context(struct task_struct *tsk,
46 mm->context.asce_limit = STACK_TOP_MAX; 46 mm->context.asce_limit = STACK_TOP_MAX;
47 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 47 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
48 _ASCE_USER_BITS | _ASCE_TYPE_REGION3; 48 _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
49 /* pgd_alloc() did not account this pud */
50 mm_inc_nr_puds(mm);
51 break; 49 break;
52 case -PAGE_SIZE: 50 case -PAGE_SIZE:
53 /* forked 5-level task, set new asce with new_mm->pgd */ 51 /* forked 5-level task, set new asce with new_mm->pgd */
@@ -63,9 +61,6 @@ static inline int init_new_context(struct task_struct *tsk,
63 /* forked 2-level compat task, set new asce with new mm->pgd */ 61 /* forked 2-level compat task, set new asce with new mm->pgd */
64 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 62 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
65 _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; 63 _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
66 /* pgd_alloc() did not account this pmd */
67 mm_inc_nr_pmds(mm);
68 mm_inc_nr_puds(mm);
69 } 64 }
70 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 65 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
71 return 0; 66 return 0;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f0f9bcf94c03..5ee733720a57 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
36 36
37static inline unsigned long pgd_entry_type(struct mm_struct *mm) 37static inline unsigned long pgd_entry_type(struct mm_struct *mm)
38{ 38{
39 if (mm->context.asce_limit <= _REGION3_SIZE) 39 if (mm_pmd_folded(mm))
40 return _SEGMENT_ENTRY_EMPTY; 40 return _SEGMENT_ENTRY_EMPTY;
41 if (mm->context.asce_limit <= _REGION2_SIZE) 41 if (mm_pud_folded(mm))
42 return _REGION3_ENTRY_EMPTY; 42 return _REGION3_ENTRY_EMPTY;
43 if (mm->context.asce_limit <= _REGION1_SIZE) 43 if (mm_p4d_folded(mm))
44 return _REGION2_ENTRY_EMPTY; 44 return _REGION2_ENTRY_EMPTY;
45 return _REGION1_ENTRY_EMPTY; 45 return _REGION1_ENTRY_EMPTY;
46} 46}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 411d435e7a7d..063732414dfb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -493,6 +493,24 @@ static inline int is_module_addr(void *addr)
493 _REGION_ENTRY_PROTECT | \ 493 _REGION_ENTRY_PROTECT | \
494 _REGION_ENTRY_NOEXEC) 494 _REGION_ENTRY_NOEXEC)
495 495
496static inline bool mm_p4d_folded(struct mm_struct *mm)
497{
498 return mm->context.asce_limit <= _REGION1_SIZE;
499}
500#define mm_p4d_folded(mm) mm_p4d_folded(mm)
501
502static inline bool mm_pud_folded(struct mm_struct *mm)
503{
504 return mm->context.asce_limit <= _REGION2_SIZE;
505}
506#define mm_pud_folded(mm) mm_pud_folded(mm)
507
508static inline bool mm_pmd_folded(struct mm_struct *mm)
509{
510 return mm->context.asce_limit <= _REGION3_SIZE;
511}
512#define mm_pmd_folded(mm) mm_pmd_folded(mm)
513
496static inline int mm_has_pgste(struct mm_struct *mm) 514static inline int mm_has_pgste(struct mm_struct *mm)
497{ 515{
498#ifdef CONFIG_PGSTE 516#ifdef CONFIG_PGSTE
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 302795c47c06..81038ab357ce 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -236,7 +236,7 @@ static inline unsigned long current_stack_pointer(void)
236 return sp; 236 return sp;
237} 237}
238 238
239static __no_sanitize_address_or_inline unsigned short stap(void) 239static __no_kasan_or_inline unsigned short stap(void)
240{ 240{
241 unsigned short cpu_address; 241 unsigned short cpu_address;
242 242
@@ -330,7 +330,7 @@ static inline void __load_psw(psw_t psw)
330 * Set PSW mask to specified value, while leaving the 330 * Set PSW mask to specified value, while leaving the
331 * PSW addr pointing to the next instruction. 331 * PSW addr pointing to the next instruction.
332 */ 332 */
333static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask) 333static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
334{ 334{
335 unsigned long addr; 335 unsigned long addr;
336 psw_t psw; 336 psw_t psw;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 27248f42a03c..ce4e17c9aad6 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,7 +14,7 @@
14 * General size of kernel stacks 14 * General size of kernel stacks
15 */ 15 */
16#ifdef CONFIG_KASAN 16#ifdef CONFIG_KASAN
17#define THREAD_SIZE_ORDER 3 17#define THREAD_SIZE_ORDER 4
18#else 18#else
19#define THREAD_SIZE_ORDER 2 19#define THREAD_SIZE_ORDER 2
20#endif 20#endif
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 457b7ba0fbb6..b31c779cf581 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -136,7 +136,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
136static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 136static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
137 unsigned long address) 137 unsigned long address)
138{ 138{
139 if (tlb->mm->context.asce_limit <= _REGION3_SIZE) 139 if (mm_pmd_folded(tlb->mm))
140 return; 140 return;
141 pgtable_pmd_page_dtor(virt_to_page(pmd)); 141 pgtable_pmd_page_dtor(virt_to_page(pmd));
142 tlb_remove_table(tlb, pmd); 142 tlb_remove_table(tlb, pmd);
@@ -152,7 +152,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
152static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, 152static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
153 unsigned long address) 153 unsigned long address)
154{ 154{
155 if (tlb->mm->context.asce_limit <= _REGION1_SIZE) 155 if (mm_p4d_folded(tlb->mm))
156 return; 156 return;
157 tlb_remove_table(tlb, p4d); 157 tlb_remove_table(tlb, p4d);
158} 158}
@@ -167,7 +167,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
167static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 167static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
168 unsigned long address) 168 unsigned long address)
169{ 169{
170 if (tlb->mm->context.asce_limit <= _REGION2_SIZE) 170 if (mm_pud_folded(tlb->mm))
171 return; 171 return;
172 tlb_remove_table(tlb, pud); 172 tlb_remove_table(tlb, pud);
173} 173}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 724fba4d09d2..39191a0feed1 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -236,10 +236,10 @@ ENTRY(__switch_to)
236 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 236 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
237 lghi %r4,__TASK_stack 237 lghi %r4,__TASK_stack
238 lghi %r1,__TASK_thread 238 lghi %r1,__TASK_thread
239 lg %r5,0(%r4,%r3) # start of kernel stack of next 239 llill %r5,STACK_INIT
240 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev 240 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
241 lgr %r15,%r5 241 lg %r15,0(%r4,%r3) # start of kernel stack of next
242 aghi %r15,STACK_INIT # end of kernel stack of next 242 agr %r15,%r5 # end of kernel stack of next
243 stg %r3,__LC_CURRENT # store task struct of next 243 stg %r3,__LC_CURRENT # store task struct of next
244 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 244 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
245 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next 245 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 84be7f02d0c2..39b13d71a8fe 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -203,22 +203,13 @@ device_initcall(ftrace_plt_init);
203 */ 203 */
204unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) 204unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
205{ 205{
206 struct ftrace_graph_ent trace;
207
208 if (unlikely(ftrace_graph_is_dead())) 206 if (unlikely(ftrace_graph_is_dead()))
209 goto out; 207 goto out;
210 if (unlikely(atomic_read(&current->tracing_graph_pause))) 208 if (unlikely(atomic_read(&current->tracing_graph_pause)))
211 goto out; 209 goto out;
212 ip -= MCOUNT_INSN_SIZE; 210 ip -= MCOUNT_INSN_SIZE;
213 trace.func = ip; 211 if (!function_graph_enter(parent, ip, 0, NULL))
214 trace.depth = current->curr_ret_stack + 1; 212 parent = (unsigned long) return_to_handler;
215 /* Only trace if the calling function expects to. */
216 if (!ftrace_graph_entry(&trace))
217 goto out;
218 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
219 NULL) == -EBUSY)
220 goto out;
221 parent = (unsigned long) return_to_handler;
222out: 213out:
223 return parent; 214 return parent;
224} 215}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index cc085e2d2ce9..d5523adeddbf 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event)
346 break; 346 break;
347 347
348 case PERF_TYPE_HARDWARE: 348 case PERF_TYPE_HARDWARE:
349 if (is_sampling_event(event)) /* No sampling support */
350 return -ENOENT;
349 ev = attr->config; 351 ev = attr->config;
350 /* Count user space (problem-state) only */ 352 /* Count user space (problem-state) only */
351 if (!attr->exclude_user && attr->exclude_kernel) { 353 if (!attr->exclude_user && attr->exclude_kernel) {
@@ -373,7 +375,7 @@ static int __hw_perf_event_init(struct perf_event *event)
373 return -ENOENT; 375 return -ENOENT;
374 376
375 if (ev > PERF_CPUM_CF_MAX_CTR) 377 if (ev > PERF_CPUM_CF_MAX_CTR)
376 return -EINVAL; 378 return -ENOENT;
377 379
378 /* Obtain the counter set to which the specified counter belongs */ 380 /* Obtain the counter set to which the specified counter belongs */
379 set = get_counter_set(ev); 381 set = get_counter_set(ev);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 7bf604ff50a1..bfabeb1889cc 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1842,10 +1842,30 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
1842CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); 1842CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
1843CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); 1843CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
1844 1844
1845static struct attribute *cpumsf_pmu_events_attr[] = { 1845/* Attribute list for CPU_SF.
1846 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), 1846 *
1847 NULL, 1847 * The availablitiy depends on the CPU_MF sampling facility authorization
1848 NULL, 1848 * for basic + diagnositic samples. This is determined at initialization
1849 * time by the sampling facility device driver.
1850 * If the authorization for basic samples is turned off, it should be
1851 * also turned off for diagnostic sampling.
1852 *
1853 * During initialization of the device driver, check the authorization
1854 * level for diagnostic sampling and installs the attribute
1855 * file for diagnostic sampling if necessary.
1856 *
1857 * For now install a placeholder to reference all possible attributes:
1858 * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
1859 * Add another entry for the final NULL pointer.
1860 */
1861enum {
1862 SF_CYCLES_BASIC_ATTR_IDX = 0,
1863 SF_CYCLES_BASIC_DIAG_ATTR_IDX,
1864 SF_CYCLES_ATTR_MAX
1865};
1866
1867static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
1868 [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
1849}; 1869};
1850 1870
1851PMU_FORMAT_ATTR(event, "config:0-63"); 1871PMU_FORMAT_ATTR(event, "config:0-63");
@@ -2040,7 +2060,10 @@ static int __init init_cpum_sampling_pmu(void)
2040 2060
2041 if (si.ad) { 2061 if (si.ad) {
2042 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); 2062 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
2043 cpumsf_pmu_events_attr[1] = 2063 /* Sampling of diagnostic data authorized,
2064 * install event into attribute list of PMU device.
2065 */
2066 cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
2044 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); 2067 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
2045 } 2068 }
2046 2069
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index eb8aebea3ea7..e76309fbbcb3 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
37$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 37$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
38 38
39# link rule for the .so file, .lds has to be first 39# link rule for the .so file, .lds has to be first
40$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) 40$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
41 $(call if_changed,vdso32ld) 41 $(call if_changed,vdso32ld)
42 42
43# strip rule for the .so file 43# strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
46 $(call if_changed,objcopy) 46 $(call if_changed,objcopy)
47 47
48# assembly rules for the .S files 48# assembly rules for the .S files
49$(obj-vdso32): %.o: %.S 49$(obj-vdso32): %.o: %.S FORCE
50 $(call if_changed_dep,vdso32as) 50 $(call if_changed_dep,vdso32as)
51 51
52# actual build commands 52# actual build commands
53quiet_cmd_vdso32ld = VDSO32L $@ 53quiet_cmd_vdso32ld = VDSO32L $@
54 cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ 54 cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
55quiet_cmd_vdso32as = VDSO32A $@ 55quiet_cmd_vdso32as = VDSO32A $@
56 cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< 56 cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
57 57
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index a22b2cf86eec..f849ac61c5da 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
37$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so 37$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
38 38
39# link rule for the .so file, .lds has to be first 39# link rule for the .so file, .lds has to be first
40$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) 40$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
41 $(call if_changed,vdso64ld) 41 $(call if_changed,vdso64ld)
42 42
43# strip rule for the .so file 43# strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
46 $(call if_changed,objcopy) 46 $(call if_changed,objcopy)
47 47
48# assembly rules for the .S files 48# assembly rules for the .S files
49$(obj-vdso64): %.o: %.S 49$(obj-vdso64): %.o: %.S FORCE
50 $(call if_changed_dep,vdso64as) 50 $(call if_changed_dep,vdso64as)
51 51
52# actual build commands 52# actual build commands
53quiet_cmd_vdso64ld = VDSO64L $@ 53quiet_cmd_vdso64ld = VDSO64L $@
54 cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ 54 cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
55quiet_cmd_vdso64as = VDSO64A $@ 55quiet_cmd_vdso64as = VDSO64A $@
56 cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< 56 cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
57 57
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 21eb7407d51b..8429ab079715 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -154,14 +154,14 @@ SECTIONS
154 * uncompressed image info used by the decompressor 154 * uncompressed image info used by the decompressor
155 * it should match struct vmlinux_info 155 * it should match struct vmlinux_info
156 */ 156 */
157 .vmlinux.info 0 : { 157 .vmlinux.info 0 (INFO) : {
158 QUAD(_stext) /* default_lma */ 158 QUAD(_stext) /* default_lma */
159 QUAD(startup_continue) /* entry */ 159 QUAD(startup_continue) /* entry */
160 QUAD(__bss_start - _stext) /* image_size */ 160 QUAD(__bss_start - _stext) /* image_size */
161 QUAD(__bss_stop - __bss_start) /* bss_size */ 161 QUAD(__bss_stop - __bss_start) /* bss_size */
162 QUAD(__boot_data_start) /* bootdata_off */ 162 QUAD(__boot_data_start) /* bootdata_off */
163 QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */ 163 QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
164 } 164 } :NONE
165 165
166 /* Debugging sections. */ 166 /* Debugging sections. */
167 STABS_DEBUG 167 STABS_DEBUG
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 76d89ee8b428..6791562779ee 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -101,6 +101,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
101 mm->context.asce_limit = _REGION1_SIZE; 101 mm->context.asce_limit = _REGION1_SIZE;
102 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 102 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
103 _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 103 _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
104 mm_inc_nr_puds(mm);
104 } else { 105 } else {
105 crst_table_init(table, _REGION1_ENTRY_EMPTY); 106 crst_table_init(table, _REGION1_ENTRY_EMPTY);
106 pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd); 107 pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
@@ -130,6 +131,7 @@ void crst_table_downgrade(struct mm_struct *mm)
130 } 131 }
131 132
132 pgd = mm->pgd; 133 pgd = mm->pgd;
134 mm_dec_nr_pmds(mm);
133 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); 135 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
134 mm->context.asce_limit = _REGION3_SIZE; 136 mm->context.asce_limit = _REGION3_SIZE;
135 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 137 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index ae0d9e889534..d31bde0870d8 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -53,6 +53,7 @@ int __node_distance(int a, int b)
53{ 53{
54 return mode->distance ? mode->distance(a, b) : 0; 54 return mode->distance ? mode->distance(a, b) : 0;
55} 55}
56EXPORT_SYMBOL(__node_distance);
56 57
57int numa_debug_enabled; 58int numa_debug_enabled;
58 59
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 96dd9f7da250..1b04270e5460 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -321,8 +321,7 @@ int ftrace_disable_ftrace_graph_caller(void)
321void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) 321void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
322{ 322{
323 unsigned long old; 323 unsigned long old;
324 int faulted, err; 324 int faulted;
325 struct ftrace_graph_ent trace;
326 unsigned long return_hooker = (unsigned long)&return_to_handler; 325 unsigned long return_hooker = (unsigned long)&return_to_handler;
327 326
328 if (unlikely(ftrace_graph_is_dead())) 327 if (unlikely(ftrace_graph_is_dead()))
@@ -365,18 +364,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
365 return; 364 return;
366 } 365 }
367 366
368 err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); 367 if (function_graph_enter(old, self_addr, 0, NULL))
369 if (err == -EBUSY) {
370 __raw_writel(old, parent); 368 __raw_writel(old, parent);
371 return;
372 }
373
374 trace.func = self_addr;
375
376 /* Only trace if the calling function expects to */
377 if (!ftrace_graph_entry(&trace)) {
378 current->curr_ret_stack--;
379 __raw_writel(old, parent);
380 }
381} 369}
382#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 370#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 915dda4ae412..684b84ce397f 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -126,20 +126,11 @@ unsigned long prepare_ftrace_return(unsigned long parent,
126 unsigned long frame_pointer) 126 unsigned long frame_pointer)
127{ 127{
128 unsigned long return_hooker = (unsigned long) &return_to_handler; 128 unsigned long return_hooker = (unsigned long) &return_to_handler;
129 struct ftrace_graph_ent trace;
130 129
131 if (unlikely(atomic_read(&current->tracing_graph_pause))) 130 if (unlikely(atomic_read(&current->tracing_graph_pause)))
132 return parent + 8UL; 131 return parent + 8UL;
133 132
134 trace.func = self_addr; 133 if (function_graph_enter(parent, self_addr, frame_pointer, NULL))
135 trace.depth = current->curr_ret_stack + 1;
136
137 /* Only trace if the calling function expects to */
138 if (!ftrace_graph_entry(&trace))
139 return parent + 8UL;
140
141 if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
142 frame_pointer, NULL) == -EBUSY)
143 return parent + 8UL; 134 return parent + 8UL;
144 135
145 return return_hooker; 136 return return_hooker;
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 40d008b0bd3e..05eb016fc41b 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -108,10 +108,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
108 /* Allocate and initialize the free area map. */ 108 /* Allocate and initialize the free area map. */
109 sz = num_tsb_entries / 8; 109 sz = num_tsb_entries / 8;
110 sz = (sz + 7UL) & ~7UL; 110 sz = (sz + 7UL) & ~7UL;
111 iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node); 111 iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
112 if (!iommu->tbl.map) 112 if (!iommu->tbl.map)
113 return -ENOMEM; 113 return -ENOMEM;
114 memset(iommu->tbl.map, 0, sz);
115 114
116 iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, 115 iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
117 (tlb_type != hypervisor ? iommu_flushall : NULL), 116 (tlb_type != hypervisor ? iommu_flushall : NULL),
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 4c5b3fcbed94..e800ce13cc6e 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -683,6 +683,7 @@ void do_signal32(struct pt_regs * regs)
683 regs->tpc -= 4; 683 regs->tpc -= 4;
684 regs->tnpc -= 4; 684 regs->tnpc -= 4;
685 pt_regs_clear_syscall(regs); 685 pt_regs_clear_syscall(regs);
686 /* fall through */
686 case ERESTART_RESTARTBLOCK: 687 case ERESTART_RESTARTBLOCK:
687 regs->u_regs[UREG_G1] = __NR_restart_syscall; 688 regs->u_regs[UREG_G1] = __NR_restart_syscall;
688 regs->tpc -= 4; 689 regs->tpc -= 4;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 5665261cee37..83953780ca01 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -508,6 +508,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
508 regs->pc -= 4; 508 regs->pc -= 4;
509 regs->npc -= 4; 509 regs->npc -= 4;
510 pt_regs_clear_syscall(regs); 510 pt_regs_clear_syscall(regs);
511 /* fall through */
511 case ERESTART_RESTARTBLOCK: 512 case ERESTART_RESTARTBLOCK:
512 regs->u_regs[UREG_G1] = __NR_restart_syscall; 513 regs->u_regs[UREG_G1] = __NR_restart_syscall;
513 regs->pc -= 4; 514 regs->pc -= 4;
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index e9de1803a22e..ca70787efd8e 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -533,6 +533,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
533 regs->tpc -= 4; 533 regs->tpc -= 4;
534 regs->tnpc -= 4; 534 regs->tnpc -= 4;
535 pt_regs_clear_syscall(regs); 535 pt_regs_clear_syscall(regs);
536 /* fall through */
536 case ERESTART_RESTARTBLOCK: 537 case ERESTART_RESTARTBLOCK:
537 regs->u_regs[UREG_G1] = __NR_restart_syscall; 538 regs->u_regs[UREG_G1] = __NR_restart_syscall;
538 regs->tpc -= 4; 539 regs->tpc -= 4;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 222785af550b..5fda4f7bf15d 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -791,7 +791,7 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
791} 791}
792 792
793/* Just skip the save instruction and the ctx register move. */ 793/* Just skip the save instruction and the ctx register move. */
794#define BPF_TAILCALL_PROLOGUE_SKIP 16 794#define BPF_TAILCALL_PROLOGUE_SKIP 32
795#define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128) 795#define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128)
796 796
797static void build_prologue(struct jit_ctx *ctx) 797static void build_prologue(struct jit_ctx *ctx)
@@ -824,9 +824,15 @@ static void build_prologue(struct jit_ctx *ctx)
824 const u8 vfp = bpf2sparc[BPF_REG_FP]; 824 const u8 vfp = bpf2sparc[BPF_REG_FP];
825 825
826 emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx); 826 emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx);
827 } else {
828 emit_nop(ctx);
827 } 829 }
828 830
829 emit_reg_move(I0, O0, ctx); 831 emit_reg_move(I0, O0, ctx);
832 emit_reg_move(I1, O1, ctx);
833 emit_reg_move(I2, O2, ctx);
834 emit_reg_move(I3, O3, ctx);
835 emit_reg_move(I4, O4, ctx);
830 /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */ 836 /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */
831} 837}
832 838
@@ -1270,6 +1276,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1270 const u8 tmp2 = bpf2sparc[TMP_REG_2]; 1276 const u8 tmp2 = bpf2sparc[TMP_REG_2];
1271 u32 opcode = 0, rs2; 1277 u32 opcode = 0, rs2;
1272 1278
1279 if (insn->dst_reg == BPF_REG_FP)
1280 ctx->saw_frame_pointer = true;
1281
1273 ctx->tmp_2_used = true; 1282 ctx->tmp_2_used = true;
1274 emit_loadimm(imm, tmp2, ctx); 1283 emit_loadimm(imm, tmp2, ctx);
1275 1284
@@ -1308,6 +1317,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1308 const u8 tmp = bpf2sparc[TMP_REG_1]; 1317 const u8 tmp = bpf2sparc[TMP_REG_1];
1309 u32 opcode = 0, rs2; 1318 u32 opcode = 0, rs2;
1310 1319
1320 if (insn->dst_reg == BPF_REG_FP)
1321 ctx->saw_frame_pointer = true;
1322
1311 switch (BPF_SIZE(code)) { 1323 switch (BPF_SIZE(code)) {
1312 case BPF_W: 1324 case BPF_W:
1313 opcode = ST32; 1325 opcode = ST32;
@@ -1340,6 +1352,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1340 const u8 tmp2 = bpf2sparc[TMP_REG_2]; 1352 const u8 tmp2 = bpf2sparc[TMP_REG_2];
1341 const u8 tmp3 = bpf2sparc[TMP_REG_3]; 1353 const u8 tmp3 = bpf2sparc[TMP_REG_3];
1342 1354
1355 if (insn->dst_reg == BPF_REG_FP)
1356 ctx->saw_frame_pointer = true;
1357
1343 ctx->tmp_1_used = true; 1358 ctx->tmp_1_used = true;
1344 ctx->tmp_2_used = true; 1359 ctx->tmp_2_used = true;
1345 ctx->tmp_3_used = true; 1360 ctx->tmp_3_used = true;
@@ -1360,6 +1375,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1360 const u8 tmp2 = bpf2sparc[TMP_REG_2]; 1375 const u8 tmp2 = bpf2sparc[TMP_REG_2];
1361 const u8 tmp3 = bpf2sparc[TMP_REG_3]; 1376 const u8 tmp3 = bpf2sparc[TMP_REG_3];
1362 1377
1378 if (insn->dst_reg == BPF_REG_FP)
1379 ctx->saw_frame_pointer = true;
1380
1363 ctx->tmp_1_used = true; 1381 ctx->tmp_1_used = true;
1364 ctx->tmp_2_used = true; 1382 ctx->tmp_2_used = true;
1365 ctx->tmp_3_used = true; 1383 ctx->tmp_3_used = true;
@@ -1425,12 +1443,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1425 struct bpf_prog *tmp, *orig_prog = prog; 1443 struct bpf_prog *tmp, *orig_prog = prog;
1426 struct sparc64_jit_data *jit_data; 1444 struct sparc64_jit_data *jit_data;
1427 struct bpf_binary_header *header; 1445 struct bpf_binary_header *header;
1446 u32 prev_image_size, image_size;
1428 bool tmp_blinded = false; 1447 bool tmp_blinded = false;
1429 bool extra_pass = false; 1448 bool extra_pass = false;
1430 struct jit_ctx ctx; 1449 struct jit_ctx ctx;
1431 u32 image_size;
1432 u8 *image_ptr; 1450 u8 *image_ptr;
1433 int pass; 1451 int pass, i;
1434 1452
1435 if (!prog->jit_requested) 1453 if (!prog->jit_requested)
1436 return orig_prog; 1454 return orig_prog;
@@ -1461,61 +1479,82 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1461 header = jit_data->header; 1479 header = jit_data->header;
1462 extra_pass = true; 1480 extra_pass = true;
1463 image_size = sizeof(u32) * ctx.idx; 1481 image_size = sizeof(u32) * ctx.idx;
1482 prev_image_size = image_size;
1483 pass = 1;
1464 goto skip_init_ctx; 1484 goto skip_init_ctx;
1465 } 1485 }
1466 1486
1467 memset(&ctx, 0, sizeof(ctx)); 1487 memset(&ctx, 0, sizeof(ctx));
1468 ctx.prog = prog; 1488 ctx.prog = prog;
1469 1489
1470 ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL); 1490 ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL);
1471 if (ctx.offset == NULL) { 1491 if (ctx.offset == NULL) {
1472 prog = orig_prog; 1492 prog = orig_prog;
1473 goto out_off; 1493 goto out_off;
1474 } 1494 }
1475 1495
1476 /* Fake pass to detect features used, and get an accurate assessment 1496 /* Longest sequence emitted is for bswap32, 12 instructions. Pre-cook
1477 * of what the final image size will be. 1497 * the offset array so that we converge faster.
1478 */ 1498 */
1479 if (build_body(&ctx)) { 1499 for (i = 0; i < prog->len; i++)
1480 prog = orig_prog; 1500 ctx.offset[i] = i * (12 * 4);
1481 goto out_off;
1482 }
1483 build_prologue(&ctx);
1484 build_epilogue(&ctx);
1485
1486 /* Now we know the actual image size. */
1487 image_size = sizeof(u32) * ctx.idx;
1488 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1489 sizeof(u32), jit_fill_hole);
1490 if (header == NULL) {
1491 prog = orig_prog;
1492 goto out_off;
1493 }
1494 1501
1495 ctx.image = (u32 *)image_ptr; 1502 prev_image_size = ~0U;
1496skip_init_ctx: 1503 for (pass = 1; pass < 40; pass++) {
1497 for (pass = 1; pass < 3; pass++) {
1498 ctx.idx = 0; 1504 ctx.idx = 0;
1499 1505
1500 build_prologue(&ctx); 1506 build_prologue(&ctx);
1501
1502 if (build_body(&ctx)) { 1507 if (build_body(&ctx)) {
1503 bpf_jit_binary_free(header);
1504 prog = orig_prog; 1508 prog = orig_prog;
1505 goto out_off; 1509 goto out_off;
1506 } 1510 }
1507
1508 build_epilogue(&ctx); 1511 build_epilogue(&ctx);
1509 1512
1510 if (bpf_jit_enable > 1) 1513 if (bpf_jit_enable > 1)
1511 pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass, 1514 pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass,
1512 image_size - (ctx.idx * 4), 1515 ctx.idx * 4,
1513 ctx.tmp_1_used ? '1' : ' ', 1516 ctx.tmp_1_used ? '1' : ' ',
1514 ctx.tmp_2_used ? '2' : ' ', 1517 ctx.tmp_2_used ? '2' : ' ',
1515 ctx.tmp_3_used ? '3' : ' ', 1518 ctx.tmp_3_used ? '3' : ' ',
1516 ctx.saw_frame_pointer ? 'F' : ' ', 1519 ctx.saw_frame_pointer ? 'F' : ' ',
1517 ctx.saw_call ? 'C' : ' ', 1520 ctx.saw_call ? 'C' : ' ',
1518 ctx.saw_tail_call ? 'T' : ' '); 1521 ctx.saw_tail_call ? 'T' : ' ');
1522
1523 if (ctx.idx * 4 == prev_image_size)
1524 break;
1525 prev_image_size = ctx.idx * 4;
1526 cond_resched();
1527 }
1528
1529 /* Now we know the actual image size. */
1530 image_size = sizeof(u32) * ctx.idx;
1531 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1532 sizeof(u32), jit_fill_hole);
1533 if (header == NULL) {
1534 prog = orig_prog;
1535 goto out_off;
1536 }
1537
1538 ctx.image = (u32 *)image_ptr;
1539skip_init_ctx:
1540 ctx.idx = 0;
1541
1542 build_prologue(&ctx);
1543
1544 if (build_body(&ctx)) {
1545 bpf_jit_binary_free(header);
1546 prog = orig_prog;
1547 goto out_off;
1548 }
1549
1550 build_epilogue(&ctx);
1551
1552 if (ctx.idx * 4 != prev_image_size) {
1553 pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n",
1554 prev_image_size, ctx.idx * 4);
1555 bpf_jit_binary_free(header);
1556 prog = orig_prog;
1557 goto out_off;
1519 } 1558 }
1520 1559
1521 if (bpf_jit_enable > 1) 1560 if (bpf_jit_enable > 1)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 74c002ddc0ce..28c40624bcb6 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
1305 io_req->fds[0] = dev->cow.fd; 1305 io_req->fds[0] = dev->cow.fd;
1306 else 1306 else
1307 io_req->fds[0] = dev->fd; 1307 io_req->fds[0] = dev->fd;
1308 io_req->error = 0;
1308 1309
1309 if (req_op(req) == REQ_OP_FLUSH) { 1310 if (req_op(req) == REQ_OP_FLUSH) {
1310 io_req->op = UBD_FLUSH; 1311 io_req->op = UBD_FLUSH;
@@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
1313 io_req->cow_offset = -1; 1314 io_req->cow_offset = -1;
1314 io_req->offset = off; 1315 io_req->offset = off;
1315 io_req->length = bvec->bv_len; 1316 io_req->length = bvec->bv_len;
1316 io_req->error = 0;
1317 io_req->sector_mask = 0; 1317 io_req->sector_mask = 0;
1318
1319 io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE; 1318 io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
1320 io_req->offsets[0] = 0; 1319 io_req->offsets[0] = 0;
1321 io_req->offsets[1] = dev->cow.data_offset; 1320 io_req->offsets[1] = dev->cow.data_offset;
@@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
1341static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, 1340static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
1342 const struct blk_mq_queue_data *bd) 1341 const struct blk_mq_queue_data *bd)
1343{ 1342{
1343 struct ubd *ubd_dev = hctx->queue->queuedata;
1344 struct request *req = bd->rq; 1344 struct request *req = bd->rq;
1345 int ret = 0; 1345 int ret = 0;
1346 1346
1347 blk_mq_start_request(req); 1347 blk_mq_start_request(req);
1348 1348
1349 spin_lock_irq(&ubd_dev->lock);
1350
1349 if (req_op(req) == REQ_OP_FLUSH) { 1351 if (req_op(req) == REQ_OP_FLUSH) {
1350 ret = ubd_queue_one_vec(hctx, req, 0, NULL); 1352 ret = ubd_queue_one_vec(hctx, req, 0, NULL);
1351 } else { 1353 } else {
@@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
1361 } 1363 }
1362 } 1364 }
1363out: 1365out:
1364 if (ret < 0) { 1366 spin_unlock_irq(&ubd_dev->lock);
1367
1368 if (ret < 0)
1365 blk_mq_requeue_request(req, true); 1369 blk_mq_requeue_request(req, true);
1366 } 1370
1367 return BLK_STS_OK; 1371 return BLK_STS_OK;
1368} 1372}
1369 1373
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba7e3464ee92..8689e794a43c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,10 +444,6 @@ config RETPOLINE
444 branches. Requires a compiler with -mindirect-branch=thunk-extern 444 branches. Requires a compiler with -mindirect-branch=thunk-extern
445 support for full protection. The kernel may run slower. 445 support for full protection. The kernel may run slower.
446 446
447 Without compiler support, at least indirect branches in assembler
448 code are eliminated. Since this includes the syscall entry path,
449 it is not entirely pointless.
450
451config INTEL_RDT 447config INTEL_RDT
452 bool "Intel Resource Director Technology support" 448 bool "Intel Resource Director Technology support"
453 depends on X86 && CPU_SUP_INTEL 449 depends on X86 && CPU_SUP_INTEL
@@ -525,7 +521,6 @@ config X86_VSMP
525 bool "ScaleMP vSMP" 521 bool "ScaleMP vSMP"
526 select HYPERVISOR_GUEST 522 select HYPERVISOR_GUEST
527 select PARAVIRT 523 select PARAVIRT
528 select PARAVIRT_XXL
529 depends on X86_64 && PCI 524 depends on X86_64 && PCI
530 depends on X86_EXTENDED_PLATFORM 525 depends on X86_EXTENDED_PLATFORM
531 depends on SMP 526 depends on SMP
@@ -1005,13 +1000,7 @@ config NR_CPUS
1005 to the kernel image. 1000 to the kernel image.
1006 1001
1007config SCHED_SMT 1002config SCHED_SMT
1008 bool "SMT (Hyperthreading) scheduler support" 1003 def_bool y if SMP
1009 depends on SMP
1010 ---help---
1011 SMT scheduler support improves the CPU scheduler's decision making
1012 when dealing with Intel Pentium 4 chips with HyperThreading at a
1013 cost of slightly increased overhead in some places. If unsure say
1014 N here.
1015 1004
1016config SCHED_MC 1005config SCHED_MC
1017 def_bool y 1006 def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5b562e464009..f5d7f4134524 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -213,8 +213,6 @@ ifdef CONFIG_X86_64
213KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000) 213KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
214endif 214endif
215 215
216# Speed up the build
217KBUILD_CFLAGS += -pipe
218# Workaround for a gcc prelease that unfortunately was shipped in a suse release 216# Workaround for a gcc prelease that unfortunately was shipped in a suse release
219KBUILD_CFLAGS += -Wno-sign-compare 217KBUILD_CFLAGS += -Wno-sign-compare
220# 218#
@@ -222,9 +220,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
222 220
223# Avoid indirect branches in kernel to deal with Spectre 221# Avoid indirect branches in kernel to deal with Spectre
224ifdef CONFIG_RETPOLINE 222ifdef CONFIG_RETPOLINE
225ifneq ($(RETPOLINE_CFLAGS),) 223ifeq ($(RETPOLINE_CFLAGS),)
226 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE 224 $(error You are building kernel with non-retpoline compiler, please update your compiler.)
227endif 225endif
226 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
228endif 227endif
229 228
230archscripts: scripts_basic 229archscripts: scripts_basic
@@ -239,7 +238,7 @@ archheaders:
239archmacros: 238archmacros:
240 $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s 239 $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
241 240
242ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,- 241ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
243export ASM_MACRO_FLAGS 242export ASM_MACRO_FLAGS
244KBUILD_CFLAGS += $(ASM_MACRO_FLAGS) 243KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
245 244
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 4c881c850125..850b8762e889 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -300,7 +300,7 @@ _start:
300 # Part 2 of the header, from the old setup.S 300 # Part 2 of the header, from the old setup.S
301 301
302 .ascii "HdrS" # header signature 302 .ascii "HdrS" # header signature
303 .word 0x020e # header version number (>= 0x0105) 303 .word 0x020d # header version number (>= 0x0105)
304 # or else old loadlin-1.5 will fail) 304 # or else old loadlin-1.5 will fail)
305 .globl realmode_swtch 305 .globl realmode_swtch
306realmode_swtch: .word 0, 0 # default_switch, SETUPSEG 306realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -558,10 +558,6 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
558init_size: .long INIT_SIZE # kernel initialization size 558init_size: .long INIT_SIZE # kernel initialization size
559handover_offset: .long 0 # Filled in by build.c 559handover_offset: .long 0 # Filled in by build.c
560 560
561acpi_rsdp_addr: .quad 0 # 64-bit physical pointer to the
562 # ACPI RSDP table, added with
563 # version 2.14
564
565# End of setup header ##################################################### 561# End of setup header #####################################################
566 562
567 .section ".entrytext", "ax" 563 .section ".entrytext", "ax"
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 106911b603bd..374a19712e20 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event *event)
438 if (config == -1LL) 438 if (config == -1LL)
439 return -EINVAL; 439 return -EINVAL;
440 440
441 /*
442 * Branch tracing:
443 */
444 if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
445 !attr->freq && hwc->sample_period == 1) {
446 /* BTS is not supported by this architecture. */
447 if (!x86_pmu.bts_active)
448 return -EOPNOTSUPP;
449
450 /* BTS is currently only allowed for user-mode. */
451 if (!attr->exclude_kernel)
452 return -EOPNOTSUPP;
453
454 /* disallow bts if conflicting events are present */
455 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
456 return -EBUSY;
457
458 event->destroy = hw_perf_lbr_event_destroy;
459 }
460
461 hwc->config |= config; 441 hwc->config |= config;
462 442
463 return 0; 443 return 0;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 273c62e81546..ecc3e34ca955 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2306,14 +2306,18 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
2306 return handled; 2306 return handled;
2307} 2307}
2308 2308
2309static bool disable_counter_freezing; 2309static bool disable_counter_freezing = true;
2310static int __init intel_perf_counter_freezing_setup(char *s) 2310static int __init intel_perf_counter_freezing_setup(char *s)
2311{ 2311{
2312 disable_counter_freezing = true; 2312 bool res;
2313 pr_info("Intel PMU Counter freezing feature disabled\n"); 2313
2314 if (kstrtobool(s, &res))
2315 return -EINVAL;
2316
2317 disable_counter_freezing = !res;
2314 return 1; 2318 return 1;
2315} 2319}
2316__setup("disable_counter_freezing", intel_perf_counter_freezing_setup); 2320__setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
2317 2321
2318/* 2322/*
2319 * Simplified handler for Arch Perfmon v4: 2323 * Simplified handler for Arch Perfmon v4:
@@ -2470,16 +2474,7 @@ done:
2470static struct event_constraint * 2474static struct event_constraint *
2471intel_bts_constraints(struct perf_event *event) 2475intel_bts_constraints(struct perf_event *event)
2472{ 2476{
2473 struct hw_perf_event *hwc = &event->hw; 2477 if (unlikely(intel_pmu_has_bts(event)))
2474 unsigned int hw_event, bts_event;
2475
2476 if (event->attr.freq)
2477 return NULL;
2478
2479 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
2480 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
2481
2482 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
2483 return &bts_constraint; 2478 return &bts_constraint;
2484 2479
2485 return NULL; 2480 return NULL;
@@ -3098,6 +3093,43 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3098 return flags; 3093 return flags;
3099} 3094}
3100 3095
3096static int intel_pmu_bts_config(struct perf_event *event)
3097{
3098 struct perf_event_attr *attr = &event->attr;
3099
3100 if (unlikely(intel_pmu_has_bts(event))) {
3101 /* BTS is not supported by this architecture. */
3102 if (!x86_pmu.bts_active)
3103 return -EOPNOTSUPP;
3104
3105 /* BTS is currently only allowed for user-mode. */
3106 if (!attr->exclude_kernel)
3107 return -EOPNOTSUPP;
3108
3109 /* BTS is not allowed for precise events. */
3110 if (attr->precise_ip)
3111 return -EOPNOTSUPP;
3112
3113 /* disallow bts if conflicting events are present */
3114 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3115 return -EBUSY;
3116
3117 event->destroy = hw_perf_lbr_event_destroy;
3118 }
3119
3120 return 0;
3121}
3122
3123static int core_pmu_hw_config(struct perf_event *event)
3124{
3125 int ret = x86_pmu_hw_config(event);
3126
3127 if (ret)
3128 return ret;
3129
3130 return intel_pmu_bts_config(event);
3131}
3132
3101static int intel_pmu_hw_config(struct perf_event *event) 3133static int intel_pmu_hw_config(struct perf_event *event)
3102{ 3134{
3103 int ret = x86_pmu_hw_config(event); 3135 int ret = x86_pmu_hw_config(event);
@@ -3105,6 +3137,10 @@ static int intel_pmu_hw_config(struct perf_event *event)
3105 if (ret) 3137 if (ret)
3106 return ret; 3138 return ret;
3107 3139
3140 ret = intel_pmu_bts_config(event);
3141 if (ret)
3142 return ret;
3143
3108 if (event->attr.precise_ip) { 3144 if (event->attr.precise_ip) {
3109 if (!event->attr.freq) { 3145 if (!event->attr.freq) {
3110 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 3146 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
@@ -3127,7 +3163,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
3127 /* 3163 /*
3128 * BTS is set up earlier in this path, so don't account twice 3164 * BTS is set up earlier in this path, so don't account twice
3129 */ 3165 */
3130 if (!intel_pmu_has_bts(event)) { 3166 if (!unlikely(intel_pmu_has_bts(event))) {
3131 /* disallow lbr if conflicting events are present */ 3167 /* disallow lbr if conflicting events are present */
3132 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3168 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3133 return -EBUSY; 3169 return -EBUSY;
@@ -3596,7 +3632,7 @@ static __initconst const struct x86_pmu core_pmu = {
3596 .enable_all = core_pmu_enable_all, 3632 .enable_all = core_pmu_enable_all,
3597 .enable = core_pmu_enable_event, 3633 .enable = core_pmu_enable_event,
3598 .disable = x86_pmu_disable_event, 3634 .disable = x86_pmu_disable_event,
3599 .hw_config = x86_pmu_hw_config, 3635 .hw_config = core_pmu_hw_config,
3600 .schedule_events = x86_schedule_events, 3636 .schedule_events = x86_schedule_events,
3601 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 3637 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3602 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 3638 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index e17ab885b1e9..cb46d602a6b8 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -129,8 +129,15 @@ struct intel_uncore_box {
129 struct intel_uncore_extra_reg shared_regs[0]; 129 struct intel_uncore_extra_reg shared_regs[0];
130}; 130};
131 131
132#define UNCORE_BOX_FLAG_INITIATED 0 132/* CFL uncore 8th cbox MSRs */
133#define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */ 133#define CFL_UNC_CBO_7_PERFEVTSEL0 0xf70
134#define CFL_UNC_CBO_7_PER_CTR0 0xf76
135
136#define UNCORE_BOX_FLAG_INITIATED 0
137/* event config registers are 8-byte apart */
138#define UNCORE_BOX_FLAG_CTL_OFFS8 1
139/* CFL 8th CBOX has different MSR space */
140#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2
134 141
135struct uncore_event_desc { 142struct uncore_event_desc {
136 struct kobj_attribute attr; 143 struct kobj_attribute attr;
@@ -297,17 +304,27 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
297static inline 304static inline
298unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) 305unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
299{ 306{
300 return box->pmu->type->event_ctl + 307 if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
301 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + 308 return CFL_UNC_CBO_7_PERFEVTSEL0 +
302 uncore_msr_box_offset(box); 309 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
310 } else {
311 return box->pmu->type->event_ctl +
312 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
313 uncore_msr_box_offset(box);
314 }
303} 315}
304 316
305static inline 317static inline
306unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) 318unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
307{ 319{
308 return box->pmu->type->perf_ctr + 320 if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
309 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + 321 return CFL_UNC_CBO_7_PER_CTR0 +
310 uncore_msr_box_offset(box); 322 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
323 } else {
324 return box->pmu->type->perf_ctr +
325 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
326 uncore_msr_box_offset(box);
327 }
311} 328}
312 329
313static inline 330static inline
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 8527c3e1038b..2593b0d7aeee 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -15,6 +15,25 @@
15#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 15#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
16#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f 16#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
17#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f 17#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
18#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
19#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
20#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
21#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
22#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
23#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
24#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
25#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
26#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
27#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f
28#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f
29#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2
30#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30
31#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18
32#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6
33#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31
34#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
35#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
36#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
18 37
19/* SNB event control */ 38/* SNB event control */
20#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff 39#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -202,6 +221,10 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
202 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 221 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
203 SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); 222 SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
204 } 223 }
224
225 /* The 8th CBOX has different MSR space */
226 if (box->pmu->pmu_idx == 7)
227 __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
205} 228}
206 229
207static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) 230static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
@@ -228,7 +251,7 @@ static struct intel_uncore_ops skl_uncore_msr_ops = {
228static struct intel_uncore_type skl_uncore_cbox = { 251static struct intel_uncore_type skl_uncore_cbox = {
229 .name = "cbox", 252 .name = "cbox",
230 .num_counters = 4, 253 .num_counters = 4,
231 .num_boxes = 5, 254 .num_boxes = 8,
232 .perf_ctr_bits = 44, 255 .perf_ctr_bits = 44,
233 .fixed_ctr_bits = 48, 256 .fixed_ctr_bits = 48,
234 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, 257 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
@@ -569,7 +592,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
569 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), 592 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
570 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 593 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
571 }, 594 },
572 595 { /* IMC */
596 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
597 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
598 },
599 { /* IMC */
600 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
601 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
602 },
603 { /* IMC */
604 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
605 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
606 },
607 { /* IMC */
608 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
609 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
610 },
611 { /* IMC */
612 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
613 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
614 },
615 { /* IMC */
616 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
617 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
618 },
619 { /* IMC */
620 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
621 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
622 },
623 { /* IMC */
624 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
625 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
626 },
627 { /* IMC */
628 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
629 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
630 },
631 { /* IMC */
632 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
633 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
634 },
635 { /* IMC */
636 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
637 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
638 },
639 { /* IMC */
640 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
641 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
642 },
643 { /* IMC */
644 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
645 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
646 },
647 { /* IMC */
648 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
649 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
650 },
651 { /* IMC */
652 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
653 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
654 },
655 { /* IMC */
656 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
657 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
658 },
659 { /* IMC */
660 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
661 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
662 },
663 { /* IMC */
664 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
665 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
666 },
667 { /* IMC */
668 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
669 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
670 },
573 { /* end: all zeroes */ }, 671 { /* end: all zeroes */ },
574}; 672};
575 673
@@ -618,6 +716,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
618 IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ 716 IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
619 IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ 717 IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
620 IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ 718 IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
719 IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
720 IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
721 IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
722 IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
723 IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
724 IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
725 IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
726 IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
727 IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
728 IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
729 IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
730 IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
731 IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
732 IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
733 IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
734 IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
735 IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
736 IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
737 IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
621 { /* end marker */ } 738 { /* end marker */ }
622}; 739};
623 740
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index adae087cecdd..78d7b7031bfc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -859,11 +859,16 @@ static inline int amd_pmu_init(void)
859 859
860static inline bool intel_pmu_has_bts(struct perf_event *event) 860static inline bool intel_pmu_has_bts(struct perf_event *event)
861{ 861{
862 if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && 862 struct hw_perf_event *hwc = &event->hw;
863 !event->attr.freq && event->hw.sample_period == 1) 863 unsigned int hw_event, bts_event;
864 return true; 864
865 if (event->attr.freq)
866 return false;
867
868 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
869 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
865 870
866 return false; 871 return hw_event == bts_event && hwc->sample_period == 1;
867} 872}
868 873
869int intel_pmu_save_and_restart(struct perf_event *event); 874int intel_pmu_save_and_restart(struct perf_event *event);
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 5f7290e6e954..69dcdf195b61 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -226,7 +226,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
226 "3: movl $-2,%[err]\n\t" \ 226 "3: movl $-2,%[err]\n\t" \
227 "jmp 2b\n\t" \ 227 "jmp 2b\n\t" \
228 ".popsection\n\t" \ 228 ".popsection\n\t" \
229 _ASM_EXTABLE_UA(1b, 3b) \ 229 _ASM_EXTABLE(1b, 3b) \
230 : [err] "=r" (err) \ 230 : [err] "=r" (err) \
231 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ 231 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
232 : "memory") 232 : "memory")
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55e51ff7e421..fbda5a917c5b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1094,7 +1094,8 @@ struct kvm_x86_ops {
1094 bool (*has_wbinvd_exit)(void); 1094 bool (*has_wbinvd_exit)(void);
1095 1095
1096 u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); 1096 u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
1097 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); 1097 /* Returns actual tsc_offset set in active VMCS */
1098 u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
1098 1099
1099 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); 1100 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
1100 1101
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4da9b1c58d28..c1a812bd5a27 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -221,6 +221,8 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
221 221
222int mce_available(struct cpuinfo_x86 *c); 222int mce_available(struct cpuinfo_x86 *c);
223bool mce_is_memory_error(struct mce *m); 223bool mce_is_memory_error(struct mce *m);
224bool mce_is_correctable(struct mce *m);
225int mce_usable_address(struct mce *m);
224 226
225DECLARE_PER_CPU(unsigned, mce_exception_count); 227DECLARE_PER_CPU(unsigned, mce_exception_count);
226DECLARE_PER_CPU(unsigned, mce_poll_count); 228DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 0d6271cce198..1d0a7778e163 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
232 : "cc"); 232 : "cc");
233 } 233 }
234#endif 234#endif
235 return hv_status; 235 return hv_status;
236} 236}
237 237
238/* 238/*
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 80f4a4f38c79..c8f73efb4ece 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -41,9 +41,10 @@
41 41
42#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ 42#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
43#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ 43#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
44#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ 44#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
45#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
45#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ 46#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
46#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ 47#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
47 48
48#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ 49#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
49#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ 50#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 80dc14422495..032b6009baab 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -3,6 +3,8 @@
3#ifndef _ASM_X86_NOSPEC_BRANCH_H_ 3#ifndef _ASM_X86_NOSPEC_BRANCH_H_
4#define _ASM_X86_NOSPEC_BRANCH_H_ 4#define _ASM_X86_NOSPEC_BRANCH_H_
5 5
6#include <linux/static_key.h>
7
6#include <asm/alternative.h> 8#include <asm/alternative.h>
7#include <asm/alternative-asm.h> 9#include <asm/alternative-asm.h>
8#include <asm/cpufeatures.h> 10#include <asm/cpufeatures.h>
@@ -162,11 +164,12 @@
162 _ASM_PTR " 999b\n\t" \ 164 _ASM_PTR " 999b\n\t" \
163 ".popsection\n\t" 165 ".popsection\n\t"
164 166
165#if defined(CONFIG_X86_64) && defined(RETPOLINE) 167#ifdef CONFIG_RETPOLINE
168#ifdef CONFIG_X86_64
166 169
167/* 170/*
168 * Since the inline asm uses the %V modifier which is only in newer GCC, 171 * Inline asm uses the %V modifier which is only in newer GCC
169 * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. 172 * which is ensured when CONFIG_RETPOLINE is defined.
170 */ 173 */
171# define CALL_NOSPEC \ 174# define CALL_NOSPEC \
172 ANNOTATE_NOSPEC_ALTERNATIVE \ 175 ANNOTATE_NOSPEC_ALTERNATIVE \
@@ -181,7 +184,7 @@
181 X86_FEATURE_RETPOLINE_AMD) 184 X86_FEATURE_RETPOLINE_AMD)
182# define THUNK_TARGET(addr) [thunk_target] "r" (addr) 185# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
183 186
184#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) 187#else /* CONFIG_X86_32 */
185/* 188/*
186 * For i386 we use the original ret-equivalent retpoline, because 189 * For i386 we use the original ret-equivalent retpoline, because
187 * otherwise we'll run out of registers. We don't care about CET 190 * otherwise we'll run out of registers. We don't care about CET
@@ -211,6 +214,7 @@
211 X86_FEATURE_RETPOLINE_AMD) 214 X86_FEATURE_RETPOLINE_AMD)
212 215
213# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) 216# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
217#endif
214#else /* No retpoline for C / inline asm */ 218#else /* No retpoline for C / inline asm */
215# define CALL_NOSPEC "call *%[thunk_target]\n" 219# define CALL_NOSPEC "call *%[thunk_target]\n"
216# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) 220# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
@@ -219,13 +223,19 @@
219/* The Spectre V2 mitigation variants */ 223/* The Spectre V2 mitigation variants */
220enum spectre_v2_mitigation { 224enum spectre_v2_mitigation {
221 SPECTRE_V2_NONE, 225 SPECTRE_V2_NONE,
222 SPECTRE_V2_RETPOLINE_MINIMAL,
223 SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
224 SPECTRE_V2_RETPOLINE_GENERIC, 226 SPECTRE_V2_RETPOLINE_GENERIC,
225 SPECTRE_V2_RETPOLINE_AMD, 227 SPECTRE_V2_RETPOLINE_AMD,
226 SPECTRE_V2_IBRS_ENHANCED, 228 SPECTRE_V2_IBRS_ENHANCED,
227}; 229};
228 230
231/* The indirect branch speculation control variants */
232enum spectre_v2_user_mitigation {
233 SPECTRE_V2_USER_NONE,
234 SPECTRE_V2_USER_STRICT,
235 SPECTRE_V2_USER_PRCTL,
236 SPECTRE_V2_USER_SECCOMP,
237};
238
229/* The Speculative Store Bypass disable variants */ 239/* The Speculative Store Bypass disable variants */
230enum ssb_mitigation { 240enum ssb_mitigation {
231 SPEC_STORE_BYPASS_NONE, 241 SPEC_STORE_BYPASS_NONE,
@@ -303,6 +313,10 @@ do { \
303 preempt_enable(); \ 313 preempt_enable(); \
304} while (0) 314} while (0)
305 315
316DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
317DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
318DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
319
306#endif /* __ASSEMBLY__ */ 320#endif /* __ASSEMBLY__ */
307 321
308/* 322/*
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index cd0cf1c568b4..8f657286d599 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -33,12 +33,14 @@
33 33
34/* 34/*
35 * Set __PAGE_OFFSET to the most negative possible address + 35 * Set __PAGE_OFFSET to the most negative possible address +
36 * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a 36 * PGDIR_SIZE*17 (pgd slot 273).
37 * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's 37 *
38 * what Xen requires. 38 * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
39 * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
40 * but it's what Xen requires.
39 */ 41 */
40#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL) 42#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL)
41#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL) 43#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL)
42 44
43#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT 45#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
44#define __PAGE_OFFSET page_offset_base 46#define __PAGE_OFFSET page_offset_base
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 04edd2d58211..84bd9bdc1987 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d;
111 */ 111 */
112#define MAXMEM (1UL << MAX_PHYSMEM_BITS) 112#define MAXMEM (1UL << MAX_PHYSMEM_BITS)
113 113
114#define LDT_PGD_ENTRY_L4 -3UL 114#define LDT_PGD_ENTRY -240UL
115#define LDT_PGD_ENTRY_L5 -112UL
116#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
117#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) 115#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
118#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) 116#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
119 117
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 87623c6b13db..bd5ac6cc37db 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -13,12 +13,15 @@
13#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire 13#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
14static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) 14static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
15{ 15{
16 u32 val = 0; 16 u32 val;
17
18 if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
19 "I", _Q_PENDING_OFFSET))
20 val |= _Q_PENDING_VAL;
21 17
18 /*
19 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
20 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
21 * statement expression, which GCC doesn't like.
22 */
23 val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
24 "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
22 val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK; 25 val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
23 26
24 return val; 27 return val;
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index ae7c2c5cd7f0..5393babc0598 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
53 return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); 53 return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
54} 54}
55 55
56static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
57{
58 BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
59 return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
60}
61
56static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) 62static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
57{ 63{
58 BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); 64 BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
59 return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); 65 return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
60} 66}
61 67
68static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
69{
70 BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
71 return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
72}
73
62static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) 74static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
63{ 75{
64 return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; 76 return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
@@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void);
70static inline void speculative_store_bypass_ht_init(void) { } 82static inline void speculative_store_bypass_ht_init(void) { }
71#endif 83#endif
72 84
73extern void speculative_store_bypass_update(unsigned long tif); 85extern void speculation_ctrl_update(unsigned long tif);
74 86extern void speculation_ctrl_update_current(void);
75static inline void speculative_store_bypass_update_current(void)
76{
77 speculative_store_bypass_update(current_thread_info()->flags);
78}
79 87
80#endif 88#endif
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 36bd243843d6..7cf1a270d891 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
11 11
12__visible struct task_struct *__switch_to(struct task_struct *prev, 12__visible struct task_struct *__switch_to(struct task_struct *prev,
13 struct task_struct *next); 13 struct task_struct *next);
14struct tss_struct;
15void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
16 struct tss_struct *tss);
17 14
18/* This runs runs on the previous thread's stack. */ 15/* This runs runs on the previous thread's stack. */
19static inline void prepare_switch_to(struct task_struct *next) 16static inline void prepare_switch_to(struct task_struct *next)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2ff2a30a264f..82b73b75d67c 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,10 +79,12 @@ struct thread_info {
79#define TIF_SIGPENDING 2 /* signal pending */ 79#define TIF_SIGPENDING 2 /* signal pending */
80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
82#define TIF_SSBD 5 /* Reduced data speculation */ 82#define TIF_SSBD 5 /* Speculative store bypass disable */
83#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 83#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
84#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 84#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
85#define TIF_SECCOMP 8 /* secure computing */ 85#define TIF_SECCOMP 8 /* secure computing */
86#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
87#define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */
86#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ 88#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
87#define TIF_UPROBE 12 /* breakpointed or singlestepping */ 89#define TIF_UPROBE 12 /* breakpointed or singlestepping */
88#define TIF_PATCH_PENDING 13 /* pending live patching update */ 90#define TIF_PATCH_PENDING 13 /* pending live patching update */
@@ -110,6 +112,8 @@ struct thread_info {
110#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 112#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
111#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 113#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
112#define _TIF_SECCOMP (1 << TIF_SECCOMP) 114#define _TIF_SECCOMP (1 << TIF_SECCOMP)
115#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
116#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
113#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) 117#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
114#define _TIF_UPROBE (1 << TIF_UPROBE) 118#define _TIF_UPROBE (1 << TIF_UPROBE)
115#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) 119#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
@@ -145,8 +149,18 @@ struct thread_info {
145 _TIF_FSCHECK) 149 _TIF_FSCHECK)
146 150
147/* flags to check in __switch_to() */ 151/* flags to check in __switch_to() */
148#define _TIF_WORK_CTXSW \ 152#define _TIF_WORK_CTXSW_BASE \
149 (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) 153 (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
154 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
155
156/*
157 * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
158 */
159#ifdef CONFIG_SMP
160# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
161#else
162# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE)
163#endif
150 164
151#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) 165#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
152#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) 166#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d760611cfc35..f4204bf377fc 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -169,10 +169,14 @@ struct tlb_state {
169 169
170#define LOADED_MM_SWITCHING ((struct mm_struct *)1) 170#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
171 171
172 /* Last user mm for optimizing IBPB */
173 union {
174 struct mm_struct *last_user_mm;
175 unsigned long last_user_mm_ibpb;
176 };
177
172 u16 loaded_mm_asid; 178 u16 loaded_mm_asid;
173 u16 next_asid; 179 u16 next_asid;
174 /* last user mm's ctx id */
175 u64 last_ctx_id;
176 180
177 /* 181 /*
178 * We can be in one of several states: 182 * We can be in one of several states:
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 0f842104862c..b85a7c54c6a1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -303,6 +303,4 @@ extern void x86_init_noop(void);
303extern void x86_init_uint_noop(unsigned int unused); 303extern void x86_init_uint_noop(unsigned int unused);
304extern bool x86_pnpbios_disabled(void); 304extern bool x86_pnpbios_disabled(void);
305 305
306void x86_verify_bootdata_version(void);
307
308#endif 306#endif
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 123e669bf363..790ce08e41f2 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -9,7 +9,7 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/device.h> 10#include <linux/device.h>
11 11
12#include <linux/uaccess.h> 12#include <asm/extable.h>
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15 15
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
93 */ 93 */
94static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val) 94static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
95{ 95{
96 return __put_user(val, (unsigned long __user *)addr); 96 int ret = 0;
97
98 asm volatile("1: mov %[val], %[ptr]\n"
99 "2:\n"
100 ".section .fixup, \"ax\"\n"
101 "3: sub $1, %[ret]\n"
102 " jmp 2b\n"
103 ".previous\n"
104 _ASM_EXTABLE(1b, 3b)
105 : [ret] "+r" (ret), [ptr] "=m" (*addr)
106 : [val] "r" (val));
107
108 return ret;
97} 109}
98 110
99static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val) 111static inline int xen_safe_read_ulong(const unsigned long *addr,
112 unsigned long *val)
100{ 113{
101 return __get_user(*val, (unsigned long __user *)addr); 114 int ret = 0;
115 unsigned long rval = ~0ul;
116
117 asm volatile("1: mov %[ptr], %[rval]\n"
118 "2:\n"
119 ".section .fixup, \"ax\"\n"
120 "3: sub $1, %[ret]\n"
121 " jmp 2b\n"
122 ".previous\n"
123 _ASM_EXTABLE(1b, 3b)
124 : [ret] "+r" (ret), [rval] "+r" (rval)
125 : [ptr] "m" (*addr));
126 *val = rval;
127
128 return ret;
102} 129}
103 130
104#ifdef CONFIG_XEN_PV 131#ifdef CONFIG_XEN_PV
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 22f89d040ddd..60733f137e9a 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -16,9 +16,6 @@
16#define RAMDISK_PROMPT_FLAG 0x8000 16#define RAMDISK_PROMPT_FLAG 0x8000
17#define RAMDISK_LOAD_FLAG 0x4000 17#define RAMDISK_LOAD_FLAG 0x4000
18 18
19/* version flags */
20#define VERSION_WRITTEN 0x8000
21
22/* loadflags */ 19/* loadflags */
23#define LOADED_HIGH (1<<0) 20#define LOADED_HIGH (1<<0)
24#define KASLR_FLAG (1<<1) 21#define KASLR_FLAG (1<<1)
@@ -89,7 +86,6 @@ struct setup_header {
89 __u64 pref_address; 86 __u64 pref_address;
90 __u32 init_size; 87 __u32 init_size;
91 __u32 handover_offset; 88 __u32 handover_offset;
92 __u64 acpi_rsdp_addr;
93} __attribute__((packed)); 89} __attribute__((packed));
94 90
95struct sys_desc_table { 91struct sys_desc_table {
@@ -159,7 +155,8 @@ struct boot_params {
159 __u8 _pad2[4]; /* 0x054 */ 155 __u8 _pad2[4]; /* 0x054 */
160 __u64 tboot_addr; /* 0x058 */ 156 __u64 tboot_addr; /* 0x058 */
161 struct ist_info ist_info; /* 0x060 */ 157 struct ist_info ist_info; /* 0x060 */
162 __u8 _pad3[16]; /* 0x070 */ 158 __u64 acpi_rsdp_addr; /* 0x070 */
159 __u8 _pad3[8]; /* 0x078 */
163 __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */ 160 __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
164 __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */ 161 __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
165 struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */ 162 struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 92c76bf97ad8..06635fbca81c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1776,5 +1776,5 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
1776 1776
1777u64 x86_default_get_root_pointer(void) 1777u64 x86_default_get_root_pointer(void)
1778{ 1778{
1779 return boot_params.hdr.acpi_rsdp_addr; 1779 return boot_params.acpi_rsdp_addr;
1780} 1780}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c37e66e493bf..500278f5308e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/nospec.h> 15#include <linux/nospec.h>
16#include <linux/prctl.h> 16#include <linux/prctl.h>
17#include <linux/sched/smt.h>
17 18
18#include <asm/spec-ctrl.h> 19#include <asm/spec-ctrl.h>
19#include <asm/cmdline.h> 20#include <asm/cmdline.h>
@@ -53,6 +54,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
53u64 __ro_after_init x86_amd_ls_cfg_base; 54u64 __ro_after_init x86_amd_ls_cfg_base;
54u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; 55u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
55 56
57/* Control conditional STIPB in switch_to() */
58DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
59/* Control conditional IBPB in switch_mm() */
60DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
61/* Control unconditional IBPB in switch_mm() */
62DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
63
56void __init check_bugs(void) 64void __init check_bugs(void)
57{ 65{
58 identify_boot_cpu(); 66 identify_boot_cpu();
@@ -123,31 +131,6 @@ void __init check_bugs(void)
123#endif 131#endif
124} 132}
125 133
126/* The kernel command line selection */
127enum spectre_v2_mitigation_cmd {
128 SPECTRE_V2_CMD_NONE,
129 SPECTRE_V2_CMD_AUTO,
130 SPECTRE_V2_CMD_FORCE,
131 SPECTRE_V2_CMD_RETPOLINE,
132 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
133 SPECTRE_V2_CMD_RETPOLINE_AMD,
134};
135
136static const char *spectre_v2_strings[] = {
137 [SPECTRE_V2_NONE] = "Vulnerable",
138 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
139 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
140 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
141 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
142 [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
143};
144
145#undef pr_fmt
146#define pr_fmt(fmt) "Spectre V2 : " fmt
147
148static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
149 SPECTRE_V2_NONE;
150
151void 134void
152x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) 135x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
153{ 136{
@@ -169,6 +152,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
169 static_cpu_has(X86_FEATURE_AMD_SSBD)) 152 static_cpu_has(X86_FEATURE_AMD_SSBD))
170 hostval |= ssbd_tif_to_spec_ctrl(ti->flags); 153 hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
171 154
155 /* Conditional STIBP enabled? */
156 if (static_branch_unlikely(&switch_to_cond_stibp))
157 hostval |= stibp_tif_to_spec_ctrl(ti->flags);
158
172 if (hostval != guestval) { 159 if (hostval != guestval) {
173 msrval = setguest ? guestval : hostval; 160 msrval = setguest ? guestval : hostval;
174 wrmsrl(MSR_IA32_SPEC_CTRL, msrval); 161 wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -202,7 +189,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
202 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : 189 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
203 ssbd_spec_ctrl_to_tif(hostval); 190 ssbd_spec_ctrl_to_tif(hostval);
204 191
205 speculative_store_bypass_update(tif); 192 speculation_ctrl_update(tif);
206 } 193 }
207} 194}
208EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); 195EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -217,6 +204,15 @@ static void x86_amd_ssb_disable(void)
217 wrmsrl(MSR_AMD64_LS_CFG, msrval); 204 wrmsrl(MSR_AMD64_LS_CFG, msrval);
218} 205}
219 206
207#undef pr_fmt
208#define pr_fmt(fmt) "Spectre V2 : " fmt
209
210static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
211 SPECTRE_V2_NONE;
212
213static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
214 SPECTRE_V2_USER_NONE;
215
220#ifdef RETPOLINE 216#ifdef RETPOLINE
221static bool spectre_v2_bad_module; 217static bool spectre_v2_bad_module;
222 218
@@ -238,67 +234,217 @@ static inline const char *spectre_v2_module_string(void)
238static inline const char *spectre_v2_module_string(void) { return ""; } 234static inline const char *spectre_v2_module_string(void) { return ""; }
239#endif 235#endif
240 236
241static void __init spec2_print_if_insecure(const char *reason) 237static inline bool match_option(const char *arg, int arglen, const char *opt)
242{ 238{
243 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 239 int len = strlen(opt);
244 pr_info("%s selected on command line.\n", reason); 240
241 return len == arglen && !strncmp(arg, opt, len);
245} 242}
246 243
247static void __init spec2_print_if_secure(const char *reason) 244/* The kernel command line selection for spectre v2 */
245enum spectre_v2_mitigation_cmd {
246 SPECTRE_V2_CMD_NONE,
247 SPECTRE_V2_CMD_AUTO,
248 SPECTRE_V2_CMD_FORCE,
249 SPECTRE_V2_CMD_RETPOLINE,
250 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
251 SPECTRE_V2_CMD_RETPOLINE_AMD,
252};
253
254enum spectre_v2_user_cmd {
255 SPECTRE_V2_USER_CMD_NONE,
256 SPECTRE_V2_USER_CMD_AUTO,
257 SPECTRE_V2_USER_CMD_FORCE,
258 SPECTRE_V2_USER_CMD_PRCTL,
259 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
260 SPECTRE_V2_USER_CMD_SECCOMP,
261 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
262};
263
264static const char * const spectre_v2_user_strings[] = {
265 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
266 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
267 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
268 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
269};
270
271static const struct {
272 const char *option;
273 enum spectre_v2_user_cmd cmd;
274 bool secure;
275} v2_user_options[] __initdata = {
276 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
277 { "off", SPECTRE_V2_USER_CMD_NONE, false },
278 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
279 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
280 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
281 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
282 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
283};
284
285static void __init spec_v2_user_print_cond(const char *reason, bool secure)
248{ 286{
249 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 287 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
250 pr_info("%s selected on command line.\n", reason); 288 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
251} 289}
252 290
253static inline bool retp_compiler(void) 291static enum spectre_v2_user_cmd __init
292spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
254{ 293{
255 return __is_defined(RETPOLINE); 294 char arg[20];
295 int ret, i;
296
297 switch (v2_cmd) {
298 case SPECTRE_V2_CMD_NONE:
299 return SPECTRE_V2_USER_CMD_NONE;
300 case SPECTRE_V2_CMD_FORCE:
301 return SPECTRE_V2_USER_CMD_FORCE;
302 default:
303 break;
304 }
305
306 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
307 arg, sizeof(arg));
308 if (ret < 0)
309 return SPECTRE_V2_USER_CMD_AUTO;
310
311 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
312 if (match_option(arg, ret, v2_user_options[i].option)) {
313 spec_v2_user_print_cond(v2_user_options[i].option,
314 v2_user_options[i].secure);
315 return v2_user_options[i].cmd;
316 }
317 }
318
319 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
320 return SPECTRE_V2_USER_CMD_AUTO;
256} 321}
257 322
258static inline bool match_option(const char *arg, int arglen, const char *opt) 323static void __init
324spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
259{ 325{
260 int len = strlen(opt); 326 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
327 bool smt_possible = IS_ENABLED(CONFIG_SMP);
328 enum spectre_v2_user_cmd cmd;
261 329
262 return len == arglen && !strncmp(arg, opt, len); 330 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
331 return;
332
333 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
334 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
335 smt_possible = false;
336
337 cmd = spectre_v2_parse_user_cmdline(v2_cmd);
338 switch (cmd) {
339 case SPECTRE_V2_USER_CMD_NONE:
340 goto set_mode;
341 case SPECTRE_V2_USER_CMD_FORCE:
342 mode = SPECTRE_V2_USER_STRICT;
343 break;
344 case SPECTRE_V2_USER_CMD_PRCTL:
345 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
346 mode = SPECTRE_V2_USER_PRCTL;
347 break;
348 case SPECTRE_V2_USER_CMD_AUTO:
349 case SPECTRE_V2_USER_CMD_SECCOMP:
350 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
351 if (IS_ENABLED(CONFIG_SECCOMP))
352 mode = SPECTRE_V2_USER_SECCOMP;
353 else
354 mode = SPECTRE_V2_USER_PRCTL;
355 break;
356 }
357
358 /* Initialize Indirect Branch Prediction Barrier */
359 if (boot_cpu_has(X86_FEATURE_IBPB)) {
360 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
361
362 switch (cmd) {
363 case SPECTRE_V2_USER_CMD_FORCE:
364 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
365 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
366 static_branch_enable(&switch_mm_always_ibpb);
367 break;
368 case SPECTRE_V2_USER_CMD_PRCTL:
369 case SPECTRE_V2_USER_CMD_AUTO:
370 case SPECTRE_V2_USER_CMD_SECCOMP:
371 static_branch_enable(&switch_mm_cond_ibpb);
372 break;
373 default:
374 break;
375 }
376
377 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
378 static_key_enabled(&switch_mm_always_ibpb) ?
379 "always-on" : "conditional");
380 }
381
382 /* If enhanced IBRS is enabled no STIPB required */
383 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
384 return;
385
386 /*
387 * If SMT is not possible or STIBP is not available clear the STIPB
388 * mode.
389 */
390 if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
391 mode = SPECTRE_V2_USER_NONE;
392set_mode:
393 spectre_v2_user = mode;
394 /* Only print the STIBP mode when SMT possible */
395 if (smt_possible)
396 pr_info("%s\n", spectre_v2_user_strings[mode]);
263} 397}
264 398
399static const char * const spectre_v2_strings[] = {
400 [SPECTRE_V2_NONE] = "Vulnerable",
401 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
402 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
403 [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
404};
405
265static const struct { 406static const struct {
266 const char *option; 407 const char *option;
267 enum spectre_v2_mitigation_cmd cmd; 408 enum spectre_v2_mitigation_cmd cmd;
268 bool secure; 409 bool secure;
269} mitigation_options[] = { 410} mitigation_options[] __initdata = {
270 { "off", SPECTRE_V2_CMD_NONE, false }, 411 { "off", SPECTRE_V2_CMD_NONE, false },
271 { "on", SPECTRE_V2_CMD_FORCE, true }, 412 { "on", SPECTRE_V2_CMD_FORCE, true },
272 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 413 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
273 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, 414 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
274 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 415 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
275 { "auto", SPECTRE_V2_CMD_AUTO, false }, 416 { "auto", SPECTRE_V2_CMD_AUTO, false },
276}; 417};
277 418
419static void __init spec_v2_print_cond(const char *reason, bool secure)
420{
421 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
422 pr_info("%s selected on command line.\n", reason);
423}
424
278static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 425static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
279{ 426{
427 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
280 char arg[20]; 428 char arg[20];
281 int ret, i; 429 int ret, i;
282 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
283 430
284 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) 431 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
285 return SPECTRE_V2_CMD_NONE; 432 return SPECTRE_V2_CMD_NONE;
286 else {
287 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
288 if (ret < 0)
289 return SPECTRE_V2_CMD_AUTO;
290 433
291 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 434 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
292 if (!match_option(arg, ret, mitigation_options[i].option)) 435 if (ret < 0)
293 continue; 436 return SPECTRE_V2_CMD_AUTO;
294 cmd = mitigation_options[i].cmd;
295 break;
296 }
297 437
298 if (i >= ARRAY_SIZE(mitigation_options)) { 438 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
299 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 439 if (!match_option(arg, ret, mitigation_options[i].option))
300 return SPECTRE_V2_CMD_AUTO; 440 continue;
301 } 441 cmd = mitigation_options[i].cmd;
442 break;
443 }
444
445 if (i >= ARRAY_SIZE(mitigation_options)) {
446 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
447 return SPECTRE_V2_CMD_AUTO;
302 } 448 }
303 449
304 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 450 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -316,54 +462,11 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
316 return SPECTRE_V2_CMD_AUTO; 462 return SPECTRE_V2_CMD_AUTO;
317 } 463 }
318 464
319 if (mitigation_options[i].secure) 465 spec_v2_print_cond(mitigation_options[i].option,
320 spec2_print_if_secure(mitigation_options[i].option); 466 mitigation_options[i].secure);
321 else
322 spec2_print_if_insecure(mitigation_options[i].option);
323
324 return cmd; 467 return cmd;
325} 468}
326 469
327static bool stibp_needed(void)
328{
329 if (spectre_v2_enabled == SPECTRE_V2_NONE)
330 return false;
331
332 if (!boot_cpu_has(X86_FEATURE_STIBP))
333 return false;
334
335 return true;
336}
337
338static void update_stibp_msr(void *info)
339{
340 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
341}
342
343void arch_smt_update(void)
344{
345 u64 mask;
346
347 if (!stibp_needed())
348 return;
349
350 mutex_lock(&spec_ctrl_mutex);
351 mask = x86_spec_ctrl_base;
352 if (cpu_smt_control == CPU_SMT_ENABLED)
353 mask |= SPEC_CTRL_STIBP;
354 else
355 mask &= ~SPEC_CTRL_STIBP;
356
357 if (mask != x86_spec_ctrl_base) {
358 pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
359 cpu_smt_control == CPU_SMT_ENABLED ?
360 "Enabling" : "Disabling");
361 x86_spec_ctrl_base = mask;
362 on_each_cpu(update_stibp_msr, NULL, 1);
363 }
364 mutex_unlock(&spec_ctrl_mutex);
365}
366
367static void __init spectre_v2_select_mitigation(void) 470static void __init spectre_v2_select_mitigation(void)
368{ 471{
369 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 472 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -417,14 +520,12 @@ retpoline_auto:
417 pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); 520 pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
418 goto retpoline_generic; 521 goto retpoline_generic;
419 } 522 }
420 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : 523 mode = SPECTRE_V2_RETPOLINE_AMD;
421 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
422 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); 524 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
423 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 525 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
424 } else { 526 } else {
425 retpoline_generic: 527 retpoline_generic:
426 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : 528 mode = SPECTRE_V2_RETPOLINE_GENERIC;
427 SPECTRE_V2_RETPOLINE_MINIMAL;
428 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 529 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
429 } 530 }
430 531
@@ -443,12 +544,6 @@ specv2_set_mode:
443 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 544 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
444 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); 545 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
445 546
446 /* Initialize Indirect Branch Prediction Barrier if supported */
447 if (boot_cpu_has(X86_FEATURE_IBPB)) {
448 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
449 pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
450 }
451
452 /* 547 /*
453 * Retpoline means the kernel is safe because it has no indirect 548 * Retpoline means the kernel is safe because it has no indirect
454 * branches. Enhanced IBRS protects firmware too, so, enable restricted 549 * branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -465,10 +560,67 @@ specv2_set_mode:
465 pr_info("Enabling Restricted Speculation for firmware calls\n"); 560 pr_info("Enabling Restricted Speculation for firmware calls\n");
466 } 561 }
467 562
563 /* Set up IBPB and STIBP depending on the general spectre V2 command */
564 spectre_v2_user_select_mitigation(cmd);
565
468 /* Enable STIBP if appropriate */ 566 /* Enable STIBP if appropriate */
469 arch_smt_update(); 567 arch_smt_update();
470} 568}
471 569
570static void update_stibp_msr(void * __unused)
571{
572 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
573}
574
575/* Update x86_spec_ctrl_base in case SMT state changed. */
576static void update_stibp_strict(void)
577{
578 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
579
580 if (sched_smt_active())
581 mask |= SPEC_CTRL_STIBP;
582
583 if (mask == x86_spec_ctrl_base)
584 return;
585
586 pr_info("Update user space SMT mitigation: STIBP %s\n",
587 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
588 x86_spec_ctrl_base = mask;
589 on_each_cpu(update_stibp_msr, NULL, 1);
590}
591
592/* Update the static key controlling the evaluation of TIF_SPEC_IB */
593static void update_indir_branch_cond(void)
594{
595 if (sched_smt_active())
596 static_branch_enable(&switch_to_cond_stibp);
597 else
598 static_branch_disable(&switch_to_cond_stibp);
599}
600
601void arch_smt_update(void)
602{
603 /* Enhanced IBRS implies STIBP. No update required. */
604 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
605 return;
606
607 mutex_lock(&spec_ctrl_mutex);
608
609 switch (spectre_v2_user) {
610 case SPECTRE_V2_USER_NONE:
611 break;
612 case SPECTRE_V2_USER_STRICT:
613 update_stibp_strict();
614 break;
615 case SPECTRE_V2_USER_PRCTL:
616 case SPECTRE_V2_USER_SECCOMP:
617 update_indir_branch_cond();
618 break;
619 }
620
621 mutex_unlock(&spec_ctrl_mutex);
622}
623
472#undef pr_fmt 624#undef pr_fmt
473#define pr_fmt(fmt) "Speculative Store Bypass: " fmt 625#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
474 626
@@ -483,7 +635,7 @@ enum ssb_mitigation_cmd {
483 SPEC_STORE_BYPASS_CMD_SECCOMP, 635 SPEC_STORE_BYPASS_CMD_SECCOMP,
484}; 636};
485 637
486static const char *ssb_strings[] = { 638static const char * const ssb_strings[] = {
487 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 639 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
488 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 640 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
489 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 641 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
@@ -493,7 +645,7 @@ static const char *ssb_strings[] = {
493static const struct { 645static const struct {
494 const char *option; 646 const char *option;
495 enum ssb_mitigation_cmd cmd; 647 enum ssb_mitigation_cmd cmd;
496} ssb_mitigation_options[] = { 648} ssb_mitigation_options[] __initdata = {
497 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 649 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
498 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 650 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
499 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 651 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
@@ -604,10 +756,25 @@ static void ssb_select_mitigation(void)
604#undef pr_fmt 756#undef pr_fmt
605#define pr_fmt(fmt) "Speculation prctl: " fmt 757#define pr_fmt(fmt) "Speculation prctl: " fmt
606 758
607static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 759static void task_update_spec_tif(struct task_struct *tsk)
608{ 760{
609 bool update; 761 /* Force the update of the real TIF bits */
762 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
610 763
764 /*
765 * Immediately update the speculation control MSRs for the current
766 * task, but for a non-current task delay setting the CPU
767 * mitigation until it is scheduled next.
768 *
769 * This can only happen for SECCOMP mitigation. For PRCTL it's
770 * always the current task.
771 */
772 if (tsk == current)
773 speculation_ctrl_update_current();
774}
775
776static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
777{
611 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 778 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
612 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 779 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
613 return -ENXIO; 780 return -ENXIO;
@@ -618,28 +785,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
618 if (task_spec_ssb_force_disable(task)) 785 if (task_spec_ssb_force_disable(task))
619 return -EPERM; 786 return -EPERM;
620 task_clear_spec_ssb_disable(task); 787 task_clear_spec_ssb_disable(task);
621 update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); 788 task_update_spec_tif(task);
622 break; 789 break;
623 case PR_SPEC_DISABLE: 790 case PR_SPEC_DISABLE:
624 task_set_spec_ssb_disable(task); 791 task_set_spec_ssb_disable(task);
625 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); 792 task_update_spec_tif(task);
626 break; 793 break;
627 case PR_SPEC_FORCE_DISABLE: 794 case PR_SPEC_FORCE_DISABLE:
628 task_set_spec_ssb_disable(task); 795 task_set_spec_ssb_disable(task);
629 task_set_spec_ssb_force_disable(task); 796 task_set_spec_ssb_force_disable(task);
630 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); 797 task_update_spec_tif(task);
631 break; 798 break;
632 default: 799 default:
633 return -ERANGE; 800 return -ERANGE;
634 } 801 }
802 return 0;
803}
635 804
636 /* 805static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
637 * If being set on non-current task, delay setting the CPU 806{
638 * mitigation until it is next scheduled. 807 switch (ctrl) {
639 */ 808 case PR_SPEC_ENABLE:
640 if (task == current && update) 809 if (spectre_v2_user == SPECTRE_V2_USER_NONE)
641 speculative_store_bypass_update_current(); 810 return 0;
642 811 /*
812 * Indirect branch speculation is always disabled in strict
813 * mode.
814 */
815 if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
816 return -EPERM;
817 task_clear_spec_ib_disable(task);
818 task_update_spec_tif(task);
819 break;
820 case PR_SPEC_DISABLE:
821 case PR_SPEC_FORCE_DISABLE:
822 /*
823 * Indirect branch speculation is always allowed when
824 * mitigation is force disabled.
825 */
826 if (spectre_v2_user == SPECTRE_V2_USER_NONE)
827 return -EPERM;
828 if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
829 return 0;
830 task_set_spec_ib_disable(task);
831 if (ctrl == PR_SPEC_FORCE_DISABLE)
832 task_set_spec_ib_force_disable(task);
833 task_update_spec_tif(task);
834 break;
835 default:
836 return -ERANGE;
837 }
643 return 0; 838 return 0;
644} 839}
645 840
@@ -649,6 +844,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
649 switch (which) { 844 switch (which) {
650 case PR_SPEC_STORE_BYPASS: 845 case PR_SPEC_STORE_BYPASS:
651 return ssb_prctl_set(task, ctrl); 846 return ssb_prctl_set(task, ctrl);
847 case PR_SPEC_INDIRECT_BRANCH:
848 return ib_prctl_set(task, ctrl);
652 default: 849 default:
653 return -ENODEV; 850 return -ENODEV;
654 } 851 }
@@ -659,6 +856,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
659{ 856{
660 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 857 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
661 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 858 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
859 if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
860 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
662} 861}
663#endif 862#endif
664 863
@@ -681,11 +880,35 @@ static int ssb_prctl_get(struct task_struct *task)
681 } 880 }
682} 881}
683 882
883static int ib_prctl_get(struct task_struct *task)
884{
885 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
886 return PR_SPEC_NOT_AFFECTED;
887
888 switch (spectre_v2_user) {
889 case SPECTRE_V2_USER_NONE:
890 return PR_SPEC_ENABLE;
891 case SPECTRE_V2_USER_PRCTL:
892 case SPECTRE_V2_USER_SECCOMP:
893 if (task_spec_ib_force_disable(task))
894 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
895 if (task_spec_ib_disable(task))
896 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
897 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
898 case SPECTRE_V2_USER_STRICT:
899 return PR_SPEC_DISABLE;
900 default:
901 return PR_SPEC_NOT_AFFECTED;
902 }
903}
904
684int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 905int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
685{ 906{
686 switch (which) { 907 switch (which) {
687 case PR_SPEC_STORE_BYPASS: 908 case PR_SPEC_STORE_BYPASS:
688 return ssb_prctl_get(task); 909 return ssb_prctl_get(task);
910 case PR_SPEC_INDIRECT_BRANCH:
911 return ib_prctl_get(task);
689 default: 912 default:
690 return -ENODEV; 913 return -ENODEV;
691 } 914 }
@@ -823,7 +1046,7 @@ early_param("l1tf", l1tf_cmdline);
823#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 1046#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
824 1047
825#if IS_ENABLED(CONFIG_KVM_INTEL) 1048#if IS_ENABLED(CONFIG_KVM_INTEL)
826static const char *l1tf_vmx_states[] = { 1049static const char * const l1tf_vmx_states[] = {
827 [VMENTER_L1D_FLUSH_AUTO] = "auto", 1050 [VMENTER_L1D_FLUSH_AUTO] = "auto",
828 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 1051 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
829 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 1052 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
@@ -839,13 +1062,14 @@ static ssize_t l1tf_show_state(char *buf)
839 1062
840 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 1063 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
841 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 1064 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
842 cpu_smt_control == CPU_SMT_ENABLED)) 1065 sched_smt_active())) {
843 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 1066 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
844 l1tf_vmx_states[l1tf_vmx_mitigation]); 1067 l1tf_vmx_states[l1tf_vmx_mitigation]);
1068 }
845 1069
846 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 1070 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
847 l1tf_vmx_states[l1tf_vmx_mitigation], 1071 l1tf_vmx_states[l1tf_vmx_mitigation],
848 cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled"); 1072 sched_smt_active() ? "vulnerable" : "disabled");
849} 1073}
850#else 1074#else
851static ssize_t l1tf_show_state(char *buf) 1075static ssize_t l1tf_show_state(char *buf)
@@ -854,11 +1078,39 @@ static ssize_t l1tf_show_state(char *buf)
854} 1078}
855#endif 1079#endif
856 1080
1081static char *stibp_state(void)
1082{
1083 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
1084 return "";
1085
1086 switch (spectre_v2_user) {
1087 case SPECTRE_V2_USER_NONE:
1088 return ", STIBP: disabled";
1089 case SPECTRE_V2_USER_STRICT:
1090 return ", STIBP: forced";
1091 case SPECTRE_V2_USER_PRCTL:
1092 case SPECTRE_V2_USER_SECCOMP:
1093 if (static_key_enabled(&switch_to_cond_stibp))
1094 return ", STIBP: conditional";
1095 }
1096 return "";
1097}
1098
1099static char *ibpb_state(void)
1100{
1101 if (boot_cpu_has(X86_FEATURE_IBPB)) {
1102 if (static_key_enabled(&switch_mm_always_ibpb))
1103 return ", IBPB: always-on";
1104 if (static_key_enabled(&switch_mm_cond_ibpb))
1105 return ", IBPB: conditional";
1106 return ", IBPB: disabled";
1107 }
1108 return "";
1109}
1110
857static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 1111static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
858 char *buf, unsigned int bug) 1112 char *buf, unsigned int bug)
859{ 1113{
860 int ret;
861
862 if (!boot_cpu_has_bug(bug)) 1114 if (!boot_cpu_has_bug(bug))
863 return sprintf(buf, "Not affected\n"); 1115 return sprintf(buf, "Not affected\n");
864 1116
@@ -876,13 +1128,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
876 return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 1128 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
877 1129
878 case X86_BUG_SPECTRE_V2: 1130 case X86_BUG_SPECTRE_V2:
879 ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], 1131 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
880 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", 1132 ibpb_state(),
881 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 1133 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
882 (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "", 1134 stibp_state(),
883 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", 1135 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
884 spectre_v2_module_string()); 1136 spectre_v2_module_string());
885 return ret;
886 1137
887 case X86_BUG_SPEC_STORE_BYPASS: 1138 case X86_BUG_SPEC_STORE_BYPASS:
888 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); 1139 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8c66d2fc8f81..36d2696c9563 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs)
485 * be somewhat complicated (e.g. segment offset would require an instruction 485 * be somewhat complicated (e.g. segment offset would require an instruction
486 * parser). So only support physical addresses up to page granuality for now. 486 * parser). So only support physical addresses up to page granuality for now.
487 */ 487 */
488static int mce_usable_address(struct mce *m) 488int mce_usable_address(struct mce *m)
489{ 489{
490 if (!(m->status & MCI_STATUS_ADDRV)) 490 if (!(m->status & MCI_STATUS_ADDRV))
491 return 0; 491 return 0;
@@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m)
505 505
506 return 1; 506 return 1;
507} 507}
508EXPORT_SYMBOL_GPL(mce_usable_address);
508 509
509bool mce_is_memory_error(struct mce *m) 510bool mce_is_memory_error(struct mce *m)
510{ 511{
@@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m)
534} 535}
535EXPORT_SYMBOL_GPL(mce_is_memory_error); 536EXPORT_SYMBOL_GPL(mce_is_memory_error);
536 537
537static bool mce_is_correctable(struct mce *m) 538bool mce_is_correctable(struct mce *m)
538{ 539{
539 if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) 540 if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
540 return false; 541 return false;
@@ -547,6 +548,7 @@ static bool mce_is_correctable(struct mce *m)
547 548
548 return true; 549 return true;
549} 550}
551EXPORT_SYMBOL_GPL(mce_is_correctable);
550 552
551static bool cec_add_mce(struct mce *m) 553static bool cec_add_mce(struct mce *m)
552{ 554{
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index dd33c357548f..e12454e21b8a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -56,7 +56,7 @@
56/* Threshold LVT offset is at MSR0xC0000410[15:12] */ 56/* Threshold LVT offset is at MSR0xC0000410[15:12] */
57#define SMCA_THR_LVT_OFF 0xF000 57#define SMCA_THR_LVT_OFF 0xF000
58 58
59static bool thresholding_en; 59static bool thresholding_irq_en;
60 60
61static const char * const th_names[] = { 61static const char * const th_names[] = {
62 "load_store", 62 "load_store",
@@ -534,9 +534,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
534 534
535set_offset: 535set_offset:
536 offset = setup_APIC_mce_threshold(offset, new); 536 offset = setup_APIC_mce_threshold(offset, new);
537 537 if (offset == new)
538 if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt)) 538 thresholding_irq_en = true;
539 mce_threshold_vector = amd_threshold_interrupt;
540 539
541done: 540done:
542 mce_threshold_block_init(&b, offset); 541 mce_threshold_block_init(&b, offset);
@@ -1357,9 +1356,6 @@ int mce_threshold_remove_device(unsigned int cpu)
1357{ 1356{
1358 unsigned int bank; 1357 unsigned int bank;
1359 1358
1360 if (!thresholding_en)
1361 return 0;
1362
1363 for (bank = 0; bank < mca_cfg.banks; ++bank) { 1359 for (bank = 0; bank < mca_cfg.banks; ++bank) {
1364 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 1360 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
1365 continue; 1361 continue;
@@ -1377,9 +1373,6 @@ int mce_threshold_create_device(unsigned int cpu)
1377 struct threshold_bank **bp; 1373 struct threshold_bank **bp;
1378 int err = 0; 1374 int err = 0;
1379 1375
1380 if (!thresholding_en)
1381 return 0;
1382
1383 bp = per_cpu(threshold_banks, cpu); 1376 bp = per_cpu(threshold_banks, cpu);
1384 if (bp) 1377 if (bp)
1385 return 0; 1378 return 0;
@@ -1408,9 +1401,6 @@ static __init int threshold_init_device(void)
1408{ 1401{
1409 unsigned lcpu = 0; 1402 unsigned lcpu = 0;
1410 1403
1411 if (mce_threshold_vector == amd_threshold_interrupt)
1412 thresholding_en = true;
1413
1414 /* to hit CPUs online before the notifier is up */ 1404 /* to hit CPUs online before the notifier is up */
1415 for_each_online_cpu(lcpu) { 1405 for_each_online_cpu(lcpu) {
1416 int err = mce_threshold_create_device(lcpu); 1406 int err = mce_threshold_create_device(lcpu);
@@ -1419,6 +1409,9 @@ static __init int threshold_init_device(void)
1419 return err; 1409 return err;
1420 } 1410 }
1421 1411
1412 if (thresholding_irq_en)
1413 mce_threshold_vector = amd_threshold_interrupt;
1414
1422 return 0; 1415 return 0;
1423} 1416}
1424/* 1417/*
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 1c72f3819eb1..e81a2db42df7 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -20,6 +20,7 @@
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/kexec.h> 22#include <linux/kexec.h>
23#include <linux/i8253.h>
23#include <asm/processor.h> 24#include <asm/processor.h>
24#include <asm/hypervisor.h> 25#include <asm/hypervisor.h>
25#include <asm/hyperv-tlfs.h> 26#include <asm/hyperv-tlfs.h>
@@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void)
295 if (efi_enabled(EFI_BOOT)) 296 if (efi_enabled(EFI_BOOT))
296 x86_platform.get_nmi_reason = hv_get_nmi_reason; 297 x86_platform.get_nmi_reason = hv_get_nmi_reason;
297 298
299 /*
300 * Hyper-V VMs have a PIT emulation quirk such that zeroing the
301 * counter register during PIT shutdown restarts the PIT. So it
302 * continues to interrupt @18.2 HZ. Setting i8253_clear_counter
303 * to false tells pit_shutdown() not to zero the counter so that
304 * the PIT really is shutdown. Generation 2 VMs don't have a PIT,
305 * and setting this value has no effect.
306 */
307 i8253_clear_counter_on_shutdown = false;
308
298#if IS_ENABLED(CONFIG_HYPERV) 309#if IS_ENABLED(CONFIG_HYPERV)
299 /* 310 /*
300 * Setup the hook to get control post apic initialization. 311 * Setup the hook to get control post apic initialization.
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d9ab49bed8af..0eda91f8eeac 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s)
77} 77}
78early_param("no-vmw-sched-clock", setup_vmw_sched_clock); 78early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
79 79
80static unsigned long long vmware_sched_clock(void) 80static unsigned long long notrace vmware_sched_clock(void)
81{ 81{
82 unsigned long long ns; 82 unsigned long long ns;
83 83
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 61a949d84dfa..d99a8ee9e185 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
344 sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); 344 sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
345 } 345 }
346 346
347 local_bh_disable();
347 fpu->initialized = 1; 348 fpu->initialized = 1;
348 preempt_disable();
349 fpu__restore(fpu); 349 fpu__restore(fpu);
350 preempt_enable(); 350 local_bh_enable();
351 351
352 return err; 352 return err;
353 } else { 353 } else {
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 01ebcb6f263e..7ee8067cbf45 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -994,7 +994,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
994{ 994{
995 unsigned long old; 995 unsigned long old;
996 int faulted; 996 int faulted;
997 struct ftrace_graph_ent trace;
998 unsigned long return_hooker = (unsigned long) 997 unsigned long return_hooker = (unsigned long)
999 &return_to_handler; 998 &return_to_handler;
1000 999
@@ -1046,19 +1045,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
1046 return; 1045 return;
1047 } 1046 }
1048 1047
1049 trace.func = self_addr; 1048 if (function_graph_enter(old, self_addr, frame_pointer, parent))
1050 trace.depth = current->curr_ret_stack + 1;
1051
1052 /* Only trace if the calling function expects to */
1053 if (!ftrace_graph_entry(&trace)) {
1054 *parent = old; 1049 *parent = old;
1055 return;
1056 }
1057
1058 if (ftrace_push_return_trace(old, self_addr, &trace.depth,
1059 frame_pointer, parent) == -EBUSY) {
1060 *parent = old;
1061 return;
1062 }
1063} 1050}
1064#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 1051#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 76fa3b836598..ec6fefbfd3c0 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -37,7 +37,6 @@ asmlinkage __visible void __init i386_start_kernel(void)
37 cr4_init_shadow(); 37 cr4_init_shadow();
38 38
39 sanitize_boot_params(&boot_params); 39 sanitize_boot_params(&boot_params);
40 x86_verify_bootdata_version();
41 40
42 x86_early_init_platform_quirks(); 41 x86_early_init_platform_quirks();
43 42
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 7663a8eb602b..16b1cbd3a61e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -457,8 +457,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
457 if (!boot_params.hdr.version) 457 if (!boot_params.hdr.version)
458 copy_bootdata(__va(real_mode_data)); 458 copy_bootdata(__va(real_mode_data));
459 459
460 x86_verify_bootdata_version();
461
462 x86_early_init_platform_quirks(); 460 x86_early_init_platform_quirks();
463 461
464 switch (boot_params.hdr.hardware_subarch) { 462 switch (boot_params.hdr.hardware_subarch) {
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ab18e0884dc6..6135ae8ce036 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm)
199/* 199/*
200 * If PTI is enabled, this maps the LDT into the kernelmode and 200 * If PTI is enabled, this maps the LDT into the kernelmode and
201 * usermode tables for the given mm. 201 * usermode tables for the given mm.
202 *
203 * There is no corresponding unmap function. Even if the LDT is freed, we
204 * leave the PTEs around until the slot is reused or the mm is destroyed.
205 * This is harmless: the LDT is always in ordinary memory, and no one will
206 * access the freed slot.
207 *
208 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
209 * it useful, and the flush would slow down modify_ldt().
210 */ 202 */
211static int 203static int
212map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) 204map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
@@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
214 unsigned long va; 206 unsigned long va;
215 bool is_vmalloc; 207 bool is_vmalloc;
216 spinlock_t *ptl; 208 spinlock_t *ptl;
217 pgd_t *pgd; 209 int i, nr_pages;
218 int i;
219 210
220 if (!static_cpu_has(X86_FEATURE_PTI)) 211 if (!static_cpu_has(X86_FEATURE_PTI))
221 return 0; 212 return 0;
@@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
229 /* Check if the current mappings are sane */ 220 /* Check if the current mappings are sane */
230 sanity_check_ldt_mapping(mm); 221 sanity_check_ldt_mapping(mm);
231 222
232 /*
233 * Did we already have the top level entry allocated? We can't
234 * use pgd_none() for this because it doens't do anything on
235 * 4-level page table kernels.
236 */
237 pgd = pgd_offset(mm, LDT_BASE_ADDR);
238
239 is_vmalloc = is_vmalloc_addr(ldt->entries); 223 is_vmalloc = is_vmalloc_addr(ldt->entries);
240 224
241 for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { 225 nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
226
227 for (i = 0; i < nr_pages; i++) {
242 unsigned long offset = i << PAGE_SHIFT; 228 unsigned long offset = i << PAGE_SHIFT;
243 const void *src = (char *)ldt->entries + offset; 229 const void *src = (char *)ldt->entries + offset;
244 unsigned long pfn; 230 unsigned long pfn;
@@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
272 /* Propagate LDT mapping to the user page-table */ 258 /* Propagate LDT mapping to the user page-table */
273 map_ldt_struct_to_user(mm); 259 map_ldt_struct_to_user(mm);
274 260
275 va = (unsigned long)ldt_slot_va(slot);
276 flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
277
278 ldt->slot = slot; 261 ldt->slot = slot;
279 return 0; 262 return 0;
280} 263}
281 264
265static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
266{
267 unsigned long va;
268 int i, nr_pages;
269
270 if (!ldt)
271 return;
272
273 /* LDT map/unmap is only required for PTI */
274 if (!static_cpu_has(X86_FEATURE_PTI))
275 return;
276
277 nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
278
279 for (i = 0; i < nr_pages; i++) {
280 unsigned long offset = i << PAGE_SHIFT;
281 spinlock_t *ptl;
282 pte_t *ptep;
283
284 va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
285 ptep = get_locked_pte(mm, va, &ptl);
286 pte_clear(mm, va, ptep);
287 pte_unmap_unlock(ptep, ptl);
288 }
289
290 va = (unsigned long)ldt_slot_va(ldt->slot);
291 flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
292}
293
282#else /* !CONFIG_PAGE_TABLE_ISOLATION */ 294#else /* !CONFIG_PAGE_TABLE_ISOLATION */
283 295
284static int 296static int
@@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
286{ 298{
287 return 0; 299 return 0;
288} 300}
301
302static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
303{
304}
289#endif /* CONFIG_PAGE_TABLE_ISOLATION */ 305#endif /* CONFIG_PAGE_TABLE_ISOLATION */
290 306
291static void free_ldt_pgtables(struct mm_struct *mm) 307static void free_ldt_pgtables(struct mm_struct *mm)
@@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
524 } 540 }
525 541
526 install_ldt(mm, new_ldt); 542 install_ldt(mm, new_ldt);
543 unmap_ldt_struct(mm, old_ldt);
527 free_ldt_struct(old_ldt); 544 free_ldt_struct(old_ldt);
528 error = 0; 545 error = 0;
529 546
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c93fcfdf1673..7d31192296a8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -40,6 +40,8 @@
40#include <asm/prctl.h> 40#include <asm/prctl.h>
41#include <asm/spec-ctrl.h> 41#include <asm/spec-ctrl.h>
42 42
43#include "process.h"
44
43/* 45/*
44 * per-CPU TSS segments. Threads are completely 'soft' on Linux, 46 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
45 * no more per-task TSS's. The TSS size is kept cacheline-aligned 47 * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -252,11 +254,12 @@ void arch_setup_new_exec(void)
252 enable_cpuid(); 254 enable_cpuid();
253} 255}
254 256
255static inline void switch_to_bitmap(struct tss_struct *tss, 257static inline void switch_to_bitmap(struct thread_struct *prev,
256 struct thread_struct *prev,
257 struct thread_struct *next, 258 struct thread_struct *next,
258 unsigned long tifp, unsigned long tifn) 259 unsigned long tifp, unsigned long tifn)
259{ 260{
261 struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
262
260 if (tifn & _TIF_IO_BITMAP) { 263 if (tifn & _TIF_IO_BITMAP) {
261 /* 264 /*
262 * Copy the relevant range of the IO bitmap. 265 * Copy the relevant range of the IO bitmap.
@@ -395,32 +398,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
395 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); 398 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
396} 399}
397 400
398static __always_inline void intel_set_ssb_state(unsigned long tifn) 401/*
402 * Update the MSRs managing speculation control, during context switch.
403 *
404 * tifp: Previous task's thread flags
405 * tifn: Next task's thread flags
406 */
407static __always_inline void __speculation_ctrl_update(unsigned long tifp,
408 unsigned long tifn)
399{ 409{
400 u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); 410 unsigned long tif_diff = tifp ^ tifn;
411 u64 msr = x86_spec_ctrl_base;
412 bool updmsr = false;
413
414 /*
415 * If TIF_SSBD is different, select the proper mitigation
416 * method. Note that if SSBD mitigation is disabled or permanentely
417 * enabled this branch can't be taken because nothing can set
418 * TIF_SSBD.
419 */
420 if (tif_diff & _TIF_SSBD) {
421 if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
422 amd_set_ssb_virt_state(tifn);
423 } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
424 amd_set_core_ssb_state(tifn);
425 } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
426 static_cpu_has(X86_FEATURE_AMD_SSBD)) {
427 msr |= ssbd_tif_to_spec_ctrl(tifn);
428 updmsr = true;
429 }
430 }
431
432 /*
433 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
434 * otherwise avoid the MSR write.
435 */
436 if (IS_ENABLED(CONFIG_SMP) &&
437 static_branch_unlikely(&switch_to_cond_stibp)) {
438 updmsr |= !!(tif_diff & _TIF_SPEC_IB);
439 msr |= stibp_tif_to_spec_ctrl(tifn);
440 }
401 441
402 wrmsrl(MSR_IA32_SPEC_CTRL, msr); 442 if (updmsr)
443 wrmsrl(MSR_IA32_SPEC_CTRL, msr);
403} 444}
404 445
405static __always_inline void __speculative_store_bypass_update(unsigned long tifn) 446static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
406{ 447{
407 if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) 448 if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
408 amd_set_ssb_virt_state(tifn); 449 if (task_spec_ssb_disable(tsk))
409 else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 450 set_tsk_thread_flag(tsk, TIF_SSBD);
410 amd_set_core_ssb_state(tifn); 451 else
411 else 452 clear_tsk_thread_flag(tsk, TIF_SSBD);
412 intel_set_ssb_state(tifn); 453
454 if (task_spec_ib_disable(tsk))
455 set_tsk_thread_flag(tsk, TIF_SPEC_IB);
456 else
457 clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
458 }
459 /* Return the updated threadinfo flags*/
460 return task_thread_info(tsk)->flags;
413} 461}
414 462
415void speculative_store_bypass_update(unsigned long tif) 463void speculation_ctrl_update(unsigned long tif)
416{ 464{
465 /* Forced update. Make sure all relevant TIF flags are different */
417 preempt_disable(); 466 preempt_disable();
418 __speculative_store_bypass_update(tif); 467 __speculation_ctrl_update(~tif, tif);
419 preempt_enable(); 468 preempt_enable();
420} 469}
421 470
422void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 471/* Called from seccomp/prctl update */
423 struct tss_struct *tss) 472void speculation_ctrl_update_current(void)
473{
474 preempt_disable();
475 speculation_ctrl_update(speculation_ctrl_update_tif(current));
476 preempt_enable();
477}
478
479void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
424{ 480{
425 struct thread_struct *prev, *next; 481 struct thread_struct *prev, *next;
426 unsigned long tifp, tifn; 482 unsigned long tifp, tifn;
@@ -430,7 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
430 486
431 tifn = READ_ONCE(task_thread_info(next_p)->flags); 487 tifn = READ_ONCE(task_thread_info(next_p)->flags);
432 tifp = READ_ONCE(task_thread_info(prev_p)->flags); 488 tifp = READ_ONCE(task_thread_info(prev_p)->flags);
433 switch_to_bitmap(tss, prev, next, tifp, tifn); 489 switch_to_bitmap(prev, next, tifp, tifn);
434 490
435 propagate_user_return_notify(prev_p, next_p); 491 propagate_user_return_notify(prev_p, next_p);
436 492
@@ -451,8 +507,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
451 if ((tifp ^ tifn) & _TIF_NOCPUID) 507 if ((tifp ^ tifn) & _TIF_NOCPUID)
452 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); 508 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
453 509
454 if ((tifp ^ tifn) & _TIF_SSBD) 510 if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
455 __speculative_store_bypass_update(tifn); 511 __speculation_ctrl_update(tifp, tifn);
512 } else {
513 speculation_ctrl_update_tif(prev_p);
514 tifn = speculation_ctrl_update_tif(next_p);
515
516 /* Enforce MSR update to ensure consistent state */
517 __speculation_ctrl_update(~tifn, tifn);
518 }
456} 519}
457 520
458/* 521/*
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
new file mode 100644
index 000000000000..898e97cf6629
--- /dev/null
+++ b/arch/x86/kernel/process.h
@@ -0,0 +1,39 @@
1// SPDX-License-Identifier: GPL-2.0
2//
3// Code shared between 32 and 64 bit
4
5#include <asm/spec-ctrl.h>
6
7void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
8
9/*
10 * This needs to be inline to optimize for the common case where no extra
11 * work needs to be done.
12 */
13static inline void switch_to_extra(struct task_struct *prev,
14 struct task_struct *next)
15{
16 unsigned long next_tif = task_thread_info(next)->flags;
17 unsigned long prev_tif = task_thread_info(prev)->flags;
18
19 if (IS_ENABLED(CONFIG_SMP)) {
20 /*
21 * Avoid __switch_to_xtra() invocation when conditional
22 * STIPB is disabled and the only different bit is
23 * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
24 * in the TIF_WORK_CTXSW masks.
25 */
26 if (!static_branch_likely(&switch_to_cond_stibp)) {
27 prev_tif &= ~_TIF_SPEC_IB;
28 next_tif &= ~_TIF_SPEC_IB;
29 }
30 }
31
32 /*
33 * __switch_to_xtra() handles debug registers, i/o bitmaps,
34 * speculation mitigations etc.
35 */
36 if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
37 prev_tif & _TIF_WORK_CTXSW_PREV))
38 __switch_to_xtra(prev, next);
39}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 5046a3c9dec2..d3e593eb189f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -59,6 +59,8 @@
59#include <asm/intel_rdt_sched.h> 59#include <asm/intel_rdt_sched.h>
60#include <asm/proto.h> 60#include <asm/proto.h>
61 61
62#include "process.h"
63
62void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) 64void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
63{ 65{
64 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 66 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -232,7 +234,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
232 struct fpu *prev_fpu = &prev->fpu; 234 struct fpu *prev_fpu = &prev->fpu;
233 struct fpu *next_fpu = &next->fpu; 235 struct fpu *next_fpu = &next->fpu;
234 int cpu = smp_processor_id(); 236 int cpu = smp_processor_id();
235 struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
236 237
237 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ 238 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
238 239
@@ -264,12 +265,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
264 if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) 265 if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
265 set_iopl_mask(next->iopl); 266 set_iopl_mask(next->iopl);
266 267
267 /* 268 switch_to_extra(prev_p, next_p);
268 * Now maybe handle debug registers and/or IO bitmaps
269 */
270 if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
271 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
272 __switch_to_xtra(prev_p, next_p, tss);
273 269
274 /* 270 /*
275 * Leave lazy mode, flushing any hypercalls made here. 271 * Leave lazy mode, flushing any hypercalls made here.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0e0b4288a4b2..bbfbf017065c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -60,6 +60,8 @@
60#include <asm/unistd_32_ia32.h> 60#include <asm/unistd_32_ia32.h>
61#endif 61#endif
62 62
63#include "process.h"
64
63/* Prints also some state that isn't saved in the pt_regs */ 65/* Prints also some state that isn't saved in the pt_regs */
64void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) 66void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
65{ 67{
@@ -553,7 +555,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
553 struct fpu *prev_fpu = &prev->fpu; 555 struct fpu *prev_fpu = &prev->fpu;
554 struct fpu *next_fpu = &next->fpu; 556 struct fpu *next_fpu = &next->fpu;
555 int cpu = smp_processor_id(); 557 int cpu = smp_processor_id();
556 struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
557 558
558 WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && 559 WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
559 this_cpu_read(irq_count) != -1); 560 this_cpu_read(irq_count) != -1);
@@ -617,12 +618,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
617 /* Reload sp0. */ 618 /* Reload sp0. */
618 update_task_stack(next_p); 619 update_task_stack(next_p);
619 620
620 /* 621 switch_to_extra(prev_p, next_p);
621 * Now maybe reload the debug registers and handle I/O bitmaps
622 */
623 if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
624 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
625 __switch_to_xtra(prev_p, next_p, tss);
626 622
627#ifdef CONFIG_XEN_PV 623#ifdef CONFIG_XEN_PV
628 /* 624 /*
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b74e7bfed6ab..d494b9bfe618 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1280,23 +1280,6 @@ void __init setup_arch(char **cmdline_p)
1280 unwind_init(); 1280 unwind_init();
1281} 1281}
1282 1282
1283/*
1284 * From boot protocol 2.14 onwards we expect the bootloader to set the
1285 * version to "0x8000 | <used version>". In case we find a version >= 2.14
1286 * without the 0x8000 we assume the boot loader supports 2.13 only and
1287 * reset the version accordingly. The 0x8000 flag is removed in any case.
1288 */
1289void __init x86_verify_bootdata_version(void)
1290{
1291 if (boot_params.hdr.version & VERSION_WRITTEN)
1292 boot_params.hdr.version &= ~VERSION_WRITTEN;
1293 else if (boot_params.hdr.version >= 0x020e)
1294 boot_params.hdr.version = 0x020d;
1295
1296 if (boot_params.hdr.version < 0x020e)
1297 boot_params.hdr.acpi_rsdp_addr = 0;
1298}
1299
1300#ifdef CONFIG_X86_32 1283#ifdef CONFIG_X86_32
1301 1284
1302static struct resource video_ram_resource = { 1285static struct resource video_ram_resource = {
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 1eae5af491c2..891a75dbc131 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,65 +26,8 @@
26 26
27#define TOPOLOGY_REGISTER_OFFSET 0x10 27#define TOPOLOGY_REGISTER_OFFSET 0x10
28 28
29#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL 29#ifdef CONFIG_PCI
30/* 30static void __init set_vsmp_ctl(void)
31 * Interrupt control on vSMPowered systems:
32 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
33 * and vice versa.
34 */
35
36asmlinkage __visible unsigned long vsmp_save_fl(void)
37{
38 unsigned long flags = native_save_fl();
39
40 if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
41 flags &= ~X86_EFLAGS_IF;
42 return flags;
43}
44PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
45
46__visible void vsmp_restore_fl(unsigned long flags)
47{
48 if (flags & X86_EFLAGS_IF)
49 flags &= ~X86_EFLAGS_AC;
50 else
51 flags |= X86_EFLAGS_AC;
52 native_restore_fl(flags);
53}
54PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
55
56asmlinkage __visible void vsmp_irq_disable(void)
57{
58 unsigned long flags = native_save_fl();
59
60 native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
61}
62PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
63
64asmlinkage __visible void vsmp_irq_enable(void)
65{
66 unsigned long flags = native_save_fl();
67
68 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
69}
70PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
71
72static unsigned __init vsmp_patch(u8 type, void *ibuf,
73 unsigned long addr, unsigned len)
74{
75 switch (type) {
76 case PARAVIRT_PATCH(irq.irq_enable):
77 case PARAVIRT_PATCH(irq.irq_disable):
78 case PARAVIRT_PATCH(irq.save_fl):
79 case PARAVIRT_PATCH(irq.restore_fl):
80 return paravirt_patch_default(type, ibuf, addr, len);
81 default:
82 return native_patch(type, ibuf, addr, len);
83 }
84
85}
86
87static void __init set_vsmp_pv_ops(void)
88{ 31{
89 void __iomem *address; 32 void __iomem *address;
90 unsigned int cap, ctl, cfg; 33 unsigned int cap, ctl, cfg;
@@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void)
109 } 52 }
110#endif 53#endif
111 54
112 if (cap & ctl & (1 << 4)) {
113 /* Setup irq ops and turn on vSMP IRQ fastpath handling */
114 pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
115 pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
116 pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
117 pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
118 pv_ops.init.patch = vsmp_patch;
119 ctl &= ~(1 << 4);
120 }
121 writel(ctl, address + 4); 55 writel(ctl, address + 4);
122 ctl = readl(address + 4); 56 ctl = readl(address + 4);
123 pr_info("vSMP CTL: control set to:0x%08x\n", ctl); 57 pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
124 58
125 early_iounmap(address, 8); 59 early_iounmap(address, 8);
126} 60}
127#else
128static void __init set_vsmp_pv_ops(void)
129{
130}
131#endif
132
133#ifdef CONFIG_PCI
134static int is_vsmp = -1; 61static int is_vsmp = -1;
135 62
136static void __init detect_vsmp_box(void) 63static void __init detect_vsmp_box(void)
@@ -164,11 +91,14 @@ static int is_vsmp_box(void)
164{ 91{
165 return 0; 92 return 0;
166} 93}
94static void __init set_vsmp_ctl(void)
95{
96}
167#endif 97#endif
168 98
169static void __init vsmp_cap_cpus(void) 99static void __init vsmp_cap_cpus(void)
170{ 100{
171#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) 101#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
172 void __iomem *address; 102 void __iomem *address;
173 unsigned int cfg, topology, node_shift, maxcpus; 103 unsigned int cfg, topology, node_shift, maxcpus;
174 104
@@ -221,6 +151,6 @@ void __init vsmp_init(void)
221 151
222 vsmp_cap_cpus(); 152 vsmp_cap_cpus();
223 153
224 set_vsmp_pv_ops(); 154 set_vsmp_ctl();
225 return; 155 return;
226} 156}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 89db20f8cb70..c4533d05c214 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -55,7 +55,7 @@
55#define PRIo64 "o" 55#define PRIo64 "o"
56 56
57/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ 57/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
58#define apic_debug(fmt, arg...) 58#define apic_debug(fmt, arg...) do {} while (0)
59 59
60/* 14 is the version for Xeon and Pentium 8.4.8*/ 60/* 14 is the version for Xeon and Pentium 8.4.8*/
61#define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16)) 61#define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
@@ -576,6 +576,11 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
576 rcu_read_lock(); 576 rcu_read_lock();
577 map = rcu_dereference(kvm->arch.apic_map); 577 map = rcu_dereference(kvm->arch.apic_map);
578 578
579 if (unlikely(!map)) {
580 count = -EOPNOTSUPP;
581 goto out;
582 }
583
579 if (min > map->max_apic_id) 584 if (min > map->max_apic_id)
580 goto out; 585 goto out;
581 /* Bits above cluster_size are masked in the caller. */ 586 /* Bits above cluster_size are masked in the caller. */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cf5f572f2305..7c03c0f35444 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5074,9 +5074,9 @@ static bool need_remote_flush(u64 old, u64 new)
5074} 5074}
5075 5075
5076static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, 5076static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5077 const u8 *new, int *bytes) 5077 int *bytes)
5078{ 5078{
5079 u64 gentry; 5079 u64 gentry = 0;
5080 int r; 5080 int r;
5081 5081
5082 /* 5082 /*
@@ -5088,22 +5088,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5088 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ 5088 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5089 *gpa &= ~(gpa_t)7; 5089 *gpa &= ~(gpa_t)7;
5090 *bytes = 8; 5090 *bytes = 8;
5091 r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
5092 if (r)
5093 gentry = 0;
5094 new = (const u8 *)&gentry;
5095 } 5091 }
5096 5092
5097 switch (*bytes) { 5093 if (*bytes == 4 || *bytes == 8) {
5098 case 4: 5094 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5099 gentry = *(const u32 *)new; 5095 if (r)
5100 break; 5096 gentry = 0;
5101 case 8:
5102 gentry = *(const u64 *)new;
5103 break;
5104 default:
5105 gentry = 0;
5106 break;
5107 } 5097 }
5108 5098
5109 return gentry; 5099 return gentry;
@@ -5207,8 +5197,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5207 5197
5208 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); 5198 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5209 5199
5210 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
5211
5212 /* 5200 /*
5213 * No need to care whether allocation memory is successful 5201 * No need to care whether allocation memory is successful
5214 * or not since pte prefetch is skiped if it does not have 5202 * or not since pte prefetch is skiped if it does not have
@@ -5217,6 +5205,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5217 mmu_topup_memory_caches(vcpu); 5205 mmu_topup_memory_caches(vcpu);
5218 5206
5219 spin_lock(&vcpu->kvm->mmu_lock); 5207 spin_lock(&vcpu->kvm->mmu_lock);
5208
5209 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5210
5220 ++vcpu->kvm->stat.mmu_pte_write; 5211 ++vcpu->kvm->stat.mmu_pte_write;
5221 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); 5212 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5222 5213
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e21ccc46792..cc6467b35a85 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1446,7 +1446,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
1446 return vcpu->arch.tsc_offset; 1446 return vcpu->arch.tsc_offset;
1447} 1447}
1448 1448
1449static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1449static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1450{ 1450{
1451 struct vcpu_svm *svm = to_svm(vcpu); 1451 struct vcpu_svm *svm = to_svm(vcpu);
1452 u64 g_tsc_offset = 0; 1452 u64 g_tsc_offset = 0;
@@ -1464,6 +1464,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1464 svm->vmcb->control.tsc_offset = offset + g_tsc_offset; 1464 svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1465 1465
1466 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 1466 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1467 return svm->vmcb->control.tsc_offset;
1467} 1468}
1468 1469
1469static void avic_init_vmcb(struct vcpu_svm *svm) 1470static void avic_init_vmcb(struct vcpu_svm *svm)
@@ -1664,20 +1665,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1664static int avic_init_access_page(struct kvm_vcpu *vcpu) 1665static int avic_init_access_page(struct kvm_vcpu *vcpu)
1665{ 1666{
1666 struct kvm *kvm = vcpu->kvm; 1667 struct kvm *kvm = vcpu->kvm;
1667 int ret; 1668 int ret = 0;
1668 1669
1670 mutex_lock(&kvm->slots_lock);
1669 if (kvm->arch.apic_access_page_done) 1671 if (kvm->arch.apic_access_page_done)
1670 return 0; 1672 goto out;
1671 1673
1672 ret = x86_set_memory_region(kvm, 1674 ret = __x86_set_memory_region(kvm,
1673 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 1675 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1674 APIC_DEFAULT_PHYS_BASE, 1676 APIC_DEFAULT_PHYS_BASE,
1675 PAGE_SIZE); 1677 PAGE_SIZE);
1676 if (ret) 1678 if (ret)
1677 return ret; 1679 goto out;
1678 1680
1679 kvm->arch.apic_access_page_done = true; 1681 kvm->arch.apic_access_page_done = true;
1680 return 0; 1682out:
1683 mutex_unlock(&kvm->slots_lock);
1684 return ret;
1681} 1685}
1682 1686
1683static int avic_init_backing_page(struct kvm_vcpu *vcpu) 1687static int avic_init_backing_page(struct kvm_vcpu *vcpu)
@@ -2189,21 +2193,31 @@ out:
2189 return ERR_PTR(err); 2193 return ERR_PTR(err);
2190} 2194}
2191 2195
2196static void svm_clear_current_vmcb(struct vmcb *vmcb)
2197{
2198 int i;
2199
2200 for_each_online_cpu(i)
2201 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
2202}
2203
2192static void svm_free_vcpu(struct kvm_vcpu *vcpu) 2204static void svm_free_vcpu(struct kvm_vcpu *vcpu)
2193{ 2205{
2194 struct vcpu_svm *svm = to_svm(vcpu); 2206 struct vcpu_svm *svm = to_svm(vcpu);
2195 2207
2208 /*
2209 * The vmcb page can be recycled, causing a false negative in
2210 * svm_vcpu_load(). So, ensure that no logical CPU has this
2211 * vmcb page recorded as its current vmcb.
2212 */
2213 svm_clear_current_vmcb(svm->vmcb);
2214
2196 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); 2215 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
2197 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); 2216 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
2198 __free_page(virt_to_page(svm->nested.hsave)); 2217 __free_page(virt_to_page(svm->nested.hsave));
2199 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); 2218 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
2200 kvm_vcpu_uninit(vcpu); 2219 kvm_vcpu_uninit(vcpu);
2201 kmem_cache_free(kvm_vcpu_cache, svm); 2220 kmem_cache_free(kvm_vcpu_cache, svm);
2202 /*
2203 * The vmcb page can be recycled, causing a false negative in
2204 * svm_vcpu_load(). So do a full IBPB now.
2205 */
2206 indirect_branch_prediction_barrier();
2207} 2221}
2208 2222
2209static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 2223static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -7149,7 +7163,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7149 .has_wbinvd_exit = svm_has_wbinvd_exit, 7163 .has_wbinvd_exit = svm_has_wbinvd_exit,
7150 7164
7151 .read_l1_tsc_offset = svm_read_l1_tsc_offset, 7165 .read_l1_tsc_offset = svm_read_l1_tsc_offset,
7152 .write_tsc_offset = svm_write_tsc_offset, 7166 .write_l1_tsc_offset = svm_write_l1_tsc_offset,
7153 7167
7154 .set_tdp_cr3 = set_tdp_cr3, 7168 .set_tdp_cr3 = set_tdp_cr3,
7155 7169
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4555077d69ce..02edd9960e9d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -174,6 +174,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
174 * refer SDM volume 3b section 21.6.13 & 22.1.3. 174 * refer SDM volume 3b section 21.6.13 & 22.1.3.
175 */ 175 */
176static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; 176static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
177module_param(ple_gap, uint, 0444);
177 178
178static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; 179static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
179module_param(ple_window, uint, 0444); 180module_param(ple_window, uint, 0444);
@@ -984,6 +985,7 @@ struct vcpu_vmx {
984 struct shared_msr_entry *guest_msrs; 985 struct shared_msr_entry *guest_msrs;
985 int nmsrs; 986 int nmsrs;
986 int save_nmsrs; 987 int save_nmsrs;
988 bool guest_msrs_dirty;
987 unsigned long host_idt_base; 989 unsigned long host_idt_base;
988#ifdef CONFIG_X86_64 990#ifdef CONFIG_X86_64
989 u64 msr_host_kernel_gs_base; 991 u64 msr_host_kernel_gs_base;
@@ -1306,7 +1308,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
1306static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, 1308static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
1307 u16 error_code); 1309 u16 error_code);
1308static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); 1310static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
1309static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, 1311static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
1310 u32 msr, int type); 1312 u32 msr, int type);
1311 1313
1312static DEFINE_PER_CPU(struct vmcs *, vmxarea); 1314static DEFINE_PER_CPU(struct vmcs *, vmxarea);
@@ -1610,12 +1612,6 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
1610{ 1612{
1611 struct vcpu_vmx *vmx = to_vmx(vcpu); 1613 struct vcpu_vmx *vmx = to_vmx(vcpu);
1612 1614
1613 /* We don't support disabling the feature for simplicity. */
1614 if (vmx->nested.enlightened_vmcs_enabled)
1615 return 0;
1616
1617 vmx->nested.enlightened_vmcs_enabled = true;
1618
1619 /* 1615 /*
1620 * vmcs_version represents the range of supported Enlightened VMCS 1616 * vmcs_version represents the range of supported Enlightened VMCS
1621 * versions: lower 8 bits is the minimal version, higher 8 bits is the 1617 * versions: lower 8 bits is the minimal version, higher 8 bits is the
@@ -1625,6 +1621,12 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
1625 if (vmcs_version) 1621 if (vmcs_version)
1626 *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; 1622 *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
1627 1623
1624 /* We don't support disabling the feature for simplicity. */
1625 if (vmx->nested.enlightened_vmcs_enabled)
1626 return 0;
1627
1628 vmx->nested.enlightened_vmcs_enabled = true;
1629
1628 vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; 1630 vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
1629 vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; 1631 vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
1630 vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; 1632 vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
@@ -2897,6 +2899,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2897 2899
2898 vmx->req_immediate_exit = false; 2900 vmx->req_immediate_exit = false;
2899 2901
2902 /*
2903 * Note that guest MSRs to be saved/restored can also be changed
2904 * when guest state is loaded. This happens when guest transitions
2905 * to/from long-mode by setting MSR_EFER.LMA.
2906 */
2907 if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
2908 vmx->guest_msrs_dirty = false;
2909 for (i = 0; i < vmx->save_nmsrs; ++i)
2910 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2911 vmx->guest_msrs[i].data,
2912 vmx->guest_msrs[i].mask);
2913
2914 }
2915
2900 if (vmx->loaded_cpu_state) 2916 if (vmx->loaded_cpu_state)
2901 return; 2917 return;
2902 2918
@@ -2957,11 +2973,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2957 vmcs_writel(HOST_GS_BASE, gs_base); 2973 vmcs_writel(HOST_GS_BASE, gs_base);
2958 host_state->gs_base = gs_base; 2974 host_state->gs_base = gs_base;
2959 } 2975 }
2960
2961 for (i = 0; i < vmx->save_nmsrs; ++i)
2962 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2963 vmx->guest_msrs[i].data,
2964 vmx->guest_msrs[i].mask);
2965} 2976}
2966 2977
2967static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) 2978static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
@@ -3436,6 +3447,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
3436 move_msr_up(vmx, index, save_nmsrs++); 3447 move_msr_up(vmx, index, save_nmsrs++);
3437 3448
3438 vmx->save_nmsrs = save_nmsrs; 3449 vmx->save_nmsrs = save_nmsrs;
3450 vmx->guest_msrs_dirty = true;
3439 3451
3440 if (cpu_has_vmx_msr_bitmap()) 3452 if (cpu_has_vmx_msr_bitmap())
3441 vmx_update_msr_bitmap(&vmx->vcpu); 3453 vmx_update_msr_bitmap(&vmx->vcpu);
@@ -3452,11 +3464,9 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
3452 return vcpu->arch.tsc_offset; 3464 return vcpu->arch.tsc_offset;
3453} 3465}
3454 3466
3455/* 3467static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3456 * writes 'offset' into guest's timestamp counter offset register
3457 */
3458static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3459{ 3468{
3469 u64 active_offset = offset;
3460 if (is_guest_mode(vcpu)) { 3470 if (is_guest_mode(vcpu)) {
3461 /* 3471 /*
3462 * We're here if L1 chose not to trap WRMSR to TSC. According 3472 * We're here if L1 chose not to trap WRMSR to TSC. According
@@ -3464,17 +3474,16 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3464 * set for L2 remains unchanged, and still needs to be added 3474 * set for L2 remains unchanged, and still needs to be added
3465 * to the newly set TSC to get L2's TSC. 3475 * to the newly set TSC to get L2's TSC.
3466 */ 3476 */
3467 struct vmcs12 *vmcs12; 3477 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3468 /* recalculate vmcs02.TSC_OFFSET: */ 3478 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
3469 vmcs12 = get_vmcs12(vcpu); 3479 active_offset += vmcs12->tsc_offset;
3470 vmcs_write64(TSC_OFFSET, offset +
3471 (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
3472 vmcs12->tsc_offset : 0));
3473 } else { 3480 } else {
3474 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 3481 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
3475 vmcs_read64(TSC_OFFSET), offset); 3482 vmcs_read64(TSC_OFFSET), offset);
3476 vmcs_write64(TSC_OFFSET, offset);
3477 } 3483 }
3484
3485 vmcs_write64(TSC_OFFSET, active_offset);
3486 return active_offset;
3478} 3487}
3479 3488
3480/* 3489/*
@@ -5944,7 +5953,7 @@ static void free_vpid(int vpid)
5944 spin_unlock(&vmx_vpid_lock); 5953 spin_unlock(&vmx_vpid_lock);
5945} 5954}
5946 5955
5947static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, 5956static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
5948 u32 msr, int type) 5957 u32 msr, int type)
5949{ 5958{
5950 int f = sizeof(unsigned long); 5959 int f = sizeof(unsigned long);
@@ -5982,7 +5991,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
5982 } 5991 }
5983} 5992}
5984 5993
5985static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, 5994static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
5986 u32 msr, int type) 5995 u32 msr, int type)
5987{ 5996{
5988 int f = sizeof(unsigned long); 5997 int f = sizeof(unsigned long);
@@ -6020,7 +6029,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
6020 } 6029 }
6021} 6030}
6022 6031
6023static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap, 6032static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
6024 u32 msr, int type, bool value) 6033 u32 msr, int type, bool value)
6025{ 6034{
6026 if (value) 6035 if (value)
@@ -8664,8 +8673,6 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
8664 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 8673 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
8665 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 8674 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
8666 8675
8667 vmcs12->hdr.revision_id = evmcs->revision_id;
8668
8669 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ 8676 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
8670 vmcs12->tpr_threshold = evmcs->tpr_threshold; 8677 vmcs12->tpr_threshold = evmcs->tpr_threshold;
8671 vmcs12->guest_rip = evmcs->guest_rip; 8678 vmcs12->guest_rip = evmcs->guest_rip;
@@ -9369,7 +9376,30 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
9369 9376
9370 vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page); 9377 vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
9371 9378
9372 if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) { 9379 /*
9380 * Currently, KVM only supports eVMCS version 1
9381 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this
9382 * value to first u32 field of eVMCS which should specify eVMCS
9383 * VersionNumber.
9384 *
9385 * Guest should be aware of supported eVMCS versions by host by
9386 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
9387 * expected to set this CPUID leaf according to the value
9388 * returned in vmcs_version from nested_enable_evmcs().
9389 *
9390 * However, it turns out that Microsoft Hyper-V fails to comply
9391 * to their own invented interface: When Hyper-V use eVMCS, it
9392 * just sets first u32 field of eVMCS to revision_id specified
9393 * in MSR_IA32_VMX_BASIC. Instead of used eVMCS version number
9394 * which is one of the supported versions specified in
9395 * CPUID.0x4000000A.EAX[0:15].
9396 *
9397 * To overcome Hyper-V bug, we accept here either a supported
9398 * eVMCS version or VMCS12 revision_id as valid values for first
9399 * u32 field of eVMCS.
9400 */
9401 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
9402 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
9373 nested_release_evmcs(vcpu); 9403 nested_release_evmcs(vcpu);
9374 return 0; 9404 return 0;
9375 } 9405 }
@@ -9390,9 +9420,11 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
9390 * present in struct hv_enlightened_vmcs, ...). Make sure there 9420 * present in struct hv_enlightened_vmcs, ...). Make sure there
9391 * are no leftovers. 9421 * are no leftovers.
9392 */ 9422 */
9393 if (from_launch) 9423 if (from_launch) {
9394 memset(vmx->nested.cached_vmcs12, 0, 9424 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9395 sizeof(*vmx->nested.cached_vmcs12)); 9425 memset(vmcs12, 0, sizeof(*vmcs12));
9426 vmcs12->hdr.revision_id = VMCS12_REVISION;
9427 }
9396 9428
9397 } 9429 }
9398 return 1; 9430 return 1;
@@ -15062,7 +15094,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
15062 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 15094 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
15063 15095
15064 .read_l1_tsc_offset = vmx_read_l1_tsc_offset, 15096 .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
15065 .write_tsc_offset = vmx_write_tsc_offset, 15097 .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
15066 15098
15067 .set_tdp_cr3 = vmx_set_cr3, 15099 .set_tdp_cr3 = vmx_set_cr3,
15068 15100
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5cd5647120f2..d02937760c3b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1665,8 +1665,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1665 1665
1666static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1666static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1667{ 1667{
1668 kvm_x86_ops->write_tsc_offset(vcpu, offset); 1668 vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
1669 vcpu->arch.tsc_offset = offset;
1670} 1669}
1671 1670
1672static inline bool kvm_check_tsc_unstable(void) 1671static inline bool kvm_check_tsc_unstable(void)
@@ -1794,7 +1793,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
1794static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 1793static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
1795 s64 adjustment) 1794 s64 adjustment)
1796{ 1795{
1797 kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment); 1796 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
1797 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
1798} 1798}
1799 1799
1800static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 1800static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -6918,6 +6918,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
6918 clock_pairing.nsec = ts.tv_nsec; 6918 clock_pairing.nsec = ts.tv_nsec;
6919 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); 6919 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
6920 clock_pairing.flags = 0; 6920 clock_pairing.flags = 0;
6921 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
6921 6922
6922 ret = 0; 6923 ret = 0;
6923 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, 6924 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
@@ -7455,7 +7456,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
7455 else { 7456 else {
7456 if (vcpu->arch.apicv_active) 7457 if (vcpu->arch.apicv_active)
7457 kvm_x86_ops->sync_pir_to_irr(vcpu); 7458 kvm_x86_ops->sync_pir_to_irr(vcpu);
7458 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 7459 if (ioapic_in_kernel(vcpu->kvm))
7460 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
7459 } 7461 }
7460 7462
7461 if (is_guest_mode(vcpu)) 7463 if (is_guest_mode(vcpu))
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index bddd6b3cee1d..03b6b4c2238d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -7,7 +7,6 @@
7#include <linux/export.h> 7#include <linux/export.h>
8#include <linux/cpu.h> 8#include <linux/cpu.h>
9#include <linux/debugfs.h> 9#include <linux/debugfs.h>
10#include <linux/ptrace.h>
11 10
12#include <asm/tlbflush.h> 11#include <asm/tlbflush.h>
13#include <asm/mmu_context.h> 12#include <asm/mmu_context.h>
@@ -31,6 +30,12 @@
31 */ 30 */
32 31
33/* 32/*
33 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
34 * stored in cpu_tlb_state.last_user_mm_ibpb.
35 */
36#define LAST_USER_MM_IBPB 0x1UL
37
38/*
34 * We get here when we do something requiring a TLB invalidation 39 * We get here when we do something requiring a TLB invalidation
35 * but could not go invalidate all of the contexts. We do the 40 * but could not go invalidate all of the contexts. We do the
36 * necessary invalidation by clearing out the 'ctx_id' which 41 * necessary invalidation by clearing out the 'ctx_id' which
@@ -181,17 +186,87 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
181 } 186 }
182} 187}
183 188
184static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id) 189static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
190{
191 unsigned long next_tif = task_thread_info(next)->flags;
192 unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
193
194 return (unsigned long)next->mm | ibpb;
195}
196
197static void cond_ibpb(struct task_struct *next)
185{ 198{
199 if (!next || !next->mm)
200 return;
201
186 /* 202 /*
187 * Check if the current (previous) task has access to the memory 203 * Both, the conditional and the always IBPB mode use the mm
188 * of the @tsk (next) task. If access is denied, make sure to 204 * pointer to avoid the IBPB when switching between tasks of the
189 * issue a IBPB to stop user->user Spectre-v2 attacks. 205 * same process. Using the mm pointer instead of mm->context.ctx_id
190 * 206 * opens a hypothetical hole vs. mm_struct reuse, which is more or
191 * Note: __ptrace_may_access() returns 0 or -ERRNO. 207 * less impossible to control by an attacker. Aside of that it
208 * would only affect the first schedule so the theoretically
209 * exposed data is not really interesting.
192 */ 210 */
193 return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id && 211 if (static_branch_likely(&switch_mm_cond_ibpb)) {
194 ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB)); 212 unsigned long prev_mm, next_mm;
213
214 /*
215 * This is a bit more complex than the always mode because
216 * it has to handle two cases:
217 *
218 * 1) Switch from a user space task (potential attacker)
219 * which has TIF_SPEC_IB set to a user space task
220 * (potential victim) which has TIF_SPEC_IB not set.
221 *
222 * 2) Switch from a user space task (potential attacker)
223 * which has TIF_SPEC_IB not set to a user space task
224 * (potential victim) which has TIF_SPEC_IB set.
225 *
226 * This could be done by unconditionally issuing IBPB when
227 * a task which has TIF_SPEC_IB set is either scheduled in
228 * or out. Though that results in two flushes when:
229 *
230 * - the same user space task is scheduled out and later
231 * scheduled in again and only a kernel thread ran in
232 * between.
233 *
234 * - a user space task belonging to the same process is
235 * scheduled in after a kernel thread ran in between
236 *
237 * - a user space task belonging to the same process is
238 * scheduled in immediately.
239 *
240 * Optimize this with reasonably small overhead for the
241 * above cases. Mangle the TIF_SPEC_IB bit into the mm
242 * pointer of the incoming task which is stored in
243 * cpu_tlbstate.last_user_mm_ibpb for comparison.
244 */
245 next_mm = mm_mangle_tif_spec_ib(next);
246 prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
247
248 /*
249 * Issue IBPB only if the mm's are different and one or
250 * both have the IBPB bit set.
251 */
252 if (next_mm != prev_mm &&
253 (next_mm | prev_mm) & LAST_USER_MM_IBPB)
254 indirect_branch_prediction_barrier();
255
256 this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
257 }
258
259 if (static_branch_unlikely(&switch_mm_always_ibpb)) {
260 /*
261 * Only flush when switching to a user space task with a
262 * different context than the user space task which ran
263 * last on this CPU.
264 */
265 if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
266 indirect_branch_prediction_barrier();
267 this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
268 }
269 }
195} 270}
196 271
197void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, 272void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
@@ -292,22 +367,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
292 new_asid = prev_asid; 367 new_asid = prev_asid;
293 need_flush = true; 368 need_flush = true;
294 } else { 369 } else {
295 u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
296
297 /* 370 /*
298 * Avoid user/user BTB poisoning by flushing the branch 371 * Avoid user/user BTB poisoning by flushing the branch
299 * predictor when switching between processes. This stops 372 * predictor when switching between processes. This stops
300 * one process from doing Spectre-v2 attacks on another. 373 * one process from doing Spectre-v2 attacks on another.
301 *
302 * As an optimization, flush indirect branches only when
303 * switching into a processes that can't be ptrace by the
304 * current one (as in such case, attacker has much more
305 * convenient way how to tamper with the next process than
306 * branch buffer poisoning).
307 */ 374 */
308 if (static_cpu_has(X86_FEATURE_USE_IBPB) && 375 cond_ibpb(tsk);
309 ibpb_needed(tsk, last_ctx_id))
310 indirect_branch_prediction_barrier();
311 376
312 if (IS_ENABLED(CONFIG_VMAP_STACK)) { 377 if (IS_ENABLED(CONFIG_VMAP_STACK)) {
313 /* 378 /*
@@ -365,14 +430,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
365 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); 430 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
366 } 431 }
367 432
368 /*
369 * Record last user mm's context id, so we can avoid
370 * flushing branch buffer with IBPB if we switch back
371 * to the same user.
372 */
373 if (next != &init_mm)
374 this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
375
376 /* Make sure we write CR3 before loaded_mm. */ 433 /* Make sure we write CR3 before loaded_mm. */
377 barrier(); 434 barrier();
378 435
@@ -441,7 +498,7 @@ void initialize_tlbstate_and_flush(void)
441 write_cr3(build_cr3(mm->pgd, 0)); 498 write_cr3(build_cr3(mm->pgd, 0));
442 499
443 /* Reinitialize tlbstate. */ 500 /* Reinitialize tlbstate. */
444 this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id); 501 this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
445 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); 502 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
446 this_cpu_write(cpu_tlbstate.next_asid, 1); 503 this_cpu_write(cpu_tlbstate.next_asid, 1);
447 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); 504 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e996e8e744cb..750f46ad018a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -10,7 +10,6 @@
10#include <xen/xen.h> 10#include <xen/xen.h>
11#include <xen/features.h> 11#include <xen/features.h>
12#include <xen/page.h> 12#include <xen/page.h>
13#include <xen/interface/memory.h>
14 13
15#include <asm/xen/hypercall.h> 14#include <asm/xen/hypercall.h>
16#include <asm/xen/hypervisor.h> 15#include <asm/xen/hypervisor.h>
@@ -346,80 +345,3 @@ void xen_arch_unregister_cpu(int num)
346} 345}
347EXPORT_SYMBOL(xen_arch_unregister_cpu); 346EXPORT_SYMBOL(xen_arch_unregister_cpu);
348#endif 347#endif
349
350#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
351void __init arch_xen_balloon_init(struct resource *hostmem_resource)
352{
353 struct xen_memory_map memmap;
354 int rc;
355 unsigned int i, last_guest_ram;
356 phys_addr_t max_addr = PFN_PHYS(max_pfn);
357 struct e820_table *xen_e820_table;
358 const struct e820_entry *entry;
359 struct resource *res;
360
361 if (!xen_initial_domain())
362 return;
363
364 xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
365 if (!xen_e820_table)
366 return;
367
368 memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
369 set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
370 rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
371 if (rc) {
372 pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
373 goto out;
374 }
375
376 last_guest_ram = 0;
377 for (i = 0; i < memmap.nr_entries; i++) {
378 if (xen_e820_table->entries[i].addr >= max_addr)
379 break;
380 if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
381 last_guest_ram = i;
382 }
383
384 entry = &xen_e820_table->entries[last_guest_ram];
385 if (max_addr >= entry->addr + entry->size)
386 goto out; /* No unallocated host RAM. */
387
388 hostmem_resource->start = max_addr;
389 hostmem_resource->end = entry->addr + entry->size;
390
391 /*
392 * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
393 * as unavailable. The rest of that region can be used for hotplug-based
394 * ballooning.
395 */
396 for (; i < memmap.nr_entries; i++) {
397 entry = &xen_e820_table->entries[i];
398
399 if (entry->type == E820_TYPE_RAM)
400 continue;
401
402 if (entry->addr >= hostmem_resource->end)
403 break;
404
405 res = kzalloc(sizeof(*res), GFP_KERNEL);
406 if (!res)
407 goto out;
408
409 res->name = "Unavailable host RAM";
410 res->start = entry->addr;
411 res->end = (entry->addr + entry->size < hostmem_resource->end) ?
412 entry->addr + entry->size : hostmem_resource->end;
413 rc = insert_resource(hostmem_resource, res);
414 if (rc) {
415 pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
416 __func__, res->start, res->end, rc);
417 kfree(res);
418 goto out;
419 }
420 }
421
422 out:
423 kfree(xen_e820_table);
424}
425#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 0d7b3ae4960b..a5d7ed125337 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1905,7 +1905,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1905 init_top_pgt[0] = __pgd(0); 1905 init_top_pgt[0] = __pgd(0);
1906 1906
1907 /* Pre-constructed entries are in pfn, so convert to mfn */ 1907 /* Pre-constructed entries are in pfn, so convert to mfn */
1908 /* L4[272] -> level3_ident_pgt */ 1908 /* L4[273] -> level3_ident_pgt */
1909 /* L4[511] -> level3_kernel_pgt */ 1909 /* L4[511] -> level3_kernel_pgt */
1910 convert_pfn_mfn(init_top_pgt); 1910 convert_pfn_mfn(init_top_pgt);
1911 1911
@@ -1925,8 +1925,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1925 addr[0] = (unsigned long)pgd; 1925 addr[0] = (unsigned long)pgd;
1926 addr[1] = (unsigned long)l3; 1926 addr[1] = (unsigned long)l3;
1927 addr[2] = (unsigned long)l2; 1927 addr[2] = (unsigned long)l2;
1928 /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: 1928 /* Graft it onto L4[273][0]. Note that we creating an aliasing problem:
1929 * Both L4[272][0] and L4[511][510] have entries that point to the same 1929 * Both L4[273][0] and L4[511][510] have entries that point to the same
1930 * L2 (PMD) tables. Meaning that if you modify it in __va space 1930 * L2 (PMD) tables. Meaning that if you modify it in __va space
1931 * it will be also modified in the __ka space! (But if you just 1931 * it will be also modified in the __ka space! (But if you just
1932 * modify the PMD table to point to other PTE's or none, then you 1932 * modify the PMD table to point to other PTE's or none, then you
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 2bce7958ce8b..0766a08bdf45 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -69,6 +69,11 @@ void xen_mc_flush(void)
69 69
70 trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx); 70 trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
71 71
72#if MC_DEBUG
73 memcpy(b->debug, b->entries,
74 b->mcidx * sizeof(struct multicall_entry));
75#endif
76
72 switch (b->mcidx) { 77 switch (b->mcidx) {
73 case 0: 78 case 0:
74 /* no-op */ 79 /* no-op */
@@ -87,32 +92,34 @@ void xen_mc_flush(void)
87 break; 92 break;
88 93
89 default: 94 default:
90#if MC_DEBUG
91 memcpy(b->debug, b->entries,
92 b->mcidx * sizeof(struct multicall_entry));
93#endif
94
95 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0) 95 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
96 BUG(); 96 BUG();
97 for (i = 0; i < b->mcidx; i++) 97 for (i = 0; i < b->mcidx; i++)
98 if (b->entries[i].result < 0) 98 if (b->entries[i].result < 0)
99 ret++; 99 ret++;
100 }
100 101
102 if (WARN_ON(ret)) {
103 pr_err("%d of %d multicall(s) failed: cpu %d\n",
104 ret, b->mcidx, smp_processor_id());
105 for (i = 0; i < b->mcidx; i++) {
106 if (b->entries[i].result < 0) {
101#if MC_DEBUG 107#if MC_DEBUG
102 if (ret) { 108 pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pF\n",
103 printk(KERN_ERR "%d multicall(s) failed: cpu %d\n", 109 i + 1,
104 ret, smp_processor_id());
105 dump_stack();
106 for (i = 0; i < b->mcidx; i++) {
107 printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
108 i+1, b->mcidx,
109 b->debug[i].op, 110 b->debug[i].op,
110 b->debug[i].args[0], 111 b->debug[i].args[0],
111 b->entries[i].result, 112 b->entries[i].result,
112 b->caller[i]); 113 b->caller[i]);
114#else
115 pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n",
116 i + 1,
117 b->entries[i].op,
118 b->entries[i].args[0],
119 b->entries[i].result);
120#endif
113 } 121 }
114 } 122 }
115#endif
116 } 123 }
117 124
118 b->mcidx = 0; 125 b->mcidx = 0;
@@ -126,8 +133,6 @@ void xen_mc_flush(void)
126 b->cbidx = 0; 133 b->cbidx = 0;
127 134
128 local_irq_restore(flags); 135 local_irq_restore(flags);
129
130 WARN_ON(ret);
131} 136}
132 137
133struct multicall_space __xen_mc_entry(size_t args) 138struct multicall_space __xen_mc_entry(size_t args)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b06731705529..055e37e43541 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
656 656
657 /* 657 /*
658 * The interface requires atomic updates on p2m elements. 658 * The interface requires atomic updates on p2m elements.
659 * xen_safe_write_ulong() is using __put_user which does an atomic 659 * xen_safe_write_ulong() is using an atomic store via asm().
660 * store via asm().
661 */ 660 */
662 if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn))) 661 if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
663 return true; 662 return true;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1163e33121fb..075ed47993bb 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
808 addr = xen_e820_table.entries[0].addr; 808 addr = xen_e820_table.entries[0].addr;
809 size = xen_e820_table.entries[0].size; 809 size = xen_e820_table.entries[0].size;
810 while (i < xen_e820_table.nr_entries) { 810 while (i < xen_e820_table.nr_entries) {
811 bool discard = false;
811 812
812 chunk_size = size; 813 chunk_size = size;
813 type = xen_e820_table.entries[i].type; 814 type = xen_e820_table.entries[i].type;
@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
823 xen_add_extra_mem(pfn_s, n_pfns); 824 xen_add_extra_mem(pfn_s, n_pfns);
824 xen_max_p2m_pfn = pfn_s + n_pfns; 825 xen_max_p2m_pfn = pfn_s + n_pfns;
825 } else 826 } else
826 type = E820_TYPE_UNUSABLE; 827 discard = true;
827 } 828 }
828 829
829 xen_align_and_add_e820_region(addr, chunk_size, type); 830 if (!discard)
831 xen_align_and_add_e820_region(addr, chunk_size, type);
830 832
831 addr += chunk_size; 833 addr += chunk_size;
832 size -= chunk_size; 834 size -= chunk_size;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 441c88262169..3776122c87cc 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -3,24 +3,21 @@
3 * Split spinlock implementation out into its own file, so it can be 3 * Split spinlock implementation out into its own file, so it can be
4 * compiled in a FTRACE-compatible way. 4 * compiled in a FTRACE-compatible way.
5 */ 5 */
6#include <linux/kernel_stat.h> 6#include <linux/kernel.h>
7#include <linux/spinlock.h> 7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/log2.h>
10#include <linux/gfp.h>
11#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/atomic.h>
12 10
13#include <asm/paravirt.h> 11#include <asm/paravirt.h>
14#include <asm/qspinlock.h> 12#include <asm/qspinlock.h>
15 13
16#include <xen/interface/xen.h>
17#include <xen/events.h> 14#include <xen/events.h>
18 15
19#include "xen-ops.h" 16#include "xen-ops.h"
20#include "debugfs.h"
21 17
22static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; 18static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
23static DEFINE_PER_CPU(char *, irq_name); 19static DEFINE_PER_CPU(char *, irq_name);
20static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
24static bool xen_pvspin = true; 21static bool xen_pvspin = true;
25 22
26static void xen_qlock_kick(int cpu) 23static void xen_qlock_kick(int cpu)
@@ -39,25 +36,25 @@ static void xen_qlock_kick(int cpu)
39 */ 36 */
40static void xen_qlock_wait(u8 *byte, u8 val) 37static void xen_qlock_wait(u8 *byte, u8 val)
41{ 38{
42 unsigned long flags;
43 int irq = __this_cpu_read(lock_kicker_irq); 39 int irq = __this_cpu_read(lock_kicker_irq);
40 atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
44 41
45 /* If kicker interrupts not initialized yet, just spin */ 42 /* If kicker interrupts not initialized yet, just spin */
46 if (irq == -1 || in_nmi()) 43 if (irq == -1 || in_nmi())
47 return; 44 return;
48 45
49 /* Guard against reentry. */ 46 /* Detect reentry. */
50 local_irq_save(flags); 47 atomic_inc(nest_cnt);
51 48
52 /* If irq pending already clear it. */ 49 /* If irq pending already and no nested call clear it. */
53 if (xen_test_irq_pending(irq)) { 50 if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
54 xen_clear_irq_pending(irq); 51 xen_clear_irq_pending(irq);
55 } else if (READ_ONCE(*byte) == val) { 52 } else if (READ_ONCE(*byte) == val) {
56 /* Block until irq becomes pending (or a spurious wakeup) */ 53 /* Block until irq becomes pending (or a spurious wakeup) */
57 xen_poll_irq(irq); 54 xen_poll_irq(irq);
58 } 55 }
59 56
60 local_irq_restore(flags); 57 atomic_dec(nest_cnt);
61} 58}
62 59
63static irqreturn_t dummy_handler(int irq, void *dev_id) 60static irqreturn_t dummy_handler(int irq, void *dev_id)
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index be9bfd9aa865..34a23016dd14 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -23,7 +23,11 @@
23# error Linux requires the Xtensa Windowed Registers Option. 23# error Linux requires the Xtensa Windowed Registers Option.
24#endif 24#endif
25 25
26#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH 26/* Xtensa ABI requires stack alignment to be at least 16 */
27
28#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
29
30#define ARCH_SLAB_MINALIGN STACK_ALIGN
27 31
28/* 32/*
29 * User space process size: 1 GB. 33 * User space process size: 1 GB.
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 67904f55f188..120dd746a147 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -94,14 +94,14 @@ int main(void)
94 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); 94 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
95 DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); 95 DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
96#if XTENSA_HAVE_COPROCESSORS 96#if XTENSA_HAVE_COPROCESSORS
97 DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp)); 97 DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
98 DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp)); 98 DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
99 DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp)); 99 DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
100 DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp)); 100 DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
101 DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp)); 101 DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
102 DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp)); 102 DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
103 DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp)); 103 DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
104 DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp)); 104 DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
105#endif 105#endif
106 DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); 106 DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
107 DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); 107 DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 2f76118ecf62..9053a5622d2c 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -88,9 +88,12 @@ _SetupMMU:
88 initialize_mmu 88 initialize_mmu
89#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY 89#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
90 rsr a2, excsave1 90 rsr a2, excsave1
91 movi a3, 0x08000000 91 movi a3, XCHAL_KSEG_PADDR
92 bltu a2, a3, 1f
93 sub a2, a2, a3
94 movi a3, XCHAL_KSEG_SIZE
92 bgeu a2, a3, 1f 95 bgeu a2, a3, 1f
93 movi a3, 0xd0000000 96 movi a3, XCHAL_KSEG_CACHED_VADDR
94 add a2, a2, a3 97 add a2, a2, a3
95 wsr a2, excsave1 98 wsr a2, excsave1
961: 991:
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 483dcfb6e681..4bb68133a72a 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -94,18 +94,21 @@ void coprocessor_release_all(struct thread_info *ti)
94 94
95void coprocessor_flush_all(struct thread_info *ti) 95void coprocessor_flush_all(struct thread_info *ti)
96{ 96{
97 unsigned long cpenable; 97 unsigned long cpenable, old_cpenable;
98 int i; 98 int i;
99 99
100 preempt_disable(); 100 preempt_disable();
101 101
102 RSR_CPENABLE(old_cpenable);
102 cpenable = ti->cpenable; 103 cpenable = ti->cpenable;
104 WSR_CPENABLE(cpenable);
103 105
104 for (i = 0; i < XCHAL_CP_MAX; i++) { 106 for (i = 0; i < XCHAL_CP_MAX; i++) {
105 if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) 107 if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
106 coprocessor_flush(ti, i); 108 coprocessor_flush(ti, i);
107 cpenable >>= 1; 109 cpenable >>= 1;
108 } 110 }
111 WSR_CPENABLE(old_cpenable);
109 112
110 preempt_enable(); 113 preempt_enable();
111} 114}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index c0845cb1cbb9..d9541be0605a 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_struct *child, void __user *uregs)
127} 127}
128 128
129 129
130#if XTENSA_HAVE_COPROCESSORS
131#define CP_OFFSETS(cp) \
132 { \
133 .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \
134 .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
135 .sz = sizeof(xtregs_ ## cp ## _t), \
136 }
137
138static const struct {
139 size_t elf_xtregs_offset;
140 size_t ti_offset;
141 size_t sz;
142} cp_offsets[] = {
143 CP_OFFSETS(cp0),
144 CP_OFFSETS(cp1),
145 CP_OFFSETS(cp2),
146 CP_OFFSETS(cp3),
147 CP_OFFSETS(cp4),
148 CP_OFFSETS(cp5),
149 CP_OFFSETS(cp6),
150 CP_OFFSETS(cp7),
151};
152#endif
153
130static int ptrace_getxregs(struct task_struct *child, void __user *uregs) 154static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
131{ 155{
132 struct pt_regs *regs = task_pt_regs(child); 156 struct pt_regs *regs = task_pt_regs(child);
133 struct thread_info *ti = task_thread_info(child); 157 struct thread_info *ti = task_thread_info(child);
134 elf_xtregs_t __user *xtregs = uregs; 158 elf_xtregs_t __user *xtregs = uregs;
135 int ret = 0; 159 int ret = 0;
160 int i __maybe_unused;
136 161
137 if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t))) 162 if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
138 return -EIO; 163 return -EIO;
@@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
140#if XTENSA_HAVE_COPROCESSORS 165#if XTENSA_HAVE_COPROCESSORS
141 /* Flush all coprocessor registers to memory. */ 166 /* Flush all coprocessor registers to memory. */
142 coprocessor_flush_all(ti); 167 coprocessor_flush_all(ti);
143 ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp, 168
144 sizeof(xtregs_coprocessor_t)); 169 for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
170 ret |= __copy_to_user((char __user *)xtregs +
171 cp_offsets[i].elf_xtregs_offset,
172 (const char *)ti +
173 cp_offsets[i].ti_offset,
174 cp_offsets[i].sz);
145#endif 175#endif
146 ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt, 176 ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
147 sizeof(xtregs->opt)); 177 sizeof(xtregs->opt));
@@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
157 struct pt_regs *regs = task_pt_regs(child); 187 struct pt_regs *regs = task_pt_regs(child);
158 elf_xtregs_t *xtregs = uregs; 188 elf_xtregs_t *xtregs = uregs;
159 int ret = 0; 189 int ret = 0;
190 int i __maybe_unused;
160 191
161 if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t))) 192 if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
162 return -EFAULT; 193 return -EFAULT;
@@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
166 coprocessor_flush_all(ti); 197 coprocessor_flush_all(ti);
167 coprocessor_release_all(ti); 198 coprocessor_release_all(ti);
168 199
169 ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, 200 for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
170 sizeof(xtregs_coprocessor_t)); 201 ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
202 (const char __user *)xtregs +
203 cp_offsets[i].elf_xtregs_offset,
204 cp_offsets[i].sz);
171#endif 205#endif
172 ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt, 206 ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
173 sizeof(xtregs->opt)); 207 sizeof(xtregs->opt));
diff --git a/block/bio.c b/block/bio.c
index d5368a445561..4f4d9884443b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -605,6 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
605 if (bio_flagged(bio_src, BIO_THROTTLED)) 605 if (bio_flagged(bio_src, BIO_THROTTLED))
606 bio_set_flag(bio, BIO_THROTTLED); 606 bio_set_flag(bio, BIO_THROTTLED);
607 bio->bi_opf = bio_src->bi_opf; 607 bio->bi_opf = bio_src->bi_opf;
608 bio->bi_ioprio = bio_src->bi_ioprio;
608 bio->bi_write_hint = bio_src->bi_write_hint; 609 bio->bi_write_hint = bio_src->bi_write_hint;
609 bio->bi_iter = bio_src->bi_iter; 610 bio->bi_iter = bio_src->bi_iter;
610 bio->bi_io_vec = bio_src->bi_io_vec; 611 bio->bi_io_vec = bio_src->bi_io_vec;
@@ -1260,6 +1261,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
1260 if (ret) 1261 if (ret)
1261 goto cleanup; 1262 goto cleanup;
1262 } else { 1263 } else {
1264 zero_fill_bio(bio);
1263 iov_iter_advance(iter, bio->bi_iter.bi_size); 1265 iov_iter_advance(iter, bio->bi_iter.bi_size);
1264 } 1266 }
1265 1267
diff --git a/block/blk-core.c b/block/blk-core.c
index ce12515f9b9b..deb56932f8c4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -798,9 +798,8 @@ void blk_cleanup_queue(struct request_queue *q)
798 * dispatch may still be in-progress since we dispatch requests 798 * dispatch may still be in-progress since we dispatch requests
799 * from more than one contexts. 799 * from more than one contexts.
800 * 800 *
801 * No need to quiesce queue if it isn't initialized yet since 801 * We rely on driver to deal with the race in case that queue
802 * blk_freeze_queue() should be enough for cases of passthrough 802 * initialization isn't done.
803 * request.
804 */ 803 */
805 if (q->mq_ops && blk_queue_init_done(q)) 804 if (q->mq_ops && blk_queue_init_done(q))
806 blk_mq_quiesce_queue(q); 805 blk_mq_quiesce_queue(q);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 76f867ea9a9b..5f2c429d4378 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -51,16 +51,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
51 if ((sector | nr_sects) & bs_mask) 51 if ((sector | nr_sects) & bs_mask)
52 return -EINVAL; 52 return -EINVAL;
53 53
54 while (nr_sects) { 54 if (!nr_sects)
55 unsigned int req_sects = nr_sects; 55 return -EINVAL;
56 sector_t end_sect;
57 56
58 if (!req_sects) 57 while (nr_sects) {
59 goto fail; 58 sector_t req_sects = min_t(sector_t, nr_sects,
60 if (req_sects > UINT_MAX >> 9) 59 bio_allowed_max_sectors(q));
61 req_sects = UINT_MAX >> 9;
62 60
63 end_sect = sector + req_sects; 61 WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
64 62
65 bio = blk_next_bio(bio, 0, gfp_mask); 63 bio = blk_next_bio(bio, 0, gfp_mask);
66 bio->bi_iter.bi_sector = sector; 64 bio->bi_iter.bi_sector = sector;
@@ -68,8 +66,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
68 bio_set_op_attrs(bio, op, 0); 66 bio_set_op_attrs(bio, op, 0);
69 67
70 bio->bi_iter.bi_size = req_sects << 9; 68 bio->bi_iter.bi_size = req_sects << 9;
69 sector += req_sects;
71 nr_sects -= req_sects; 70 nr_sects -= req_sects;
72 sector = end_sect;
73 71
74 /* 72 /*
75 * We can loop for a long time in here, if someone does 73 * We can loop for a long time in here, if someone does
@@ -82,14 +80,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
82 80
83 *biop = bio; 81 *biop = bio;
84 return 0; 82 return 0;
85
86fail:
87 if (bio) {
88 submit_bio_wait(bio);
89 bio_put(bio);
90 }
91 *biop = NULL;
92 return -EOPNOTSUPP;
93} 83}
94EXPORT_SYMBOL(__blkdev_issue_discard); 84EXPORT_SYMBOL(__blkdev_issue_discard);
95 85
@@ -161,7 +151,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
161 return -EOPNOTSUPP; 151 return -EOPNOTSUPP;
162 152
163 /* Ensure that max_write_same_sectors doesn't overflow bi_size */ 153 /* Ensure that max_write_same_sectors doesn't overflow bi_size */
164 max_write_same_sectors = UINT_MAX >> 9; 154 max_write_same_sectors = bio_allowed_max_sectors(q);
165 155
166 while (nr_sects) { 156 while (nr_sects) {
167 bio = blk_next_bio(bio, 1, gfp_mask); 157 bio = blk_next_bio(bio, 1, gfp_mask);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6b5ad275ed56..7695034f4b87 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -46,7 +46,7 @@ static inline bool bio_will_gap(struct request_queue *q,
46 bio_get_first_bvec(prev_rq->bio, &pb); 46 bio_get_first_bvec(prev_rq->bio, &pb);
47 else 47 else
48 bio_get_first_bvec(prev, &pb); 48 bio_get_first_bvec(prev, &pb);
49 if (pb.bv_offset) 49 if (pb.bv_offset & queue_virt_boundary(q))
50 return true; 50 return true;
51 51
52 /* 52 /*
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
90 /* Zero-sector (unknown) and one-sector granularities are the same. */ 90 /* Zero-sector (unknown) and one-sector granularities are the same. */
91 granularity = max(q->limits.discard_granularity >> 9, 1U); 91 granularity = max(q->limits.discard_granularity >> 9, 1U);
92 92
93 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); 93 max_discard_sectors = min(q->limits.max_discard_sectors,
94 bio_allowed_max_sectors(q));
94 max_discard_sectors -= max_discard_sectors % granularity; 95 max_discard_sectors -= max_discard_sectors % granularity;
95 96
96 if (unlikely(!max_discard_sectors)) { 97 if (unlikely(!max_discard_sectors)) {
@@ -819,7 +820,7 @@ static struct request *attempt_merge(struct request_queue *q,
819 820
820 req->__data_len += blk_rq_bytes(next); 821 req->__data_len += blk_rq_bytes(next);
821 822
822 if (req_op(req) != REQ_OP_DISCARD) 823 if (!blk_discard_mergable(req))
823 elv_merge_requests(q, req, next); 824 elv_merge_requests(q, req, next);
824 825
825 /* 826 /*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3f91c6e5b17a..3262d83b9e07 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1715,6 +1715,15 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1715 break; 1715 break;
1716 case BLK_STS_RESOURCE: 1716 case BLK_STS_RESOURCE:
1717 case BLK_STS_DEV_RESOURCE: 1717 case BLK_STS_DEV_RESOURCE:
1718 /*
1719 * If direct dispatch fails, we cannot allow any merging on
1720 * this IO. Drivers (like SCSI) may have set up permanent state
1721 * for this request, like SG tables and mappings, and if we
1722 * merge to it later on then we'll still only do IO to the
1723 * original part.
1724 */
1725 rq->cmd_flags |= REQ_NOMERGE;
1726
1718 blk_mq_update_dispatch_busy(hctx, true); 1727 blk_mq_update_dispatch_busy(hctx, true);
1719 __blk_mq_requeue_request(rq); 1728 __blk_mq_requeue_request(rq);
1720 break; 1729 break;
@@ -1727,6 +1736,18 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1727 return ret; 1736 return ret;
1728} 1737}
1729 1738
1739/*
1740 * Don't allow direct dispatch of anything but regular reads/writes,
1741 * as some of the other commands can potentially share request space
1742 * with data we need for the IO scheduler. If we attempt a direct dispatch
1743 * on those and fail, we can't safely add it to the scheduler afterwards
1744 * without potentially overwriting data that the driver has already written.
1745 */
1746static bool blk_rq_can_direct_dispatch(struct request *rq)
1747{
1748 return req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE;
1749}
1750
1730static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1751static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1731 struct request *rq, 1752 struct request *rq,
1732 blk_qc_t *cookie, 1753 blk_qc_t *cookie,
@@ -1748,7 +1769,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1748 goto insert; 1769 goto insert;
1749 } 1770 }
1750 1771
1751 if (q->elevator && !bypass_insert) 1772 if (!blk_rq_can_direct_dispatch(rq) || (q->elevator && !bypass_insert))
1752 goto insert; 1773 goto insert;
1753 1774
1754 if (!blk_mq_get_dispatch_budget(hctx)) 1775 if (!blk_mq_get_dispatch_budget(hctx))
@@ -1810,6 +1831,9 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1810 struct request *rq = list_first_entry(list, struct request, 1831 struct request *rq = list_first_entry(list, struct request,
1811 queuelist); 1832 queuelist);
1812 1833
1834 if (!blk_rq_can_direct_dispatch(rq))
1835 break;
1836
1813 list_del_init(&rq->queuelist); 1837 list_del_init(&rq->queuelist);
1814 ret = blk_mq_request_issue_directly(rq); 1838 ret = blk_mq_request_issue_directly(rq);
1815 if (ret != BLK_STS_OK) { 1839 if (ret != BLK_STS_OK) {
diff --git a/block/blk.h b/block/blk.h
index a1841b8ff129..0089fefdf771 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -169,7 +169,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
169static inline bool __bvec_gap_to_prev(struct request_queue *q, 169static inline bool __bvec_gap_to_prev(struct request_queue *q,
170 struct bio_vec *bprv, unsigned int offset) 170 struct bio_vec *bprv, unsigned int offset)
171{ 171{
172 return offset || 172 return (offset & queue_virt_boundary(q)) ||
173 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); 173 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
174} 174}
175 175
@@ -396,6 +396,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
396} 396}
397 397
398/* 398/*
399 * The max size one bio can handle is UINT_MAX becasue bvec_iter.bi_size
400 * is defined as 'unsigned int', meantime it has to aligned to with logical
401 * block size which is the minimum accepted unit by hardware.
402 */
403static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
404{
405 return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
406}
407
408/*
399 * Internal io_context interface 409 * Internal io_context interface
400 */ 410 */
401void get_io_context(struct io_context *ioc); 411void get_io_context(struct io_context *ioc);
diff --git a/block/bounce.c b/block/bounce.c
index 36869afc258c..559c55bda040 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -248,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
248 return NULL; 248 return NULL;
249 bio->bi_disk = bio_src->bi_disk; 249 bio->bi_disk = bio_src->bi_disk;
250 bio->bi_opf = bio_src->bi_opf; 250 bio->bi_opf = bio_src->bi_opf;
251 bio->bi_ioprio = bio_src->bi_ioprio;
251 bio->bi_write_hint = bio_src->bi_write_hint; 252 bio->bi_write_hint = bio_src->bi_write_hint;
252 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; 253 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
253 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; 254 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
index e41f6cc33fff..784748dbb19f 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user_base.c
@@ -84,7 +84,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
84{ 84{
85 struct crypto_report_cipher rcipher; 85 struct crypto_report_cipher rcipher;
86 86
87 strlcpy(rcipher.type, "cipher", sizeof(rcipher.type)); 87 strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
88 88
89 rcipher.blocksize = alg->cra_blocksize; 89 rcipher.blocksize = alg->cra_blocksize;
90 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; 90 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@@ -103,7 +103,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
103{ 103{
104 struct crypto_report_comp rcomp; 104 struct crypto_report_comp rcomp;
105 105
106 strlcpy(rcomp.type, "compression", sizeof(rcomp.type)); 106 strncpy(rcomp.type, "compression", sizeof(rcomp.type));
107 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, 107 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
108 sizeof(struct crypto_report_comp), &rcomp)) 108 sizeof(struct crypto_report_comp), &rcomp))
109 goto nla_put_failure; 109 goto nla_put_failure;
@@ -117,7 +117,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
117{ 117{
118 struct crypto_report_acomp racomp; 118 struct crypto_report_acomp racomp;
119 119
120 strlcpy(racomp.type, "acomp", sizeof(racomp.type)); 120 strncpy(racomp.type, "acomp", sizeof(racomp.type));
121 121
122 if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, 122 if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
123 sizeof(struct crypto_report_acomp), &racomp)) 123 sizeof(struct crypto_report_acomp), &racomp))
@@ -132,7 +132,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
132{ 132{
133 struct crypto_report_akcipher rakcipher; 133 struct crypto_report_akcipher rakcipher;
134 134
135 strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); 135 strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
136 136
137 if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, 137 if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
138 sizeof(struct crypto_report_akcipher), &rakcipher)) 138 sizeof(struct crypto_report_akcipher), &rakcipher))
@@ -147,7 +147,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
147{ 147{
148 struct crypto_report_kpp rkpp; 148 struct crypto_report_kpp rkpp;
149 149
150 strlcpy(rkpp.type, "kpp", sizeof(rkpp.type)); 150 strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
151 151
152 if (nla_put(skb, CRYPTOCFGA_REPORT_KPP, 152 if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
153 sizeof(struct crypto_report_kpp), &rkpp)) 153 sizeof(struct crypto_report_kpp), &rkpp))
@@ -161,10 +161,10 @@ nla_put_failure:
161static int crypto_report_one(struct crypto_alg *alg, 161static int crypto_report_one(struct crypto_alg *alg,
162 struct crypto_user_alg *ualg, struct sk_buff *skb) 162 struct crypto_user_alg *ualg, struct sk_buff *skb)
163{ 163{
164 strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); 164 strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
165 strlcpy(ualg->cru_driver_name, alg->cra_driver_name, 165 strncpy(ualg->cru_driver_name, alg->cra_driver_name,
166 sizeof(ualg->cru_driver_name)); 166 sizeof(ualg->cru_driver_name));
167 strlcpy(ualg->cru_module_name, module_name(alg->cra_module), 167 strncpy(ualg->cru_module_name, module_name(alg->cra_module),
168 sizeof(ualg->cru_module_name)); 168 sizeof(ualg->cru_module_name));
169 169
170 ualg->cru_type = 0; 170 ualg->cru_type = 0;
@@ -177,7 +177,7 @@ static int crypto_report_one(struct crypto_alg *alg,
177 if (alg->cra_flags & CRYPTO_ALG_LARVAL) { 177 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
178 struct crypto_report_larval rl; 178 struct crypto_report_larval rl;
179 179
180 strlcpy(rl.type, "larval", sizeof(rl.type)); 180 strncpy(rl.type, "larval", sizeof(rl.type));
181 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, 181 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
182 sizeof(struct crypto_report_larval), &rl)) 182 sizeof(struct crypto_report_larval), &rl))
183 goto nla_put_failure; 183 goto nla_put_failure;
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index 021ad06bbb62..1dfaa0ccd555 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -37,6 +37,8 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
37 u64 v64; 37 u64 v64;
38 u32 v32; 38 u32 v32;
39 39
40 memset(&raead, 0, sizeof(raead));
41
40 strncpy(raead.type, "aead", sizeof(raead.type)); 42 strncpy(raead.type, "aead", sizeof(raead.type));
41 43
42 v32 = atomic_read(&alg->encrypt_cnt); 44 v32 = atomic_read(&alg->encrypt_cnt);
@@ -65,6 +67,8 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
65 u64 v64; 67 u64 v64;
66 u32 v32; 68 u32 v32;
67 69
70 memset(&rcipher, 0, sizeof(rcipher));
71
68 strlcpy(rcipher.type, "cipher", sizeof(rcipher.type)); 72 strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
69 73
70 v32 = atomic_read(&alg->encrypt_cnt); 74 v32 = atomic_read(&alg->encrypt_cnt);
@@ -93,6 +97,8 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
93 u64 v64; 97 u64 v64;
94 u32 v32; 98 u32 v32;
95 99
100 memset(&rcomp, 0, sizeof(rcomp));
101
96 strlcpy(rcomp.type, "compression", sizeof(rcomp.type)); 102 strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
97 v32 = atomic_read(&alg->compress_cnt); 103 v32 = atomic_read(&alg->compress_cnt);
98 rcomp.stat_compress_cnt = v32; 104 rcomp.stat_compress_cnt = v32;
@@ -120,6 +126,8 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
120 u64 v64; 126 u64 v64;
121 u32 v32; 127 u32 v32;
122 128
129 memset(&racomp, 0, sizeof(racomp));
130
123 strlcpy(racomp.type, "acomp", sizeof(racomp.type)); 131 strlcpy(racomp.type, "acomp", sizeof(racomp.type));
124 v32 = atomic_read(&alg->compress_cnt); 132 v32 = atomic_read(&alg->compress_cnt);
125 racomp.stat_compress_cnt = v32; 133 racomp.stat_compress_cnt = v32;
@@ -147,6 +155,8 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
147 u64 v64; 155 u64 v64;
148 u32 v32; 156 u32 v32;
149 157
158 memset(&rakcipher, 0, sizeof(rakcipher));
159
150 strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); 160 strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
151 v32 = atomic_read(&alg->encrypt_cnt); 161 v32 = atomic_read(&alg->encrypt_cnt);
152 rakcipher.stat_encrypt_cnt = v32; 162 rakcipher.stat_encrypt_cnt = v32;
@@ -177,6 +187,8 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
177 struct crypto_stat rkpp; 187 struct crypto_stat rkpp;
178 u32 v; 188 u32 v;
179 189
190 memset(&rkpp, 0, sizeof(rkpp));
191
180 strlcpy(rkpp.type, "kpp", sizeof(rkpp.type)); 192 strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
181 193
182 v = atomic_read(&alg->setsecret_cnt); 194 v = atomic_read(&alg->setsecret_cnt);
@@ -203,6 +215,8 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
203 u64 v64; 215 u64 v64;
204 u32 v32; 216 u32 v32;
205 217
218 memset(&rhash, 0, sizeof(rhash));
219
206 strncpy(rhash.type, "ahash", sizeof(rhash.type)); 220 strncpy(rhash.type, "ahash", sizeof(rhash.type));
207 221
208 v32 = atomic_read(&alg->hash_cnt); 222 v32 = atomic_read(&alg->hash_cnt);
@@ -227,6 +241,8 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
227 u64 v64; 241 u64 v64;
228 u32 v32; 242 u32 v32;
229 243
244 memset(&rhash, 0, sizeof(rhash));
245
230 strncpy(rhash.type, "shash", sizeof(rhash.type)); 246 strncpy(rhash.type, "shash", sizeof(rhash.type));
231 247
232 v32 = atomic_read(&alg->hash_cnt); 248 v32 = atomic_read(&alg->hash_cnt);
@@ -251,6 +267,8 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
251 u64 v64; 267 u64 v64;
252 u32 v32; 268 u32 v32;
253 269
270 memset(&rrng, 0, sizeof(rrng));
271
254 strncpy(rrng.type, "rng", sizeof(rrng.type)); 272 strncpy(rrng.type, "rng", sizeof(rrng.type));
255 273
256 v32 = atomic_read(&alg->generate_cnt); 274 v32 = atomic_read(&alg->generate_cnt);
@@ -275,6 +293,8 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
275 struct crypto_user_alg *ualg, 293 struct crypto_user_alg *ualg,
276 struct sk_buff *skb) 294 struct sk_buff *skb)
277{ 295{
296 memset(ualg, 0, sizeof(*ualg));
297
278 strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); 298 strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
279 strlcpy(ualg->cru_driver_name, alg->cra_driver_name, 299 strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
280 sizeof(ualg->cru_driver_name)); 300 sizeof(ualg->cru_driver_name));
@@ -291,6 +311,7 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
291 if (alg->cra_flags & CRYPTO_ALG_LARVAL) { 311 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
292 struct crypto_stat rl; 312 struct crypto_stat rl;
293 313
314 memset(&rl, 0, sizeof(rl));
294 strlcpy(rl.type, "larval", sizeof(rl.type)); 315 strlcpy(rl.type, "larval", sizeof(rl.type));
295 if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, 316 if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
296 sizeof(struct crypto_stat), &rl)) 317 sizeof(struct crypto_stat), &rl))
diff --git a/crypto/simd.c b/crypto/simd.c
index ea7240be3001..78e8d037ae2b 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
124 124
125 ctx->cryptd_tfm = cryptd_tfm; 125 ctx->cryptd_tfm = cryptd_tfm;
126 126
127 reqsize = sizeof(struct skcipher_request); 127 reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
128 reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base); 128 reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
129 reqsize += sizeof(struct skcipher_request);
129 130
130 crypto_skcipher_set_reqsize(tfm, reqsize); 131 crypto_skcipher_set_reqsize(tfm, reqsize);
131 132
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 8f3a444c6ea9..7cea769c37df 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -512,7 +512,7 @@ config CRC_PMIC_OPREGION
512 512
513config XPOWER_PMIC_OPREGION 513config XPOWER_PMIC_OPREGION
514 bool "ACPI operation region support for XPower AXP288 PMIC" 514 bool "ACPI operation region support for XPower AXP288 PMIC"
515 depends on MFD_AXP20X_I2C && IOSF_MBI 515 depends on MFD_AXP20X_I2C && IOSF_MBI=y
516 help 516 help
517 This config adds ACPI operation region support for XPower AXP288 PMIC. 517 This config adds ACPI operation region support for XPower AXP288 PMIC.
518 518
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index eaa60c94205a..1f32caa87686 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
30 {"PNP0200", 0}, /* AT DMA Controller */ 30 {"PNP0200", 0}, /* AT DMA Controller */
31 {"ACPI0009", 0}, /* IOxAPIC */ 31 {"ACPI0009", 0}, /* IOxAPIC */
32 {"ACPI000A", 0}, /* IOAPIC */ 32 {"ACPI000A", 0}, /* IOAPIC */
33 {"SMB0001", 0}, /* ACPI SMBUS virtual device */
33 {"", 0}, 34 {"", 0},
34}; 35};
35 36
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 0d42f30e5b25..9920fac6413f 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -244,7 +244,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
244{ 244{
245 acpi_status status; 245 acpi_status status;
246 u32 buffer_length; 246 u32 buffer_length;
247 u32 data_length;
248 void *buffer; 247 void *buffer;
249 union acpi_operand_object *buffer_desc; 248 union acpi_operand_object *buffer_desc;
250 u32 function; 249 u32 function;
@@ -282,14 +281,12 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
282 case ACPI_ADR_SPACE_SMBUS: 281 case ACPI_ADR_SPACE_SMBUS:
283 282
284 buffer_length = ACPI_SMBUS_BUFFER_SIZE; 283 buffer_length = ACPI_SMBUS_BUFFER_SIZE;
285 data_length = ACPI_SMBUS_DATA_SIZE;
286 function = ACPI_WRITE | (obj_desc->field.attribute << 16); 284 function = ACPI_WRITE | (obj_desc->field.attribute << 16);
287 break; 285 break;
288 286
289 case ACPI_ADR_SPACE_IPMI: 287 case ACPI_ADR_SPACE_IPMI:
290 288
291 buffer_length = ACPI_IPMI_BUFFER_SIZE; 289 buffer_length = ACPI_IPMI_BUFFER_SIZE;
292 data_length = ACPI_IPMI_DATA_SIZE;
293 function = ACPI_WRITE; 290 function = ACPI_WRITE;
294 break; 291 break;
295 292
@@ -310,7 +307,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
310 /* Add header length to get the full size of the buffer */ 307 /* Add header length to get the full size of the buffer */
311 308
312 buffer_length += ACPI_SERIAL_HEADER_SIZE; 309 buffer_length += ACPI_SERIAL_HEADER_SIZE;
313 data_length = source_desc->buffer.pointer[1];
314 function = ACPI_WRITE | (accessor_type << 16); 310 function = ACPI_WRITE | (accessor_type << 16);
315 break; 311 break;
316 312
@@ -318,20 +314,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
318 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); 314 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
319 } 315 }
320 316
321#if 0
322 OBSOLETE ?
323 /* Check for possible buffer overflow */
324 if (data_length > source_desc->buffer.length) {
325 ACPI_ERROR((AE_INFO,
326 "Length in buffer header (%u)(%u) is greater than "
327 "the physical buffer length (%u) and will overflow",
328 data_length, buffer_length,
329 source_desc->buffer.length));
330
331 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
332 }
333#endif
334
335 /* Create the transfer/bidirectional/return buffer */ 317 /* Create the transfer/bidirectional/return buffer */
336 318
337 buffer_desc = acpi_ut_create_buffer_object(buffer_length); 319 buffer_desc = acpi_ut_create_buffer_object(buffer_length);
@@ -342,7 +324,8 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
342 /* Copy the input buffer data to the transfer buffer */ 324 /* Copy the input buffer data to the transfer buffer */
343 325
344 buffer = buffer_desc->buffer.pointer; 326 buffer = buffer_desc->buffer.pointer;
345 memcpy(buffer, source_desc->buffer.pointer, data_length); 327 memcpy(buffer, source_desc->buffer.pointer,
328 min(buffer_length, source_desc->buffer.length));
346 329
347 /* Lock entire transaction if requested */ 330 /* Lock entire transaction if requested */
348 331
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 2a361e22d38d..70f4e80b9246 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev,
700 */ 700 */
701static struct irq_domain *iort_get_platform_device_domain(struct device *dev) 701static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
702{ 702{
703 struct acpi_iort_node *node, *msi_parent; 703 struct acpi_iort_node *node, *msi_parent = NULL;
704 struct fwnode_handle *iort_fwnode; 704 struct fwnode_handle *iort_fwnode;
705 struct acpi_iort_its_group *its; 705 struct acpi_iort_its_group *its;
706 int i; 706 int i;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f8c638f3c946..14d9f5bea015 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2928,9 +2928,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
2928 return rc; 2928 return rc;
2929 2929
2930 if (ars_status_process_records(acpi_desc)) 2930 if (ars_status_process_records(acpi_desc))
2931 return -ENOMEM; 2931 dev_err(acpi_desc->dev, "Failed to process ARS records\n");
2932 2932
2933 return 0; 2933 return rc;
2934} 2934}
2935 2935
2936static int ars_register(struct acpi_nfit_desc *acpi_desc, 2936static int ars_register(struct acpi_nfit_desc *acpi_desc,
@@ -3341,8 +3341,6 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3341 struct nvdimm *nvdimm, unsigned int cmd) 3341 struct nvdimm *nvdimm, unsigned int cmd)
3342{ 3342{
3343 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3343 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
3344 struct nfit_spa *nfit_spa;
3345 int rc = 0;
3346 3344
3347 if (nvdimm) 3345 if (nvdimm)
3348 return 0; 3346 return 0;
@@ -3355,17 +3353,10 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3355 * just needs guarantees that any ARS it initiates are not 3353 * just needs guarantees that any ARS it initiates are not
3356 * interrupted by any intervening start requests from userspace. 3354 * interrupted by any intervening start requests from userspace.
3357 */ 3355 */
3358 mutex_lock(&acpi_desc->init_mutex); 3356 if (work_busy(&acpi_desc->dwork.work))
3359 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 3357 return -EBUSY;
3360 if (acpi_desc->scrub_spa
3361 || test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
3362 || test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
3363 rc = -EBUSY;
3364 break;
3365 }
3366 mutex_unlock(&acpi_desc->init_mutex);
3367 3358
3368 return rc; 3359 return 0;
3369} 3360}
3370 3361
3371int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, 3362int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e9626bf6ca29..d6c1b10f6c25 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
25 struct acpi_nfit_desc *acpi_desc; 25 struct acpi_nfit_desc *acpi_desc;
26 struct nfit_spa *nfit_spa; 26 struct nfit_spa *nfit_spa;
27 27
28 /* We only care about memory errors */ 28 /* We only care about uncorrectable memory errors */
29 if (!mce_is_memory_error(mce)) 29 if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
30 return NOTIFY_DONE;
31
32 /* Verify the address reported in the MCE is valid. */
33 if (!mce_usable_address(mce))
30 return NOTIFY_DONE; 34 return NOTIFY_DONE;
31 35
32 /* 36 /*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cb30a524d16d..9f1000d2a40c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2974,7 +2974,6 @@ static void binder_transaction(struct binder_proc *proc,
2974 t->buffer = NULL; 2974 t->buffer = NULL;
2975 goto err_binder_alloc_buf_failed; 2975 goto err_binder_alloc_buf_failed;
2976 } 2976 }
2977 t->buffer->allow_user_free = 0;
2978 t->buffer->debug_id = t->debug_id; 2977 t->buffer->debug_id = t->debug_id;
2979 t->buffer->transaction = t; 2978 t->buffer->transaction = t;
2980 t->buffer->target_node = target_node; 2979 t->buffer->target_node = target_node;
@@ -3510,14 +3509,18 @@ static int binder_thread_write(struct binder_proc *proc,
3510 3509
3511 buffer = binder_alloc_prepare_to_free(&proc->alloc, 3510 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3512 data_ptr); 3511 data_ptr);
3513 if (buffer == NULL) { 3512 if (IS_ERR_OR_NULL(buffer)) {
3514 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 3513 if (PTR_ERR(buffer) == -EPERM) {
3515 proc->pid, thread->pid, (u64)data_ptr); 3514 binder_user_error(
3516 break; 3515 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3517 } 3516 proc->pid, thread->pid,
3518 if (!buffer->allow_user_free) { 3517 (u64)data_ptr);
3519 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 3518 } else {
3520 proc->pid, thread->pid, (u64)data_ptr); 3519 binder_user_error(
3520 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3521 proc->pid, thread->pid,
3522 (u64)data_ptr);
3523 }
3521 break; 3524 break;
3522 } 3525 }
3523 binder_debug(BINDER_DEBUG_FREE_BUFFER, 3526 binder_debug(BINDER_DEBUG_FREE_BUFFER,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 64fd96eada31..030c98f35cca 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -151,16 +151,12 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
151 else { 151 else {
152 /* 152 /*
153 * Guard against user threads attempting to 153 * Guard against user threads attempting to
154 * free the buffer twice 154 * free the buffer when in use by kernel or
155 * after it's already been freed.
155 */ 156 */
156 if (buffer->free_in_progress) { 157 if (!buffer->allow_user_free)
157 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 158 return ERR_PTR(-EPERM);
158 "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", 159 buffer->allow_user_free = 0;
159 alloc->pid, current->pid,
160 (u64)user_ptr);
161 return NULL;
162 }
163 buffer->free_in_progress = 1;
164 return buffer; 160 return buffer;
165 } 161 }
166 } 162 }
@@ -500,7 +496,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
500 496
501 rb_erase(best_fit, &alloc->free_buffers); 497 rb_erase(best_fit, &alloc->free_buffers);
502 buffer->free = 0; 498 buffer->free = 0;
503 buffer->free_in_progress = 0; 499 buffer->allow_user_free = 0;
504 binder_insert_allocated_buffer_locked(alloc, buffer); 500 binder_insert_allocated_buffer_locked(alloc, buffer);
505 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 501 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
506 "%d: binder_alloc_buf size %zd got %pK\n", 502 "%d: binder_alloc_buf size %zd got %pK\n",
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 9ef64e563856..fb3238c74c8a 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -50,8 +50,7 @@ struct binder_buffer {
50 unsigned free:1; 50 unsigned free:1;
51 unsigned allow_user_free:1; 51 unsigned allow_user_free:1;
52 unsigned async_transaction:1; 52 unsigned async_transaction:1;
53 unsigned free_in_progress:1; 53 unsigned debug_id:29;
54 unsigned debug_id:28;
55 54
56 struct binder_transaction *transaction; 55 struct binder_transaction *transaction;
57 56
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6e594644cb1d..b8c3f9e6af89 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4553 /* These specific Samsung models/firmware-revs do not handle LPM well */ 4553 /* These specific Samsung models/firmware-revs do not handle LPM well */
4554 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, 4554 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
4555 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, 4555 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
4556 { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, }, 4556 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
4557 4557
4558 /* devices that don't properly handle queued TRIM commands */ 4558 /* devices that don't properly handle queued TRIM commands */
4559 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4559 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4602,6 +4602,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4602 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4602 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4603 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4603 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4604 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4604 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4605 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4605 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4606 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4606 4607
4607 /* 4608 /*
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 10ecb232245d..4b1ff5bc256a 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1,14 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Renesas R-Car SATA driver 3 * Renesas R-Car SATA driver
3 * 4 *
4 * Author: Vladimir Barinov <source@cogentembedded.com> 5 * Author: Vladimir Barinov <source@cogentembedded.com>
5 * Copyright (C) 2013-2015 Cogent Embedded, Inc. 6 * Copyright (C) 2013-2015 Cogent Embedded, Inc.
6 * Copyright (C) 2013-2015 Renesas Solutions Corp. 7 * Copyright (C) 2013-2015 Renesas Solutions Corp.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */ 8 */
13 9
14#include <linux/kernel.h> 10#include <linux/kernel.h>
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 4e46dc9e41ad..11e1663bdc4d 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1410,7 +1410,7 @@ static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
1410 1410
1411 func_enter (); 1411 func_enter ();
1412 1412
1413 fs_dprintk (FS_DEBUG_INIT, "Inititing queue at %x: %d entries:\n", 1413 fs_dprintk (FS_DEBUG_INIT, "Initializing queue at %x: %d entries:\n",
1414 queue, nentries); 1414 queue, nentries);
1415 1415
1416 p = aligned_kmalloc (sz, GFP_KERNEL, 0x10); 1416 p = aligned_kmalloc (sz, GFP_KERNEL, 0x10);
@@ -1443,7 +1443,7 @@ static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
1443{ 1443{
1444 func_enter (); 1444 func_enter ();
1445 1445
1446 fs_dprintk (FS_DEBUG_INIT, "Inititing free pool at %x:\n", queue); 1446 fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue);
1447 1447
1448 write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME); 1448 write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME);
1449 write_fs (dev, FP_SA(queue), 0); 1449 write_fs (dev, FP_SA(queue), 0);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 4aaf00d2098b..e038e2b3b7ea 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -26,8 +26,14 @@ struct devres_node {
26 26
27struct devres { 27struct devres {
28 struct devres_node node; 28 struct devres_node node;
29 /* -- 3 pointers */ 29 /*
30 unsigned long long data[]; /* guarantee ull alignment */ 30 * Some archs want to perform DMA into kmalloc caches
31 * and need a guaranteed alignment larger than
32 * the alignment of a 64-bit integer.
33 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
34 * buffer alignment as if it was allocated by plain kmalloc().
35 */
36 u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
31}; 37};
32 38
33struct devres_group { 39struct devres_group {
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a8cfa011c284..fb23578e9a41 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4148,10 +4148,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
4148 bio.bi_end_io = floppy_rb0_cb; 4148 bio.bi_end_io = floppy_rb0_cb;
4149 bio_set_op_attrs(&bio, REQ_OP_READ, 0); 4149 bio_set_op_attrs(&bio, REQ_OP_READ, 0);
4150 4150
4151 init_completion(&cbdata.complete);
4152
4151 submit_bio(&bio); 4153 submit_bio(&bio);
4152 process_fd_request(); 4154 process_fd_request();
4153 4155
4154 init_completion(&cbdata.complete);
4155 wait_for_completion(&cbdata.complete); 4156 wait_for_completion(&cbdata.complete);
4156 4157
4157 __free_page(page); 4158 __free_page(page);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 56452cabce5b..0ed4b200fa58 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
1919 GFP_KERNEL); 1919 GFP_KERNEL);
1920 if (!info->rinfo) { 1920 if (!info->rinfo) {
1921 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); 1921 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1922 info->nr_rings = 0;
1922 return -ENOMEM; 1923 return -ENOMEM;
1923 } 1924 }
1924 1925
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index ef0ca9414f37..ff83e899df71 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
210{ 210{
211 struct clk *clk = platform_get_drvdata(pdev); 211 struct clk *clk = platform_get_drvdata(pdev);
212 212
213 of_clk_del_provider(pdev->dev.of_node);
213 clk_unregister_fixed_factor(clk); 214 clk_unregister_fixed_factor(clk);
214 215
215 return 0; 216 return 0;
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index c981159b02c0..792735d7e46e 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -325,6 +325,7 @@ static struct clk_regmap axg_fclk_div2 = {
325 .ops = &clk_regmap_gate_ops, 325 .ops = &clk_regmap_gate_ops,
326 .parent_names = (const char *[]){ "fclk_div2_div" }, 326 .parent_names = (const char *[]){ "fclk_div2_div" },
327 .num_parents = 1, 327 .num_parents = 1,
328 .flags = CLK_IS_CRITICAL,
328 }, 329 },
329}; 330};
330 331
@@ -349,6 +350,18 @@ static struct clk_regmap axg_fclk_div3 = {
349 .ops = &clk_regmap_gate_ops, 350 .ops = &clk_regmap_gate_ops,
350 .parent_names = (const char *[]){ "fclk_div3_div" }, 351 .parent_names = (const char *[]){ "fclk_div3_div" },
351 .num_parents = 1, 352 .num_parents = 1,
353 /*
354 * FIXME:
355 * This clock, as fdiv2, is used by the SCPI FW and is required
356 * by the platform to operate correctly.
357 * Until the following condition are met, we need this clock to
358 * be marked as critical:
359 * a) The SCPI generic driver claims and enable all the clocks
360 * it needs
361 * b) CCF has a clock hand-off mechanism to make the sure the
362 * clock stays on until the proper driver comes along
363 */
364 .flags = CLK_IS_CRITICAL,
352 }, 365 },
353}; 366};
354 367
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 9309cfaaa464..4ada9668fd49 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -506,6 +506,18 @@ static struct clk_regmap gxbb_fclk_div3 = {
506 .ops = &clk_regmap_gate_ops, 506 .ops = &clk_regmap_gate_ops,
507 .parent_names = (const char *[]){ "fclk_div3_div" }, 507 .parent_names = (const char *[]){ "fclk_div3_div" },
508 .num_parents = 1, 508 .num_parents = 1,
509 /*
510 * FIXME:
511 * This clock, as fdiv2, is used by the SCPI FW and is required
512 * by the platform to operate correctly.
513 * Until the following condition are met, we need this clock to
514 * be marked as critical:
515 * a) The SCPI generic driver claims and enable all the clocks
516 * it needs
517 * b) CCF has a clock hand-off mechanism to make the sure the
518 * clock stays on until the proper driver comes along
519 */
520 .flags = CLK_IS_CRITICAL,
509 }, 521 },
510}; 522};
511 523
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index e4ca6a45f313..ef1b267cb058 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -265,7 +265,7 @@ static struct clk_fixed_factor cxo = {
265 .div = 1, 265 .div = 1,
266 .hw.init = &(struct clk_init_data){ 266 .hw.init = &(struct clk_init_data){
267 .name = "cxo", 267 .name = "cxo",
268 .parent_names = (const char *[]){ "xo_board" }, 268 .parent_names = (const char *[]){ "xo-board" },
269 .num_parents = 1, 269 .num_parents = 1,
270 .ops = &clk_fixed_factor_ops, 270 .ops = &clk_fixed_factor_ops,
271 }, 271 },
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 9c38895542f4..d4350bb10b83 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -20,6 +20,13 @@
20DEFINE_RAW_SPINLOCK(i8253_lock); 20DEFINE_RAW_SPINLOCK(i8253_lock);
21EXPORT_SYMBOL(i8253_lock); 21EXPORT_SYMBOL(i8253_lock);
22 22
23/*
24 * Handle PIT quirk in pit_shutdown() where zeroing the counter register
25 * restarts the PIT, negating the shutdown. On platforms with the quirk,
26 * platform specific code can set this to false.
27 */
28bool i8253_clear_counter_on_shutdown __ro_after_init = true;
29
23#ifdef CONFIG_CLKSRC_I8253 30#ifdef CONFIG_CLKSRC_I8253
24/* 31/*
25 * Since the PIT overflows every tick, its not very useful 32 * Since the PIT overflows every tick, its not very useful
@@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt)
109 raw_spin_lock(&i8253_lock); 116 raw_spin_lock(&i8253_lock);
110 117
111 outb_p(0x30, PIT_MODE); 118 outb_p(0x30, PIT_MODE);
112 outb_p(0, PIT_CH0); 119
113 outb_p(0, PIT_CH0); 120 if (i8253_clear_counter_on_shutdown) {
121 outb_p(0, PIT_CH0);
122 outb_p(0, PIT_CH0);
123 }
114 124
115 raw_spin_unlock(&i8253_lock); 125 raw_spin_unlock(&i8253_lock);
116 return 0; 126 return 0;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 8cfee0ab804b..d8c3595e9023 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -160,8 +160,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
160 /* Ensure the arm clock divider is what we expect */ 160 /* Ensure the arm clock divider is what we expect */
161 ret = clk_set_rate(clks[ARM].clk, new_freq * 1000); 161 ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
162 if (ret) { 162 if (ret) {
163 int ret1;
164
163 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); 165 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
164 regulator_set_voltage_tol(arm_reg, volt_old, 0); 166 ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
167 if (ret1)
168 dev_warn(cpu_dev,
169 "failed to restore vddarm voltage: %d\n", ret1);
165 return ret; 170 return ret;
166 } 171 }
167 172
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 3f0e2a14895a..22b53bf26817 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
201 {}, 201 {},
202}; 202};
203 203
204static const struct of_device_id *ti_cpufreq_match_node(void)
205{
206 struct device_node *np;
207 const struct of_device_id *match;
208
209 np = of_find_node_by_path("/");
210 match = of_match_node(ti_cpufreq_of_match, np);
211 of_node_put(np);
212
213 return match;
214}
215
204static int ti_cpufreq_probe(struct platform_device *pdev) 216static int ti_cpufreq_probe(struct platform_device *pdev)
205{ 217{
206 u32 version[VERSION_COUNT]; 218 u32 version[VERSION_COUNT];
207 struct device_node *np;
208 const struct of_device_id *match; 219 const struct of_device_id *match;
209 struct opp_table *ti_opp_table; 220 struct opp_table *ti_opp_table;
210 struct ti_cpufreq_data *opp_data; 221 struct ti_cpufreq_data *opp_data;
211 const char * const reg_names[] = {"vdd", "vbb"}; 222 const char * const reg_names[] = {"vdd", "vbb"};
212 int ret; 223 int ret;
213 224
214 np = of_find_node_by_path("/"); 225 match = dev_get_platdata(&pdev->dev);
215 match = of_match_node(ti_cpufreq_of_match, np);
216 of_node_put(np);
217 if (!match) 226 if (!match)
218 return -ENODEV; 227 return -ENODEV;
219 228
@@ -290,7 +299,14 @@ fail_put_node:
290 299
291static int ti_cpufreq_init(void) 300static int ti_cpufreq_init(void)
292{ 301{
293 platform_device_register_simple("ti-cpufreq", -1, NULL, 0); 302 const struct of_device_id *match;
303
304 /* Check to ensure we are on a compatible platform */
305 match = ti_cpufreq_match_node();
306 if (match)
307 platform_device_register_data(NULL, "ti-cpufreq", -1, match,
308 sizeof(*match));
309
294 return 0; 310 return 0;
295} 311}
296module_init(ti_cpufreq_init); 312module_init(ti_cpufreq_init);
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 073557f433eb..3a407a3ef22b 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -82,7 +82,6 @@ static int __init arm_idle_init_cpu(int cpu)
82{ 82{
83 int ret; 83 int ret;
84 struct cpuidle_driver *drv; 84 struct cpuidle_driver *drv;
85 struct cpuidle_device *dev;
86 85
87 drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL); 86 drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
88 if (!drv) 87 if (!drv)
@@ -103,13 +102,6 @@ static int __init arm_idle_init_cpu(int cpu)
103 goto out_kfree_drv; 102 goto out_kfree_drv;
104 } 103 }
105 104
106 ret = cpuidle_register_driver(drv);
107 if (ret) {
108 if (ret != -EBUSY)
109 pr_err("Failed to register cpuidle driver\n");
110 goto out_kfree_drv;
111 }
112
113 /* 105 /*
114 * Call arch CPU operations in order to initialize 106 * Call arch CPU operations in order to initialize
115 * idle states suspend back-end specific data 107 * idle states suspend back-end specific data
@@ -117,37 +109,21 @@ static int __init arm_idle_init_cpu(int cpu)
117 ret = arm_cpuidle_init(cpu); 109 ret = arm_cpuidle_init(cpu);
118 110
119 /* 111 /*
120 * Skip the cpuidle device initialization if the reported 112 * Allow the initialization to continue for other CPUs, if the reported
121 * failure is a HW misconfiguration/breakage (-ENXIO). 113 * failure is a HW misconfiguration/breakage (-ENXIO).
122 */ 114 */
123 if (ret == -ENXIO)
124 return 0;
125
126 if (ret) { 115 if (ret) {
127 pr_err("CPU %d failed to init idle CPU ops\n", cpu); 116 pr_err("CPU %d failed to init idle CPU ops\n", cpu);
128 goto out_unregister_drv; 117 ret = ret == -ENXIO ? 0 : ret;
129 } 118 goto out_kfree_drv;
130
131 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
132 if (!dev) {
133 ret = -ENOMEM;
134 goto out_unregister_drv;
135 } 119 }
136 dev->cpu = cpu;
137 120
138 ret = cpuidle_register_device(dev); 121 ret = cpuidle_register(drv, NULL);
139 if (ret) { 122 if (ret)
140 pr_err("Failed to register cpuidle device for CPU %d\n", 123 goto out_kfree_drv;
141 cpu);
142 goto out_kfree_dev;
143 }
144 124
145 return 0; 125 return 0;
146 126
147out_kfree_dev:
148 kfree(dev);
149out_unregister_drv:
150 cpuidle_unregister_driver(drv);
151out_kfree_drv: 127out_kfree_drv:
152 kfree(drv); 128 kfree(drv);
153 return ret; 129 return ret;
@@ -178,9 +154,7 @@ out_fail:
178 while (--cpu >= 0) { 154 while (--cpu >= 0) {
179 dev = per_cpu(cpuidle_devices, cpu); 155 dev = per_cpu(cpuidle_devices, cpu);
180 drv = cpuidle_get_cpu_driver(dev); 156 drv = cpuidle_get_cpu_driver(dev);
181 cpuidle_unregister_device(dev); 157 cpuidle_unregister(drv);
182 cpuidle_unregister_driver(drv);
183 kfree(dev);
184 kfree(drv); 158 kfree(drv);
185 } 159 }
186 160
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index f7d6d690116e..cdc4f9a171d9 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
732 int *splits_in_nents; 732 int *splits_in_nents;
733 int *splits_out_nents = NULL; 733 int *splits_out_nents = NULL;
734 struct sec_request_el *el, *temp; 734 struct sec_request_el *el, *temp;
735 bool split = skreq->src != skreq->dst;
735 736
736 mutex_init(&sec_req->lock); 737 mutex_init(&sec_req->lock);
737 sec_req->req_base = &skreq->base; 738 sec_req->req_base = &skreq->base;
@@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
750 if (ret) 751 if (ret)
751 goto err_free_split_sizes; 752 goto err_free_split_sizes;
752 753
753 if (skreq->src != skreq->dst) { 754 if (split) {
754 sec_req->len_out = sg_nents(skreq->dst); 755 sec_req->len_out = sg_nents(skreq->dst);
755 ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, 756 ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
756 &splits_out, &splits_out_nents, 757 &splits_out, &splits_out_nents,
@@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
785 split_sizes[i], 786 split_sizes[i],
786 skreq->src != skreq->dst, 787 skreq->src != skreq->dst,
787 splits_in[i], splits_in_nents[i], 788 splits_in[i], splits_in_nents[i],
788 splits_out[i], 789 split ? splits_out[i] : NULL,
789 splits_out_nents[i], info); 790 split ? splits_out_nents[i] : 0,
791 info);
790 if (IS_ERR(el)) { 792 if (IS_ERR(el)) {
791 ret = PTR_ERR(el); 793 ret = PTR_ERR(el);
792 goto err_free_elements; 794 goto err_free_elements;
@@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
806 * more refined but this is unlikely to happen so no need. 808 * more refined but this is unlikely to happen so no need.
807 */ 809 */
808 810
809 /* Cleanup - all elements in pointer arrays have been coppied */
810 kfree(splits_in_nents);
811 kfree(splits_in);
812 kfree(splits_out_nents);
813 kfree(splits_out);
814 kfree(split_sizes);
815
816 /* Grab a big lock for a long time to avoid concurrency issues */ 811 /* Grab a big lock for a long time to avoid concurrency issues */
817 mutex_lock(&queue->queuelock); 812 mutex_lock(&queue->queuelock);
818 813
@@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
827 (!queue->havesoftqueue || 822 (!queue->havesoftqueue ||
828 kfifo_avail(&queue->softqueue) > steps)) || 823 kfifo_avail(&queue->softqueue) > steps)) ||
829 !list_empty(&ctx->backlog)) { 824 !list_empty(&ctx->backlog)) {
825 ret = -EBUSY;
830 if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 826 if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
831 list_add_tail(&sec_req->backlog_head, &ctx->backlog); 827 list_add_tail(&sec_req->backlog_head, &ctx->backlog);
832 mutex_unlock(&queue->queuelock); 828 mutex_unlock(&queue->queuelock);
833 return -EBUSY; 829 goto out;
834 } 830 }
835 831
836 ret = -EBUSY;
837 mutex_unlock(&queue->queuelock); 832 mutex_unlock(&queue->queuelock);
838 goto err_free_elements; 833 goto err_free_elements;
839 } 834 }
@@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
842 if (ret) 837 if (ret)
843 goto err_free_elements; 838 goto err_free_elements;
844 839
845 return -EINPROGRESS; 840 ret = -EINPROGRESS;
841out:
842 /* Cleanup - all elements in pointer arrays have been copied */
843 kfree(splits_in_nents);
844 kfree(splits_in);
845 kfree(splits_out_nents);
846 kfree(splits_out);
847 kfree(split_sizes);
848 return ret;
846 849
847err_free_elements: 850err_free_elements:
848 list_for_each_entry_safe(el, temp, &sec_req->elements, head) { 851 list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
@@ -854,7 +857,7 @@ err_free_elements:
854 crypto_skcipher_ivsize(atfm), 857 crypto_skcipher_ivsize(atfm),
855 DMA_BIDIRECTIONAL); 858 DMA_BIDIRECTIONAL);
856err_unmap_out_sg: 859err_unmap_out_sg:
857 if (skreq->src != skreq->dst) 860 if (split)
858 sec_unmap_sg_on_err(skreq->dst, steps, splits_out, 861 sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
859 splits_out_nents, sec_req->len_out, 862 splits_out_nents, sec_req->len_out,
860 info->dev); 863 info->dev);
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 5b44ef226904..fc359ca4503d 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -184,6 +184,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
184 exp_info.ops = &udmabuf_ops; 184 exp_info.ops = &udmabuf_ops;
185 exp_info.size = ubuf->pagecount << PAGE_SHIFT; 185 exp_info.size = ubuf->pagecount << PAGE_SHIFT;
186 exp_info.priv = ubuf; 186 exp_info.priv = ubuf;
187 exp_info.flags = O_RDWR;
187 188
188 buf = dma_buf_export(&exp_info); 189 buf = dma_buf_export(&exp_info);
189 if (IS_ERR(buf)) { 190 if (IS_ERR(buf)) {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7cbac6e8c113..01d936c9fe89 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1641,6 +1641,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
1641 atchan->descs_allocated = 0; 1641 atchan->descs_allocated = 0;
1642 atchan->status = 0; 1642 atchan->status = 0;
1643 1643
1644 /*
1645 * Free atslave allocated in at_dma_xlate()
1646 */
1647 kfree(chan->private);
1648 chan->private = NULL;
1649
1644 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1650 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1645} 1651}
1646 1652
@@ -1675,7 +1681,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1675 dma_cap_zero(mask); 1681 dma_cap_zero(mask);
1676 dma_cap_set(DMA_SLAVE, mask); 1682 dma_cap_set(DMA_SLAVE, mask);
1677 1683
1678 atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); 1684 atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
1679 if (!atslave) 1685 if (!atslave)
1680 return NULL; 1686 return NULL;
1681 1687
@@ -2000,6 +2006,8 @@ static int at_dma_remove(struct platform_device *pdev)
2000 struct resource *io; 2006 struct resource *io;
2001 2007
2002 at_dma_off(atdma); 2008 at_dma_off(atdma);
2009 if (pdev->dev.of_node)
2010 of_dma_controller_free(pdev->dev.of_node);
2003 dma_async_device_unregister(&atdma->dma_common); 2011 dma_async_device_unregister(&atdma->dma_common);
2004 2012
2005 dma_pool_destroy(atdma->memset_pool); 2013 dma_pool_destroy(atdma->memset_pool);
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 388a929baf95..1a6a77df8a5e 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -265,6 +265,10 @@ void __init efi_init(void)
265 (params.mmap & ~PAGE_MASK))); 265 (params.mmap & ~PAGE_MASK)));
266 266
267 init_screen_info(); 267 init_screen_info();
268
269 /* ARM does not permit early mappings to persist across paging_init() */
270 if (IS_ENABLED(CONFIG_ARM))
271 efi_memmap_unmap();
268} 272}
269 273
270static int __init register_gop_device(void) 274static int __init register_gop_device(void)
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 922cfb813109..a00934d263c5 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void)
110{ 110{
111 u64 mapsize; 111 u64 mapsize;
112 112
113 if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) { 113 if (!efi_enabled(EFI_BOOT)) {
114 pr_info("EFI services will not be available.\n"); 114 pr_info("EFI services will not be available.\n");
115 return 0; 115 return 0;
116 } 116 }
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 249eb70691b0..415849bab233 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -592,7 +592,11 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
592 592
593 early_memunmap(tbl, sizeof(*tbl)); 593 early_memunmap(tbl, sizeof(*tbl));
594 } 594 }
595 return 0;
596}
595 597
598int __init efi_apply_persistent_mem_reservations(void)
599{
596 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { 600 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
597 unsigned long prsv = efi.mem_reserve; 601 unsigned long prsv = efi.mem_reserve;
598 602
@@ -963,36 +967,59 @@ bool efi_is_table_address(unsigned long phys_addr)
963} 967}
964 968
965static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); 969static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
970static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
966 971
967int efi_mem_reserve_persistent(phys_addr_t addr, u64 size) 972static int __init efi_memreserve_map_root(void)
968{ 973{
969 struct linux_efi_memreserve *rsv, *parent;
970
971 if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR) 974 if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
972 return -ENODEV; 975 return -ENODEV;
973 976
974 rsv = kmalloc(sizeof(*rsv), GFP_KERNEL); 977 efi_memreserve_root = memremap(efi.mem_reserve,
975 if (!rsv) 978 sizeof(*efi_memreserve_root),
979 MEMREMAP_WB);
980 if (WARN_ON_ONCE(!efi_memreserve_root))
976 return -ENOMEM; 981 return -ENOMEM;
982 return 0;
983}
977 984
978 parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB); 985int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
979 if (!parent) { 986{
980 kfree(rsv); 987 struct linux_efi_memreserve *rsv;
981 return -ENOMEM; 988 int rc;
989
990 if (efi_memreserve_root == (void *)ULONG_MAX)
991 return -ENODEV;
992
993 if (!efi_memreserve_root) {
994 rc = efi_memreserve_map_root();
995 if (rc)
996 return rc;
982 } 997 }
983 998
999 rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
1000 if (!rsv)
1001 return -ENOMEM;
1002
984 rsv->base = addr; 1003 rsv->base = addr;
985 rsv->size = size; 1004 rsv->size = size;
986 1005
987 spin_lock(&efi_mem_reserve_persistent_lock); 1006 spin_lock(&efi_mem_reserve_persistent_lock);
988 rsv->next = parent->next; 1007 rsv->next = efi_memreserve_root->next;
989 parent->next = __pa(rsv); 1008 efi_memreserve_root->next = __pa(rsv);
990 spin_unlock(&efi_mem_reserve_persistent_lock); 1009 spin_unlock(&efi_mem_reserve_persistent_lock);
991 1010
992 memunmap(parent); 1011 return 0;
1012}
993 1013
1014static int __init efi_memreserve_root_init(void)
1015{
1016 if (efi_memreserve_root)
1017 return 0;
1018 if (efi_memreserve_map_root())
1019 efi_memreserve_root = (void *)ULONG_MAX;
994 return 0; 1020 return 0;
995} 1021}
1022early_initcall(efi_memreserve_root_init);
996 1023
997#ifdef CONFIG_KEXEC 1024#ifdef CONFIG_KEXEC
998static int update_efi_random_seed(struct notifier_block *nb, 1025static int update_efi_random_seed(struct notifier_block *nb,
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 30ac0c975f8a..3d36142cf812 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -75,6 +75,9 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
75 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; 75 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
76 efi_status_t status; 76 efi_status_t status;
77 77
78 if (IS_ENABLED(CONFIG_ARM))
79 return;
80
78 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), 81 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
79 (void **)&rsv); 82 (void **)&rsv);
80 if (status != EFI_SUCCESS) { 83 if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 8830fa601e45..0c0d2312f4a8 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
158 return efi_status; 158 return efi_status;
159 } 159 }
160 } 160 }
161
162 /* shrink the FDT back to its minimum size */
163 fdt_pack(fdt);
164
161 return EFI_SUCCESS; 165 return EFI_SUCCESS;
162 166
163fdt_set_fail: 167fdt_set_fail:
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index fa2904fb841f..38b686c67b17 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
118 118
119void __init efi_memmap_unmap(void) 119void __init efi_memmap_unmap(void)
120{ 120{
121 if (!efi_enabled(EFI_MEMMAP))
122 return;
123
121 if (!efi.memmap.late) { 124 if (!efi.memmap.late) {
122 unsigned long size; 125 unsigned long size;
123 126
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index a19d845bdb06..8903b9ccfc2b 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -67,7 +67,7 @@ struct efi_runtime_work efi_rts_work;
67 } \ 67 } \
68 \ 68 \
69 init_completion(&efi_rts_work.efi_rts_comp); \ 69 init_completion(&efi_rts_work.efi_rts_comp); \
70 INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \ 70 INIT_WORK(&efi_rts_work.work, efi_call_rts); \
71 efi_rts_work.arg1 = _arg1; \ 71 efi_rts_work.arg1 = _arg1; \
72 efi_rts_work.arg2 = _arg2; \ 72 efi_rts_work.arg2 = _arg2; \
73 efi_rts_work.arg3 = _arg3; \ 73 efi_rts_work.arg3 = _arg3; \
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index af3a20dd5aa4..99c99a5d57fe 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF
46 tristate "FSI master based on Aspeed ColdFire coprocessor" 46 tristate "FSI master based on Aspeed ColdFire coprocessor"
47 depends on GPIOLIB 47 depends on GPIOLIB
48 depends on GPIO_ASPEED 48 depends on GPIO_ASPEED
49 select GENERIC_ALLOCATOR
49 ---help--- 50 ---help---
50 This option enables a FSI master using the AST2400 and AST2500 GPIO 51 This option enables a FSI master using the AST2400 and AST2500 GPIO
51 lines driven by the internal ColdFire coprocessor. This requires 52 lines driven by the internal ColdFire coprocessor. This requires
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index df94021dd9d1..81dc01ac2351 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -20,7 +20,6 @@
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/cdev.h>
24#include <linux/list.h> 23#include <linux/list.h>
25 24
26#include <uapi/linux/fsi.h> 25#include <uapi/linux/fsi.h>
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
index b01ba4438501..31e891f00175 100644
--- a/drivers/gnss/serial.c
+++ b/drivers/gnss/serial.c
@@ -13,6 +13,7 @@
13#include <linux/of.h> 13#include <linux/of.h>
14#include <linux/pm.h> 14#include <linux/pm.h>
15#include <linux/pm_runtime.h> 15#include <linux/pm_runtime.h>
16#include <linux/sched.h>
16#include <linux/serdev.h> 17#include <linux/serdev.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18 19
@@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev,
63 int ret; 64 int ret;
64 65
65 /* write is only buffered synchronously */ 66 /* write is only buffered synchronously */
66 ret = serdev_device_write(serdev, buf, count, 0); 67 ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
67 if (ret < 0) 68 if (ret < 0)
68 return ret; 69 return ret;
69 70
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 79cb98950013..71d014edd167 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -16,6 +16,7 @@
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <linux/regulator/consumer.h> 18#include <linux/regulator/consumer.h>
19#include <linux/sched.h>
19#include <linux/serdev.h> 20#include <linux/serdev.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <linux/wait.h> 22#include <linux/wait.h>
@@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
83 int ret; 84 int ret;
84 85
85 /* write is only buffered synchronously */ 86 /* write is only buffered synchronously */
86 ret = serdev_device_write(serdev, buf, count, 0); 87 ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
87 if (ret < 0) 88 if (ret < 0)
88 return ret; 89 return ret;
89 90
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 5c1564fcc24e..bdb29e51b417 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -258,7 +258,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
258 chips->chip.set = davinci_gpio_set; 258 chips->chip.set = davinci_gpio_set;
259 259
260 chips->chip.ngpio = ngpio; 260 chips->chip.ngpio = ngpio;
261 chips->chip.base = -1; 261 chips->chip.base = pdata->no_auto_base ? pdata->base : -1;
262 262
263#ifdef CONFIG_OF_GPIO 263#ifdef CONFIG_OF_GPIO
264 chips->chip.of_gpio_n_cells = 2; 264 chips->chip.of_gpio_n_cells = 2;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 8269cffc2967..6a50f9f59c90 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -35,8 +35,8 @@
35#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__) 35#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
36 36
37enum { 37enum {
38 GPIO_MOCKUP_DIR_OUT = 0, 38 GPIO_MOCKUP_DIR_IN = 0,
39 GPIO_MOCKUP_DIR_IN = 1, 39 GPIO_MOCKUP_DIR_OUT = 1,
40}; 40};
41 41
42/* 42/*
@@ -131,7 +131,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
131{ 131{
132 struct gpio_mockup_chip *chip = gpiochip_get_data(gc); 132 struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
133 133
134 return chip->lines[offset].dir; 134 return !chip->lines[offset].dir;
135} 135}
136 136
137static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) 137static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index bfe4c5c9f41c..e9600b556f39 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
268 268
269 if (pxa_gpio_has_pinctrl()) { 269 if (pxa_gpio_has_pinctrl()) {
270 ret = pinctrl_gpio_direction_input(chip->base + offset); 270 ret = pinctrl_gpio_direction_input(chip->base + offset);
271 if (!ret) 271 if (ret)
272 return 0; 272 return ret;
273 } 273 }
274 274
275 spin_lock_irqsave(&gpio_lock, flags); 275 spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 230e41562462..a2cbb474901c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1295,7 +1295,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1295 gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); 1295 gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
1296 if (!gdev->descs) { 1296 if (!gdev->descs) {
1297 status = -ENOMEM; 1297 status = -ENOMEM;
1298 goto err_free_gdev; 1298 goto err_free_ida;
1299 } 1299 }
1300 1300
1301 if (chip->ngpio == 0) { 1301 if (chip->ngpio == 0) {
@@ -1427,8 +1427,9 @@ err_free_label:
1427 kfree_const(gdev->label); 1427 kfree_const(gdev->label);
1428err_free_descs: 1428err_free_descs:
1429 kfree(gdev->descs); 1429 kfree(gdev->descs);
1430err_free_gdev: 1430err_free_ida:
1431 ida_simple_remove(&gpio_ida, gdev->id); 1431 ida_simple_remove(&gpio_ida, gdev->id);
1432err_free_gdev:
1432 /* failures here can mean systems won't boot... */ 1433 /* failures here can mean systems won't boot... */
1433 pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, 1434 pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
1434 gdev->base, gdev->base + gdev->ngpio - 1, 1435 gdev->base, gdev->base + gdev->ngpio - 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d0102cfc8efb..104b2e0d893b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -151,6 +151,7 @@ extern int amdgpu_compute_multipipe;
151extern int amdgpu_gpu_recovery; 151extern int amdgpu_gpu_recovery;
152extern int amdgpu_emu_mode; 152extern int amdgpu_emu_mode;
153extern uint amdgpu_smu_memory_pool_size; 153extern uint amdgpu_smu_memory_pool_size;
154extern uint amdgpu_dc_feature_mask;
154extern struct amdgpu_mgpu_info mgpu_info; 155extern struct amdgpu_mgpu_info mgpu_info;
155 156
156#ifdef CONFIG_DRM_AMDGPU_SI 157#ifdef CONFIG_DRM_AMDGPU_SI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c31a8849e9f8..1580ec60b89f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -501,8 +501,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
501{ 501{
502 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; 502 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
503 503
504 amdgpu_dpm_switch_power_profile(adev, 504 if (adev->powerplay.pp_funcs &&
505 PP_SMC_POWER_PROFILE_COMPUTE, !idle); 505 adev->powerplay.pp_funcs->switch_power_profile)
506 amdgpu_dpm_switch_power_profile(adev,
507 PP_SMC_POWER_PROFILE_COMPUTE,
508 !idle);
506} 509}
507 510
508bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) 511bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6748cd7fc129..686a26de50f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
626 "dither", 626 "dither",
627 amdgpu_dither_enum_list, sz); 627 amdgpu_dither_enum_list, sz);
628 628
629 if (amdgpu_device_has_dc_support(adev)) {
630 adev->mode_info.max_bpc_property =
631 drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
632 if (!adev->mode_info.max_bpc_property)
633 return -ENOMEM;
634 }
635
629 return 0; 636 return 0;
630} 637}
631 638
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 943dbf3c5da1..8de55f7f1a3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
127int amdgpu_gpu_recovery = -1; /* auto */ 127int amdgpu_gpu_recovery = -1; /* auto */
128int amdgpu_emu_mode = 0; 128int amdgpu_emu_mode = 0;
129uint amdgpu_smu_memory_pool_size = 0; 129uint amdgpu_smu_memory_pool_size = 0;
130/* FBC (bit 0) disabled by default*/
131uint amdgpu_dc_feature_mask = 0;
132
130struct amdgpu_mgpu_info mgpu_info = { 133struct amdgpu_mgpu_info mgpu_info = {
131 .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), 134 .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
132}; 135};
@@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644);
631MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)"); 634MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
632#endif 635#endif
633 636
637/**
638 * DOC: dcfeaturemask (uint)
639 * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
640 * The default is the current set of stable display features.
641 */
642MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
643module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
644
634static const struct pci_device_id pciidlist[] = { 645static const struct pci_device_id pciidlist[] = {
635#ifdef CONFIG_DRM_AMDGPU_SI 646#ifdef CONFIG_DRM_AMDGPU_SI
636 {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, 647 {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b9e9e8b02fb7..d1b4d9b6aae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -339,6 +339,8 @@ struct amdgpu_mode_info {
339 struct drm_property *audio_property; 339 struct drm_property *audio_property;
340 /* FMT dithering */ 340 /* FMT dithering */
341 struct drm_property *dither_property; 341 struct drm_property *dither_property;
342 /* maximum number of bits per channel for monitor color */
343 struct drm_property *max_bpc_property;
342 /* hardcoded DFP edid from BIOS */ 344 /* hardcoded DFP edid from BIOS */
343 struct edid *bios_hardcoded_edid; 345 struct edid *bios_hardcoded_edid;
344 int bios_hardcoded_edid_size; 346 int bios_hardcoded_edid_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 352b30409060..0877ff9a9594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -181,7 +181,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
181 181
182 if (level == adev->vm_manager.root_level) 182 if (level == adev->vm_manager.root_level)
183 /* For the root directory */ 183 /* For the root directory */
184 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift; 184 return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
185 else if (level != AMDGPU_VM_PTB) 185 else if (level != AMDGPU_VM_PTB)
186 /* Everything in between */ 186 /* Everything in between */
187 return 512; 187 return 512;
@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1632 continue; 1632 continue;
1633 } 1633 }
1634 1634
1635 /* First check if the entry is already handled */
1636 if (cursor.pfn < frag_start) {
1637 cursor.entry->huge = true;
1638 amdgpu_vm_pt_next(adev, &cursor);
1639 continue;
1640 }
1641
1642 /* If it isn't already handled it can't be a huge page */ 1635 /* If it isn't already handled it can't be a huge page */
1643 if (cursor.entry->huge) { 1636 if (cursor.entry->huge) {
1644 /* Add the entry to the relocated list to update it. */ 1637 /* Add the entry to the relocated list to update it. */
@@ -1663,9 +1656,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1663 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1656 if (!amdgpu_vm_pt_descendant(adev, &cursor))
1664 return -ENOENT; 1657 return -ENOENT;
1665 continue; 1658 continue;
1666 } else if (frag >= parent_shift) { 1659 } else if (frag >= parent_shift &&
1660 cursor.level - 1 != adev->vm_manager.root_level) {
1667 /* If the fragment size is even larger than the parent 1661 /* If the fragment size is even larger than the parent
1668 * shift we should go up one level and check it again. 1662 * shift we should go up one level and check it again
1663 * unless one level up is the root level.
1669 */ 1664 */
1670 if (!amdgpu_vm_pt_ancestor(&cursor)) 1665 if (!amdgpu_vm_pt_ancestor(&cursor))
1671 return -ENOENT; 1666 return -ENOENT;
@@ -1673,10 +1668,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1673 } 1668 }
1674 1669
1675 /* Looks good so far, calculate parameters for the update */ 1670 /* Looks good so far, calculate parameters for the update */
1676 incr = AMDGPU_GPU_PAGE_SIZE << shift; 1671 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1677 mask = amdgpu_vm_entries_mask(adev, cursor.level); 1672 mask = amdgpu_vm_entries_mask(adev, cursor.level);
1678 pe_start = ((cursor.pfn >> shift) & mask) * 8; 1673 pe_start = ((cursor.pfn >> shift) & mask) * 8;
1679 entry_end = (mask + 1) << shift; 1674 entry_end = (uint64_t)(mask + 1) << shift;
1680 entry_end += cursor.pfn & ~(entry_end - 1); 1675 entry_end += cursor.pfn & ~(entry_end - 1);
1681 entry_end = min(entry_end, end); 1676 entry_end = min(entry_end, end);
1682 1677
@@ -1689,7 +1684,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1689 flags | AMDGPU_PTE_FRAG(frag)); 1684 flags | AMDGPU_PTE_FRAG(frag));
1690 1685
1691 pe_start += nptes * 8; 1686 pe_start += nptes * 8;
1692 dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift; 1687 dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
1693 1688
1694 frag_start = upd_end; 1689 frag_start = upd_end;
1695 if (frag_start >= frag_end) { 1690 if (frag_start >= frag_end) {
@@ -1701,8 +1696,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1701 } 1696 }
1702 } while (frag_start < entry_end); 1697 } while (frag_start < entry_end);
1703 1698
1704 if (frag >= shift) 1699 if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1700 /* Mark all child entries as huge */
1701 while (cursor.pfn < frag_start) {
1702 cursor.entry->huge = true;
1703 amdgpu_vm_pt_next(adev, &cursor);
1704 }
1705
1706 } else if (frag >= shift) {
1707 /* or just move on to the next on the same level. */
1705 amdgpu_vm_pt_next(adev, &cursor); 1708 amdgpu_vm_pt_next(adev, &cursor);
1709 }
1706 } 1710 }
1707 1711
1708 return 0; 1712 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6d7baf59d6e1..21363b2b2ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2440,12 +2440,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2440#endif 2440#endif
2441 2441
2442 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2442 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2443 udelay(50);
2443 2444
2444 /* carrizo do enable cp interrupt after cp inited */ 2445 /* carrizo do enable cp interrupt after cp inited */
2445 if (!(adev->flags & AMD_IS_APU)) 2446 if (!(adev->flags & AMD_IS_APU)) {
2446 gfx_v9_0_enable_gui_idle_interrupt(adev, true); 2447 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2447 2448 udelay(50);
2448 udelay(50); 2449 }
2449 2450
2450#ifdef AMDGPU_RLC_DEBUG_RETRY 2451#ifdef AMDGPU_RLC_DEBUG_RETRY
2451 /* RLC_GPM_GENERAL_6 : RLC Ucode version */ 2452 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index ceb7847b504f..bfa317ad20a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -72,7 +72,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
72 72
73 /* Program the system aperture low logical page number. */ 73 /* Program the system aperture low logical page number. */
74 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 74 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
75 min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); 75 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
76 76
77 if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) 77 if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
78 /* 78 /*
@@ -82,11 +82,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
82 * to get rid of the VM fault and hardware hang. 82 * to get rid of the VM fault and hardware hang.
83 */ 83 */
84 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 84 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
85 max((adev->gmc.vram_end >> 18) + 0x1, 85 max((adev->gmc.fb_end >> 18) + 0x1,
86 adev->gmc.agp_end >> 18)); 86 adev->gmc.agp_end >> 18));
87 else 87 else
88 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 88 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
89 max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); 89 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
90 90
91 /* Set default page address. */ 91 /* Set default page address. */
92 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start 92 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e1c2b4e9c7b2..73ad02aea2b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
46MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); 46MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
47MODULE_FIRMWARE("amdgpu/verde_mc.bin"); 47MODULE_FIRMWARE("amdgpu/verde_mc.bin");
48MODULE_FIRMWARE("amdgpu/oland_mc.bin"); 48MODULE_FIRMWARE("amdgpu/oland_mc.bin");
49MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
49MODULE_FIRMWARE("amdgpu/si58_mc.bin"); 50MODULE_FIRMWARE("amdgpu/si58_mc.bin");
50 51
51#define MC_SEQ_MISC0__MT__MASK 0xf0000000 52#define MC_SEQ_MISC0__MT__MASK 0xf0000000
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index fd23ba1226a5..a0db67adc34c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -90,7 +90,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
90 90
91 /* Program the system aperture low logical page number. */ 91 /* Program the system aperture low logical page number. */
92 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 92 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
93 min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); 93 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
94 94
95 if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) 95 if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
96 /* 96 /*
@@ -100,11 +100,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
100 * to get rid of the VM fault and hardware hang. 100 * to get rid of the VM fault and hardware hang.
101 */ 101 */
102 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 102 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
103 max((adev->gmc.vram_end >> 18) + 0x1, 103 max((adev->gmc.fb_end >> 18) + 0x1,
104 adev->gmc.agp_end >> 18)); 104 adev->gmc.agp_end >> 18));
105 else 105 else
106 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 106 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
107 max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); 107 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
108 108
109 /* Set default page address. */ 109 /* Set default page address. */
110 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 110 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index bf5e6a413dee..4cc0dcb1a187 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -65,6 +65,13 @@
65#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba 65#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
66#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 66#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
67 67
68/* for Vega20 register name change */
69#define mmHDP_MEM_POWER_CTRL 0x00d4
70#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
71#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
72#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
73#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
74#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
68/* 75/*
69 * Indirect registers accessor 76 * Indirect registers accessor
70 */ 77 */
@@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
870{ 877{
871 uint32_t def, data; 878 uint32_t def, data;
872 879
873 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); 880 if (adev->asic_type == CHIP_VEGA20) {
881 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
874 882
875 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) 883 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
876 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; 884 data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
877 else 885 HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
878 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; 886 HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
887 HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
888 else
889 data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
890 HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
891 HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
892 HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
879 893
880 if (def != data) 894 if (def != data)
881 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); 895 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
896 } else {
897 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
898
899 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
900 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
901 else
902 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
903
904 if (def != data)
905 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
906 }
882} 907}
883 908
884static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) 909static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a99f71797aa3..a0fda6f9252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
129 else 129 else
130 wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); 130 wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
131 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); 131 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
132 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); 132 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
133 133
134 /* set rptr, wptr to 0 */ 134 /* set rptr, wptr to 0 */
135 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); 135 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 2d4473557b0d..d13fc4fcb517 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
49 adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); 49 adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
50 adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); 50 adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
51 adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); 51 adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
52 adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
52 } 53 }
53 return 0; 54 return 0;
54} 55}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b0df6dc9a775..ca925200fe09 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -429,6 +429,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
429 adev->asic_type < CHIP_RAVEN) 429 adev->asic_type < CHIP_RAVEN)
430 init_data.flags.gpu_vm_support = true; 430 init_data.flags.gpu_vm_support = true;
431 431
432 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
433 init_data.flags.fbc_support = true;
434
432 /* Display Core create. */ 435 /* Display Core create. */
433 adev->dm.dc = dc_create(&init_data); 436 adev->dm.dc = dc_create(&init_data);
434 437
@@ -1524,13 +1527,6 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1524{ 1527{
1525 struct amdgpu_display_manager *dm = bl_get_data(bd); 1528 struct amdgpu_display_manager *dm = bl_get_data(bd);
1526 1529
1527 /*
1528 * PWM interperts 0 as 100% rather than 0% because of HW
1529 * limitation for level 0.So limiting minimum brightness level
1530 * to 1.
1531 */
1532 if (bd->props.brightness < 1)
1533 return 1;
1534 if (dc_link_set_backlight_level(dm->backlight_link, 1530 if (dc_link_set_backlight_level(dm->backlight_link,
1535 bd->props.brightness, 0, 0)) 1531 bd->props.brightness, 0, 0))
1536 return 0; 1532 return 0;
@@ -2362,8 +2358,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2362static enum dc_color_depth 2358static enum dc_color_depth
2363convert_color_depth_from_display_info(const struct drm_connector *connector) 2359convert_color_depth_from_display_info(const struct drm_connector *connector)
2364{ 2360{
2361 struct dm_connector_state *dm_conn_state =
2362 to_dm_connector_state(connector->state);
2365 uint32_t bpc = connector->display_info.bpc; 2363 uint32_t bpc = connector->display_info.bpc;
2366 2364
2365 /* TODO: Remove this when there's support for max_bpc in drm */
2366 if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2367 /* Round down to nearest even number. */
2368 bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2369
2367 switch (bpc) { 2370 switch (bpc) {
2368 case 0: 2371 case 0:
2369 /* 2372 /*
@@ -2707,18 +2710,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2707 drm_connector = &aconnector->base; 2710 drm_connector = &aconnector->base;
2708 2711
2709 if (!aconnector->dc_sink) { 2712 if (!aconnector->dc_sink) {
2710 /* 2713 if (!aconnector->mst_port) {
2711 * Create dc_sink when necessary to MST 2714 sink = create_fake_sink(aconnector);
2712 * Don't apply fake_sink to MST 2715 if (!sink)
2713 */ 2716 return stream;
2714 if (aconnector->mst_port) {
2715 dm_dp_mst_dc_sink_create(drm_connector);
2716 return stream;
2717 } 2717 }
2718
2719 sink = create_fake_sink(aconnector);
2720 if (!sink)
2721 return stream;
2722 } else { 2718 } else {
2723 sink = aconnector->dc_sink; 2719 sink = aconnector->dc_sink;
2724 } 2720 }
@@ -2954,6 +2950,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2954 } else if (property == adev->mode_info.underscan_property) { 2950 } else if (property == adev->mode_info.underscan_property) {
2955 dm_new_state->underscan_enable = val; 2951 dm_new_state->underscan_enable = val;
2956 ret = 0; 2952 ret = 0;
2953 } else if (property == adev->mode_info.max_bpc_property) {
2954 dm_new_state->max_bpc = val;
2955 ret = 0;
2957 } 2956 }
2958 2957
2959 return ret; 2958 return ret;
@@ -2996,6 +2995,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2996 } else if (property == adev->mode_info.underscan_property) { 2995 } else if (property == adev->mode_info.underscan_property) {
2997 *val = dm_state->underscan_enable; 2996 *val = dm_state->underscan_enable;
2998 ret = 0; 2997 ret = 0;
2998 } else if (property == adev->mode_info.max_bpc_property) {
2999 *val = dm_state->max_bpc;
3000 ret = 0;
2999 } 3001 }
3000 return ret; 3002 return ret;
3001} 3003}
@@ -3308,7 +3310,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
3308static const struct drm_plane_funcs dm_plane_funcs = { 3310static const struct drm_plane_funcs dm_plane_funcs = {
3309 .update_plane = drm_atomic_helper_update_plane, 3311 .update_plane = drm_atomic_helper_update_plane,
3310 .disable_plane = drm_atomic_helper_disable_plane, 3312 .disable_plane = drm_atomic_helper_disable_plane,
3311 .destroy = drm_plane_cleanup, 3313 .destroy = drm_primary_helper_destroy,
3312 .reset = dm_drm_plane_reset, 3314 .reset = dm_drm_plane_reset,
3313 .atomic_duplicate_state = dm_drm_plane_duplicate_state, 3315 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
3314 .atomic_destroy_state = dm_drm_plane_destroy_state, 3316 .atomic_destroy_state = dm_drm_plane_destroy_state,
@@ -3806,6 +3808,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3806 drm_object_attach_property(&aconnector->base.base, 3808 drm_object_attach_property(&aconnector->base.base,
3807 adev->mode_info.underscan_vborder_property, 3809 adev->mode_info.underscan_vborder_property,
3808 0); 3810 0);
3811 drm_object_attach_property(&aconnector->base.base,
3812 adev->mode_info.max_bpc_property,
3813 0);
3809 3814
3810} 3815}
3811 3816
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 978b34a5011c..6e069d777ab2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -160,8 +160,6 @@ struct amdgpu_dm_connector {
160 struct mutex hpd_lock; 160 struct mutex hpd_lock;
161 161
162 bool fake_enable; 162 bool fake_enable;
163
164 bool mst_connected;
165}; 163};
166 164
167#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) 165#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -206,6 +204,7 @@ struct dm_connector_state {
206 enum amdgpu_rmx_type scaling; 204 enum amdgpu_rmx_type scaling;
207 uint8_t underscan_vborder; 205 uint8_t underscan_vborder;
208 uint8_t underscan_hborder; 206 uint8_t underscan_hborder;
207 uint8_t max_bpc;
209 bool underscan_enable; 208 bool underscan_enable;
210 bool freesync_enable; 209 bool freesync_enable;
211 bool freesync_capable; 210 bool freesync_capable;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 03601d717fed..1b0d209d8367 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
205 .atomic_get_property = amdgpu_dm_connector_atomic_get_property 205 .atomic_get_property = amdgpu_dm_connector_atomic_get_property
206}; 206};
207 207
208void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
209{
210 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
211 struct dc_sink *dc_sink;
212 struct dc_sink_init_data init_params = {
213 .link = aconnector->dc_link,
214 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
215
216 /* FIXME none of this is safe. we shouldn't touch aconnector here in
217 * atomic_check
218 */
219
220 /*
221 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
222 */
223 if (!aconnector->port || !aconnector->port->aux.ddc.algo)
224 return;
225
226 ASSERT(aconnector->edid);
227
228 dc_sink = dc_link_add_remote_sink(
229 aconnector->dc_link,
230 (uint8_t *)aconnector->edid,
231 (aconnector->edid->extensions + 1) * EDID_LENGTH,
232 &init_params);
233
234 dc_sink->priv = aconnector;
235 aconnector->dc_sink = dc_sink;
236
237 if (aconnector->dc_sink)
238 amdgpu_dm_update_freesync_caps(
239 connector, aconnector->edid);
240}
241
242static int dm_dp_mst_get_modes(struct drm_connector *connector) 208static int dm_dp_mst_get_modes(struct drm_connector *connector)
243{ 209{
244 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 210 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
319 struct amdgpu_device *adev = dev->dev_private; 285 struct amdgpu_device *adev = dev->dev_private;
320 struct amdgpu_encoder *amdgpu_encoder; 286 struct amdgpu_encoder *amdgpu_encoder;
321 struct drm_encoder *encoder; 287 struct drm_encoder *encoder;
322 const struct drm_connector_helper_funcs *connector_funcs =
323 connector->base.helper_private;
324 struct drm_encoder *enc_master =
325 connector_funcs->best_encoder(&connector->base);
326 288
327 DRM_DEBUG_KMS("enc master is %p\n", enc_master);
328 amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); 289 amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
329 if (!amdgpu_encoder) 290 if (!amdgpu_encoder)
330 return NULL; 291 return NULL;
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
354 struct amdgpu_device *adev = dev->dev_private; 315 struct amdgpu_device *adev = dev->dev_private;
355 struct amdgpu_dm_connector *aconnector; 316 struct amdgpu_dm_connector *aconnector;
356 struct drm_connector *connector; 317 struct drm_connector *connector;
357 struct drm_connector_list_iter conn_iter;
358
359 drm_connector_list_iter_begin(dev, &conn_iter);
360 drm_for_each_connector_iter(connector, &conn_iter) {
361 aconnector = to_amdgpu_dm_connector(connector);
362 if (aconnector->mst_port == master
363 && !aconnector->port) {
364 DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
365 aconnector, connector->base.id, aconnector->mst_port);
366
367 aconnector->port = port;
368 drm_connector_set_path_property(connector, pathprop);
369
370 drm_connector_list_iter_end(&conn_iter);
371 aconnector->mst_connected = true;
372 return &aconnector->base;
373 }
374 }
375 drm_connector_list_iter_end(&conn_iter);
376 318
377 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 319 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
378 if (!aconnector) 320 if (!aconnector)
@@ -400,10 +342,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
400 master->connector_id); 342 master->connector_id);
401 343
402 aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); 344 aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
345 drm_connector_attach_encoder(&aconnector->base,
346 &aconnector->mst_encoder->base);
403 347
404 /*
405 * TODO: understand why this one is needed
406 */
407 drm_object_attach_property( 348 drm_object_attach_property(
408 &connector->base, 349 &connector->base,
409 dev->mode_config.path_property, 350 dev->mode_config.path_property,
@@ -421,8 +362,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
421 */ 362 */
422 amdgpu_dm_connector_funcs_reset(connector); 363 amdgpu_dm_connector_funcs_reset(connector);
423 364
424 aconnector->mst_connected = true;
425
426 DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", 365 DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
427 aconnector, connector->base.id, aconnector->mst_port); 366 aconnector, connector->base.id, aconnector->mst_port);
428 367
@@ -434,6 +373,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
434static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, 373static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
435 struct drm_connector *connector) 374 struct drm_connector *connector)
436{ 375{
376 struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
377 struct drm_device *dev = master->base.dev;
378 struct amdgpu_device *adev = dev->dev_private;
437 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 379 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
438 380
439 DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", 381 DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -447,7 +389,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
447 aconnector->dc_sink = NULL; 389 aconnector->dc_sink = NULL;
448 } 390 }
449 391
450 aconnector->mst_connected = false; 392 drm_connector_unregister(connector);
393 if (adev->mode_info.rfbdev)
394 drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
395 drm_connector_put(connector);
451} 396}
452 397
453static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) 398static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -458,18 +403,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
458 drm_kms_helper_hotplug_event(dev); 403 drm_kms_helper_hotplug_event(dev);
459} 404}
460 405
461static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
462{
463 mutex_lock(&connector->dev->mode_config.mutex);
464 drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
465 mutex_unlock(&connector->dev->mode_config.mutex);
466}
467
468static void dm_dp_mst_register_connector(struct drm_connector *connector) 406static void dm_dp_mst_register_connector(struct drm_connector *connector)
469{ 407{
470 struct drm_device *dev = connector->dev; 408 struct drm_device *dev = connector->dev;
471 struct amdgpu_device *adev = dev->dev_private; 409 struct amdgpu_device *adev = dev->dev_private;
472 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
473 410
474 if (adev->mode_info.rfbdev) 411 if (adev->mode_info.rfbdev)
475 drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); 412 drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -477,9 +414,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
477 DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); 414 DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
478 415
479 drm_connector_register(connector); 416 drm_connector_register(connector);
480
481 if (aconnector->mst_connected)
482 dm_dp_mst_link_status_reset(connector);
483} 417}
484 418
485static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { 419static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 8cf51da26657..2da851b40042 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector;
31 31
32void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, 32void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
33 struct amdgpu_dm_connector *aconnector); 33 struct amdgpu_dm_connector *aconnector);
34void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
35 34
36#endif 35#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fb04a4ad141f..5da2186b3615 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1722,7 +1722,7 @@ static void write_i2c_retimer_setting(
1722 i2c_success = i2c_write(pipe_ctx, slave_address, 1722 i2c_success = i2c_write(pipe_ctx, slave_address,
1723 buffer, sizeof(buffer)); 1723 buffer, sizeof(buffer));
1724 RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ 1724 RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
1725 offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", 1725 offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
1726 slave_address, buffer[0], buffer[1], i2c_success?1:0); 1726 slave_address, buffer[0], buffer[1], i2c_success?1:0);
1727 if (!i2c_success) 1727 if (!i2c_success)
1728 /* Write failure */ 1728 /* Write failure */
@@ -1734,7 +1734,7 @@ static void write_i2c_retimer_setting(
1734 i2c_success = i2c_write(pipe_ctx, slave_address, 1734 i2c_success = i2c_write(pipe_ctx, slave_address,
1735 buffer, sizeof(buffer)); 1735 buffer, sizeof(buffer));
1736 RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ 1736 RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
1737 offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", 1737 offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
1738 slave_address, buffer[0], buffer[1], i2c_success?1:0); 1738 slave_address, buffer[0], buffer[1], i2c_success?1:0);
1739 if (!i2c_success) 1739 if (!i2c_success)
1740 /* Write failure */ 1740 /* Write failure */
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 199527171100..b57fa61b3034 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -169,6 +169,7 @@ struct link_training_settings;
169struct dc_config { 169struct dc_config {
170 bool gpu_vm_support; 170 bool gpu_vm_support;
171 bool disable_disp_pll_sharing; 171 bool disable_disp_pll_sharing;
172 bool fbc_support;
172}; 173};
173 174
174enum visual_confirm { 175enum visual_confirm {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b75ede5f84f7..b459867a05b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1736,7 +1736,12 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
1736 if (events->force_trigger) 1736 if (events->force_trigger)
1737 value |= 0x1; 1737 value |= 0x1;
1738 1738
1739 value |= 0x84; 1739 if (num_pipes) {
1740 struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
1741
1742 if (dc->fbc_compressor)
1743 value |= 0x84;
1744 }
1740 1745
1741 for (i = 0; i < num_pipes; i++) 1746 for (i = 0; i < num_pipes; i++)
1742 pipe_ctx[i]->stream_res.tg->funcs-> 1747 pipe_ctx[i]->stream_res.tg->funcs->
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index e3624ca24574..7c9fd9052ee2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1362,7 +1362,8 @@ static bool construct(
1362 pool->base.sw_i2cs[i] = NULL; 1362 pool->base.sw_i2cs[i] = NULL;
1363 } 1363 }
1364 1364
1365 dc->fbc_compressor = dce110_compressor_create(ctx); 1365 if (dc->config.fbc_support)
1366 dc->fbc_compressor = dce110_compressor_create(ctx);
1366 1367
1367 if (!underlay_create(ctx, &pool->base)) 1368 if (!underlay_create(ctx, &pool->base))
1368 goto res_create_fail; 1369 goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 2083c308007c..470d7b89071a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -133,6 +133,10 @@ enum PP_FEATURE_MASK {
133 PP_AVFS_MASK = 0x40000, 133 PP_AVFS_MASK = 0x40000,
134}; 134};
135 135
136enum DC_FEATURE_MASK {
137 DC_FBC_MASK = 0x1,
138};
139
136/** 140/**
137 * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks 141 * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
138 */ 142 */
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index d2e7c0fa96c2..8eb0bb241210 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 {
1325 struct atom_common_table_header table_header; 1325 struct atom_common_table_header table_header;
1326 uint8_t smuip_min_ver; 1326 uint8_t smuip_min_ver;
1327 uint8_t smuip_max_ver; 1327 uint8_t smuip_max_ver;
1328 uint8_t smu_rsd1; 1328 uint8_t waflclk_ss_mode;
1329 uint8_t gpuclk_ss_mode; 1329 uint8_t gpuclk_ss_mode;
1330 uint16_t sclk_ss_percentage; 1330 uint16_t sclk_ss_percentage;
1331 uint16_t sclk_ss_rate_10hz; 1331 uint16_t sclk_ss_rate_10hz;
@@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 {
1355 uint32_t syspll3_1_vco_freq_10khz; 1355 uint32_t syspll3_1_vco_freq_10khz;
1356 uint32_t bootup_fclk_10khz; 1356 uint32_t bootup_fclk_10khz;
1357 uint32_t bootup_waflclk_10khz; 1357 uint32_t bootup_waflclk_10khz;
1358 uint32_t reserved[3]; 1358 uint32_t smu_info_caps;
1359 uint16_t waflclk_ss_percentage; // in unit of 0.001%
1360 uint16_t smuinitoffset;
1361 uint32_t reserved;
1359}; 1362};
1360 1363
1361/* 1364/*
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index ed35ec0341e6..88f6b35ea6fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4525,12 +4525,12 @@ static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4525 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4525 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4526 struct smu7_single_dpm_table *golden_sclk_table = 4526 struct smu7_single_dpm_table *golden_sclk_table =
4527 &(data->golden_dpm_table.sclk_table); 4527 &(data->golden_dpm_table.sclk_table);
4528 int value; 4528 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4529 int golden_value = golden_sclk_table->dpm_levels
4530 [golden_sclk_table->count - 1].value;
4529 4531
4530 value = (sclk_table->dpm_levels[sclk_table->count - 1].value - 4532 value -= golden_value;
4531 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 4533 value = DIV_ROUND_UP(value * 100, golden_value);
4532 100 /
4533 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4534 4534
4535 return value; 4535 return value;
4536} 4536}
@@ -4567,12 +4567,12 @@ static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4567 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4567 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4568 struct smu7_single_dpm_table *golden_mclk_table = 4568 struct smu7_single_dpm_table *golden_mclk_table =
4569 &(data->golden_dpm_table.mclk_table); 4569 &(data->golden_dpm_table.mclk_table);
4570 int value; 4570 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4571 int golden_value = golden_mclk_table->dpm_levels
4572 [golden_mclk_table->count - 1].value;
4571 4573
4572 value = (mclk_table->dpm_levels[mclk_table->count - 1].value - 4574 value -= golden_value;
4573 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 4575 value = DIV_ROUND_UP(value * 100, golden_value);
4574 100 /
4575 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4576 4576
4577 return value; 4577 return value;
4578} 4578}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 99a33c33a32c..101c09b212ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
713 for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) { 713 for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
714 table->WatermarkRow[1][i].MinClock = 714 table->WatermarkRow[1][i].MinClock =
715 cpu_to_le16((uint16_t) 715 cpu_to_le16((uint16_t)
716 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) / 716 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
717 1000); 717 1000));
718 table->WatermarkRow[1][i].MaxClock = 718 table->WatermarkRow[1][i].MaxClock =
719 cpu_to_le16((uint16_t) 719 cpu_to_le16((uint16_t)
720 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) / 720 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
721 1000); 721 1000));
722 table->WatermarkRow[1][i].MinUclk = 722 table->WatermarkRow[1][i].MinUclk =
723 cpu_to_le16((uint16_t) 723 cpu_to_le16((uint16_t)
724 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) / 724 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
725 1000); 725 1000));
726 table->WatermarkRow[1][i].MaxUclk = 726 table->WatermarkRow[1][i].MaxUclk =
727 cpu_to_le16((uint16_t) 727 cpu_to_le16((uint16_t)
728 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) / 728 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
729 1000); 729 1000));
730 table->WatermarkRow[1][i].WmSetting = (uint8_t) 730 table->WatermarkRow[1][i].WmSetting = (uint8_t)
731 wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; 731 wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
732 } 732 }
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
734 for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) { 734 for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
735 table->WatermarkRow[0][i].MinClock = 735 table->WatermarkRow[0][i].MinClock =
736 cpu_to_le16((uint16_t) 736 cpu_to_le16((uint16_t)
737 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) / 737 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
738 1000); 738 1000));
739 table->WatermarkRow[0][i].MaxClock = 739 table->WatermarkRow[0][i].MaxClock =
740 cpu_to_le16((uint16_t) 740 cpu_to_le16((uint16_t)
741 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) / 741 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
742 1000); 742 1000));
743 table->WatermarkRow[0][i].MinUclk = 743 table->WatermarkRow[0][i].MinUclk =
744 cpu_to_le16((uint16_t) 744 cpu_to_le16((uint16_t)
745 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) / 745 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
746 1000); 746 1000));
747 table->WatermarkRow[0][i].MaxUclk = 747 table->WatermarkRow[0][i].MaxUclk =
748 cpu_to_le16((uint16_t) 748 cpu_to_le16((uint16_t)
749 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) / 749 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
750 1000); 750 1000));
751 table->WatermarkRow[0][i].WmSetting = (uint8_t) 751 table->WatermarkRow[0][i].WmSetting = (uint8_t)
752 wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; 752 wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
753 } 753 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 8c4db86bb4b7..e2bc6e0c229f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4522,15 +4522,13 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4522 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); 4522 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4523 struct vega10_single_dpm_table *golden_sclk_table = 4523 struct vega10_single_dpm_table *golden_sclk_table =
4524 &(data->golden_dpm_table.gfx_table); 4524 &(data->golden_dpm_table.gfx_table);
4525 int value; 4525 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4526 4526 int golden_value = golden_sclk_table->dpm_levels
4527 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4528 golden_sclk_table->dpm_levels
4529 [golden_sclk_table->count - 1].value) *
4530 100 /
4531 golden_sclk_table->dpm_levels
4532 [golden_sclk_table->count - 1].value; 4527 [golden_sclk_table->count - 1].value;
4533 4528
4529 value -= golden_value;
4530 value = DIV_ROUND_UP(value * 100, golden_value);
4531
4534 return value; 4532 return value;
4535} 4533}
4536 4534
@@ -4575,16 +4573,13 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4575 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); 4573 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4576 struct vega10_single_dpm_table *golden_mclk_table = 4574 struct vega10_single_dpm_table *golden_mclk_table =
4577 &(data->golden_dpm_table.mem_table); 4575 &(data->golden_dpm_table.mem_table);
4578 int value; 4576 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4579 4577 int golden_value = golden_mclk_table->dpm_levels
4580 value = (mclk_table->dpm_levels
4581 [mclk_table->count - 1].value -
4582 golden_mclk_table->dpm_levels
4583 [golden_mclk_table->count - 1].value) *
4584 100 /
4585 golden_mclk_table->dpm_levels
4586 [golden_mclk_table->count - 1].value; 4578 [golden_mclk_table->count - 1].value;
4587 4579
4580 value -= golden_value;
4581 value = DIV_ROUND_UP(value * 100, golden_value);
4582
4588 return value; 4583 return value;
4589} 4584}
4590 4585
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 74bc37308dc0..54364444ecd1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2243,12 +2243,12 @@ static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
2243 struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); 2243 struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
2244 struct vega12_single_dpm_table *golden_sclk_table = 2244 struct vega12_single_dpm_table *golden_sclk_table =
2245 &(data->golden_dpm_table.gfx_table); 2245 &(data->golden_dpm_table.gfx_table);
2246 int value; 2246 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
2247 int golden_value = golden_sclk_table->dpm_levels
2248 [golden_sclk_table->count - 1].value;
2247 2249
2248 value = (sclk_table->dpm_levels[sclk_table->count - 1].value - 2250 value -= golden_value;
2249 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 2251 value = DIV_ROUND_UP(value * 100, golden_value);
2250 100 /
2251 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
2252 2252
2253 return value; 2253 return value;
2254} 2254}
@@ -2264,16 +2264,13 @@ static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
2264 struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); 2264 struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
2265 struct vega12_single_dpm_table *golden_mclk_table = 2265 struct vega12_single_dpm_table *golden_mclk_table =
2266 &(data->golden_dpm_table.mem_table); 2266 &(data->golden_dpm_table.mem_table);
2267 int value; 2267 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
2268 2268 int golden_value = golden_mclk_table->dpm_levels
2269 value = (mclk_table->dpm_levels
2270 [mclk_table->count - 1].value -
2271 golden_mclk_table->dpm_levels
2272 [golden_mclk_table->count - 1].value) *
2273 100 /
2274 golden_mclk_table->dpm_levels
2275 [golden_mclk_table->count - 1].value; 2269 [golden_mclk_table->count - 1].value;
2276 2270
2271 value -= golden_value;
2272 value = DIV_ROUND_UP(value * 100, golden_value);
2273
2277 return value; 2274 return value;
2278} 2275}
2279 2276
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 57143d51e3ee..b4eadd47f3a4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -75,7 +75,17 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
75 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 75 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
76 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 76 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
77 77
78 data->registry_data.disallowed_features = 0x0; 78 /*
79 * Disable the following features for now:
80 * GFXCLK DS
81 * SOCLK DS
82 * LCLK DS
83 * DCEFCLK DS
84 * FCLK DS
85 * MP1CLK DS
86 * MP0CLK DS
87 */
88 data->registry_data.disallowed_features = 0xE0041C00;
79 data->registry_data.od_state_in_dc_support = 0; 89 data->registry_data.od_state_in_dc_support = 0;
80 data->registry_data.thermal_support = 1; 90 data->registry_data.thermal_support = 1;
81 data->registry_data.skip_baco_hardware = 0; 91 data->registry_data.skip_baco_hardware = 0;
@@ -120,6 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
120 data->registry_data.disable_auto_wattman = 1; 130 data->registry_data.disable_auto_wattman = 1;
121 data->registry_data.auto_wattman_debug = 0; 131 data->registry_data.auto_wattman_debug = 0;
122 data->registry_data.auto_wattman_sample_period = 100; 132 data->registry_data.auto_wattman_sample_period = 100;
133 data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
123 data->registry_data.auto_wattman_threshold = 50; 134 data->registry_data.auto_wattman_threshold = 50;
124 data->registry_data.gfxoff_controlled_by_driver = 1; 135 data->registry_data.gfxoff_controlled_by_driver = 1;
125 data->gfxoff_allowed = false; 136 data->gfxoff_allowed = false;
@@ -829,6 +840,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
829 return 0; 840 return 0;
830} 841}
831 842
843static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
844{
845 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
846
847 if (data->smu_features[GNLD_DPM_UCLK].enabled)
848 return smum_send_msg_to_smc_with_parameter(hwmgr,
849 PPSMC_MSG_SetUclkFastSwitch,
850 1);
851
852 return 0;
853}
854
855static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
856{
857 struct vega20_hwmgr *data =
858 (struct vega20_hwmgr *)(hwmgr->backend);
859
860 return smum_send_msg_to_smc_with_parameter(hwmgr,
861 PPSMC_MSG_SetFclkGfxClkRatio,
862 data->registry_data.fclk_gfxclk_ratio);
863}
864
832static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) 865static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
833{ 866{
834 struct vega20_hwmgr *data = 867 struct vega20_hwmgr *data =
@@ -1290,12 +1323,13 @@ static int vega20_get_sclk_od(
1290 &(data->dpm_table.gfx_table); 1323 &(data->dpm_table.gfx_table);
1291 struct vega20_single_dpm_table *golden_sclk_table = 1324 struct vega20_single_dpm_table *golden_sclk_table =
1292 &(data->golden_dpm_table.gfx_table); 1325 &(data->golden_dpm_table.gfx_table);
1293 int value; 1326 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
1327 int golden_value = golden_sclk_table->dpm_levels
1328 [golden_sclk_table->count - 1].value;
1294 1329
1295 /* od percentage */ 1330 /* od percentage */
1296 value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value - 1331 value -= golden_value;
1297 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100, 1332 value = DIV_ROUND_UP(value * 100, golden_value);
1298 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
1299 1333
1300 return value; 1334 return value;
1301} 1335}
@@ -1335,12 +1369,13 @@ static int vega20_get_mclk_od(
1335 &(data->dpm_table.mem_table); 1369 &(data->dpm_table.mem_table);
1336 struct vega20_single_dpm_table *golden_mclk_table = 1370 struct vega20_single_dpm_table *golden_mclk_table =
1337 &(data->golden_dpm_table.mem_table); 1371 &(data->golden_dpm_table.mem_table);
1338 int value; 1372 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
1373 int golden_value = golden_mclk_table->dpm_levels
1374 [golden_mclk_table->count - 1].value;
1339 1375
1340 /* od percentage */ 1376 /* od percentage */
1341 value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value - 1377 value -= golden_value;
1342 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100, 1378 value = DIV_ROUND_UP(value * 100, golden_value);
1343 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
1344 1379
1345 return value; 1380 return value;
1346} 1381}
@@ -1532,6 +1567,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1532 "[EnableDPMTasks] Failed to enable all smu features!", 1567 "[EnableDPMTasks] Failed to enable all smu features!",
1533 return result); 1568 return result);
1534 1569
1570 result = vega20_notify_smc_display_change(hwmgr);
1571 PP_ASSERT_WITH_CODE(!result,
1572 "[EnableDPMTasks] Failed to notify smc display change!",
1573 return result);
1574
1575 result = vega20_send_clock_ratio(hwmgr);
1576 PP_ASSERT_WITH_CODE(!result,
1577 "[EnableDPMTasks] Failed to send clock ratio!",
1578 return result);
1579
1535 /* Initialize UVD/VCE powergating state */ 1580 /* Initialize UVD/VCE powergating state */
1536 vega20_init_powergate_state(hwmgr); 1581 vega20_init_powergate_state(hwmgr);
1537 1582
@@ -1972,19 +2017,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1972 return ret; 2017 return ret;
1973} 2018}
1974 2019
1975static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
1976 bool has_disp)
1977{
1978 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1979
1980 if (data->smu_features[GNLD_DPM_UCLK].enabled)
1981 return smum_send_msg_to_smc_with_parameter(hwmgr,
1982 PPSMC_MSG_SetUclkFastSwitch,
1983 has_disp ? 1 : 0);
1984
1985 return 0;
1986}
1987
1988int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 2020int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1989 struct pp_display_clock_request *clock_req) 2021 struct pp_display_clock_request *clock_req)
1990{ 2022{
@@ -2044,13 +2076,6 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
2044 struct pp_display_clock_request clock_req; 2076 struct pp_display_clock_request clock_req;
2045 int ret = 0; 2077 int ret = 0;
2046 2078
2047 if ((hwmgr->display_config->num_display > 1) &&
2048 !hwmgr->display_config->multi_monitor_in_sync &&
2049 !hwmgr->display_config->nb_pstate_switch_disable)
2050 vega20_notify_smc_display_change(hwmgr, false);
2051 else
2052 vega20_notify_smc_display_change(hwmgr, true);
2053
2054 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; 2079 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
2055 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; 2080 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
2056 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; 2081 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 56fe6a0d42e8..25faaa5c5b10 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -328,6 +328,7 @@ struct vega20_registry_data {
328 uint8_t disable_auto_wattman; 328 uint8_t disable_auto_wattman;
329 uint32_t auto_wattman_debug; 329 uint32_t auto_wattman_debug;
330 uint32_t auto_wattman_sample_period; 330 uint32_t auto_wattman_sample_period;
331 uint32_t fclk_gfxclk_ratio;
331 uint8_t auto_wattman_threshold; 332 uint8_t auto_wattman_threshold;
332 uint8_t log_avfs_param; 333 uint8_t log_avfs_param;
333 uint8_t enable_enginess; 334 uint8_t enable_enginess;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index 45d64a81e945..4f63a736ea0e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -105,7 +105,8 @@
105#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B 105#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B
106#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C 106#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C
107#define PPSMC_MSG_WaflTest 0x4D 107#define PPSMC_MSG_WaflTest 0x4D
108// Unused ID 0x4E to 0x50 108#define PPSMC_MSG_SetFclkGfxClkRatio 0x4E
109// Unused ID 0x4F to 0x50
109#define PPSMC_MSG_AllowGfxOff 0x51 110#define PPSMC_MSG_AllowGfxOff 0x51
110#define PPSMC_MSG_DisallowGfxOff 0x52 111#define PPSMC_MSG_DisallowGfxOff 0x52
111#define PPSMC_MSG_GetPptLimit 0x53 112#define PPSMC_MSG_GetPptLimit 0x53
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 69dab82a3771..bf589c53b908 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
60 60
61MODULE_DEVICE_TABLE(pci, pciidlist); 61MODULE_DEVICE_TABLE(pci, pciidlist);
62 62
63static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
64{
65 struct apertures_struct *ap;
66 bool primary = false;
67
68 ap = alloc_apertures(1);
69 if (!ap)
70 return;
71
72 ap->ranges[0].base = pci_resource_start(pdev, 0);
73 ap->ranges[0].size = pci_resource_len(pdev, 0);
74
75#ifdef CONFIG_X86
76 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
77#endif
78 drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
79 kfree(ap);
80}
81
63static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 82static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
64{ 83{
84 ast_kick_out_firmware_fb(pdev);
85
65 return drm_get_pci_dev(pdev, ent, &driver); 86 return drm_get_pci_dev(pdev, ent, &driver);
66} 87}
67 88
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index dac355812adc..373700c05a00 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -583,7 +583,8 @@ void ast_driver_unload(struct drm_device *dev)
583 drm_mode_config_cleanup(dev); 583 drm_mode_config_cleanup(dev);
584 584
585 ast_mm_fini(ast); 585 ast_mm_fini(ast);
586 pci_iounmap(dev->pdev, ast->ioregs); 586 if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
587 pci_iounmap(dev->pdev, ast->ioregs);
587 pci_iounmap(dev->pdev, ast->regs); 588 pci_iounmap(dev->pdev, ast->regs);
588 kfree(ast); 589 kfree(ast);
589} 590}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 5e77d456d9bb..8bb355d5d43d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
568 } 568 }
569 ast_bo_unreserve(bo); 569 ast_bo_unreserve(bo);
570 570
571 ast_set_offset_reg(crtc);
571 ast_set_start_address_crt1(crtc, (u32)gpu_addr); 572 ast_set_start_address_crt1(crtc, (u32)gpu_addr);
572 573
573 return 0; 574 return 0;
@@ -972,9 +973,21 @@ static int get_clock(void *i2c_priv)
972{ 973{
973 struct ast_i2c_chan *i2c = i2c_priv; 974 struct ast_i2c_chan *i2c = i2c_priv;
974 struct ast_private *ast = i2c->dev->dev_private; 975 struct ast_private *ast = i2c->dev->dev_private;
975 uint32_t val; 976 uint32_t val, val2, count, pass;
977
978 count = 0;
979 pass = 0;
980 val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
981 do {
982 val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
983 if (val == val2) {
984 pass++;
985 } else {
986 pass = 0;
987 val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
988 }
989 } while ((pass < 5) && (count++ < 0x10000));
976 990
977 val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
978 return val & 1 ? 1 : 0; 991 return val & 1 ? 1 : 0;
979} 992}
980 993
@@ -982,9 +995,21 @@ static int get_data(void *i2c_priv)
982{ 995{
983 struct ast_i2c_chan *i2c = i2c_priv; 996 struct ast_i2c_chan *i2c = i2c_priv;
984 struct ast_private *ast = i2c->dev->dev_private; 997 struct ast_private *ast = i2c->dev->dev_private;
985 uint32_t val; 998 uint32_t val, val2, count, pass;
999
1000 count = 0;
1001 pass = 0;
1002 val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
1003 do {
1004 val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
1005 if (val == val2) {
1006 pass++;
1007 } else {
1008 pass = 0;
1009 val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
1010 }
1011 } while ((pass < 5) && (count++ < 0x10000));
986 1012
987 val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
988 return val & 1 ? 1 : 0; 1013 return val & 1 ? 1 : 0;
989} 1014}
990 1015
@@ -997,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
997 1022
998 for (i = 0; i < 0x10000; i++) { 1023 for (i = 0; i < 0x10000; i++) {
999 ujcrb7 = ((clock & 0x01) ? 0 : 1); 1024 ujcrb7 = ((clock & 0x01) ? 0 : 1);
1000 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7); 1025 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
1001 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); 1026 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
1002 if (ujcrb7 == jtemp) 1027 if (ujcrb7 == jtemp)
1003 break; 1028 break;
@@ -1013,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
1013 1038
1014 for (i = 0; i < 0x10000; i++) { 1039 for (i = 0; i < 0x10000; i++) {
1015 ujcrb7 = ((data & 0x01) ? 0 : 1) << 2; 1040 ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
1016 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7); 1041 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
1017 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); 1042 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
1018 if (ujcrb7 == jtemp) 1043 if (ujcrb7 == jtemp)
1019 break; 1044 break;
@@ -1254,7 +1279,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
1254 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); 1279 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
1255 1280
1256 /* dummy write to fire HWC */ 1281 /* dummy write to fire HWC */
1257 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00); 1282 ast_show_cursor(crtc);
1258 1283
1259 return 0; 1284 return 0;
1260} 1285}
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index d9c0f7573905..1669c42c40ed 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -142,6 +142,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
142 142
143 lockdep_assert_held_once(&dev->master_mutex); 143 lockdep_assert_held_once(&dev->master_mutex);
144 144
145 WARN_ON(fpriv->is_master);
145 old_master = fpriv->master; 146 old_master = fpriv->master;
146 fpriv->master = drm_master_create(dev); 147 fpriv->master = drm_master_create(dev);
147 if (!fpriv->master) { 148 if (!fpriv->master) {
@@ -170,6 +171,7 @@ out_err:
170 /* drop references and restore old master on failure */ 171 /* drop references and restore old master on failure */
171 drm_master_put(&fpriv->master); 172 drm_master_put(&fpriv->master);
172 fpriv->master = old_master; 173 fpriv->master = old_master;
174 fpriv->is_master = 0;
173 175
174 return ret; 176 return ret;
175} 177}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 5ff1d79b86c4..0e0df398222d 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1275 mutex_lock(&mgr->lock); 1275 mutex_lock(&mgr->lock);
1276 mstb = mgr->mst_primary; 1276 mstb = mgr->mst_primary;
1277 1277
1278 if (!mstb)
1279 goto out;
1280
1278 for (i = 0; i < lct - 1; i++) { 1281 for (i = 0; i < lct - 1; i++) {
1279 int shift = (i % 2) ? 0 : 4; 1282 int shift = (i % 2) ? 0 : 4;
1280 int port_num = (rad[i / 2] >> shift) & 0xf; 1283 int port_num = (rad[i / 2] >> shift) & 0xf;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a502f3e519fd..dd852a25d375 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -219,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
219 mutex_lock(&fb_helper->lock); 219 mutex_lock(&fb_helper->lock);
220 drm_connector_list_iter_begin(dev, &conn_iter); 220 drm_connector_list_iter_begin(dev, &conn_iter);
221 drm_for_each_connector_iter(connector, &conn_iter) { 221 drm_for_each_connector_iter(connector, &conn_iter) {
222 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
223 continue;
224
222 ret = __drm_fb_helper_add_one_connector(fb_helper, connector); 225 ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
223 if (ret) 226 if (ret)
224 goto fail; 227 goto fail;
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 90a1c846fc25..8aaa5e86a979 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
97 97
98/** 98/**
99 * drm_driver_legacy_fb_format - compute drm fourcc code from legacy description 99 * drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
100 * @dev: DRM device
100 * @bpp: bits per pixels 101 * @bpp: bits per pixels
101 * @depth: bit depth per pixel 102 * @depth: bit depth per pixel
102 * @native: use host native byte order
103 * 103 *
104 * Computes a drm fourcc pixel format code for the given @bpp/@depth values. 104 * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
105 * Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config, 105 * Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index e7c3ed6c9a2e..9b476368aa31 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
93 * If the GPU managed to complete this jobs fence, the timout is 93 * If the GPU managed to complete this jobs fence, the timout is
94 * spurious. Bail out. 94 * spurious. Bail out.
95 */ 95 */
96 if (fence_completed(gpu, submit->out_fence->seqno)) 96 if (dma_fence_is_signaled(submit->out_fence))
97 return; 97 return;
98 98
99 /* 99 /*
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 94529aa82339..aef487dd8731 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
164 return frm; 164 return frm;
165} 165}
166 166
167static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
168{
169 struct decon_context *ctx = crtc->ctx;
170
171 return decon_get_frame_count(ctx, false);
172}
173
174static void decon_setup_trigger(struct decon_context *ctx) 167static void decon_setup_trigger(struct decon_context *ctx)
175{ 168{
176 if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG)) 169 if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
@@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
536 .disable = decon_disable, 529 .disable = decon_disable,
537 .enable_vblank = decon_enable_vblank, 530 .enable_vblank = decon_enable_vblank,
538 .disable_vblank = decon_disable_vblank, 531 .disable_vblank = decon_disable_vblank,
539 .get_vblank_counter = decon_get_vblank_counter,
540 .atomic_begin = decon_atomic_begin, 532 .atomic_begin = decon_atomic_begin,
541 .update_plane = decon_update_plane, 533 .update_plane = decon_update_plane,
542 .disable_plane = decon_disable_plane, 534 .disable_plane = decon_disable_plane,
@@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
554 int ret; 546 int ret;
555 547
556 ctx->drm_dev = drm_dev; 548 ctx->drm_dev = drm_dev;
557 drm_dev->max_vblank_count = 0xffffffff;
558 549
559 for (win = ctx->first_win; win < WINDOWS_NR; win++) { 550 for (win = ctx->first_win; win < WINDOWS_NR; win++) {
560 ctx->configs[win].pixel_formats = decon_formats; 551 ctx->configs[win].pixel_formats = decon_formats;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index eea90251808f..2696289ecc78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
162 exynos_crtc->ops->disable_vblank(exynos_crtc); 162 exynos_crtc->ops->disable_vblank(exynos_crtc);
163} 163}
164 164
165static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
166{
167 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
168
169 if (exynos_crtc->ops->get_vblank_counter)
170 return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
171
172 return 0;
173}
174
175static const struct drm_crtc_funcs exynos_crtc_funcs = { 165static const struct drm_crtc_funcs exynos_crtc_funcs = {
176 .set_config = drm_atomic_helper_set_config, 166 .set_config = drm_atomic_helper_set_config,
177 .page_flip = drm_atomic_helper_page_flip, 167 .page_flip = drm_atomic_helper_page_flip,
@@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
181 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 171 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
182 .enable_vblank = exynos_drm_crtc_enable_vblank, 172 .enable_vblank = exynos_drm_crtc_enable_vblank,
183 .disable_vblank = exynos_drm_crtc_disable_vblank, 173 .disable_vblank = exynos_drm_crtc_disable_vblank,
184 .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
185}; 174};
186 175
187struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, 176struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index ec9604f1272b..5e61e707f955 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -135,7 +135,6 @@ struct exynos_drm_crtc_ops {
135 void (*disable)(struct exynos_drm_crtc *crtc); 135 void (*disable)(struct exynos_drm_crtc *crtc);
136 int (*enable_vblank)(struct exynos_drm_crtc *crtc); 136 int (*enable_vblank)(struct exynos_drm_crtc *crtc);
137 void (*disable_vblank)(struct exynos_drm_crtc *crtc); 137 void (*disable_vblank)(struct exynos_drm_crtc *crtc);
138 u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
139 enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc, 138 enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
140 const struct drm_display_mode *mode); 139 const struct drm_display_mode *mode);
141 bool (*mode_fixup)(struct exynos_drm_crtc *crtc, 140 bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 07af7758066d..d81e62ae286a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -14,6 +14,7 @@
14 14
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_fb_helper.h>
17#include <drm/drm_mipi_dsi.h> 18#include <drm/drm_mipi_dsi.h>
18#include <drm/drm_panel.h> 19#include <drm/drm_panel.h>
19#include <drm/drm_atomic_helper.h> 20#include <drm/drm_atomic_helper.h>
@@ -1474,12 +1475,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
1474{ 1475{
1475 struct exynos_dsi *dsi = encoder_to_dsi(encoder); 1476 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
1476 struct drm_connector *connector = &dsi->connector; 1477 struct drm_connector *connector = &dsi->connector;
1478 struct drm_device *drm = encoder->dev;
1477 int ret; 1479 int ret;
1478 1480
1479 connector->polled = DRM_CONNECTOR_POLL_HPD; 1481 connector->polled = DRM_CONNECTOR_POLL_HPD;
1480 1482
1481 ret = drm_connector_init(encoder->dev, connector, 1483 ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
1482 &exynos_dsi_connector_funcs,
1483 DRM_MODE_CONNECTOR_DSI); 1484 DRM_MODE_CONNECTOR_DSI);
1484 if (ret) { 1485 if (ret) {
1485 DRM_ERROR("Failed to initialize connector with drm\n"); 1486 DRM_ERROR("Failed to initialize connector with drm\n");
@@ -1489,7 +1490,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
1489 connector->status = connector_status_disconnected; 1490 connector->status = connector_status_disconnected;
1490 drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); 1491 drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
1491 drm_connector_attach_encoder(connector, encoder); 1492 drm_connector_attach_encoder(connector, encoder);
1493 if (!drm->registered)
1494 return 0;
1492 1495
1496 connector->funcs->reset(connector);
1497 drm_fb_helper_add_one_connector(drm->fb_helper, connector);
1498 drm_connector_register(connector);
1493 return 0; 1499 return 0;
1494} 1500}
1495 1501
@@ -1527,7 +1533,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
1527 } 1533 }
1528 1534
1529 dsi->panel = of_drm_find_panel(device->dev.of_node); 1535 dsi->panel = of_drm_find_panel(device->dev.of_node);
1530 if (dsi->panel) { 1536 if (IS_ERR(dsi->panel)) {
1537 dsi->panel = NULL;
1538 } else {
1531 drm_panel_attach(dsi->panel, &dsi->connector); 1539 drm_panel_attach(dsi->panel, &dsi->connector);
1532 dsi->connector.status = connector_status_connected; 1540 dsi->connector.status = connector_status_connected;
1533 } 1541 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 918dd2c82209..01d182289efa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -192,7 +192,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
192 struct drm_fb_helper *helper; 192 struct drm_fb_helper *helper;
193 int ret; 193 int ret;
194 194
195 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) 195 if (!dev->mode_config.num_crtc)
196 return 0; 196 return 0;
197 197
198 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); 198 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index fe754022e356..359d37d5c958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,10 +61,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
61 } 61 }
62 62
63 mutex_lock(&dev_priv->drm.struct_mutex); 63 mutex_lock(&dev_priv->drm.struct_mutex);
64 mmio_hw_access_pre(dev_priv);
64 ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, 65 ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
65 size, I915_GTT_PAGE_SIZE, 66 size, I915_GTT_PAGE_SIZE,
66 I915_COLOR_UNEVICTABLE, 67 I915_COLOR_UNEVICTABLE,
67 start, end, flags); 68 start, end, flags);
69 mmio_hw_access_post(dev_priv);
68 mutex_unlock(&dev_priv->drm.struct_mutex); 70 mutex_unlock(&dev_priv->drm.struct_mutex);
69 if (ret) 71 if (ret)
70 gvt_err("fail to alloc %s gm space from host\n", 72 gvt_err("fail to alloc %s gm space from host\n",
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2402395a068d..c7103dd2d8d5 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1905 vgpu_free_mm(mm); 1905 vgpu_free_mm(mm);
1906 return ERR_PTR(-ENOMEM); 1906 return ERR_PTR(-ENOMEM);
1907 } 1907 }
1908 mm->ggtt_mm.last_partial_off = -1UL;
1909 1908
1910 return mm; 1909 return mm;
1911} 1910}
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1930 invalidate_ppgtt_mm(mm); 1929 invalidate_ppgtt_mm(mm);
1931 } else { 1930 } else {
1932 vfree(mm->ggtt_mm.virtual_ggtt); 1931 vfree(mm->ggtt_mm.virtual_ggtt);
1933 mm->ggtt_mm.last_partial_off = -1UL;
1934 } 1932 }
1935 1933
1936 vgpu_free_mm(mm); 1934 vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2168 struct intel_gvt_gtt_entry e, m; 2166 struct intel_gvt_gtt_entry e, m;
2169 dma_addr_t dma_addr; 2167 dma_addr_t dma_addr;
2170 int ret; 2168 int ret;
2169 struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2170 bool partial_update = false;
2171 2171
2172 if (bytes != 4 && bytes != 8) 2172 if (bytes != 4 && bytes != 8)
2173 return -EINVAL; 2173 return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2178 if (!vgpu_gmadr_is_valid(vgpu, gma)) 2178 if (!vgpu_gmadr_is_valid(vgpu, gma))
2179 return 0; 2179 return 0;
2180 2180
2181 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); 2181 e.type = GTT_TYPE_GGTT_PTE;
2182
2183 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 2182 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
2184 bytes); 2183 bytes);
2185 2184
2186 /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes 2185 /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
2187 * write, we assume the two 4 bytes writes are consecutive. 2186 * write, save the first 4 bytes in a list and update virtual
2188 * Otherwise, we abort and report error 2187 * PTE. Only update shadow PTE when the second 4 bytes comes.
2189 */ 2188 */
2190 if (bytes < info->gtt_entry_size) { 2189 if (bytes < info->gtt_entry_size) {
2191 if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { 2190 bool found = false;
2192 /* the first partial part*/ 2191
2193 ggtt_mm->ggtt_mm.last_partial_off = off; 2192 list_for_each_entry_safe(pos, n,
2194 ggtt_mm->ggtt_mm.last_partial_data = e.val64; 2193 &ggtt_mm->ggtt_mm.partial_pte_list, list) {
2195 return 0; 2194 if (g_gtt_index == pos->offset >>
2196 } else if ((g_gtt_index == 2195 info->gtt_entry_size_shift) {
2197 (ggtt_mm->ggtt_mm.last_partial_off >> 2196 if (off != pos->offset) {
2198 info->gtt_entry_size_shift)) && 2197 /* the second partial part*/
2199 (off != ggtt_mm->ggtt_mm.last_partial_off)) { 2198 int last_off = pos->offset &
2200 /* the second partial part */ 2199 (info->gtt_entry_size - 1);
2201 2200
2202 int last_off = ggtt_mm->ggtt_mm.last_partial_off & 2201 memcpy((void *)&e.val64 + last_off,
2203 (info->gtt_entry_size - 1); 2202 (void *)&pos->data + last_off,
2204 2203 bytes);
2205 memcpy((void *)&e.val64 + last_off, 2204
2206 (void *)&ggtt_mm->ggtt_mm.last_partial_data + 2205 list_del(&pos->list);
2207 last_off, bytes); 2206 kfree(pos);
2208 2207 found = true;
2209 ggtt_mm->ggtt_mm.last_partial_off = -1UL; 2208 break;
2210 } else { 2209 }
2211 int last_offset; 2210
2212 2211 /* update of the first partial part */
2213 gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", 2212 pos->data = e.val64;
2214 ggtt_mm->ggtt_mm.last_partial_off, off, 2213 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2215 bytes, info->gtt_entry_size); 2214 return 0;
2216 2215 }
2217 /* set host ggtt entry to scratch page and clear 2216 }
2218 * virtual ggtt entry as not present for last
2219 * partially write offset
2220 */
2221 last_offset = ggtt_mm->ggtt_mm.last_partial_off &
2222 (~(info->gtt_entry_size - 1));
2223
2224 ggtt_get_host_entry(ggtt_mm, &m, last_offset);
2225 ggtt_invalidate_pte(vgpu, &m);
2226 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2227 ops->clear_present(&m);
2228 ggtt_set_host_entry(ggtt_mm, &m, last_offset);
2229 ggtt_invalidate(gvt->dev_priv);
2230
2231 ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
2232 ops->clear_present(&e);
2233 ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
2234
2235 ggtt_mm->ggtt_mm.last_partial_off = off;
2236 ggtt_mm->ggtt_mm.last_partial_data = e.val64;
2237 2217
2238 return 0; 2218 if (!found) {
2219 /* the first partial part */
2220 partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2221 if (!partial_pte)
2222 return -ENOMEM;
2223 partial_pte->offset = off;
2224 partial_pte->data = e.val64;
2225 list_add_tail(&partial_pte->list,
2226 &ggtt_mm->ggtt_mm.partial_pte_list);
2227 partial_update = true;
2239 } 2228 }
2240 } 2229 }
2241 2230
2242 if (ops->test_present(&e)) { 2231 if (!partial_update && (ops->test_present(&e))) {
2243 gfn = ops->get_pfn(&e); 2232 gfn = ops->get_pfn(&e);
2244 m = e; 2233 m = e;
2245 2234
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2263 } else 2252 } else
2264 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 2253 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2265 } else { 2254 } else {
2266 ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
2267 ggtt_invalidate_pte(vgpu, &m);
2268 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 2255 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2269 ops->clear_present(&m); 2256 ops->clear_present(&m);
2270 } 2257 }
2271 2258
2272out: 2259out:
2260 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2261
2262 ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2263 ggtt_invalidate_pte(vgpu, &e);
2264
2273 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); 2265 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2274 ggtt_invalidate(gvt->dev_priv); 2266 ggtt_invalidate(gvt->dev_priv);
2275 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2276 return 0; 2267 return 0;
2277} 2268}
2278 2269
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2430 2421
2431 intel_vgpu_reset_ggtt(vgpu, false); 2422 intel_vgpu_reset_ggtt(vgpu, false);
2432 2423
2424 INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
2425
2433 return create_scratch_page_tree(vgpu); 2426 return create_scratch_page_tree(vgpu);
2434} 2427}
2435 2428
@@ -2454,6 +2447,15 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2454 2447
2455static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) 2448static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2456{ 2449{
2450 struct intel_gvt_partial_pte *pos, *next;
2451
2452 list_for_each_entry_safe(pos, next,
2453 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2454 list) {
2455 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2456 pos->offset, pos->data);
2457 kfree(pos);
2458 }
2457 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); 2459 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2458 vgpu->gtt.ggtt_mm = NULL; 2460 vgpu->gtt.ggtt_mm = NULL;
2459} 2461}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 7a9b36176efb..d8cb04cc946d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -35,7 +35,6 @@
35#define _GVT_GTT_H_ 35#define _GVT_GTT_H_
36 36
37#define I915_GTT_PAGE_SHIFT 12 37#define I915_GTT_PAGE_SHIFT 12
38#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
39 38
40struct intel_vgpu_mm; 39struct intel_vgpu_mm;
41 40
@@ -133,6 +132,12 @@ enum intel_gvt_mm_type {
133 132
134#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES 133#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
135 134
135struct intel_gvt_partial_pte {
136 unsigned long offset;
137 u64 data;
138 struct list_head list;
139};
140
136struct intel_vgpu_mm { 141struct intel_vgpu_mm {
137 enum intel_gvt_mm_type type; 142 enum intel_gvt_mm_type type;
138 struct intel_vgpu *vgpu; 143 struct intel_vgpu *vgpu;
@@ -157,8 +162,7 @@ struct intel_vgpu_mm {
157 } ppgtt_mm; 162 } ppgtt_mm;
158 struct { 163 struct {
159 void *virtual_ggtt; 164 void *virtual_ggtt;
160 unsigned long last_partial_off; 165 struct list_head partial_pte_list;
161 u64 last_partial_data;
162 } ggtt_mm; 166 } ggtt_mm;
163 }; 167 };
164}; 168};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90f50f67909a..aa280bb07125 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
1609 return 0; 1609 return 0;
1610} 1610}
1611 1611
1612static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu, 1612static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
1613 unsigned int offset, void *p_data, unsigned int bytes) 1613 unsigned int offset, void *p_data, unsigned int bytes)
1614{ 1614{
1615 vgpu_vreg(vgpu, offset) = 0; 1615 vgpu_vreg(vgpu, offset) = 0;
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2607 MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2607 MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2608 MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2608 MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2609 MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2609 MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2610
2611 MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2612 MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2610 return 0; 2613 return 0;
2611} 2614}
2612 2615
@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
3205 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); 3208 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
3206 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); 3209 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
3207 3210
3208 MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
3209 MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
3210
3211 MMIO_D(RC6_CTX_BASE, D_BXT); 3211 MMIO_D(RC6_CTX_BASE, D_BXT);
3212 3212
3213 MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); 3213 MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 10e63eea5492..d6e02c15ef97 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
131 {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ 131 {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
132 132
133 {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ 133 {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
134 {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */ 134 {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
135 135
136 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ 136 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
137 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ 137 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
@@ -158,6 +158,8 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
158 int ring_id, i; 158 int ring_id, i;
159 159
160 for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) { 160 for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
161 if (!HAS_ENGINE(dev_priv, ring_id))
162 continue;
161 offset.reg = regs[ring_id]; 163 offset.reg = regs[ring_id];
162 for (i = 0; i < GEN9_MOCS_SIZE; i++) { 164 for (i = 0; i < GEN9_MOCS_SIZE; i++) {
163 gen9_render_mocs.control_table[ring_id][i] = 165 gen9_render_mocs.control_table[ring_id][i] =
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 44e2c0f5ec50..ffdbbac4400e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1175,8 +1175,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
1175 return -EINVAL; 1175 return -EINVAL;
1176 } 1176 }
1177 1177
1178 dram_info->valid_dimm = true;
1179
1180 /* 1178 /*
1181 * If any of the channel is single rank channel, worst case output 1179 * If any of the channel is single rank channel, worst case output
1182 * will be same as if single rank memory, so consider single rank 1180 * will be same as if single rank memory, so consider single rank
@@ -1193,8 +1191,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
1193 return -EINVAL; 1191 return -EINVAL;
1194 } 1192 }
1195 1193
1196 if (ch0.is_16gb_dimm || ch1.is_16gb_dimm) 1194 dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
1197 dram_info->is_16gb_dimm = true;
1198 1195
1199 dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0, 1196 dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
1200 val_ch1, 1197 val_ch1,
@@ -1314,7 +1311,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
1314 return -EINVAL; 1311 return -EINVAL;
1315 } 1312 }
1316 1313
1317 dram_info->valid_dimm = true;
1318 dram_info->valid = true; 1314 dram_info->valid = true;
1319 return 0; 1315 return 0;
1320} 1316}
@@ -1327,12 +1323,17 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
1327 int ret; 1323 int ret;
1328 1324
1329 dram_info->valid = false; 1325 dram_info->valid = false;
1330 dram_info->valid_dimm = false;
1331 dram_info->is_16gb_dimm = false;
1332 dram_info->rank = I915_DRAM_RANK_INVALID; 1326 dram_info->rank = I915_DRAM_RANK_INVALID;
1333 dram_info->bandwidth_kbps = 0; 1327 dram_info->bandwidth_kbps = 0;
1334 dram_info->num_channels = 0; 1328 dram_info->num_channels = 0;
1335 1329
1330 /*
1331 * Assume 16Gb DIMMs are present until proven otherwise.
1332 * This is only used for the level 0 watermark latency
1333 * w/a which does not apply to bxt/glk.
1334 */
1335 dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
1336
1336 if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv)) 1337 if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
1337 return; 1338 return;
1338 1339
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8624b4bdc242..9102571e9692 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1948,7 +1948,6 @@ struct drm_i915_private {
1948 1948
1949 struct dram_info { 1949 struct dram_info {
1950 bool valid; 1950 bool valid;
1951 bool valid_dimm;
1952 bool is_16gb_dimm; 1951 bool is_16gb_dimm;
1953 u8 num_channels; 1952 u8 num_channels;
1954 enum dram_rank { 1953 enum dram_rank {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 09187286d346..d4fac09095f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
460 * any non-page-aligned or non-canonical addresses. 460 * any non-page-aligned or non-canonical addresses.
461 */ 461 */
462 if (unlikely(entry->flags & EXEC_OBJECT_PINNED && 462 if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
463 entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK))) 463 entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
464 return -EINVAL; 464 return -EINVAL;
465 465
466 /* pad_to_size was once a reserved field, so sanitize it */ 466 /* pad_to_size was once a reserved field, so sanitize it */
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
1268 else if (gen >= 4) 1268 else if (gen >= 4)
1269 len = 4; 1269 len = 4;
1270 else 1270 else
1271 len = 3; 1271 len = 6;
1272 1272
1273 batch = reloc_gpu(eb, vma, len); 1273 batch = reloc_gpu(eb, vma, len);
1274 if (IS_ERR(batch)) 1274 if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
1309 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; 1309 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1310 *batch++ = addr; 1310 *batch++ = addr;
1311 *batch++ = target_offset; 1311 *batch++ = target_offset;
1312
1313 /* And again for good measure (blb/pnv) */
1314 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1315 *batch++ = addr;
1316 *batch++ = target_offset;
1312 } 1317 }
1313 1318
1314 goto out; 1319 goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56c7f8637311..07999fe09ad2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1757,7 +1757,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
1757 if (i == 4) 1757 if (i == 4)
1758 continue; 1758 continue;
1759 1759
1760 seq_printf(m, "\t\t(%03d, %04d) %08lx: ", 1760 seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
1761 pde, pte, 1761 pde, pte,
1762 (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); 1762 (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
1763 for (i = 0; i < 4; i++) { 1763 for (i = 0; i < 4; i++) {
@@ -3413,6 +3413,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3413 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; 3413 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3414 if (ggtt->vm.clear_range != nop_clear_range) 3414 if (ggtt->vm.clear_range != nop_clear_range)
3415 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; 3415 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3416
3417 /* Prevent recursively calling stop_machine() and deadlocks. */
3418 dev_info(dev_priv->drm.dev,
3419 "Disabling error capture for VT-d workaround\n");
3420 i915_disable_error_state(dev_priv, -ENODEV);
3416 } 3421 }
3417 3422
3418 ggtt->invalidate = gen6_ggtt_invalidate; 3423 ggtt->invalidate = gen6_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 7e2af5f4f39b..28039290655c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,13 +42,15 @@
42#include "i915_selftest.h" 42#include "i915_selftest.h"
43#include "i915_timeline.h" 43#include "i915_timeline.h"
44 44
45#define I915_GTT_PAGE_SIZE_4K BIT(12) 45#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
46#define I915_GTT_PAGE_SIZE_64K BIT(16) 46#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
47#define I915_GTT_PAGE_SIZE_2M BIT(21) 47#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
48 48
49#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K 49#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
50#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M 50#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
51 51
52#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
53
52#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE 54#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
53 55
54#define I915_FENCE_REG_NONE -1 56#define I915_FENCE_REG_NONE -1
@@ -659,20 +661,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
659 u64 start, u64 end, unsigned int flags); 661 u64 start, u64 end, unsigned int flags);
660 662
661/* Flags used by pin/bind&friends. */ 663/* Flags used by pin/bind&friends. */
662#define PIN_NONBLOCK BIT(0) 664#define PIN_NONBLOCK BIT_ULL(0)
663#define PIN_MAPPABLE BIT(1) 665#define PIN_MAPPABLE BIT_ULL(1)
664#define PIN_ZONE_4G BIT(2) 666#define PIN_ZONE_4G BIT_ULL(2)
665#define PIN_NONFAULT BIT(3) 667#define PIN_NONFAULT BIT_ULL(3)
666#define PIN_NOEVICT BIT(4) 668#define PIN_NOEVICT BIT_ULL(4)
667 669
668#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ 670#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
669#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ 671#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
670#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ 672#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
671#define PIN_UPDATE BIT(8) 673#define PIN_UPDATE BIT_ULL(8)
672 674
673#define PIN_HIGH BIT(9) 675#define PIN_HIGH BIT_ULL(9)
674#define PIN_OFFSET_BIAS BIT(10) 676#define PIN_OFFSET_BIAS BIT_ULL(10)
675#define PIN_OFFSET_FIXED BIT(11) 677#define PIN_OFFSET_FIXED BIT_ULL(11)
676#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) 678#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
677 679
678#endif 680#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8762d17b6659..3eb33e000d6f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -648,6 +648,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
648 return 0; 648 return 0;
649 } 649 }
650 650
651 if (IS_ERR(error))
652 return PTR_ERR(error);
653
651 if (*error->error_msg) 654 if (*error->error_msg)
652 err_printf(m, "%s\n", error->error_msg); 655 err_printf(m, "%s\n", error->error_msg);
653 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 656 err_printf(m, "Kernel: " UTS_RELEASE "\n");
@@ -1859,6 +1862,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1859 error = i915_capture_gpu_state(i915); 1862 error = i915_capture_gpu_state(i915);
1860 if (!error) { 1863 if (!error) {
1861 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1864 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1865 i915_disable_error_state(i915, -ENOMEM);
1862 return; 1866 return;
1863 } 1867 }
1864 1868
@@ -1914,5 +1918,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
1914 i915->gpu_error.first_error = NULL; 1918 i915->gpu_error.first_error = NULL;
1915 spin_unlock_irq(&i915->gpu_error.lock); 1919 spin_unlock_irq(&i915->gpu_error.lock);
1916 1920
1917 i915_gpu_state_put(error); 1921 if (!IS_ERR(error))
1922 i915_gpu_state_put(error);
1923}
1924
1925void i915_disable_error_state(struct drm_i915_private *i915, int err)
1926{
1927 spin_lock_irq(&i915->gpu_error.lock);
1928 if (!i915->gpu_error.first_error)
1929 i915->gpu_error.first_error = ERR_PTR(err);
1930 spin_unlock_irq(&i915->gpu_error.lock);
1918} 1931}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 8710fb18ed74..3ec89a504de5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
343 343
344struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); 344struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
345void i915_reset_error_state(struct drm_i915_private *i915); 345void i915_reset_error_state(struct drm_i915_private *i915);
346void i915_disable_error_state(struct drm_i915_private *i915, int err);
346 347
347#else 348#else
348 349
@@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
355static inline struct i915_gpu_state * 356static inline struct i915_gpu_state *
356i915_first_error_state(struct drm_i915_private *i915) 357i915_first_error_state(struct drm_i915_private *i915)
357{ 358{
358 return NULL; 359 return ERR_PTR(-ENODEV);
359} 360}
360 361
361static inline void i915_reset_error_state(struct drm_i915_private *i915) 362static inline void i915_reset_error_state(struct drm_i915_private *i915)
362{ 363{
363} 364}
364 365
366static inline void i915_disable_error_state(struct drm_i915_private *i915,
367 int err)
368{
369}
370
365#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ 371#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
366 372
367#endif /* _I915_GPU_ERROR_H_ */ 373#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7c491ea3d052..e31c27e45734 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2095,8 +2095,12 @@ enum i915_power_well_id {
2095 2095
2096/* ICL PHY DFLEX registers */ 2096/* ICL PHY DFLEX registers */
2097#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0) 2097#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
2098#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n))) 2098#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
2099#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n))) 2099#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
2100#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
2101#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
2102#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
2103#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
2100 2104
2101/* BXT PHY Ref registers */ 2105/* BXT PHY Ref registers */
2102#define _PORT_REF_DW3_A 0x16218C 2106#define _PORT_REF_DW3_A 0x16218C
@@ -4593,12 +4597,12 @@ enum {
4593 4597
4594#define DRM_DIP_ENABLE (1 << 28) 4598#define DRM_DIP_ENABLE (1 << 28)
4595#define PSR_VSC_BIT_7_SET (1 << 27) 4599#define PSR_VSC_BIT_7_SET (1 << 27)
4596#define VSC_SELECT_MASK (0x3 << 26) 4600#define VSC_SELECT_MASK (0x3 << 25)
4597#define VSC_SELECT_SHIFT 26 4601#define VSC_SELECT_SHIFT 25
4598#define VSC_DIP_HW_HEA_DATA (0 << 26) 4602#define VSC_DIP_HW_HEA_DATA (0 << 25)
4599#define VSC_DIP_HW_HEA_SW_DATA (1 << 26) 4603#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
4600#define VSC_DIP_HW_DATA_SW_HEA (2 << 26) 4604#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
4601#define VSC_DIP_SW_HEA_DATA (3 << 26) 4605#define VSC_DIP_SW_HEA_DATA (3 << 25)
4602#define VDIP_ENABLE_PPS (1 << 24) 4606#define VDIP_ENABLE_PPS (1 << 24)
4603 4607
4604/* Panel power sequencing */ 4608/* Panel power sequencing */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 769f3f586661..ee3ca2de983b 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -144,6 +144,9 @@ static const struct {
144/* HDMI N/CTS table */ 144/* HDMI N/CTS table */
145#define TMDS_297M 297000 145#define TMDS_297M 297000
146#define TMDS_296M 296703 146#define TMDS_296M 296703
147#define TMDS_594M 594000
148#define TMDS_593M 593407
149
147static const struct { 150static const struct {
148 int sample_rate; 151 int sample_rate;
149 int clock; 152 int clock;
@@ -164,6 +167,20 @@ static const struct {
164 { 176400, TMDS_297M, 18816, 247500 }, 167 { 176400, TMDS_297M, 18816, 247500 },
165 { 192000, TMDS_296M, 23296, 281250 }, 168 { 192000, TMDS_296M, 23296, 281250 },
166 { 192000, TMDS_297M, 20480, 247500 }, 169 { 192000, TMDS_297M, 20480, 247500 },
170 { 44100, TMDS_593M, 8918, 937500 },
171 { 44100, TMDS_594M, 9408, 990000 },
172 { 48000, TMDS_593M, 5824, 562500 },
173 { 48000, TMDS_594M, 6144, 594000 },
174 { 32000, TMDS_593M, 5824, 843750 },
175 { 32000, TMDS_594M, 3072, 445500 },
176 { 88200, TMDS_593M, 17836, 937500 },
177 { 88200, TMDS_594M, 18816, 990000 },
178 { 96000, TMDS_593M, 11648, 562500 },
179 { 96000, TMDS_594M, 12288, 594000 },
180 { 176400, TMDS_593M, 35672, 937500 },
181 { 176400, TMDS_594M, 37632, 990000 },
182 { 192000, TMDS_593M, 23296, 562500 },
183 { 192000, TMDS_594M, 24576, 594000 },
167}; 184};
168 185
169/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ 186/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 29075c763428..8d74276029e6 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
2138static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, 2138static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
2139 int pixel_rate) 2139 int pixel_rate)
2140{ 2140{
2141 if (INTEL_GEN(dev_priv) >= 10) 2141 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
2142 return DIV_ROUND_UP(pixel_rate, 2); 2142 return DIV_ROUND_UP(pixel_rate, 2);
2143 else if (IS_GEMINILAKE(dev_priv))
2144 /*
2145 * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
2146 * as a temporary workaround. Use a higher cdclk instead. (Note that
2147 * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
2148 * cdclk.)
2149 */
2150 return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
2151 else if (IS_GEN9(dev_priv) || 2143 else if (IS_GEN9(dev_priv) ||
2152 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2144 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2153 return pixel_rate; 2145 return pixel_rate;
@@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
2543{ 2535{
2544 int max_cdclk_freq = dev_priv->max_cdclk_freq; 2536 int max_cdclk_freq = dev_priv->max_cdclk_freq;
2545 2537
2546 if (INTEL_GEN(dev_priv) >= 10) 2538 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
2547 return 2 * max_cdclk_freq; 2539 return 2 * max_cdclk_freq;
2548 else if (IS_GEMINILAKE(dev_priv))
2549 /*
2550 * FIXME: Limiting to 99% as a temporary workaround. See
2551 * intel_min_cdclk() for details.
2552 */
2553 return 2 * max_cdclk_freq * 99 / 100;
2554 else if (IS_GEN9(dev_priv) || 2540 else if (IS_GEN9(dev_priv) ||
2555 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2541 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2556 return max_cdclk_freq; 2542 return max_cdclk_freq;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0ef0c6448d53..01fa98299bae 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
474 u8 eu_disabled_mask; 474 u8 eu_disabled_mask;
475 u32 n_disabled; 475 u32 n_disabled;
476 476
477 if (!(sseu->subslice_mask[ss] & BIT(ss))) 477 if (!(sseu->subslice_mask[s] & BIT(ss)))
478 /* skip disabled subslice */ 478 /* skip disabled subslice */
479 continue; 479 continue;
480 480
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9741cc419e1b..c9878dd1f7cd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2890,6 +2890,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2890 return; 2890 return;
2891 2891
2892valid_fb: 2892valid_fb:
2893 intel_state->base.rotation = plane_config->rotation;
2893 intel_fill_fb_ggtt_view(&intel_state->view, fb, 2894 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2894 intel_state->base.rotation); 2895 intel_state->base.rotation);
2895 intel_state->color_plane[0].stride = 2896 intel_state->color_plane[0].stride =
@@ -4850,8 +4851,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4850 * chroma samples for both of the luma samples, and thus we don't 4851 * chroma samples for both of the luma samples, and thus we don't
4851 * actually get the expected MPEG2 chroma siting convention :( 4852 * actually get the expected MPEG2 chroma siting convention :(
4852 * The same behaviour is observed on pre-SKL platforms as well. 4853 * The same behaviour is observed on pre-SKL platforms as well.
4854 *
4855 * Theory behind the formula (note that we ignore sub-pixel
4856 * source coordinates):
4857 * s = source sample position
4858 * d = destination sample position
4859 *
4860 * Downscaling 4:1:
4861 * -0.5
4862 * | 0.0
4863 * | | 1.5 (initial phase)
4864 * | | |
4865 * v v v
4866 * | s | s | s | s |
4867 * | d |
4868 *
4869 * Upscaling 1:4:
4870 * -0.5
4871 * | -0.375 (initial phase)
4872 * | | 0.0
4873 * | | |
4874 * v v v
4875 * | s |
4876 * | d | d | d | d |
4853 */ 4877 */
4854u16 skl_scaler_calc_phase(int sub, bool chroma_cosited) 4878u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
4855{ 4879{
4856 int phase = -0x8000; 4880 int phase = -0x8000;
4857 u16 trip = 0; 4881 u16 trip = 0;
@@ -4859,6 +4883,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4859 if (chroma_cosited) 4883 if (chroma_cosited)
4860 phase += (sub - 1) * 0x8000 / sub; 4884 phase += (sub - 1) * 0x8000 / sub;
4861 4885
4886 phase += scale / (2 * sub);
4887
4888 /*
4889 * Hardware initial phase limited to [-0.5:1.5].
4890 * Since the max hardware scale factor is 3.0, we
4891 * should never actually excdeed 1.0 here.
4892 */
4893 WARN_ON(phase < -0x8000 || phase > 0x18000);
4894
4862 if (phase < 0) 4895 if (phase < 0)
4863 phase = 0x10000 + phase; 4896 phase = 0x10000 + phase;
4864 else 4897 else
@@ -5067,13 +5100,20 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
5067 5100
5068 if (crtc->config->pch_pfit.enabled) { 5101 if (crtc->config->pch_pfit.enabled) {
5069 u16 uv_rgb_hphase, uv_rgb_vphase; 5102 u16 uv_rgb_hphase, uv_rgb_vphase;
5103 int pfit_w, pfit_h, hscale, vscale;
5070 int id; 5104 int id;
5071 5105
5072 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) 5106 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
5073 return; 5107 return;
5074 5108
5075 uv_rgb_hphase = skl_scaler_calc_phase(1, false); 5109 pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
5076 uv_rgb_vphase = skl_scaler_calc_phase(1, false); 5110 pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
5111
5112 hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
5113 vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
5114
5115 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5116 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5077 5117
5078 id = scaler_state->scaler_id; 5118 id = scaler_state->scaler_id;
5079 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 5119 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
@@ -7843,8 +7883,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
7843 plane_config->tiling = I915_TILING_X; 7883 plane_config->tiling = I915_TILING_X;
7844 fb->modifier = I915_FORMAT_MOD_X_TILED; 7884 fb->modifier = I915_FORMAT_MOD_X_TILED;
7845 } 7885 }
7886
7887 if (val & DISPPLANE_ROTATE_180)
7888 plane_config->rotation = DRM_MODE_ROTATE_180;
7846 } 7889 }
7847 7890
7891 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
7892 val & DISPPLANE_MIRROR)
7893 plane_config->rotation |= DRM_MODE_REFLECT_X;
7894
7848 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7895 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7849 fourcc = i9xx_format_to_fourcc(pixel_format); 7896 fourcc = i9xx_format_to_fourcc(pixel_format);
7850 fb->format = drm_format_info(fourcc); 7897 fb->format = drm_format_info(fourcc);
@@ -8913,6 +8960,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
8913 goto error; 8960 goto error;
8914 } 8961 }
8915 8962
8963 /*
8964 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
8965 * while i915 HW rotation is clockwise, thats why this swapping.
8966 */
8967 switch (val & PLANE_CTL_ROTATE_MASK) {
8968 case PLANE_CTL_ROTATE_0:
8969 plane_config->rotation = DRM_MODE_ROTATE_0;
8970 break;
8971 case PLANE_CTL_ROTATE_90:
8972 plane_config->rotation = DRM_MODE_ROTATE_270;
8973 break;
8974 case PLANE_CTL_ROTATE_180:
8975 plane_config->rotation = DRM_MODE_ROTATE_180;
8976 break;
8977 case PLANE_CTL_ROTATE_270:
8978 plane_config->rotation = DRM_MODE_ROTATE_90;
8979 break;
8980 }
8981
8982 if (INTEL_GEN(dev_priv) >= 10 &&
8983 val & PLANE_CTL_FLIP_HORIZONTAL)
8984 plane_config->rotation |= DRM_MODE_REFLECT_X;
8985
8916 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; 8986 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
8917 plane_config->base = base; 8987 plane_config->base = base;
8918 8988
@@ -12768,17 +12838,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12768 intel_check_cpu_fifo_underruns(dev_priv); 12838 intel_check_cpu_fifo_underruns(dev_priv);
12769 intel_check_pch_fifo_underruns(dev_priv); 12839 intel_check_pch_fifo_underruns(dev_priv);
12770 12840
12771 if (!new_crtc_state->active) { 12841 /* FIXME unify this for all platforms */
12772 /* 12842 if (!new_crtc_state->active &&
12773 * Make sure we don't call initial_watermarks 12843 !HAS_GMCH_DISPLAY(dev_priv) &&
12774 * for ILK-style watermark updates. 12844 dev_priv->display.initial_watermarks)
12775 * 12845 dev_priv->display.initial_watermarks(intel_state,
12776 * No clue what this is supposed to achieve. 12846 to_intel_crtc_state(new_crtc_state));
12777 */
12778 if (INTEL_GEN(dev_priv) >= 9)
12779 dev_priv->display.initial_watermarks(intel_state,
12780 to_intel_crtc_state(new_crtc_state));
12781 }
12782 } 12847 }
12783 } 12848 }
12784 12849
@@ -14646,7 +14711,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14646 fb->height < SKL_MIN_YUV_420_SRC_H || 14711 fb->height < SKL_MIN_YUV_420_SRC_H ||
14647 (fb->width % 4) != 0 || (fb->height % 4) != 0)) { 14712 (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14648 DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); 14713 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14649 return -EINVAL; 14714 goto err;
14650 } 14715 }
14651 14716
14652 for (i = 0; i < fb->format->num_planes; i++) { 14717 for (i = 0; i < fb->format->num_planes; i++) {
@@ -15233,6 +15298,14 @@ retry:
15233 ret = drm_atomic_add_affected_planes(state, crtc); 15298 ret = drm_atomic_add_affected_planes(state, crtc);
15234 if (ret) 15299 if (ret)
15235 goto out; 15300 goto out;
15301
15302 /*
15303 * FIXME hack to force a LUT update to avoid the
15304 * plane update forcing the pipe gamma on without
15305 * having a proper LUT loaded. Remove once we
15306 * have readout for pipe gamma enable.
15307 */
15308 crtc_state->color_mgmt_changed = true;
15236 } 15309 }
15237 } 15310 }
15238 15311
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 1b00f8ea145b..a911691dbd0f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -452,6 +452,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
452 if (!intel_connector) 452 if (!intel_connector)
453 return NULL; 453 return NULL;
454 454
455 intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
456 intel_connector->mst_port = intel_dp;
457 intel_connector->port = port;
458
455 connector = &intel_connector->base; 459 connector = &intel_connector->base;
456 ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, 460 ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
457 DRM_MODE_CONNECTOR_DisplayPort); 461 DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +466,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
462 466
463 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); 467 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
464 468
465 intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
466 intel_connector->mst_port = intel_dp;
467 intel_connector->port = port;
468
469 for_each_pipe(dev_priv, pipe) { 469 for_each_pipe(dev_priv, pipe) {
470 struct drm_encoder *enc = 470 struct drm_encoder *enc =
471 &intel_dp->mst_encoders[pipe]->base.base; 471 &intel_dp->mst_encoders[pipe]->base.base;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f8dc84b2d2d3..db6fa1d0cbda 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -547,6 +547,7 @@ struct intel_initial_plane_config {
547 unsigned int tiling; 547 unsigned int tiling;
548 int size; 548 int size;
549 u32 base; 549 u32 base;
550 u8 rotation;
550}; 551};
551 552
552#define SKL_MIN_SRC_W 8 553#define SKL_MIN_SRC_W 8
@@ -1646,7 +1647,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
1646void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 1647void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
1647 struct intel_crtc_state *crtc_state); 1648 struct intel_crtc_state *crtc_state);
1648 1649
1649u16 skl_scaler_calc_phase(int sub, bool chroma_center); 1650u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
1650int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); 1651int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1651int skl_max_scale(const struct intel_crtc_state *crtc_state, 1652int skl_max_scale(const struct intel_crtc_state *crtc_state,
1652 u32 pixel_format); 1653 u32 pixel_format);
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 648a13c6043c..9a8018130237 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
228 drm_for_each_connector_iter(connector, &conn_iter) { 228 drm_for_each_connector_iter(connector, &conn_iter) {
229 struct intel_connector *intel_connector = to_intel_connector(connector); 229 struct intel_connector *intel_connector = to_intel_connector(connector);
230 230
231 if (intel_connector->encoder->hpd_pin == pin) { 231 /* Don't check MST ports, they don't have pins */
232 if (!intel_connector->mst_port &&
233 intel_connector->encoder->hpd_pin == pin) {
232 if (connector->polled != intel_connector->polled) 234 if (connector->polled != intel_connector->polled)
233 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 235 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
234 connector->name); 236 connector->name);
@@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
395 struct intel_encoder *encoder; 397 struct intel_encoder *encoder;
396 bool storm_detected = false; 398 bool storm_detected = false;
397 bool queue_dig = false, queue_hp = false; 399 bool queue_dig = false, queue_hp = false;
400 u32 long_hpd_pulse_mask = 0;
401 u32 short_hpd_pulse_mask = 0;
402 enum hpd_pin pin;
398 403
399 if (!pin_mask) 404 if (!pin_mask)
400 return; 405 return;
401 406
402 spin_lock(&dev_priv->irq_lock); 407 spin_lock(&dev_priv->irq_lock);
408
409 /*
410 * Determine whether ->hpd_pulse() exists for each pin, and
411 * whether we have a short or a long pulse. This is needed
412 * as each pin may have up to two encoders (HDMI and DP) and
413 * only the one of them (DP) will have ->hpd_pulse().
414 */
403 for_each_intel_encoder(&dev_priv->drm, encoder) { 415 for_each_intel_encoder(&dev_priv->drm, encoder) {
404 enum hpd_pin pin = encoder->hpd_pin;
405 bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); 416 bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
417 enum port port = encoder->port;
418 bool long_hpd;
406 419
420 pin = encoder->hpd_pin;
407 if (!(BIT(pin) & pin_mask)) 421 if (!(BIT(pin) & pin_mask))
408 continue; 422 continue;
409 423
410 if (has_hpd_pulse) { 424 if (!has_hpd_pulse)
411 bool long_hpd = long_mask & BIT(pin); 425 continue;
412 enum port port = encoder->port;
413 426
414 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), 427 long_hpd = long_mask & BIT(pin);
415 long_hpd ? "long" : "short"); 428
416 /* 429 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
417 * For long HPD pulses we want to have the digital queue happen, 430 long_hpd ? "long" : "short");
418 * but we still want HPD storm detection to function. 431 queue_dig = true;
419 */ 432
420 queue_dig = true; 433 if (long_hpd) {
421 if (long_hpd) { 434 long_hpd_pulse_mask |= BIT(pin);
422 dev_priv->hotplug.long_port_mask |= (1 << port); 435 dev_priv->hotplug.long_port_mask |= BIT(port);
423 } else { 436 } else {
424 /* for short HPD just trigger the digital queue */ 437 short_hpd_pulse_mask |= BIT(pin);
425 dev_priv->hotplug.short_port_mask |= (1 << port); 438 dev_priv->hotplug.short_port_mask |= BIT(port);
426 continue;
427 }
428 } 439 }
440 }
441
442 /* Now process each pin just once */
443 for_each_hpd_pin(pin) {
444 bool long_hpd;
445
446 if (!(BIT(pin) & pin_mask))
447 continue;
429 448
430 if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { 449 if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
431 /* 450 /*
@@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
442 if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) 461 if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
443 continue; 462 continue;
444 463
445 if (!has_hpd_pulse) { 464 /*
465 * Delegate to ->hpd_pulse() if one of the encoders for this
466 * pin has it, otherwise let the hotplug_work deal with this
467 * pin directly.
468 */
469 if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
470 long_hpd = long_hpd_pulse_mask & BIT(pin);
471 } else {
446 dev_priv->hotplug.event_bits |= BIT(pin); 472 dev_priv->hotplug.event_bits |= BIT(pin);
473 long_hpd = true;
447 queue_hp = true; 474 queue_hp = true;
448 } 475 }
449 476
477 if (!long_hpd)
478 continue;
479
450 if (intel_hpd_irq_storm_detect(dev_priv, pin)) { 480 if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
451 dev_priv->hotplug.event_bits &= ~BIT(pin); 481 dev_priv->hotplug.event_bits &= ~BIT(pin);
452 storm_detected = true; 482 storm_detected = true;
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index cdf19553ffac..5d5336fbe7b0 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
297 lpe_audio_platdev_destroy(dev_priv); 297 lpe_audio_platdev_destroy(dev_priv);
298 298
299 irq_free_desc(dev_priv->lpe_audio.irq); 299 irq_free_desc(dev_priv->lpe_audio.irq);
300}
301 300
301 dev_priv->lpe_audio.irq = -1;
302 dev_priv->lpe_audio.platdev = NULL;
303}
302 304
303/** 305/**
304 * intel_lpe_audio_notify() - notify lpe audio event 306 * intel_lpe_audio_notify() - notify lpe audio event
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 43957bb37a42..37c94a54efcb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -424,7 +424,8 @@ static u64 execlists_update_context(struct i915_request *rq)
424 424
425 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); 425 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
426 426
427 /* True 32b PPGTT with dynamic page allocation: update PDP 427 /*
428 * True 32b PPGTT with dynamic page allocation: update PDP
428 * registers and point the unallocated PDPs to scratch page. 429 * registers and point the unallocated PDPs to scratch page.
429 * PML4 is allocated during ppgtt init, so this is not needed 430 * PML4 is allocated during ppgtt init, so this is not needed
430 * in 48-bit mode. 431 * in 48-bit mode.
@@ -432,6 +433,17 @@ static u64 execlists_update_context(struct i915_request *rq)
432 if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) 433 if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
433 execlists_update_context_pdps(ppgtt, reg_state); 434 execlists_update_context_pdps(ppgtt, reg_state);
434 435
436 /*
437 * Make sure the context image is complete before we submit it to HW.
438 *
439 * Ostensibly, writes (including the WCB) should be flushed prior to
440 * an uncached write such as our mmio register access, the empirical
441 * evidence (esp. on Braswell) suggests that the WC write into memory
442 * may not be visible to the HW prior to the completion of the UC
443 * register write and that we may begin execution from the context
444 * before its image is complete leading to invalid PD chasing.
445 */
446 wmb();
435 return ce->lrc_desc; 447 return ce->lrc_desc;
436} 448}
437 449
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db9b8328275..3fe358db1276 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2493 uint32_t method1, method2; 2493 uint32_t method1, method2;
2494 int cpp; 2494 int cpp;
2495 2495
2496 if (mem_value == 0)
2497 return U32_MAX;
2498
2496 if (!intel_wm_plane_visible(cstate, pstate)) 2499 if (!intel_wm_plane_visible(cstate, pstate))
2497 return 0; 2500 return 0;
2498 2501
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2522 uint32_t method1, method2; 2525 uint32_t method1, method2;
2523 int cpp; 2526 int cpp;
2524 2527
2528 if (mem_value == 0)
2529 return U32_MAX;
2530
2525 if (!intel_wm_plane_visible(cstate, pstate)) 2531 if (!intel_wm_plane_visible(cstate, pstate))
2526 return 0; 2532 return 0;
2527 2533
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2545{ 2551{
2546 int cpp; 2552 int cpp;
2547 2553
2554 if (mem_value == 0)
2555 return U32_MAX;
2556
2548 if (!intel_wm_plane_visible(cstate, pstate)) 2557 if (!intel_wm_plane_visible(cstate, pstate))
2549 return 0; 2558 return 0;
2550 2559
@@ -2881,8 +2890,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2881 * any underrun. If not able to get Dimm info assume 16GB dimm 2890 * any underrun. If not able to get Dimm info assume 16GB dimm
2882 * to avoid any underrun. 2891 * to avoid any underrun.
2883 */ 2892 */
2884 if (!dev_priv->dram_info.valid_dimm || 2893 if (dev_priv->dram_info.is_16gb_dimm)
2885 dev_priv->dram_info.is_16gb_dimm)
2886 wm[0] += 1; 2894 wm[0] += 1;
2887 2895
2888 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2896 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3009,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3009 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 3017 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3010} 3018}
3011 3019
3020static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3021{
3022 /*
3023 * On some SNB machines (Thinkpad X220 Tablet at least)
3024 * LP3 usage can cause vblank interrupts to be lost.
3025 * The DEIIR bit will go high but it looks like the CPU
3026 * never gets interrupted.
3027 *
3028 * It's not clear whether other interrupt source could
3029 * be affected or if this is somehow limited to vblank
3030 * interrupts only. To play it safe we disable LP3
3031 * watermarks entirely.
3032 */
3033 if (dev_priv->wm.pri_latency[3] == 0 &&
3034 dev_priv->wm.spr_latency[3] == 0 &&
3035 dev_priv->wm.cur_latency[3] == 0)
3036 return;
3037
3038 dev_priv->wm.pri_latency[3] = 0;
3039 dev_priv->wm.spr_latency[3] = 0;
3040 dev_priv->wm.cur_latency[3] = 0;
3041
3042 DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
3043 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3044 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3045 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3046}
3047
3012static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) 3048static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3013{ 3049{
3014 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); 3050 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3025,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3025 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); 3061 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3026 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 3062 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3027 3063
3028 if (IS_GEN6(dev_priv)) 3064 if (IS_GEN6(dev_priv)) {
3029 snb_wm_latency_quirk(dev_priv); 3065 snb_wm_latency_quirk(dev_priv);
3066 snb_wm_lp3_irq_quirk(dev_priv);
3067 }
3030} 3068}
3031 3069
3032static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) 3070static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d0ef50bf930a..187bb0ceb4ac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -91,6 +91,7 @@ static int
91gen4_render_ring_flush(struct i915_request *rq, u32 mode) 91gen4_render_ring_flush(struct i915_request *rq, u32 mode)
92{ 92{
93 u32 cmd, *cs; 93 u32 cmd, *cs;
94 int i;
94 95
95 /* 96 /*
96 * read/write caches: 97 * read/write caches:
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
127 cmd |= MI_INVALIDATE_ISP; 128 cmd |= MI_INVALIDATE_ISP;
128 } 129 }
129 130
130 cs = intel_ring_begin(rq, 2); 131 i = 2;
132 if (mode & EMIT_INVALIDATE)
133 i += 20;
134
135 cs = intel_ring_begin(rq, i);
131 if (IS_ERR(cs)) 136 if (IS_ERR(cs))
132 return PTR_ERR(cs); 137 return PTR_ERR(cs);
133 138
134 *cs++ = cmd; 139 *cs++ = cmd;
135 *cs++ = MI_NOOP; 140
141 /*
142 * A random delay to let the CS invalidate take effect? Without this
143 * delay, the GPU relocation path fails as the CS does not see
144 * the updated contents. Just as important, if we apply the flushes
145 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
146 * write and before the invalidate on the next batch), the relocations
147 * still fail. This implies that is a delay following invalidation
148 * that is required to reset the caches as opposed to a delay to
149 * ensure the memory is written.
150 */
151 if (mode & EMIT_INVALIDATE) {
152 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
153 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
154 PIPE_CONTROL_GLOBAL_GTT;
155 *cs++ = 0;
156 *cs++ = 0;
157
158 for (i = 0; i < 12; i++)
159 *cs++ = MI_FLUSH;
160
161 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
162 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
163 PIPE_CONTROL_GLOBAL_GTT;
164 *cs++ = 0;
165 *cs++ = 0;
166 }
167
168 *cs++ = cmd;
169
136 intel_ring_advance(rq, cs); 170 intel_ring_advance(rq, cs);
137 171
138 return 0; 172 return 0;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 0fdabce647ab..44e4491a4918 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2749,6 +2749,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
2749 }, 2749 },
2750 }, 2750 },
2751 { 2751 {
2752 .name = "DC off",
2753 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
2754 .ops = &gen9_dc_off_power_well_ops,
2755 .id = DISP_PW_ID_NONE,
2756 },
2757 {
2752 .name = "power well 2", 2758 .name = "power well 2",
2753 .domains = ICL_PW_2_POWER_DOMAINS, 2759 .domains = ICL_PW_2_POWER_DOMAINS,
2754 .ops = &hsw_power_well_ops, 2760 .ops = &hsw_power_well_ops,
@@ -2760,12 +2766,6 @@ static const struct i915_power_well_desc icl_power_wells[] = {
2760 }, 2766 },
2761 }, 2767 },
2762 { 2768 {
2763 .name = "DC off",
2764 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
2765 .ops = &gen9_dc_off_power_well_ops,
2766 .id = DISP_PW_ID_NONE,
2767 },
2768 {
2769 .name = "power well 3", 2769 .name = "power well 3",
2770 .domains = ICL_PW_3_POWER_DOMAINS, 2770 .domains = ICL_PW_3_POWER_DOMAINS,
2771 .ops = &hsw_power_well_ops, 2771 .ops = &hsw_power_well_ops,
@@ -3176,8 +3176,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3176void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, 3176void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3177 u8 req_slices) 3177 u8 req_slices)
3178{ 3178{
3179 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 3179 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3180 u32 val;
3181 bool ret; 3180 bool ret;
3182 3181
3183 if (req_slices > intel_dbuf_max_slices(dev_priv)) { 3182 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -3188,7 +3187,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3188 if (req_slices == hw_enabled_slices || req_slices == 0) 3187 if (req_slices == hw_enabled_slices || req_slices == 0)
3189 return; 3188 return;
3190 3189
3191 val = I915_READ(DBUF_CTL_S2);
3192 if (req_slices > hw_enabled_slices) 3190 if (req_slices > hw_enabled_slices)
3193 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); 3191 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3194 else 3192 else
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 5fd2f7bf3927..d3090a7537bb 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -302,13 +302,65 @@ skl_plane_max_stride(struct intel_plane *plane,
302 return min(8192 * cpp, 32768); 302 return min(8192 * cpp, 32768);
303} 303}
304 304
305static void
306skl_program_scaler(struct intel_plane *plane,
307 const struct intel_crtc_state *crtc_state,
308 const struct intel_plane_state *plane_state)
309{
310 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
311 enum pipe pipe = plane->pipe;
312 int scaler_id = plane_state->scaler_id;
313 const struct intel_scaler *scaler =
314 &crtc_state->scaler_state.scalers[scaler_id];
315 int crtc_x = plane_state->base.dst.x1;
316 int crtc_y = plane_state->base.dst.y1;
317 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
318 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
319 u16 y_hphase, uv_rgb_hphase;
320 u16 y_vphase, uv_rgb_vphase;
321 int hscale, vscale;
322
323 hscale = drm_rect_calc_hscale(&plane_state->base.src,
324 &plane_state->base.dst,
325 0, INT_MAX);
326 vscale = drm_rect_calc_vscale(&plane_state->base.src,
327 &plane_state->base.dst,
328 0, INT_MAX);
329
330 /* TODO: handle sub-pixel coordinates */
331 if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
332 y_hphase = skl_scaler_calc_phase(1, hscale, false);
333 y_vphase = skl_scaler_calc_phase(1, vscale, false);
334
335 /* MPEG2 chroma siting convention */
336 uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
337 uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
338 } else {
339 /* not used */
340 y_hphase = 0;
341 y_vphase = 0;
342
343 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
344 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
345 }
346
347 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
348 PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
349 I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
350 I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
351 PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
352 I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
353 PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
354 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
355 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
356}
357
305void 358void
306skl_update_plane(struct intel_plane *plane, 359skl_update_plane(struct intel_plane *plane,
307 const struct intel_crtc_state *crtc_state, 360 const struct intel_crtc_state *crtc_state,
308 const struct intel_plane_state *plane_state) 361 const struct intel_plane_state *plane_state)
309{ 362{
310 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 363 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
311 const struct drm_framebuffer *fb = plane_state->base.fb;
312 enum plane_id plane_id = plane->id; 364 enum plane_id plane_id = plane->id;
313 enum pipe pipe = plane->pipe; 365 enum pipe pipe = plane->pipe;
314 u32 plane_ctl = plane_state->ctl; 366 u32 plane_ctl = plane_state->ctl;
@@ -318,8 +370,6 @@ skl_update_plane(struct intel_plane *plane,
318 u32 aux_stride = skl_plane_stride(plane_state, 1); 370 u32 aux_stride = skl_plane_stride(plane_state, 1);
319 int crtc_x = plane_state->base.dst.x1; 371 int crtc_x = plane_state->base.dst.x1;
320 int crtc_y = plane_state->base.dst.y1; 372 int crtc_y = plane_state->base.dst.y1;
321 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
322 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
323 uint32_t x = plane_state->color_plane[0].x; 373 uint32_t x = plane_state->color_plane[0].x;
324 uint32_t y = plane_state->color_plane[0].y; 374 uint32_t y = plane_state->color_plane[0].y;
325 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; 375 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
@@ -329,8 +379,6 @@ skl_update_plane(struct intel_plane *plane,
329 /* Sizes are 0 based */ 379 /* Sizes are 0 based */
330 src_w--; 380 src_w--;
331 src_h--; 381 src_h--;
332 crtc_w--;
333 crtc_h--;
334 382
335 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 383 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
336 384
@@ -353,41 +401,8 @@ skl_update_plane(struct intel_plane *plane,
353 (plane_state->color_plane[1].y << 16) | 401 (plane_state->color_plane[1].y << 16) |
354 plane_state->color_plane[1].x); 402 plane_state->color_plane[1].x);
355 403
356 /* program plane scaler */
357 if (plane_state->scaler_id >= 0) { 404 if (plane_state->scaler_id >= 0) {
358 int scaler_id = plane_state->scaler_id; 405 skl_program_scaler(plane, crtc_state, plane_state);
359 const struct intel_scaler *scaler =
360 &crtc_state->scaler_state.scalers[scaler_id];
361 u16 y_hphase, uv_rgb_hphase;
362 u16 y_vphase, uv_rgb_vphase;
363
364 /* TODO: handle sub-pixel coordinates */
365 if (fb->format->format == DRM_FORMAT_NV12) {
366 y_hphase = skl_scaler_calc_phase(1, false);
367 y_vphase = skl_scaler_calc_phase(1, false);
368
369 /* MPEG2 chroma siting convention */
370 uv_rgb_hphase = skl_scaler_calc_phase(2, true);
371 uv_rgb_vphase = skl_scaler_calc_phase(2, false);
372 } else {
373 /* not used */
374 y_hphase = 0;
375 y_vphase = 0;
376
377 uv_rgb_hphase = skl_scaler_calc_phase(1, false);
378 uv_rgb_vphase = skl_scaler_calc_phase(1, false);
379 }
380
381 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
382 PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
383 I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
384 I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
385 PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
386 I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
387 PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
388 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
389 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
390 ((crtc_w + 1) << 16)|(crtc_h + 1));
391 406
392 I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0); 407 I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
393 } else { 408 } else {
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 8d03f64eabd7..5c22f2c8d4cf 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
551 err = igt_check_page_sizes(vma); 551 err = igt_check_page_sizes(vma);
552 552
553 if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { 553 if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
554 pr_err("page_sizes.gtt=%u, expected %lu\n", 554 pr_err("page_sizes.gtt=%u, expected %llu\n",
555 vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); 555 vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
556 err = -EINVAL; 556 err = -EINVAL;
557 } 557 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 8e2e269db97e..127d81513671 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg)
1337 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1337 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1338 if (vma->node.start != total || 1338 if (vma->node.start != total ||
1339 vma->node.size != 2*I915_GTT_PAGE_SIZE) { 1339 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1340 pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", 1340 pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1341 vma->node.start, vma->node.size, 1341 vma->node.start, vma->node.size,
1342 total, 2*I915_GTT_PAGE_SIZE); 1342 total, 2*I915_GTT_PAGE_SIZE);
1343 err = -EINVAL; 1343 err = -EINVAL;
@@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg)
1386 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1386 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1387 if (vma->node.start != total || 1387 if (vma->node.start != total ||
1388 vma->node.size != 2*I915_GTT_PAGE_SIZE) { 1388 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1389 pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", 1389 pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1390 vma->node.start, vma->node.size, 1390 vma->node.start, vma->node.size,
1391 total, 2*I915_GTT_PAGE_SIZE); 1391 total, 2*I915_GTT_PAGE_SIZE);
1392 err = -EINVAL; 1392 err = -EINVAL;
@@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg)
1430 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1430 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1431 if (vma->node.start != offset || 1431 if (vma->node.start != offset ||
1432 vma->node.size != 2*I915_GTT_PAGE_SIZE) { 1432 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1433 pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", 1433 pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1434 vma->node.start, vma->node.size, 1434 vma->node.start, vma->node.size,
1435 offset, 2*I915_GTT_PAGE_SIZE); 1435 offset, 2*I915_GTT_PAGE_SIZE);
1436 err = -EINVAL; 1436 err = -EINVAL;
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 05520202c967..191b314f9e9e 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -45,6 +45,7 @@ struct meson_crtc {
45 struct drm_crtc base; 45 struct drm_crtc base;
46 struct drm_pending_vblank_event *event; 46 struct drm_pending_vblank_event *event;
47 struct meson_drm *priv; 47 struct meson_drm *priv;
48 bool enabled;
48}; 49};
49#define to_meson_crtc(x) container_of(x, struct meson_crtc, base) 50#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
50 51
@@ -80,8 +81,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
80 81
81}; 82};
82 83
83static void meson_crtc_atomic_enable(struct drm_crtc *crtc, 84static void meson_crtc_enable(struct drm_crtc *crtc)
84 struct drm_crtc_state *old_state)
85{ 85{
86 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 86 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
87 struct drm_crtc_state *crtc_state = crtc->state; 87 struct drm_crtc_state *crtc_state = crtc->state;
@@ -101,6 +101,22 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
101 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE, 101 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
102 priv->io_base + _REG(VPP_MISC)); 102 priv->io_base + _REG(VPP_MISC));
103 103
104 drm_crtc_vblank_on(crtc);
105
106 meson_crtc->enabled = true;
107}
108
109static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
110 struct drm_crtc_state *old_state)
111{
112 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
113 struct meson_drm *priv = meson_crtc->priv;
114
115 DRM_DEBUG_DRIVER("\n");
116
117 if (!meson_crtc->enabled)
118 meson_crtc_enable(crtc);
119
104 priv->viu.osd1_enabled = true; 120 priv->viu.osd1_enabled = true;
105} 121}
106 122
@@ -110,6 +126,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
110 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 126 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
111 struct meson_drm *priv = meson_crtc->priv; 127 struct meson_drm *priv = meson_crtc->priv;
112 128
129 drm_crtc_vblank_off(crtc);
130
113 priv->viu.osd1_enabled = false; 131 priv->viu.osd1_enabled = false;
114 priv->viu.osd1_commit = false; 132 priv->viu.osd1_commit = false;
115 133
@@ -124,6 +142,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
124 142
125 crtc->state->event = NULL; 143 crtc->state->event = NULL;
126 } 144 }
145
146 meson_crtc->enabled = false;
127} 147}
128 148
129static void meson_crtc_atomic_begin(struct drm_crtc *crtc, 149static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -132,6 +152,9 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
132 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 152 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
133 unsigned long flags; 153 unsigned long flags;
134 154
155 if (crtc->state->enable && !meson_crtc->enabled)
156 meson_crtc_enable(crtc);
157
135 if (crtc->state->event) { 158 if (crtc->state->event) {
136 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 159 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
137 160
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index df7247cd93f9..2cb2ad26d716 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -706,6 +706,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
706 .reg_read = meson_dw_hdmi_reg_read, 706 .reg_read = meson_dw_hdmi_reg_read,
707 .reg_write = meson_dw_hdmi_reg_write, 707 .reg_write = meson_dw_hdmi_reg_write,
708 .max_register = 0x10000, 708 .max_register = 0x10000,
709 .fast_io = true,
709}; 710};
710 711
711static bool meson_hdmi_connector_is_available(struct device *dev) 712static bool meson_hdmi_connector_is_available(struct device *dev)
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 514245e69b38..be76f3d64bf2 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -71,6 +71,7 @@
71 */ 71 */
72 72
73/* HHI Registers */ 73/* HHI Registers */
74#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
74#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ 75#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
75#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ 76#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
76#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */ 77#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
@@ -714,6 +715,7 @@ struct meson_hdmi_venc_vic_mode {
714 { 5, &meson_hdmi_encp_mode_1080i60 }, 715 { 5, &meson_hdmi_encp_mode_1080i60 },
715 { 20, &meson_hdmi_encp_mode_1080i50 }, 716 { 20, &meson_hdmi_encp_mode_1080i50 },
716 { 32, &meson_hdmi_encp_mode_1080p24 }, 717 { 32, &meson_hdmi_encp_mode_1080p24 },
718 { 33, &meson_hdmi_encp_mode_1080p50 },
717 { 34, &meson_hdmi_encp_mode_1080p30 }, 719 { 34, &meson_hdmi_encp_mode_1080p30 },
718 { 31, &meson_hdmi_encp_mode_1080p50 }, 720 { 31, &meson_hdmi_encp_mode_1080p50 },
719 { 16, &meson_hdmi_encp_mode_1080p60 }, 721 { 16, &meson_hdmi_encp_mode_1080p60 },
@@ -854,6 +856,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
854 unsigned int sof_lines; 856 unsigned int sof_lines;
855 unsigned int vsync_lines; 857 unsigned int vsync_lines;
856 858
859 /* Use VENCI for 480i and 576i and double HDMI pixels */
860 if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
861 hdmi_repeat = true;
862 use_enci = true;
863 venc_hdmi_latency = 1;
864 }
865
857 if (meson_venc_hdmi_supported_vic(vic)) { 866 if (meson_venc_hdmi_supported_vic(vic)) {
858 vmode = meson_venc_hdmi_get_vic_vmode(vic); 867 vmode = meson_venc_hdmi_get_vic_vmode(vic);
859 if (!vmode) { 868 if (!vmode) {
@@ -865,13 +874,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
865 } else { 874 } else {
866 meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt); 875 meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
867 vmode = &vmode_dmt; 876 vmode = &vmode_dmt;
868 } 877 use_enci = false;
869
870 /* Use VENCI for 480i and 576i and double HDMI pixels */
871 if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
872 hdmi_repeat = true;
873 use_enci = true;
874 venc_hdmi_latency = 1;
875 } 878 }
876 879
877 /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */ 880 /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */
@@ -1529,10 +1532,12 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
1529void meson_venc_enable_vsync(struct meson_drm *priv) 1532void meson_venc_enable_vsync(struct meson_drm *priv)
1530{ 1533{
1531 writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL)); 1534 writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
1535 regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
1532} 1536}
1533 1537
1534void meson_venc_disable_vsync(struct meson_drm *priv) 1538void meson_venc_disable_vsync(struct meson_drm *priv)
1535{ 1539{
1540 regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0);
1536 writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL)); 1541 writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
1537} 1542}
1538 1543
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 6bcfa527c180..26a0857878bf 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
184 if (lut_sel == VIU_LUT_OSD_OETF) { 184 if (lut_sel == VIU_LUT_OSD_OETF) {
185 writel(0, priv->io_base + _REG(addr_port)); 185 writel(0, priv->io_base + _REG(addr_port));
186 186
187 for (i = 0; i < 20; i++) 187 for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
188 writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16), 188 writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
189 priv->io_base + _REG(data_port)); 189 priv->io_base + _REG(data_port));
190 190
191 writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16), 191 writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
192 priv->io_base + _REG(data_port)); 192 priv->io_base + _REG(data_port));
193 193
194 for (i = 0; i < 20; i++) 194 for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
195 writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16), 195 writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
196 priv->io_base + _REG(data_port)); 196 priv->io_base + _REG(data_port));
197 197
198 for (i = 0; i < 20; i++) 198 for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
199 writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16), 199 writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
200 priv->io_base + _REG(data_port)); 200 priv->io_base + _REG(data_port));
201 201
@@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
211 } else if (lut_sel == VIU_LUT_OSD_EOTF) { 211 } else if (lut_sel == VIU_LUT_OSD_EOTF) {
212 writel(0, priv->io_base + _REG(addr_port)); 212 writel(0, priv->io_base + _REG(addr_port));
213 213
214 for (i = 0; i < 20; i++) 214 for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
215 writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16), 215 writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
216 priv->io_base + _REG(data_port)); 216 priv->io_base + _REG(data_port));
217 217
218 writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16), 218 writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
219 priv->io_base + _REG(data_port)); 219 priv->io_base + _REG(data_port));
220 220
221 for (i = 0; i < 20; i++) 221 for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
222 writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16), 222 writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
223 priv->io_base + _REG(data_port)); 223 priv->io_base + _REG(data_port));
224 224
225 for (i = 0; i < 20; i++) 225 for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
226 writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16), 226 writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
227 priv->io_base + _REG(data_port)); 227 priv->io_base + _REG(data_port));
228 228
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 394c129cfb3b..0a485c5b982e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5409,11 +5409,14 @@ static int dsi_probe(struct platform_device *pdev)
5409 5409
5410 /* DSI on OMAP3 doesn't have register DSI_GNQ, set number 5410 /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
5411 * of data to 3 by default */ 5411 * of data to 3 by default */
5412 if (dsi->data->quirks & DSI_QUIRK_GNQ) 5412 if (dsi->data->quirks & DSI_QUIRK_GNQ) {
5413 dsi_runtime_get(dsi);
5413 /* NB_DATA_LANES */ 5414 /* NB_DATA_LANES */
5414 dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9); 5415 dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
5415 else 5416 dsi_runtime_put(dsi);
5417 } else {
5416 dsi->num_lanes_supported = 3; 5418 dsi->num_lanes_supported = 3;
5419 }
5417 5420
5418 r = dsi_init_output(dsi); 5421 r = dsi_init_output(dsi);
5419 if (r) 5422 if (r)
@@ -5426,15 +5429,19 @@ static int dsi_probe(struct platform_device *pdev)
5426 } 5429 }
5427 5430
5428 r = of_platform_populate(dev->of_node, NULL, NULL, dev); 5431 r = of_platform_populate(dev->of_node, NULL, NULL, dev);
5429 if (r) 5432 if (r) {
5430 DSSERR("Failed to populate DSI child devices: %d\n", r); 5433 DSSERR("Failed to populate DSI child devices: %d\n", r);
5434 goto err_uninit_output;
5435 }
5431 5436
5432 r = component_add(&pdev->dev, &dsi_component_ops); 5437 r = component_add(&pdev->dev, &dsi_component_ops);
5433 if (r) 5438 if (r)
5434 goto err_uninit_output; 5439 goto err_of_depopulate;
5435 5440
5436 return 0; 5441 return 0;
5437 5442
5443err_of_depopulate:
5444 of_platform_depopulate(dev);
5438err_uninit_output: 5445err_uninit_output:
5439 dsi_uninit_output(dsi); 5446 dsi_uninit_output(dsi);
5440err_pm_disable: 5447err_pm_disable:
@@ -5470,19 +5477,12 @@ static int dsi_runtime_suspend(struct device *dev)
5470 /* wait for current handler to finish before turning the DSI off */ 5477 /* wait for current handler to finish before turning the DSI off */
5471 synchronize_irq(dsi->irq); 5478 synchronize_irq(dsi->irq);
5472 5479
5473 dispc_runtime_put(dsi->dss->dispc);
5474
5475 return 0; 5480 return 0;
5476} 5481}
5477 5482
5478static int dsi_runtime_resume(struct device *dev) 5483static int dsi_runtime_resume(struct device *dev)
5479{ 5484{
5480 struct dsi_data *dsi = dev_get_drvdata(dev); 5485 struct dsi_data *dsi = dev_get_drvdata(dev);
5481 int r;
5482
5483 r = dispc_runtime_get(dsi->dss->dispc);
5484 if (r)
5485 return r;
5486 5486
5487 dsi->is_enabled = true; 5487 dsi->is_enabled = true;
5488 /* ensure the irq handler sees the is_enabled value */ 5488 /* ensure the irq handler sees the is_enabled value */
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 1aaf260aa9b8..7553c7fc1c45 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1484,16 +1484,23 @@ static int dss_probe(struct platform_device *pdev)
1484 dss); 1484 dss);
1485 1485
1486 /* Add all the child devices as components. */ 1486 /* Add all the child devices as components. */
1487 r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
1488 if (r)
1489 goto err_uninit_debugfs;
1490
1487 omapdss_gather_components(&pdev->dev); 1491 omapdss_gather_components(&pdev->dev);
1488 1492
1489 device_for_each_child(&pdev->dev, &match, dss_add_child_component); 1493 device_for_each_child(&pdev->dev, &match, dss_add_child_component);
1490 1494
1491 r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); 1495 r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
1492 if (r) 1496 if (r)
1493 goto err_uninit_debugfs; 1497 goto err_of_depopulate;
1494 1498
1495 return 0; 1499 return 0;
1496 1500
1501err_of_depopulate:
1502 of_platform_depopulate(&pdev->dev);
1503
1497err_uninit_debugfs: 1504err_uninit_debugfs:
1498 dss_debugfs_remove_file(dss->debugfs.clk); 1505 dss_debugfs_remove_file(dss->debugfs.clk);
1499 dss_debugfs_remove_file(dss->debugfs.dss); 1506 dss_debugfs_remove_file(dss->debugfs.dss);
@@ -1522,6 +1529,8 @@ static int dss_remove(struct platform_device *pdev)
1522{ 1529{
1523 struct dss_device *dss = platform_get_drvdata(pdev); 1530 struct dss_device *dss = platform_get_drvdata(pdev);
1524 1531
1532 of_platform_depopulate(&pdev->dev);
1533
1525 component_master_del(&pdev->dev, &dss_component_ops); 1534 component_master_del(&pdev->dev, &dss_component_ops);
1526 1535
1527 dss_debugfs_remove_file(dss->debugfs.clk); 1536 dss_debugfs_remove_file(dss->debugfs.clk);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cf6230eac31a..aabdda394c9c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -635,10 +635,14 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
635 635
636 hdmi->dss = dss; 636 hdmi->dss = dss;
637 637
638 r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp); 638 r = hdmi_runtime_get(hdmi);
639 if (r) 639 if (r)
640 return r; 640 return r;
641 641
642 r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
643 if (r)
644 goto err_runtime_put;
645
642 r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp); 646 r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
643 if (r) 647 if (r)
644 goto err_pll_uninit; 648 goto err_pll_uninit;
@@ -652,12 +656,16 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
652 hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, 656 hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
653 hdmi); 657 hdmi);
654 658
659 hdmi_runtime_put(hdmi);
660
655 return 0; 661 return 0;
656 662
657err_cec_uninit: 663err_cec_uninit:
658 hdmi4_cec_uninit(&hdmi->core); 664 hdmi4_cec_uninit(&hdmi->core);
659err_pll_uninit: 665err_pll_uninit:
660 hdmi_pll_uninit(&hdmi->pll); 666 hdmi_pll_uninit(&hdmi->pll);
667err_runtime_put:
668 hdmi_runtime_put(hdmi);
661 return r; 669 return r;
662} 670}
663 671
@@ -833,32 +841,6 @@ static int hdmi4_remove(struct platform_device *pdev)
833 return 0; 841 return 0;
834} 842}
835 843
836static int hdmi_runtime_suspend(struct device *dev)
837{
838 struct omap_hdmi *hdmi = dev_get_drvdata(dev);
839
840 dispc_runtime_put(hdmi->dss->dispc);
841
842 return 0;
843}
844
845static int hdmi_runtime_resume(struct device *dev)
846{
847 struct omap_hdmi *hdmi = dev_get_drvdata(dev);
848 int r;
849
850 r = dispc_runtime_get(hdmi->dss->dispc);
851 if (r < 0)
852 return r;
853
854 return 0;
855}
856
857static const struct dev_pm_ops hdmi_pm_ops = {
858 .runtime_suspend = hdmi_runtime_suspend,
859 .runtime_resume = hdmi_runtime_resume,
860};
861
862static const struct of_device_id hdmi_of_match[] = { 844static const struct of_device_id hdmi_of_match[] = {
863 { .compatible = "ti,omap4-hdmi", }, 845 { .compatible = "ti,omap4-hdmi", },
864 {}, 846 {},
@@ -869,7 +851,6 @@ struct platform_driver omapdss_hdmi4hw_driver = {
869 .remove = hdmi4_remove, 851 .remove = hdmi4_remove,
870 .driver = { 852 .driver = {
871 .name = "omapdss_hdmi", 853 .name = "omapdss_hdmi",
872 .pm = &hdmi_pm_ops,
873 .of_match_table = hdmi_of_match, 854 .of_match_table = hdmi_of_match,
874 .suppress_bind_attrs = true, 855 .suppress_bind_attrs = true,
875 }, 856 },
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index b0e4a7463f8c..9e8556f67a29 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -825,32 +825,6 @@ static int hdmi5_remove(struct platform_device *pdev)
825 return 0; 825 return 0;
826} 826}
827 827
828static int hdmi_runtime_suspend(struct device *dev)
829{
830 struct omap_hdmi *hdmi = dev_get_drvdata(dev);
831
832 dispc_runtime_put(hdmi->dss->dispc);
833
834 return 0;
835}
836
837static int hdmi_runtime_resume(struct device *dev)
838{
839 struct omap_hdmi *hdmi = dev_get_drvdata(dev);
840 int r;
841
842 r = dispc_runtime_get(hdmi->dss->dispc);
843 if (r < 0)
844 return r;
845
846 return 0;
847}
848
849static const struct dev_pm_ops hdmi_pm_ops = {
850 .runtime_suspend = hdmi_runtime_suspend,
851 .runtime_resume = hdmi_runtime_resume,
852};
853
854static const struct of_device_id hdmi_of_match[] = { 828static const struct of_device_id hdmi_of_match[] = {
855 { .compatible = "ti,omap5-hdmi", }, 829 { .compatible = "ti,omap5-hdmi", },
856 { .compatible = "ti,dra7-hdmi", }, 830 { .compatible = "ti,dra7-hdmi", },
@@ -862,7 +836,6 @@ struct platform_driver omapdss_hdmi5hw_driver = {
862 .remove = hdmi5_remove, 836 .remove = hdmi5_remove,
863 .driver = { 837 .driver = {
864 .name = "omapdss_hdmi5", 838 .name = "omapdss_hdmi5",
865 .pm = &hdmi_pm_ops,
866 .of_match_table = hdmi_of_match, 839 .of_match_table = hdmi_of_match,
867 .suppress_bind_attrs = true, 840 .suppress_bind_attrs = true,
868 }, 841 },
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index ff0b18c8e4ac..b5f52727f8b1 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -946,19 +946,12 @@ static int venc_runtime_suspend(struct device *dev)
946 if (venc->tv_dac_clk) 946 if (venc->tv_dac_clk)
947 clk_disable_unprepare(venc->tv_dac_clk); 947 clk_disable_unprepare(venc->tv_dac_clk);
948 948
949 dispc_runtime_put(venc->dss->dispc);
950
951 return 0; 949 return 0;
952} 950}
953 951
954static int venc_runtime_resume(struct device *dev) 952static int venc_runtime_resume(struct device *dev)
955{ 953{
956 struct venc_device *venc = dev_get_drvdata(dev); 954 struct venc_device *venc = dev_get_drvdata(dev);
957 int r;
958
959 r = dispc_runtime_get(venc->dss->dispc);
960 if (r < 0)
961 return r;
962 955
963 if (venc->tv_dac_clk) 956 if (venc->tv_dac_clk)
964 clk_prepare_enable(venc->tv_dac_clk); 957 clk_prepare_enable(venc->tv_dac_clk);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 62928ec0e7db..caffc547ef97 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -350,11 +350,14 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
350static void omap_crtc_atomic_enable(struct drm_crtc *crtc, 350static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
351 struct drm_crtc_state *old_state) 351 struct drm_crtc_state *old_state)
352{ 352{
353 struct omap_drm_private *priv = crtc->dev->dev_private;
353 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 354 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
354 int ret; 355 int ret;
355 356
356 DBG("%s", omap_crtc->name); 357 DBG("%s", omap_crtc->name);
357 358
359 priv->dispc_ops->runtime_get(priv->dispc);
360
358 spin_lock_irq(&crtc->dev->event_lock); 361 spin_lock_irq(&crtc->dev->event_lock);
359 drm_crtc_vblank_on(crtc); 362 drm_crtc_vblank_on(crtc);
360 ret = drm_crtc_vblank_get(crtc); 363 ret = drm_crtc_vblank_get(crtc);
@@ -367,6 +370,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
367static void omap_crtc_atomic_disable(struct drm_crtc *crtc, 370static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
368 struct drm_crtc_state *old_state) 371 struct drm_crtc_state *old_state)
369{ 372{
373 struct omap_drm_private *priv = crtc->dev->dev_private;
370 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 374 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
371 375
372 DBG("%s", omap_crtc->name); 376 DBG("%s", omap_crtc->name);
@@ -379,6 +383,8 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
379 spin_unlock_irq(&crtc->dev->event_lock); 383 spin_unlock_irq(&crtc->dev->event_lock);
380 384
381 drm_crtc_vblank_off(crtc); 385 drm_crtc_vblank_off(crtc);
386
387 priv->dispc_ops->runtime_put(priv->dispc);
382} 388}
383 389
384static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, 390static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index d85f0a1c1581..cebf313c6e1f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -202,10 +202,25 @@ void rcar_du_group_put(struct rcar_du_group *rgrp)
202 202
203static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) 203static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
204{ 204{
205 struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2]; 205 struct rcar_du_device *rcdu = rgrp->dev;
206
207 /*
208 * Group start/stop is controlled by the DRES and DEN bits of DSYSR0
209 * for the first group and DSYSR2 for the second group. On most DU
210 * instances, this maps to the first CRTC of the group, and we can just
211 * use rcar_du_crtc_dsysr_clr_set() to access the correct DSYSR. On
212 * M3-N, however, DU2 doesn't exist, but DSYSR2 does. We thus need to
213 * access the register directly using group read/write.
214 */
215 if (rcdu->info->channels_mask & BIT(rgrp->index * 2)) {
216 struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
206 217
207 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN, 218 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
208 start ? DSYSR_DEN : DSYSR_DRES); 219 start ? DSYSR_DEN : DSYSR_DRES);
220 } else {
221 rcar_du_group_write(rgrp, DSYSR,
222 start ? DSYSR_DEN : DSYSR_DRES);
223 }
209} 224}
210 225
211void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) 226void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index af7dcb6da351..e7eb0d1e17be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -75,7 +75,7 @@ static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
75 75
76 DRM_DEBUG_DRIVER("Enabling LVDS output\n"); 76 DRM_DEBUG_DRIVER("Enabling LVDS output\n");
77 77
78 if (!IS_ERR(tcon->panel)) { 78 if (tcon->panel) {
79 drm_panel_prepare(tcon->panel); 79 drm_panel_prepare(tcon->panel);
80 drm_panel_enable(tcon->panel); 80 drm_panel_enable(tcon->panel);
81 } 81 }
@@ -88,7 +88,7 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
88 88
89 DRM_DEBUG_DRIVER("Disabling LVDS output\n"); 89 DRM_DEBUG_DRIVER("Disabling LVDS output\n");
90 90
91 if (!IS_ERR(tcon->panel)) { 91 if (tcon->panel) {
92 drm_panel_disable(tcon->panel); 92 drm_panel_disable(tcon->panel);
93 drm_panel_unprepare(tcon->panel); 93 drm_panel_unprepare(tcon->panel);
94 } 94 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index bf068da6b12e..f4a22689eb54 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -135,7 +135,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
135 135
136 DRM_DEBUG_DRIVER("Enabling RGB output\n"); 136 DRM_DEBUG_DRIVER("Enabling RGB output\n");
137 137
138 if (!IS_ERR(tcon->panel)) { 138 if (tcon->panel) {
139 drm_panel_prepare(tcon->panel); 139 drm_panel_prepare(tcon->panel);
140 drm_panel_enable(tcon->panel); 140 drm_panel_enable(tcon->panel);
141 } 141 }
@@ -148,7 +148,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
148 148
149 DRM_DEBUG_DRIVER("Disabling RGB output\n"); 149 DRM_DEBUG_DRIVER("Disabling RGB output\n");
150 150
151 if (!IS_ERR(tcon->panel)) { 151 if (tcon->panel) {
152 drm_panel_disable(tcon->panel); 152 drm_panel_disable(tcon->panel);
153 drm_panel_unprepare(tcon->panel); 153 drm_panel_unprepare(tcon->panel);
154 } 154 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c78cd35a1294..f949287d926c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -491,7 +491,8 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
491 sun4i_tcon0_mode_set_common(tcon, mode); 491 sun4i_tcon0_mode_set_common(tcon, mode);
492 492
493 /* Set dithering if needed */ 493 /* Set dithering if needed */
494 sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector); 494 if (tcon->panel)
495 sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
495 496
496 /* Adjust clock delay */ 497 /* Adjust clock delay */
497 clk_delay = sun4i_tcon_get_clk_delay(mode, 0); 498 clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -555,7 +556,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
555 * Following code is a way to avoid quirks all around TCON 556 * Following code is a way to avoid quirks all around TCON
556 * and DOTCLOCK drivers. 557 * and DOTCLOCK drivers.
557 */ 558 */
558 if (!IS_ERR(tcon->panel)) { 559 if (tcon->panel) {
559 struct drm_panel *panel = tcon->panel; 560 struct drm_panel *panel = tcon->panel;
560 struct drm_connector *connector = panel->connector; 561 struct drm_connector *connector = panel->connector;
561 struct drm_display_info display_info = connector->display_info; 562 struct drm_display_info display_info = connector->display_info;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 127468785f74..1f94b9affe4b 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -214,6 +214,12 @@ static int vc4_atomic_commit(struct drm_device *dev,
214 return 0; 214 return 0;
215 } 215 }
216 216
217 /* We know for sure we don't want an async update here. Set
218 * state->legacy_cursor_update to false to prevent
219 * drm_atomic_helper_setup_commit() from auto-completing
220 * commit->flip_done.
221 */
222 state->legacy_cursor_update = false;
217 ret = drm_atomic_helper_setup_commit(state, nonblock); 223 ret = drm_atomic_helper_setup_commit(state, nonblock);
218 if (ret) 224 if (ret)
219 return ret; 225 return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 9dc3fcbd290b..c6635f23918a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -807,7 +807,7 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
807static void vc4_plane_atomic_async_update(struct drm_plane *plane, 807static void vc4_plane_atomic_async_update(struct drm_plane *plane,
808 struct drm_plane_state *state) 808 struct drm_plane_state *state)
809{ 809{
810 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state); 810 struct vc4_plane_state *vc4_state, *new_vc4_state;
811 811
812 if (plane->state->fb != state->fb) { 812 if (plane->state->fb != state->fb) {
813 vc4_plane_async_set_fb(plane, state->fb); 813 vc4_plane_async_set_fb(plane, state->fb);
@@ -828,7 +828,18 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
828 plane->state->src_y = state->src_y; 828 plane->state->src_y = state->src_y;
829 829
830 /* Update the display list based on the new crtc_x/y. */ 830 /* Update the display list based on the new crtc_x/y. */
831 vc4_plane_atomic_check(plane, plane->state); 831 vc4_plane_atomic_check(plane, state);
832
833 new_vc4_state = to_vc4_plane_state(state);
834 vc4_state = to_vc4_plane_state(plane->state);
835
836 /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
837 vc4_state->dlist[vc4_state->pos0_offset] =
838 new_vc4_state->dlist[vc4_state->pos0_offset];
839 vc4_state->dlist[vc4_state->pos2_offset] =
840 new_vc4_state->dlist[vc4_state->pos2_offset];
841 vc4_state->dlist[vc4_state->ptr0_offset] =
842 new_vc4_state->dlist[vc4_state->ptr0_offset];
832 843
833 /* Note that we can't just call vc4_plane_write_dlist() 844 /* Note that we can't just call vc4_plane_write_dlist()
834 * because that would smash the context data that the HVS is 845 * because that would smash the context data that the HVS is
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf2a18571d48..a132c37d7334 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
380 mutex_unlock(&vgasr_mutex); 380 mutex_unlock(&vgasr_mutex);
381 return -EINVAL; 381 return -EINVAL;
382 } 382 }
383 /* notify if GPU has been already bound */
384 if (ops->gpu_bound)
385 ops->gpu_bound(pdev, id);
383 } 386 }
384 mutex_unlock(&vgasr_mutex); 387 mutex_unlock(&vgasr_mutex);
385 388
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index aec253b44156..3cd7229b6e54 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -660,6 +660,20 @@ exit:
660 return ret; 660 return ret;
661} 661}
662 662
663static int alps_sp_open(struct input_dev *dev)
664{
665 struct hid_device *hid = input_get_drvdata(dev);
666
667 return hid_hw_open(hid);
668}
669
670static void alps_sp_close(struct input_dev *dev)
671{
672 struct hid_device *hid = input_get_drvdata(dev);
673
674 hid_hw_close(hid);
675}
676
663static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) 677static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
664{ 678{
665 struct alps_dev *data = hid_get_drvdata(hdev); 679 struct alps_dev *data = hid_get_drvdata(hdev);
@@ -733,6 +747,10 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
733 input2->id.version = input->id.version; 747 input2->id.version = input->id.version;
734 input2->dev.parent = input->dev.parent; 748 input2->dev.parent = input->dev.parent;
735 749
750 input_set_drvdata(input2, hdev);
751 input2->open = alps_sp_open;
752 input2->close = alps_sp_close;
753
736 __set_bit(EV_KEY, input2->evbit); 754 __set_bit(EV_KEY, input2->evbit);
737 data->sp_btn_cnt = (data->sp_btn_info & 0x0F); 755 data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
738 for (i = 0; i < data->sp_btn_cnt; i++) 756 for (i = 0; i < data->sp_btn_cnt; i++)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index dc6d6477e961..a1fa2fc8c9b5 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -359,6 +359,9 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
359 u32 value; 359 u32 value;
360 int ret; 360 int ret;
361 361
362 if (!IS_ENABLED(CONFIG_ASUS_WMI))
363 return false;
364
362 ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 365 ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2,
363 ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value); 366 ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value);
364 hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value); 367 hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value);
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index b372854cf38d..704049e62d58 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device,
309 hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, 309 hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
310 input_dev->input_buf, len, 1); 310 input_dev->input_buf, len, 1);
311 311
312 pm_wakeup_event(&input_dev->device->device, 0); 312 pm_wakeup_hard_event(&input_dev->device->device);
313 313
314 break; 314 break;
315 default: 315 default:
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f63489c882bb..ed35c9a9a110 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -275,6 +275,9 @@
275 275
276#define USB_VENDOR_ID_CIDC 0x1677 276#define USB_VENDOR_ID_CIDC 0x1677
277 277
278#define I2C_VENDOR_ID_CIRQUE 0x0488
279#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F
280
278#define USB_VENDOR_ID_CJTOUCH 0x24b8 281#define USB_VENDOR_ID_CJTOUCH 0x24b8
279#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020 282#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
280#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040 283#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
@@ -707,6 +710,7 @@
707#define USB_VENDOR_ID_LG 0x1fd2 710#define USB_VENDOR_ID_LG 0x1fd2
708#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 711#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
709#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007 712#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
713#define I2C_DEVICE_ID_LG_8001 0x8001
710 714
711#define USB_VENDOR_ID_LOGITECH 0x046d 715#define USB_VENDOR_ID_LOGITECH 0x046d
712#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e 716#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -805,6 +809,7 @@
805#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 809#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
806#define USB_DEVICE_ID_MS_POWER_COVER 0x07da 810#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
807#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd 811#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
812#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
808 813
809#define USB_VENDOR_ID_MOJO 0x8282 814#define USB_VENDOR_ID_MOJO 0x8282
810#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 815#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -927,6 +932,9 @@
927#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003 932#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
928#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 933#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
929 934
935#define I2C_VENDOR_ID_RAYDIUM 0x2386
936#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
937
930#define USB_VENDOR_ID_RAZER 0x1532 938#define USB_VENDOR_ID_RAZER 0x1532
931#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D 939#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
932 940
@@ -1040,6 +1048,7 @@
1040#define USB_VENDOR_ID_SYMBOL 0x05e0 1048#define USB_VENDOR_ID_SYMBOL 0x05e0
1041#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800 1049#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
1042#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 1050#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
1051#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
1043 1052
1044#define USB_VENDOR_ID_SYNAPTICS 0x06cb 1053#define USB_VENDOR_ID_SYNAPTICS 0x06cb
1045#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 1054#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
@@ -1201,6 +1210,8 @@
1201#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22 1210#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
1202#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 1211#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
1203#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72 1212#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
1213#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f
1214#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22 0x4e22
1204 1215
1205 1216
1206#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */ 1217#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a2f74e6adc70..d6fab5798487 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -325,6 +325,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
325 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, 325 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
326 USB_DEVICE_ID_ELECOM_BM084), 326 USB_DEVICE_ID_ELECOM_BM084),
327 HID_BATTERY_QUIRK_IGNORE }, 327 HID_BATTERY_QUIRK_IGNORE },
328 { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
329 USB_DEVICE_ID_SYMBOL_SCANNER_3),
330 HID_BATTERY_QUIRK_IGNORE },
328 {} 331 {}
329}; 332};
330 333
@@ -1838,47 +1841,3 @@ void hidinput_disconnect(struct hid_device *hid)
1838} 1841}
1839EXPORT_SYMBOL_GPL(hidinput_disconnect); 1842EXPORT_SYMBOL_GPL(hidinput_disconnect);
1840 1843
1841/**
1842 * hid_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
1843 * events given a high-resolution wheel
1844 * movement.
1845 * @counter: a hid_scroll_counter struct describing the wheel.
1846 * @hi_res_value: the movement of the wheel, in the mouse's high-resolution
1847 * units.
1848 *
1849 * Given a high-resolution movement, this function converts the movement into
1850 * microns and emits high-resolution scroll events for the input device. It also
1851 * uses the multiplier from &struct hid_scroll_counter to emit low-resolution
1852 * scroll events when appropriate for backwards-compatibility with userspace
1853 * input libraries.
1854 */
1855void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
1856 int hi_res_value)
1857{
1858 int low_res_value, remainder, multiplier;
1859
1860 input_report_rel(counter->dev, REL_WHEEL_HI_RES,
1861 hi_res_value * counter->microns_per_hi_res_unit);
1862
1863 /*
1864 * Update the low-res remainder with the high-res value,
1865 * but reset if the direction has changed.
1866 */
1867 remainder = counter->remainder;
1868 if ((remainder ^ hi_res_value) < 0)
1869 remainder = 0;
1870 remainder += hi_res_value;
1871
1872 /*
1873 * Then just use the resolution multiplier to see if
1874 * we should send a low-res (aka regular wheel) event.
1875 */
1876 multiplier = counter->resolution_multiplier;
1877 low_res_value = remainder / multiplier;
1878 remainder -= low_res_value * multiplier;
1879 counter->remainder = remainder;
1880
1881 if (low_res_value)
1882 input_report_rel(counter->dev, REL_WHEEL, low_res_value);
1883}
1884EXPORT_SYMBOL_GPL(hid_scroll_counter_handle_scroll);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index f01280898b24..19cc980eebce 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -64,14 +64,6 @@ MODULE_PARM_DESC(disable_tap_to_click,
64#define HIDPP_QUIRK_NO_HIDINPUT BIT(23) 64#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
65#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24) 65#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
66#define HIDPP_QUIRK_UNIFYING BIT(25) 66#define HIDPP_QUIRK_UNIFYING BIT(25)
67#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
68#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
69#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
70
71/* Convenience constant to check for any high-res support. */
72#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
73 HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
74 HIDPP_QUIRK_HI_RES_SCROLL_X2121)
75 67
76#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT 68#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
77 69
@@ -157,7 +149,6 @@ struct hidpp_device {
157 unsigned long capabilities; 149 unsigned long capabilities;
158 150
159 struct hidpp_battery battery; 151 struct hidpp_battery battery;
160 struct hid_scroll_counter vertical_wheel_counter;
161}; 152};
162 153
163/* HID++ 1.0 error codes */ 154/* HID++ 1.0 error codes */
@@ -409,53 +400,32 @@ static void hidpp_prefix_name(char **name, int name_length)
409#define HIDPP_SET_LONG_REGISTER 0x82 400#define HIDPP_SET_LONG_REGISTER 0x82
410#define HIDPP_GET_LONG_REGISTER 0x83 401#define HIDPP_GET_LONG_REGISTER 0x83
411 402
412/** 403#define HIDPP_REG_GENERAL 0x00
413 * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register. 404
414 * @hidpp_dev: the device to set the register on. 405static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
415 * @register_address: the address of the register to modify.
416 * @byte: the byte of the register to modify. Should be less than 3.
417 * Return: 0 if successful, otherwise a negative error code.
418 */
419static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
420 u8 register_address, u8 byte, u8 bit)
421{ 406{
422 struct hidpp_report response; 407 struct hidpp_report response;
423 int ret; 408 int ret;
424 u8 params[3] = { 0 }; 409 u8 params[3] = { 0 };
425 410
426 ret = hidpp_send_rap_command_sync(hidpp_dev, 411 ret = hidpp_send_rap_command_sync(hidpp_dev,
427 REPORT_ID_HIDPP_SHORT, 412 REPORT_ID_HIDPP_SHORT,
428 HIDPP_GET_REGISTER, 413 HIDPP_GET_REGISTER,
429 register_address, 414 HIDPP_REG_GENERAL,
430 NULL, 0, &response); 415 NULL, 0, &response);
431 if (ret) 416 if (ret)
432 return ret; 417 return ret;
433 418
434 memcpy(params, response.rap.params, 3); 419 memcpy(params, response.rap.params, 3);
435 420
436 params[byte] |= BIT(bit); 421 /* Set the battery bit */
422 params[0] |= BIT(4);
437 423
438 return hidpp_send_rap_command_sync(hidpp_dev, 424 return hidpp_send_rap_command_sync(hidpp_dev,
439 REPORT_ID_HIDPP_SHORT, 425 REPORT_ID_HIDPP_SHORT,
440 HIDPP_SET_REGISTER, 426 HIDPP_SET_REGISTER,
441 register_address, 427 HIDPP_REG_GENERAL,
442 params, 3, &response); 428 params, 3, &response);
443}
444
445
446#define HIDPP_REG_GENERAL 0x00
447
448static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
449{
450 return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
451}
452
453#define HIDPP_REG_FEATURES 0x01
454
455/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
456static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
457{
458 return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
459} 429}
460 430
461#define HIDPP_REG_BATTERY_STATUS 0x07 431#define HIDPP_REG_BATTERY_STATUS 0x07
@@ -1167,100 +1137,6 @@ static int hidpp_battery_get_property(struct power_supply *psy,
1167} 1137}
1168 1138
1169/* -------------------------------------------------------------------------- */ 1139/* -------------------------------------------------------------------------- */
1170/* 0x2120: Hi-resolution scrolling */
1171/* -------------------------------------------------------------------------- */
1172
1173#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120
1174
1175#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10
1176
1177static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
1178 bool enabled, u8 *multiplier)
1179{
1180 u8 feature_index;
1181 u8 feature_type;
1182 int ret;
1183 u8 params[1];
1184 struct hidpp_report response;
1185
1186 ret = hidpp_root_get_feature(hidpp,
1187 HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
1188 &feature_index,
1189 &feature_type);
1190 if (ret)
1191 return ret;
1192
1193 params[0] = enabled ? BIT(0) : 0;
1194 ret = hidpp_send_fap_command_sync(hidpp, feature_index,
1195 CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE,
1196 params, sizeof(params), &response);
1197 if (ret)
1198 return ret;
1199 *multiplier = response.fap.params[1];
1200 return 0;
1201}
1202
1203/* -------------------------------------------------------------------------- */
1204/* 0x2121: HiRes Wheel */
1205/* -------------------------------------------------------------------------- */
1206
1207#define HIDPP_PAGE_HIRES_WHEEL 0x2121
1208
1209#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00
1210#define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20
1211
1212static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
1213 u8 *multiplier)
1214{
1215 u8 feature_index;
1216 u8 feature_type;
1217 int ret;
1218 struct hidpp_report response;
1219
1220 ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
1221 &feature_index, &feature_type);
1222 if (ret)
1223 goto return_default;
1224
1225 ret = hidpp_send_fap_command_sync(hidpp, feature_index,
1226 CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY,
1227 NULL, 0, &response);
1228 if (ret)
1229 goto return_default;
1230
1231 *multiplier = response.fap.params[0];
1232 return 0;
1233return_default:
1234 hid_warn(hidpp->hid_dev,
1235 "Couldn't get wheel multiplier (error %d), assuming %d.\n",
1236 ret, *multiplier);
1237 return ret;
1238}
1239
1240static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
1241 bool high_resolution, bool use_hidpp)
1242{
1243 u8 feature_index;
1244 u8 feature_type;
1245 int ret;
1246 u8 params[1];
1247 struct hidpp_report response;
1248
1249 ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
1250 &feature_index, &feature_type);
1251 if (ret)
1252 return ret;
1253
1254 params[0] = (invert ? BIT(2) : 0) |
1255 (high_resolution ? BIT(1) : 0) |
1256 (use_hidpp ? BIT(0) : 0);
1257
1258 return hidpp_send_fap_command_sync(hidpp, feature_index,
1259 CMD_HIRES_WHEEL_SET_WHEEL_MODE,
1260 params, sizeof(params), &response);
1261}
1262
1263/* -------------------------------------------------------------------------- */
1264/* 0x4301: Solar Keyboard */ 1140/* 0x4301: Solar Keyboard */
1265/* -------------------------------------------------------------------------- */ 1141/* -------------------------------------------------------------------------- */
1266 1142
@@ -2523,8 +2399,7 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
2523 input_report_rel(mydata->input, REL_Y, v); 2399 input_report_rel(mydata->input, REL_Y, v);
2524 2400
2525 v = hid_snto32(data[6], 8); 2401 v = hid_snto32(data[6], 8);
2526 hid_scroll_counter_handle_scroll( 2402 input_report_rel(mydata->input, REL_WHEEL, v);
2527 &hidpp->vertical_wheel_counter, v);
2528 2403
2529 input_sync(mydata->input); 2404 input_sync(mydata->input);
2530 } 2405 }
@@ -2653,72 +2528,6 @@ static int g920_get_config(struct hidpp_device *hidpp)
2653} 2528}
2654 2529
2655/* -------------------------------------------------------------------------- */ 2530/* -------------------------------------------------------------------------- */
2656/* High-resolution scroll wheels */
2657/* -------------------------------------------------------------------------- */
2658
2659/**
2660 * struct hi_res_scroll_info - Stores info on a device's high-res scroll wheel.
2661 * @product_id: the HID product ID of the device being described.
2662 * @microns_per_hi_res_unit: the distance moved by the user's finger for each
2663 * high-resolution unit reported by the device, in
2664 * 256ths of a millimetre.
2665 */
2666struct hi_res_scroll_info {
2667 __u32 product_id;
2668 int microns_per_hi_res_unit;
2669};
2670
2671static struct hi_res_scroll_info hi_res_scroll_devices[] = {
2672 { /* Anywhere MX */
2673 .product_id = 0x1017, .microns_per_hi_res_unit = 445 },
2674 { /* Performance MX */
2675 .product_id = 0x101a, .microns_per_hi_res_unit = 406 },
2676 { /* M560 */
2677 .product_id = 0x402d, .microns_per_hi_res_unit = 435 },
2678 { /* MX Master 2S */
2679 .product_id = 0x4069, .microns_per_hi_res_unit = 406 },
2680};
2681
2682static int hi_res_scroll_look_up_microns(__u32 product_id)
2683{
2684 int i;
2685 int num_devices = sizeof(hi_res_scroll_devices)
2686 / sizeof(hi_res_scroll_devices[0]);
2687 for (i = 0; i < num_devices; i++) {
2688 if (hi_res_scroll_devices[i].product_id == product_id)
2689 return hi_res_scroll_devices[i].microns_per_hi_res_unit;
2690 }
2691 /* We don't have a value for this device, so use a sensible default. */
2692 return 406;
2693}
2694
2695static int hi_res_scroll_enable(struct hidpp_device *hidpp)
2696{
2697 int ret;
2698 u8 multiplier = 8;
2699
2700 if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
2701 ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
2702 hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
2703 } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
2704 ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
2705 &multiplier);
2706 } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */
2707 ret = hidpp10_enable_scrolling_acceleration(hidpp);
2708
2709 if (ret)
2710 return ret;
2711
2712 hidpp->vertical_wheel_counter.resolution_multiplier = multiplier;
2713 hidpp->vertical_wheel_counter.microns_per_hi_res_unit =
2714 hi_res_scroll_look_up_microns(hidpp->hid_dev->product);
2715 hid_info(hidpp->hid_dev, "multiplier = %d, microns = %d\n",
2716 multiplier,
2717 hidpp->vertical_wheel_counter.microns_per_hi_res_unit);
2718 return 0;
2719}
2720
2721/* -------------------------------------------------------------------------- */
2722/* Generic HID++ devices */ 2531/* Generic HID++ devices */
2723/* -------------------------------------------------------------------------- */ 2532/* -------------------------------------------------------------------------- */
2724 2533
@@ -2763,11 +2572,6 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
2763 wtp_populate_input(hidpp, input, origin_is_hid_core); 2572 wtp_populate_input(hidpp, input, origin_is_hid_core);
2764 else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) 2573 else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
2765 m560_populate_input(hidpp, input, origin_is_hid_core); 2574 m560_populate_input(hidpp, input, origin_is_hid_core);
2766
2767 if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) {
2768 input_set_capability(input, EV_REL, REL_WHEEL_HI_RES);
2769 hidpp->vertical_wheel_counter.dev = input;
2770 }
2771} 2575}
2772 2576
2773static int hidpp_input_configured(struct hid_device *hdev, 2577static int hidpp_input_configured(struct hid_device *hdev,
@@ -2886,27 +2690,6 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
2886 return 0; 2690 return 0;
2887} 2691}
2888 2692
2889static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
2890 struct hid_usage *usage, __s32 value)
2891{
2892 /* This function will only be called for scroll events, due to the
2893 * restriction imposed in hidpp_usages.
2894 */
2895 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
2896 struct hid_scroll_counter *counter = &hidpp->vertical_wheel_counter;
2897 /* A scroll event may occur before the multiplier has been retrieved or
2898 * the input device set, or high-res scroll enabling may fail. In such
2899 * cases we must return early (falling back to default behaviour) to
2900 * avoid a crash in hid_scroll_counter_handle_scroll.
2901 */
2902 if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
2903 || counter->dev == NULL || counter->resolution_multiplier == 0)
2904 return 0;
2905
2906 hid_scroll_counter_handle_scroll(counter, value);
2907 return 1;
2908}
2909
2910static int hidpp_initialize_battery(struct hidpp_device *hidpp) 2693static int hidpp_initialize_battery(struct hidpp_device *hidpp)
2911{ 2694{
2912 static atomic_t battery_no = ATOMIC_INIT(0); 2695 static atomic_t battery_no = ATOMIC_INIT(0);
@@ -3118,9 +2901,6 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
3118 if (hidpp->battery.ps) 2901 if (hidpp->battery.ps)
3119 power_supply_changed(hidpp->battery.ps); 2902 power_supply_changed(hidpp->battery.ps);
3120 2903
3121 if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
3122 hi_res_scroll_enable(hidpp);
3123
3124 if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input) 2904 if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
3125 /* if the input nodes are already created, we can stop now */ 2905 /* if the input nodes are already created, we can stop now */
3126 return; 2906 return;
@@ -3306,63 +3086,35 @@ static void hidpp_remove(struct hid_device *hdev)
3306 mutex_destroy(&hidpp->send_mutex); 3086 mutex_destroy(&hidpp->send_mutex);
3307} 3087}
3308 3088
3309#define LDJ_DEVICE(product) \
3310 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
3311 USB_VENDOR_ID_LOGITECH, (product))
3312
3313static const struct hid_device_id hidpp_devices[] = { 3089static const struct hid_device_id hidpp_devices[] = {
3314 { /* wireless touchpad */ 3090 { /* wireless touchpad */
3315 LDJ_DEVICE(0x4011), 3091 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3092 USB_VENDOR_ID_LOGITECH, 0x4011),
3316 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT | 3093 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
3317 HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS }, 3094 HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
3318 { /* wireless touchpad T650 */ 3095 { /* wireless touchpad T650 */
3319 LDJ_DEVICE(0x4101), 3096 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3097 USB_VENDOR_ID_LOGITECH, 0x4101),
3320 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, 3098 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
3321 { /* wireless touchpad T651 */ 3099 { /* wireless touchpad T651 */
3322 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 3100 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
3323 USB_DEVICE_ID_LOGITECH_T651), 3101 USB_DEVICE_ID_LOGITECH_T651),
3324 .driver_data = HIDPP_QUIRK_CLASS_WTP }, 3102 .driver_data = HIDPP_QUIRK_CLASS_WTP },
3325 { /* Mouse Logitech Anywhere MX */
3326 LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
3327 { /* Mouse Logitech Cube */
3328 LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
3329 { /* Mouse Logitech M335 */
3330 LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3331 { /* Mouse Logitech M515 */
3332 LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
3333 { /* Mouse logitech M560 */ 3103 { /* Mouse logitech M560 */
3334 LDJ_DEVICE(0x402d), 3104 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3335 .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 3105 USB_VENDOR_ID_LOGITECH, 0x402d),
3336 | HIDPP_QUIRK_HI_RES_SCROLL_X2120 }, 3106 .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
3337 { /* Mouse Logitech M705 (firmware RQM17) */
3338 LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
3339 { /* Mouse Logitech M705 (firmware RQM67) */
3340 LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3341 { /* Mouse Logitech M720 */
3342 LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3343 { /* Mouse Logitech MX Anywhere 2 */
3344 LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3345 { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3346 { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3347 { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3348 { /* Mouse Logitech MX Anywhere 2S */
3349 LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3350 { /* Mouse Logitech MX Master */
3351 LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3352 { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3353 { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3354 { /* Mouse Logitech MX Master 2S */
3355 LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
3356 { /* Mouse Logitech Performance MX */
3357 LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
3358 { /* Keyboard logitech K400 */ 3107 { /* Keyboard logitech K400 */
3359 LDJ_DEVICE(0x4024), 3108 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3109 USB_VENDOR_ID_LOGITECH, 0x4024),
3360 .driver_data = HIDPP_QUIRK_CLASS_K400 }, 3110 .driver_data = HIDPP_QUIRK_CLASS_K400 },
3361 { /* Solar Keyboard Logitech K750 */ 3111 { /* Solar Keyboard Logitech K750 */
3362 LDJ_DEVICE(0x4002), 3112 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3113 USB_VENDOR_ID_LOGITECH, 0x4002),
3363 .driver_data = HIDPP_QUIRK_CLASS_K750 }, 3114 .driver_data = HIDPP_QUIRK_CLASS_K750 },
3364 3115
3365 { LDJ_DEVICE(HID_ANY_ID) }, 3116 { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
3117 USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
3366 3118
3367 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), 3119 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
3368 .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, 3120 .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
@@ -3371,19 +3123,12 @@ static const struct hid_device_id hidpp_devices[] = {
3371 3123
3372MODULE_DEVICE_TABLE(hid, hidpp_devices); 3124MODULE_DEVICE_TABLE(hid, hidpp_devices);
3373 3125
3374static const struct hid_usage_id hidpp_usages[] = {
3375 { HID_GD_WHEEL, EV_REL, REL_WHEEL },
3376 { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
3377};
3378
3379static struct hid_driver hidpp_driver = { 3126static struct hid_driver hidpp_driver = {
3380 .name = "logitech-hidpp-device", 3127 .name = "logitech-hidpp-device",
3381 .id_table = hidpp_devices, 3128 .id_table = hidpp_devices,
3382 .probe = hidpp_probe, 3129 .probe = hidpp_probe,
3383 .remove = hidpp_remove, 3130 .remove = hidpp_remove,
3384 .raw_event = hidpp_raw_event, 3131 .raw_event = hidpp_raw_event,
3385 .usage_table = hidpp_usages,
3386 .event = hidpp_event,
3387 .input_configured = hidpp_input_configured, 3132 .input_configured = hidpp_input_configured,
3388 .input_mapping = hidpp_input_mapping, 3133 .input_mapping = hidpp_input_mapping,
3389 .input_mapped = hidpp_input_mapped, 3134 .input_mapped = hidpp_input_mapped,
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f7c6de2b6730..dca0a3a90fb8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1814,6 +1814,12 @@ static const struct hid_device_id mt_devices[] = {
1814 MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, 1814 MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
1815 USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, 1815 USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
1816 1816
1817 /* Cirque devices */
1818 { .driver_data = MT_CLS_WIN_8_DUAL,
1819 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
1820 I2C_VENDOR_ID_CIRQUE,
1821 I2C_PRODUCT_ID_CIRQUE_121F) },
1822
1817 /* CJTouch panels */ 1823 /* CJTouch panels */
1818 { .driver_data = MT_CLS_NSMU, 1824 { .driver_data = MT_CLS_NSMU,
1819 MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH, 1825 MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 52c3b01917e7..c85a79986b6a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -107,7 +107,7 @@ static const struct hid_device_id hid_quirks[] = {
107 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL }, 107 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
108 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL }, 108 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
109 { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, 109 { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
110 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET }, 110 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
111 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, 111 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
112 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS }, 112 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
113 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, 113 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
@@ -130,6 +130,8 @@ static const struct hid_device_id hid_quirks[] = {
130 { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS }, 130 { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
131 { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, 131 { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
132 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL }, 132 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
133 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
134 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
133 { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET }, 135 { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
134 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET }, 136 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
135 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET }, 137 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index e8a114157f87..bb012bc032e0 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -358,7 +358,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
358 sensor_inst->hsdev, 358 sensor_inst->hsdev,
359 sensor_inst->hsdev->usage, 359 sensor_inst->hsdev->usage,
360 usage, report_id, 360 usage, report_id,
361 SENSOR_HUB_SYNC); 361 SENSOR_HUB_SYNC, false);
362 } else if (!strncmp(name, "units", strlen("units"))) 362 } else if (!strncmp(name, "units", strlen("units")))
363 value = sensor_inst->fields[field_index].attribute.units; 363 value = sensor_inst->fields[field_index].attribute.units;
364 else if (!strncmp(name, "unit-expo", strlen("unit-expo"))) 364 else if (!strncmp(name, "unit-expo", strlen("unit-expo")))
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 2b63487057c2..4256fdc5cd6d 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(sensor_hub_get_feature);
299int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, 299int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
300 u32 usage_id, 300 u32 usage_id,
301 u32 attr_usage_id, u32 report_id, 301 u32 attr_usage_id, u32 report_id,
302 enum sensor_hub_read_flags flag) 302 enum sensor_hub_read_flags flag,
303 bool is_signed)
303{ 304{
304 struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev); 305 struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
305 unsigned long flags; 306 unsigned long flags;
@@ -331,10 +332,16 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
331 &hsdev->pending.ready, HZ*5); 332 &hsdev->pending.ready, HZ*5);
332 switch (hsdev->pending.raw_size) { 333 switch (hsdev->pending.raw_size) {
333 case 1: 334 case 1:
334 ret_val = *(u8 *)hsdev->pending.raw_data; 335 if (is_signed)
336 ret_val = *(s8 *)hsdev->pending.raw_data;
337 else
338 ret_val = *(u8 *)hsdev->pending.raw_data;
335 break; 339 break;
336 case 2: 340 case 2:
337 ret_val = *(u16 *)hsdev->pending.raw_data; 341 if (is_signed)
342 ret_val = *(s16 *)hsdev->pending.raw_data;
343 else
344 ret_val = *(u16 *)hsdev->pending.raw_data;
338 break; 345 break;
339 case 4: 346 case 4:
340 ret_val = *(u32 *)hsdev->pending.raw_data; 347 ret_val = *(u32 *)hsdev->pending.raw_data;
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 0422ec2b13d2..dc4128bfe2ca 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -23,8 +23,9 @@
23 * In order to avoid breaking them this driver creates a layered hidraw device, 23 * In order to avoid breaking them this driver creates a layered hidraw device,
24 * so it can detect when the client is running and then: 24 * so it can detect when the client is running and then:
25 * - it will not send any command to the controller. 25 * - it will not send any command to the controller.
26 * - this input device will be disabled, to avoid double input of the same 26 * - this input device will be removed, to avoid double input of the same
27 * user action. 27 * user action.
28 * When the client is closed, this input device will be created again.
28 * 29 *
29 * For additional functions, such as changing the right-pad margin or switching 30 * For additional functions, such as changing the right-pad margin or switching
30 * the led, you can use the user-space tool at: 31 * the led, you can use the user-space tool at:
@@ -113,7 +114,7 @@ struct steam_device {
113 spinlock_t lock; 114 spinlock_t lock;
114 struct hid_device *hdev, *client_hdev; 115 struct hid_device *hdev, *client_hdev;
115 struct mutex mutex; 116 struct mutex mutex;
116 bool client_opened, input_opened; 117 bool client_opened;
117 struct input_dev __rcu *input; 118 struct input_dev __rcu *input;
118 unsigned long quirks; 119 unsigned long quirks;
119 struct work_struct work_connect; 120 struct work_struct work_connect;
@@ -279,18 +280,6 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
279 } 280 }
280} 281}
281 282
282static void steam_update_lizard_mode(struct steam_device *steam)
283{
284 mutex_lock(&steam->mutex);
285 if (!steam->client_opened) {
286 if (steam->input_opened)
287 steam_set_lizard_mode(steam, false);
288 else
289 steam_set_lizard_mode(steam, lizard_mode);
290 }
291 mutex_unlock(&steam->mutex);
292}
293
294static int steam_input_open(struct input_dev *dev) 283static int steam_input_open(struct input_dev *dev)
295{ 284{
296 struct steam_device *steam = input_get_drvdata(dev); 285 struct steam_device *steam = input_get_drvdata(dev);
@@ -301,7 +290,6 @@ static int steam_input_open(struct input_dev *dev)
301 return ret; 290 return ret;
302 291
303 mutex_lock(&steam->mutex); 292 mutex_lock(&steam->mutex);
304 steam->input_opened = true;
305 if (!steam->client_opened && lizard_mode) 293 if (!steam->client_opened && lizard_mode)
306 steam_set_lizard_mode(steam, false); 294 steam_set_lizard_mode(steam, false);
307 mutex_unlock(&steam->mutex); 295 mutex_unlock(&steam->mutex);
@@ -313,7 +301,6 @@ static void steam_input_close(struct input_dev *dev)
313 struct steam_device *steam = input_get_drvdata(dev); 301 struct steam_device *steam = input_get_drvdata(dev);
314 302
315 mutex_lock(&steam->mutex); 303 mutex_lock(&steam->mutex);
316 steam->input_opened = false;
317 if (!steam->client_opened && lizard_mode) 304 if (!steam->client_opened && lizard_mode)
318 steam_set_lizard_mode(steam, true); 305 steam_set_lizard_mode(steam, true);
319 mutex_unlock(&steam->mutex); 306 mutex_unlock(&steam->mutex);
@@ -400,7 +387,7 @@ static int steam_battery_register(struct steam_device *steam)
400 return 0; 387 return 0;
401} 388}
402 389
403static int steam_register(struct steam_device *steam) 390static int steam_input_register(struct steam_device *steam)
404{ 391{
405 struct hid_device *hdev = steam->hdev; 392 struct hid_device *hdev = steam->hdev;
406 struct input_dev *input; 393 struct input_dev *input;
@@ -414,17 +401,6 @@ static int steam_register(struct steam_device *steam)
414 return 0; 401 return 0;
415 } 402 }
416 403
417 /*
418 * Unlikely, but getting the serial could fail, and it is not so
419 * important, so make up a serial number and go on.
420 */
421 if (steam_get_serial(steam) < 0)
422 strlcpy(steam->serial_no, "XXXXXXXXXX",
423 sizeof(steam->serial_no));
424
425 hid_info(hdev, "Steam Controller '%s' connected",
426 steam->serial_no);
427
428 input = input_allocate_device(); 404 input = input_allocate_device();
429 if (!input) 405 if (!input)
430 return -ENOMEM; 406 return -ENOMEM;
@@ -492,11 +468,6 @@ static int steam_register(struct steam_device *steam)
492 goto input_register_fail; 468 goto input_register_fail;
493 469
494 rcu_assign_pointer(steam->input, input); 470 rcu_assign_pointer(steam->input, input);
495
496 /* ignore battery errors, we can live without it */
497 if (steam->quirks & STEAM_QUIRK_WIRELESS)
498 steam_battery_register(steam);
499
500 return 0; 471 return 0;
501 472
502input_register_fail: 473input_register_fail:
@@ -504,27 +475,88 @@ input_register_fail:
504 return ret; 475 return ret;
505} 476}
506 477
507static void steam_unregister(struct steam_device *steam) 478static void steam_input_unregister(struct steam_device *steam)
508{ 479{
509 struct input_dev *input; 480 struct input_dev *input;
481 rcu_read_lock();
482 input = rcu_dereference(steam->input);
483 rcu_read_unlock();
484 if (!input)
485 return;
486 RCU_INIT_POINTER(steam->input, NULL);
487 synchronize_rcu();
488 input_unregister_device(input);
489}
490
491static void steam_battery_unregister(struct steam_device *steam)
492{
510 struct power_supply *battery; 493 struct power_supply *battery;
511 494
512 rcu_read_lock(); 495 rcu_read_lock();
513 input = rcu_dereference(steam->input);
514 battery = rcu_dereference(steam->battery); 496 battery = rcu_dereference(steam->battery);
515 rcu_read_unlock(); 497 rcu_read_unlock();
516 498
517 if (battery) { 499 if (!battery)
518 RCU_INIT_POINTER(steam->battery, NULL); 500 return;
519 synchronize_rcu(); 501 RCU_INIT_POINTER(steam->battery, NULL);
520 power_supply_unregister(battery); 502 synchronize_rcu();
503 power_supply_unregister(battery);
504}
505
506static int steam_register(struct steam_device *steam)
507{
508 int ret;
509
510 /*
511 * This function can be called several times in a row with the
512 * wireless adaptor, without steam_unregister() between them, because
513 * another client send a get_connection_status command, for example.
514 * The battery and serial number are set just once per device.
515 */
516 if (!steam->serial_no[0]) {
517 /*
518 * Unlikely, but getting the serial could fail, and it is not so
519 * important, so make up a serial number and go on.
520 */
521 if (steam_get_serial(steam) < 0)
522 strlcpy(steam->serial_no, "XXXXXXXXXX",
523 sizeof(steam->serial_no));
524
525 hid_info(steam->hdev, "Steam Controller '%s' connected",
526 steam->serial_no);
527
528 /* ignore battery errors, we can live without it */
529 if (steam->quirks & STEAM_QUIRK_WIRELESS)
530 steam_battery_register(steam);
531
532 mutex_lock(&steam_devices_lock);
533 list_add(&steam->list, &steam_devices);
534 mutex_unlock(&steam_devices_lock);
521 } 535 }
522 if (input) { 536
523 RCU_INIT_POINTER(steam->input, NULL); 537 mutex_lock(&steam->mutex);
524 synchronize_rcu(); 538 if (!steam->client_opened) {
539 steam_set_lizard_mode(steam, lizard_mode);
540 ret = steam_input_register(steam);
541 } else {
542 ret = 0;
543 }
544 mutex_unlock(&steam->mutex);
545
546 return ret;
547}
548
549static void steam_unregister(struct steam_device *steam)
550{
551 steam_battery_unregister(steam);
552 steam_input_unregister(steam);
553 if (steam->serial_no[0]) {
525 hid_info(steam->hdev, "Steam Controller '%s' disconnected", 554 hid_info(steam->hdev, "Steam Controller '%s' disconnected",
526 steam->serial_no); 555 steam->serial_no);
527 input_unregister_device(input); 556 mutex_lock(&steam_devices_lock);
557 list_del(&steam->list);
558 mutex_unlock(&steam_devices_lock);
559 steam->serial_no[0] = 0;
528 } 560 }
529} 561}
530 562
@@ -600,6 +632,9 @@ static int steam_client_ll_open(struct hid_device *hdev)
600 mutex_lock(&steam->mutex); 632 mutex_lock(&steam->mutex);
601 steam->client_opened = true; 633 steam->client_opened = true;
602 mutex_unlock(&steam->mutex); 634 mutex_unlock(&steam->mutex);
635
636 steam_input_unregister(steam);
637
603 return ret; 638 return ret;
604} 639}
605 640
@@ -609,13 +644,13 @@ static void steam_client_ll_close(struct hid_device *hdev)
609 644
610 mutex_lock(&steam->mutex); 645 mutex_lock(&steam->mutex);
611 steam->client_opened = false; 646 steam->client_opened = false;
612 if (steam->input_opened)
613 steam_set_lizard_mode(steam, false);
614 else
615 steam_set_lizard_mode(steam, lizard_mode);
616 mutex_unlock(&steam->mutex); 647 mutex_unlock(&steam->mutex);
617 648
618 hid_hw_close(steam->hdev); 649 hid_hw_close(steam->hdev);
650 if (steam->connected) {
651 steam_set_lizard_mode(steam, lizard_mode);
652 steam_input_register(steam);
653 }
619} 654}
620 655
621static int steam_client_ll_raw_request(struct hid_device *hdev, 656static int steam_client_ll_raw_request(struct hid_device *hdev,
@@ -744,11 +779,6 @@ static int steam_probe(struct hid_device *hdev,
744 } 779 }
745 } 780 }
746 781
747 mutex_lock(&steam_devices_lock);
748 steam_update_lizard_mode(steam);
749 list_add(&steam->list, &steam_devices);
750 mutex_unlock(&steam_devices_lock);
751
752 return 0; 782 return 0;
753 783
754hid_hw_open_fail: 784hid_hw_open_fail:
@@ -774,10 +804,6 @@ static void steam_remove(struct hid_device *hdev)
774 return; 804 return;
775 } 805 }
776 806
777 mutex_lock(&steam_devices_lock);
778 list_del(&steam->list);
779 mutex_unlock(&steam_devices_lock);
780
781 hid_destroy_device(steam->client_hdev); 807 hid_destroy_device(steam->client_hdev);
782 steam->client_opened = false; 808 steam->client_opened = false;
783 cancel_work_sync(&steam->work_connect); 809 cancel_work_sync(&steam->work_connect);
@@ -792,12 +818,14 @@ static void steam_remove(struct hid_device *hdev)
792static void steam_do_connect_event(struct steam_device *steam, bool connected) 818static void steam_do_connect_event(struct steam_device *steam, bool connected)
793{ 819{
794 unsigned long flags; 820 unsigned long flags;
821 bool changed;
795 822
796 spin_lock_irqsave(&steam->lock, flags); 823 spin_lock_irqsave(&steam->lock, flags);
824 changed = steam->connected != connected;
797 steam->connected = connected; 825 steam->connected = connected;
798 spin_unlock_irqrestore(&steam->lock, flags); 826 spin_unlock_irqrestore(&steam->lock, flags);
799 827
800 if (schedule_work(&steam->work_connect) == 0) 828 if (changed && schedule_work(&steam->work_connect) == 0)
801 dbg_hid("%s: connected=%d event already queued\n", 829 dbg_hid("%s: connected=%d event already queued\n",
802 __func__, connected); 830 __func__, connected);
803} 831}
@@ -1019,13 +1047,8 @@ static int steam_raw_event(struct hid_device *hdev,
1019 return 0; 1047 return 0;
1020 rcu_read_lock(); 1048 rcu_read_lock();
1021 input = rcu_dereference(steam->input); 1049 input = rcu_dereference(steam->input);
1022 if (likely(input)) { 1050 if (likely(input))
1023 steam_do_input_event(steam, input, data); 1051 steam_do_input_event(steam, input, data);
1024 } else {
1025 dbg_hid("%s: input data without connect event\n",
1026 __func__);
1027 steam_do_connect_event(steam, true);
1028 }
1029 rcu_read_unlock(); 1052 rcu_read_unlock();
1030 break; 1053 break;
1031 case STEAM_EV_CONNECT: 1054 case STEAM_EV_CONNECT:
@@ -1074,7 +1097,10 @@ static int steam_param_set_lizard_mode(const char *val,
1074 1097
1075 mutex_lock(&steam_devices_lock); 1098 mutex_lock(&steam_devices_lock);
1076 list_for_each_entry(steam, &steam_devices, list) { 1099 list_for_each_entry(steam, &steam_devices, list) {
1077 steam_update_lizard_mode(steam); 1100 mutex_lock(&steam->mutex);
1101 if (!steam->client_opened)
1102 steam_set_lizard_mode(steam, lizard_mode);
1103 mutex_unlock(&steam->mutex);
1078 } 1104 }
1079 mutex_unlock(&steam_devices_lock); 1105 mutex_unlock(&steam_devices_lock);
1080 return 0; 1106 return 0;
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 4aab96cf0818..8555ce7e737b 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -49,6 +49,7 @@
49#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) 49#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
50#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) 50#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
51#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2) 51#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
52#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
52 53
53/* flags */ 54/* flags */
54#define I2C_HID_STARTED 0 55#define I2C_HID_STARTED 0
@@ -158,6 +159,8 @@ struct i2c_hid {
158 159
159 bool irq_wake_enabled; 160 bool irq_wake_enabled;
160 struct mutex reset_lock; 161 struct mutex reset_lock;
162
163 unsigned long sleep_delay;
161}; 164};
162 165
163static const struct i2c_hid_quirks { 166static const struct i2c_hid_quirks {
@@ -172,6 +175,10 @@ static const struct i2c_hid_quirks {
172 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 175 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
173 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET | 176 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
174 I2C_HID_QUIRK_NO_RUNTIME_PM }, 177 I2C_HID_QUIRK_NO_RUNTIME_PM },
178 { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
180 { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
181 I2C_HID_QUIRK_NO_RUNTIME_PM },
175 { 0, 0 } 182 { 0, 0 }
176}; 183};
177 184
@@ -387,6 +394,7 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
387{ 394{
388 struct i2c_hid *ihid = i2c_get_clientdata(client); 395 struct i2c_hid *ihid = i2c_get_clientdata(client);
389 int ret; 396 int ret;
397 unsigned long now, delay;
390 398
391 i2c_hid_dbg(ihid, "%s\n", __func__); 399 i2c_hid_dbg(ihid, "%s\n", __func__);
392 400
@@ -404,9 +412,22 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
404 goto set_pwr_exit; 412 goto set_pwr_exit;
405 } 413 }
406 414
415 if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
416 power_state == I2C_HID_PWR_ON) {
417 now = jiffies;
418 if (time_after(ihid->sleep_delay, now)) {
419 delay = jiffies_to_usecs(ihid->sleep_delay - now);
420 usleep_range(delay, delay + 1);
421 }
422 }
423
407 ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state, 424 ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
408 0, NULL, 0, NULL, 0); 425 0, NULL, 0, NULL, 0);
409 426
427 if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
428 power_state == I2C_HID_PWR_SLEEP)
429 ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
430
410 if (ret) 431 if (ret)
411 dev_err(&client->dev, "failed to change power setting.\n"); 432 dev_err(&client->dev, "failed to change power setting.\n");
412 433
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index cac262a912c1..89f2976f9c53 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -331,6 +331,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
331 .driver_data = (void *)&sipodev_desc 331 .driver_data = (void *)&sipodev_desc
332 }, 332 },
333 { 333 {
334 .ident = "Direkt-Tek DTLAPY133-1",
335 .matches = {
336 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
337 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"),
338 },
339 .driver_data = (void *)&sipodev_desc
340 },
341 {
334 .ident = "Mediacom Flexbook Edge 11", 342 .ident = "Mediacom Flexbook Edge 11",
335 .matches = { 343 .matches = {
336 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), 344 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 3c5507313606..840634e0f1e3 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14#include <linux/compat.h> 14#include <linux/compat.h>
15#include <linux/cred.h>
15#include <linux/device.h> 16#include <linux/device.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
17#include <linux/hid.h> 18#include <linux/hid.h>
@@ -496,12 +497,13 @@ static int uhid_dev_create2(struct uhid_device *uhid,
496 goto err_free; 497 goto err_free;
497 } 498 }
498 499
499 len = min(sizeof(hid->name), sizeof(ev->u.create2.name)); 500 /* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
500 strlcpy(hid->name, ev->u.create2.name, len); 501 len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
501 len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)); 502 strncpy(hid->name, ev->u.create2.name, len);
502 strlcpy(hid->phys, ev->u.create2.phys, len); 503 len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
503 len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)); 504 strncpy(hid->phys, ev->u.create2.phys, len);
504 strlcpy(hid->uniq, ev->u.create2.uniq, len); 505 len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
506 strncpy(hid->uniq, ev->u.create2.uniq, len);
505 507
506 hid->ll_driver = &uhid_hid_driver; 508 hid->ll_driver = &uhid_hid_driver;
507 hid->bus = ev->u.create2.bus; 509 hid->bus = ev->u.create2.bus;
@@ -722,6 +724,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
722 724
723 switch (uhid->input_buf.type) { 725 switch (uhid->input_buf.type) {
724 case UHID_CREATE: 726 case UHID_CREATE:
727 /*
728 * 'struct uhid_create_req' contains a __user pointer which is
729 * copied from, so it's unsafe to allow this with elevated
730 * privileges (e.g. from a setuid binary) or via kernel_write().
731 */
732 if (file->f_cred != current_cred() || uaccess_kernel()) {
733 pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
734 task_tgid_vnr(current), current->comm);
735 ret = -EACCES;
736 goto unlock;
737 }
725 ret = uhid_dev_create(uhid, &uhid->input_buf); 738 ret = uhid_dev_create(uhid, &uhid->input_buf);
726 break; 739 break;
727 case UHID_CREATE2: 740 case UHID_CREATE2:
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 23872d08308c..a746017fac17 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -512,14 +512,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
512 if (cmd == HIDIOCGCOLLECTIONINDEX) { 512 if (cmd == HIDIOCGCOLLECTIONINDEX) {
513 if (uref->usage_index >= field->maxusage) 513 if (uref->usage_index >= field->maxusage)
514 goto inval; 514 goto inval;
515 uref->usage_index =
516 array_index_nospec(uref->usage_index,
517 field->maxusage);
515 } else if (uref->usage_index >= field->report_count) 518 } else if (uref->usage_index >= field->report_count)
516 goto inval; 519 goto inval;
517 } 520 }
518 521
519 if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && 522 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
520 (uref_multi->num_values > HID_MAX_MULTI_USAGES || 523 if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
521 uref->usage_index + uref_multi->num_values > field->report_count)) 524 uref->usage_index + uref_multi->num_values >
522 goto inval; 525 field->report_count)
526 goto inval;
527
528 uref->usage_index =
529 array_index_nospec(uref->usage_index,
530 field->report_count -
531 uref_multi->num_values);
532 }
523 533
524 switch (cmd) { 534 switch (cmd) {
525 case HIDIOCGUSAGE: 535 case HIDIOCGUSAGE:
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index de8193f3b838..fe00b12e4417 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -516,6 +516,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
516 } 516 }
517 wait_for_completion(&msginfo->waitevent); 517 wait_for_completion(&msginfo->waitevent);
518 518
519 if (msginfo->response.gpadl_created.creation_status != 0) {
520 pr_err("Failed to establish GPADL: err = 0x%x\n",
521 msginfo->response.gpadl_created.creation_status);
522
523 ret = -EDQUOT;
524 goto cleanup;
525 }
526
519 if (channel->rescind) { 527 if (channel->rescind) {
520 ret = -ENODEV; 528 ret = -ENODEV;
521 goto cleanup; 529 goto cleanup;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index a7513a8a8e37..d6106e1a0d4a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,6 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
353 353
354 out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled; 354 out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
355 355
356 /* fallthrough */
357
358 case KVP_OP_GET_IP_INFO:
356 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id, 359 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
357 MAX_ADAPTER_ID_SIZE, 360 MAX_ADAPTER_ID_SIZE,
358 UTF16_LITTLE_ENDIAN, 361 UTF16_LITTLE_ENDIAN,
@@ -405,7 +408,11 @@ kvp_send_key(struct work_struct *dummy)
405 process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO); 408 process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
406 break; 409 break;
407 case KVP_OP_GET_IP_INFO: 410 case KVP_OP_GET_IP_INFO:
408 /* We only need to pass on message->kvp_hdr.operation. */ 411 /*
412 * We only need to pass on the info of operation, adapter_id
413 * and addr_family to the userland kvp daemon.
414 */
415 process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
409 break; 416 break;
410 case KVP_OP_SET: 417 case KVP_OP_SET:
411 switch (in_msg->body.kvp_set.data.value_type) { 418 switch (in_msg->body.kvp_set.data.value_type) {
@@ -446,9 +453,9 @@ kvp_send_key(struct work_struct *dummy)
446 453
447 } 454 }
448 455
449 break; 456 /*
450 457 * The key is always a string - utf16 encoding.
451 case KVP_OP_GET: 458 */
452 message->body.kvp_set.data.key_size = 459 message->body.kvp_set.data.key_size =
453 utf16s_to_utf8s( 460 utf16s_to_utf8s(
454 (wchar_t *)in_msg->body.kvp_set.data.key, 461 (wchar_t *)in_msg->body.kvp_set.data.key,
@@ -456,6 +463,17 @@ kvp_send_key(struct work_struct *dummy)
456 UTF16_LITTLE_ENDIAN, 463 UTF16_LITTLE_ENDIAN,
457 message->body.kvp_set.data.key, 464 message->body.kvp_set.data.key,
458 HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1; 465 HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
466
467 break;
468
469 case KVP_OP_GET:
470 message->body.kvp_get.data.key_size =
471 utf16s_to_utf8s(
472 (wchar_t *)in_msg->body.kvp_get.data.key,
473 in_msg->body.kvp_get.data.key_size,
474 UTF16_LITTLE_ENDIAN,
475 message->body.kvp_get.data.key,
476 HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
459 break; 477 break;
460 478
461 case KVP_OP_DELETE: 479 case KVP_OP_DELETE:
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 975c95169884..84f61cec6319 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -649,8 +649,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
649 if (info[i]->config[j] & HWMON_T_INPUT) { 649 if (info[i]->config[j] & HWMON_T_INPUT) {
650 err = hwmon_thermal_add_sensor(dev, 650 err = hwmon_thermal_add_sensor(dev,
651 hwdev, j); 651 hwdev, j);
652 if (err) 652 if (err) {
653 goto free_device; 653 device_unregister(hdev);
654 goto ida_remove;
655 }
654 } 656 }
655 } 657 }
656 } 658 }
@@ -658,8 +660,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
658 660
659 return hdev; 661 return hdev;
660 662
661free_device:
662 device_unregister(hdev);
663free_hwmon: 663free_hwmon:
664 kfree(hwdev); 664 kfree(hwdev);
665ida_remove: 665ida_remove:
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 0ccca87f5271..293dd1c6c7b3 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
181 return sprintf(buf, "%s\n", sdata->label); 181 return sprintf(buf, "%s\n", sdata->label);
182} 182}
183 183
184static int __init get_logical_cpu(int hwcpu) 184static int get_logical_cpu(int hwcpu)
185{ 185{
186 int cpu; 186 int cpu;
187 187
@@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu)
192 return -ENOENT; 192 return -ENOENT;
193} 193}
194 194
195static void __init make_sensor_label(struct device_node *np, 195static void make_sensor_label(struct device_node *np,
196 struct sensor_data *sdata, 196 struct sensor_data *sdata, const char *label)
197 const char *label)
198{ 197{
199 u32 id; 198 u32 id;
200 size_t n; 199 size_t n;
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 71d3445ba869..07ee19573b3f 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
274 break; 274 break;
275 case INA2XX_CURRENT: 275 case INA2XX_CURRENT:
276 /* signed register, result in mA */ 276 /* signed register, result in mA */
277 val = regval * data->current_lsb_uA; 277 val = (s16)regval * data->current_lsb_uA;
278 val = DIV_ROUND_CLOSEST(val, 1000); 278 val = DIV_ROUND_CLOSEST(val, 1000);
279 break; 279 break;
280 case INA2XX_CALIBRATION: 280 case INA2XX_CALIBRATION:
@@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client,
491 } 491 }
492 492
493 data->groups[group++] = &ina2xx_group; 493 data->groups[group++] = &ina2xx_group;
494 if (id->driver_data == ina226) 494 if (chip == ina226)
495 data->groups[group++] = &ina226_group; 495 data->groups[group++] = &ina226_group;
496 496
497 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, 497 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
@@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client,
500 return PTR_ERR(hwmon_dev); 500 return PTR_ERR(hwmon_dev);
501 501
502 dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", 502 dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
503 id->name, data->rshunt); 503 client->name, data->rshunt);
504 504
505 return 0; 505 return 0;
506} 506}
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index de46577c7d5a..d8fa4bea4bc8 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -51,7 +51,7 @@
51 */ 51 */
52#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \ 52#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
53 ((rval) + (s)) * (d))) 53 ((rval) + (s)) * (d)))
54#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask))) 54#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask)))
55#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \ 55#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \
56 MLXREG_FAN_MAX_STATE, \ 56 MLXREG_FAN_MAX_STATE, \
57 MLXREG_FAN_MAX_DUTY)) 57 MLXREG_FAN_MAX_DUTY))
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index be5ba4690895..0d0457245e7d 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
115{ 115{
116 struct device *dev = &pdev->dev; 116 struct device *dev = &pdev->dev;
117 struct rpi_hwmon_data *data; 117 struct rpi_hwmon_data *data;
118 int ret;
119 118
120 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 119 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
121 if (!data) 120 if (!data)
@@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
124 /* Parent driver assure that firmware is correct */ 123 /* Parent driver assure that firmware is correct */
125 data->fw = dev_get_drvdata(dev->parent); 124 data->fw = dev_get_drvdata(dev->parent);
126 125
127 /* Init throttled */
128 ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
129 &data->last_throttled,
130 sizeof(data->last_throttled));
131
132 data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt", 126 data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt",
133 data, 127 data,
134 &rpi_chip_info, 128 &rpi_chip_info,
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 49276bbdac3d..1bb80f992aa8 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
1691 * somewhere else in the code 1691 * somewhere else in the code
1692 */ 1692 */
1693#define SENSOR_ATTR_TEMP(index) { \ 1693#define SENSOR_ATTR_TEMP(index) { \
1694 SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ 1694 SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
1695 show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ 1695 show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
1696 SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \ 1696 SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
1697 NULL, TEMP_READ, index - 1), \ 1697 NULL, TEMP_READ, index - 1), \
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 56ccb1ea7da5..f2c681971201 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -224,6 +224,15 @@ config I2C_NFORCE2_S4985
224 This driver can also be built as a module. If so, the module 224 This driver can also be built as a module. If so, the module
225 will be called i2c-nforce2-s4985. 225 will be called i2c-nforce2-s4985.
226 226
227config I2C_NVIDIA_GPU
228 tristate "NVIDIA GPU I2C controller"
229 depends on PCI
230 help
231 If you say yes to this option, support will be included for the
232 NVIDIA GPU I2C controller which is used to communicate with the GPU's
233 Type-C controller. This driver can also be built as a module called
234 i2c-nvidia-gpu.
235
227config I2C_SIS5595 236config I2C_SIS5595
228 tristate "SiS 5595" 237 tristate "SiS 5595"
229 depends on PCI 238 depends on PCI
@@ -752,7 +761,7 @@ config I2C_OCORES
752 761
753config I2C_OMAP 762config I2C_OMAP
754 tristate "OMAP I2C adapter" 763 tristate "OMAP I2C adapter"
755 depends on ARCH_OMAP 764 depends on ARCH_OMAP || ARCH_K3
756 default y if MACH_OMAP_H3 || MACH_OMAP_OSK 765 default y if MACH_OMAP_H3 || MACH_OMAP_OSK
757 help 766 help
758 If you say yes to this option, support will be included for the 767 If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 18b26af82b1c..5f0cb6915969 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_I2C_ISCH) += i2c-isch.o
19obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o 19obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o
20obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o 20obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
21obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o 21obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o
22obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o
22obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o 23obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
23obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o 24obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
24obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o 25obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
new file mode 100644
index 000000000000..8822357bca0c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -0,0 +1,368 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Nvidia GPU I2C controller Driver
4 *
5 * Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
6 * Author: Ajay Gupta <ajayg@nvidia.com>
7 */
8#include <linux/delay.h>
9#include <linux/i2c.h>
10#include <linux/interrupt.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/platform_device.h>
14#include <linux/pm.h>
15#include <linux/pm_runtime.h>
16
17#include <asm/unaligned.h>
18
19/* I2C definitions */
20#define I2C_MST_CNTL 0x00
21#define I2C_MST_CNTL_GEN_START BIT(0)
22#define I2C_MST_CNTL_GEN_STOP BIT(1)
23#define I2C_MST_CNTL_CMD_READ (1 << 2)
24#define I2C_MST_CNTL_CMD_WRITE (2 << 2)
25#define I2C_MST_CNTL_BURST_SIZE_SHIFT 6
26#define I2C_MST_CNTL_GEN_NACK BIT(28)
27#define I2C_MST_CNTL_STATUS GENMASK(30, 29)
28#define I2C_MST_CNTL_STATUS_OKAY (0 << 29)
29#define I2C_MST_CNTL_STATUS_NO_ACK (1 << 29)
30#define I2C_MST_CNTL_STATUS_TIMEOUT (2 << 29)
31#define I2C_MST_CNTL_STATUS_BUS_BUSY (3 << 29)
32#define I2C_MST_CNTL_CYCLE_TRIGGER BIT(31)
33
34#define I2C_MST_ADDR 0x04
35
36#define I2C_MST_I2C0_TIMING 0x08
37#define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ 0x10e
38#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT 16
39#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX 255
40#define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK BIT(24)
41
42#define I2C_MST_DATA 0x0c
43
44#define I2C_MST_HYBRID_PADCTL 0x20
45#define I2C_MST_HYBRID_PADCTL_MODE_I2C BIT(0)
46#define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV BIT(14)
47#define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV BIT(15)
48
49struct gpu_i2c_dev {
50 struct device *dev;
51 void __iomem *regs;
52 struct i2c_adapter adapter;
53 struct i2c_board_info *gpu_ccgx_ucsi;
54};
55
56static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
57{
58 u32 val;
59
60 /* enable I2C */
61 val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL);
62 val |= I2C_MST_HYBRID_PADCTL_MODE_I2C |
63 I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
64 I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV;
65 writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL);
66
67 /* enable 100KHZ mode */
68 val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ;
69 val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX
70 << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT);
71 val |= I2C_MST_I2C0_TIMING_TIMEOUT_CHECK;
72 writel(val, i2cd->regs + I2C_MST_I2C0_TIMING);
73}
74
75static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
76{
77 unsigned long target = jiffies + msecs_to_jiffies(1000);
78 u32 val;
79
80 do {
81 val = readl(i2cd->regs + I2C_MST_CNTL);
82 if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
83 break;
84 if ((val & I2C_MST_CNTL_STATUS) !=
85 I2C_MST_CNTL_STATUS_BUS_BUSY)
86 break;
87 usleep_range(500, 600);
88 } while (time_is_after_jiffies(target));
89
90 if (time_is_before_jiffies(target)) {
91 dev_err(i2cd->dev, "i2c timeout error %x\n", val);
92 return -ETIME;
93 }
94
95 val = readl(i2cd->regs + I2C_MST_CNTL);
96 switch (val & I2C_MST_CNTL_STATUS) {
97 case I2C_MST_CNTL_STATUS_OKAY:
98 return 0;
99 case I2C_MST_CNTL_STATUS_NO_ACK:
100 return -EIO;
101 case I2C_MST_CNTL_STATUS_TIMEOUT:
102 return -ETIME;
103 default:
104 return 0;
105 }
106}
107
108static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len)
109{
110 int status;
111 u32 val;
112
113 val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ |
114 (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) |
115 I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK;
116 writel(val, i2cd->regs + I2C_MST_CNTL);
117
118 status = gpu_i2c_check_status(i2cd);
119 if (status < 0)
120 return status;
121
122 val = readl(i2cd->regs + I2C_MST_DATA);
123 switch (len) {
124 case 1:
125 data[0] = val;
126 break;
127 case 2:
128 put_unaligned_be16(val, data);
129 break;
130 case 3:
131 put_unaligned_be16(val >> 8, data);
132 data[2] = val;
133 break;
134 case 4:
135 put_unaligned_be32(val, data);
136 break;
137 default:
138 break;
139 }
140 return status;
141}
142
143static int gpu_i2c_start(struct gpu_i2c_dev *i2cd)
144{
145 writel(I2C_MST_CNTL_GEN_START, i2cd->regs + I2C_MST_CNTL);
146 return gpu_i2c_check_status(i2cd);
147}
148
149static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd)
150{
151 writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL);
152 return gpu_i2c_check_status(i2cd);
153}
154
155static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data)
156{
157 u32 val;
158
159 writel(data, i2cd->regs + I2C_MST_DATA);
160
161 val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT);
162 writel(val, i2cd->regs + I2C_MST_CNTL);
163
164 return gpu_i2c_check_status(i2cd);
165}
166
167static int gpu_i2c_master_xfer(struct i2c_adapter *adap,
168 struct i2c_msg *msgs, int num)
169{
170 struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap);
171 int status, status2;
172 int i, j;
173
174 /*
175 * The controller supports maximum 4 byte read due to known
176 * limitation of sending STOP after every read.
177 */
178 for (i = 0; i < num; i++) {
179 if (msgs[i].flags & I2C_M_RD) {
180 /* program client address before starting read */
181 writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR);
182 /* gpu_i2c_read has implicit start */
183 status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len);
184 if (status < 0)
185 goto stop;
186 } else {
187 u8 addr = i2c_8bit_addr_from_msg(msgs + i);
188
189 status = gpu_i2c_start(i2cd);
190 if (status < 0) {
191 if (i == 0)
192 return status;
193 goto stop;
194 }
195
196 status = gpu_i2c_write(i2cd, addr);
197 if (status < 0)
198 goto stop;
199
200 for (j = 0; j < msgs[i].len; j++) {
201 status = gpu_i2c_write(i2cd, msgs[i].buf[j]);
202 if (status < 0)
203 goto stop;
204 }
205 }
206 }
207 status = gpu_i2c_stop(i2cd);
208 if (status < 0)
209 return status;
210
211 return i;
212stop:
213 status2 = gpu_i2c_stop(i2cd);
214 if (status2 < 0)
215 dev_err(i2cd->dev, "i2c stop failed %d\n", status2);
216 return status;
217}
218
219static const struct i2c_adapter_quirks gpu_i2c_quirks = {
220 .max_read_len = 4,
221 .flags = I2C_AQ_COMB_WRITE_THEN_READ,
222};
223
224static u32 gpu_i2c_functionality(struct i2c_adapter *adap)
225{
226 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
227}
228
229static const struct i2c_algorithm gpu_i2c_algorithm = {
230 .master_xfer = gpu_i2c_master_xfer,
231 .functionality = gpu_i2c_functionality,
232};
233
234/*
235 * This driver is for Nvidia GPU cards with USB Type-C interface.
236 * We want to identify the cards using vendor ID and class code only
237 * to avoid dependency of adding product id for any new card which
238 * requires this driver.
239 * Currently there is no class code defined for UCSI device over PCI
240 * so using UNKNOWN class for now and it will be updated when UCSI
241 * over PCI gets a class code.
242 * There is no other NVIDIA cards with UNKNOWN class code. Even if the
243 * driver gets loaded for an undesired card then eventually i2c_read()
244 * (initiated from UCSI i2c_client) will timeout or UCSI commands will
245 * timeout.
246 */
247#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
248static const struct pci_device_id gpu_i2c_ids[] = {
249 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
250 PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00},
251 { }
252};
253MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
254
255static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
256{
257 struct i2c_client *ccgx_client;
258
259 i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
260 sizeof(*i2cd->gpu_ccgx_ucsi),
261 GFP_KERNEL);
262 if (!i2cd->gpu_ccgx_ucsi)
263 return -ENOMEM;
264
265 strlcpy(i2cd->gpu_ccgx_ucsi->type, "ccgx-ucsi",
266 sizeof(i2cd->gpu_ccgx_ucsi->type));
267 i2cd->gpu_ccgx_ucsi->addr = 0x8;
268 i2cd->gpu_ccgx_ucsi->irq = irq;
269 ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
270 if (!ccgx_client)
271 return -ENODEV;
272
273 return 0;
274}
275
276static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
277{
278 struct gpu_i2c_dev *i2cd;
279 int status;
280
281 i2cd = devm_kzalloc(&pdev->dev, sizeof(*i2cd), GFP_KERNEL);
282 if (!i2cd)
283 return -ENOMEM;
284
285 i2cd->dev = &pdev->dev;
286 dev_set_drvdata(&pdev->dev, i2cd);
287
288 status = pcim_enable_device(pdev);
289 if (status < 0) {
290 dev_err(&pdev->dev, "pcim_enable_device failed %d\n", status);
291 return status;
292 }
293
294 pci_set_master(pdev);
295
296 i2cd->regs = pcim_iomap(pdev, 0, 0);
297 if (!i2cd->regs) {
298 dev_err(&pdev->dev, "pcim_iomap failed\n");
299 return -ENOMEM;
300 }
301
302 status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
303 if (status < 0) {
304 dev_err(&pdev->dev, "pci_alloc_irq_vectors err %d\n", status);
305 return status;
306 }
307
308 gpu_enable_i2c_bus(i2cd);
309
310 i2c_set_adapdata(&i2cd->adapter, i2cd);
311 i2cd->adapter.owner = THIS_MODULE;
312 strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
313 sizeof(i2cd->adapter.name));
314 i2cd->adapter.algo = &gpu_i2c_algorithm;
315 i2cd->adapter.quirks = &gpu_i2c_quirks;
316 i2cd->adapter.dev.parent = &pdev->dev;
317 status = i2c_add_adapter(&i2cd->adapter);
318 if (status < 0)
319 goto free_irq_vectors;
320
321 status = gpu_populate_client(i2cd, pdev->irq);
322 if (status < 0) {
323 dev_err(&pdev->dev, "gpu_populate_client failed %d\n", status);
324 goto del_adapter;
325 }
326
327 return 0;
328
329del_adapter:
330 i2c_del_adapter(&i2cd->adapter);
331free_irq_vectors:
332 pci_free_irq_vectors(pdev);
333 return status;
334}
335
336static void gpu_i2c_remove(struct pci_dev *pdev)
337{
338 struct gpu_i2c_dev *i2cd = dev_get_drvdata(&pdev->dev);
339
340 i2c_del_adapter(&i2cd->adapter);
341 pci_free_irq_vectors(pdev);
342}
343
344static int gpu_i2c_resume(struct device *dev)
345{
346 struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
347
348 gpu_enable_i2c_bus(i2cd);
349 return 0;
350}
351
352static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, NULL, gpu_i2c_resume, NULL);
353
354static struct pci_driver gpu_i2c_driver = {
355 .name = "nvidia-gpu",
356 .id_table = gpu_i2c_ids,
357 .probe = gpu_i2c_probe,
358 .remove = gpu_i2c_remove,
359 .driver = {
360 .pm = &gpu_i2c_driver_pm,
361 },
362};
363
364module_pci_driver(gpu_i2c_driver);
365
366MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
367MODULE_DESCRIPTION("Nvidia GPU I2C controller Driver");
368MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 527f55c8c4c7..db075bc0d952 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -571,18 +571,19 @@ static int geni_i2c_probe(struct platform_device *pdev)
571 571
572 dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth); 572 dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
573 573
574 ret = i2c_add_adapter(&gi2c->adap);
575 if (ret) {
576 dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
577 return ret;
578 }
579
580 gi2c->suspended = 1; 574 gi2c->suspended = 1;
581 pm_runtime_set_suspended(gi2c->se.dev); 575 pm_runtime_set_suspended(gi2c->se.dev);
582 pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY); 576 pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
583 pm_runtime_use_autosuspend(gi2c->se.dev); 577 pm_runtime_use_autosuspend(gi2c->se.dev);
584 pm_runtime_enable(gi2c->se.dev); 578 pm_runtime_enable(gi2c->se.dev);
585 579
580 ret = i2c_add_adapter(&gi2c->adap);
581 if (ret) {
582 dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
583 pm_runtime_disable(gi2c->se.dev);
584 return ret;
585 }
586
586 return 0; 587 return 0;
587} 588}
588 589
@@ -590,8 +591,8 @@ static int geni_i2c_remove(struct platform_device *pdev)
590{ 591{
591 struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev); 592 struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
592 593
593 pm_runtime_disable(gi2c->se.dev);
594 i2c_del_adapter(&gi2c->adap); 594 i2c_del_adapter(&gi2c->adap);
595 pm_runtime_disable(gi2c->se.dev);
595 return 0; 596 return 0;
596} 597}
597 598
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 45c997430332..4c8c7a620d08 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -614,18 +614,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
614 return 0; 614 return 0;
615} 615}
616 616
617static int ide_drivers_open(struct inode *inode, struct file *file) 617DEFINE_SHOW_ATTRIBUTE(ide_drivers);
618{
619 return single_open(file, &ide_drivers_show, NULL);
620}
621
622static const struct file_operations ide_drivers_operations = {
623 .owner = THIS_MODULE,
624 .open = ide_drivers_open,
625 .read = seq_read,
626 .llseek = seq_lseek,
627 .release = single_release,
628};
629 618
630void proc_ide_create(void) 619void proc_ide_create(void)
631{ 620{
@@ -634,7 +623,7 @@ void proc_ide_create(void)
634 if (!proc_ide_root) 623 if (!proc_ide_root)
635 return; 624 return;
636 625
637 proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations); 626 proc_create("drivers", 0, proc_ide_root, &ide_drivers_fops);
638} 627}
639 628
640void proc_ide_destroy(void) 629void proc_ide_destroy(void)
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index c5b902b86b44..203ed4adc04a 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
920 struct device_node *root = of_find_node_by_path("/"); 920 struct device_node *root = of_find_node_by_path("/");
921 const char *model = of_get_property(root, "model", NULL); 921 const char *model = of_get_property(root, "model", NULL);
922 922
923 of_node_put(root);
923 /* Get cable type from device-tree. */ 924 /* Get cable type from device-tree. */
924 if (cable && !strncmp(cable, "80-", 3)) { 925 if (cable && !strncmp(cable, "80-", 3)) {
925 /* Some drives fail to detect 80c cable in PowerBook */ 926 /* Some drives fail to detect 80c cable in PowerBook */
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 41d97faf5013..38ff374a3ca4 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -149,6 +149,7 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
149 int report_id = -1; 149 int report_id = -1;
150 u32 address; 150 u32 address;
151 int ret_type; 151 int ret_type;
152 s32 min;
152 struct hid_sensor_hub_device *hsdev = 153 struct hid_sensor_hub_device *hsdev =
153 accel_state->common_attributes.hsdev; 154 accel_state->common_attributes.hsdev;
154 155
@@ -158,12 +159,14 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
158 case IIO_CHAN_INFO_RAW: 159 case IIO_CHAN_INFO_RAW:
159 hid_sensor_power_state(&accel_state->common_attributes, true); 160 hid_sensor_power_state(&accel_state->common_attributes, true);
160 report_id = accel_state->accel[chan->scan_index].report_id; 161 report_id = accel_state->accel[chan->scan_index].report_id;
162 min = accel_state->accel[chan->scan_index].logical_minimum;
161 address = accel_3d_addresses[chan->scan_index]; 163 address = accel_3d_addresses[chan->scan_index];
162 if (report_id >= 0) 164 if (report_id >= 0)
163 *val = sensor_hub_input_attr_get_raw_value( 165 *val = sensor_hub_input_attr_get_raw_value(
164 accel_state->common_attributes.hsdev, 166 accel_state->common_attributes.hsdev,
165 hsdev->usage, address, report_id, 167 hsdev->usage, address, report_id,
166 SENSOR_HUB_SYNC); 168 SENSOR_HUB_SYNC,
169 min < 0);
167 else { 170 else {
168 *val = 0; 171 *val = 0;
169 hid_sensor_power_state(&accel_state->common_attributes, 172 hid_sensor_power_state(&accel_state->common_attributes,
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 36941e69f959..88e857c4baf4 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -111,6 +111,7 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret_type; 113 int ret_type;
114 s32 min;
114 115
115 *val = 0; 116 *val = 0;
116 *val2 = 0; 117 *val2 = 0;
@@ -118,13 +119,15 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
118 case IIO_CHAN_INFO_RAW: 119 case IIO_CHAN_INFO_RAW:
119 hid_sensor_power_state(&gyro_state->common_attributes, true); 120 hid_sensor_power_state(&gyro_state->common_attributes, true);
120 report_id = gyro_state->gyro[chan->scan_index].report_id; 121 report_id = gyro_state->gyro[chan->scan_index].report_id;
122 min = gyro_state->gyro[chan->scan_index].logical_minimum;
121 address = gyro_3d_addresses[chan->scan_index]; 123 address = gyro_3d_addresses[chan->scan_index];
122 if (report_id >= 0) 124 if (report_id >= 0)
123 *val = sensor_hub_input_attr_get_raw_value( 125 *val = sensor_hub_input_attr_get_raw_value(
124 gyro_state->common_attributes.hsdev, 126 gyro_state->common_attributes.hsdev,
125 HID_USAGE_SENSOR_GYRO_3D, address, 127 HID_USAGE_SENSOR_GYRO_3D, address,
126 report_id, 128 report_id,
127 SENSOR_HUB_SYNC); 129 SENSOR_HUB_SYNC,
130 min < 0);
128 else { 131 else {
129 *val = 0; 132 *val = 0;
130 hid_sensor_power_state(&gyro_state->common_attributes, 133 hid_sensor_power_state(&gyro_state->common_attributes,
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index beab6d6fd6e1..4bc95f31c730 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -75,7 +75,8 @@ static int humidity_read_raw(struct iio_dev *indio_dev,
75 HID_USAGE_SENSOR_HUMIDITY, 75 HID_USAGE_SENSOR_HUMIDITY,
76 HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY, 76 HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY,
77 humid_st->humidity_attr.report_id, 77 humid_st->humidity_attr.report_id,
78 SENSOR_HUB_SYNC); 78 SENSOR_HUB_SYNC,
79 humid_st->humidity_attr.logical_minimum < 0);
79 hid_sensor_power_state(&humid_st->common_attributes, false); 80 hid_sensor_power_state(&humid_st->common_attributes, false);
80 81
81 return IIO_VAL_INT; 82 return IIO_VAL_INT;
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 406caaee9a3c..94f33250ba5a 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -93,6 +93,7 @@ static int als_read_raw(struct iio_dev *indio_dev,
93 int report_id = -1; 93 int report_id = -1;
94 u32 address; 94 u32 address;
95 int ret_type; 95 int ret_type;
96 s32 min;
96 97
97 *val = 0; 98 *val = 0;
98 *val2 = 0; 99 *val2 = 0;
@@ -102,8 +103,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
102 case CHANNEL_SCAN_INDEX_INTENSITY: 103 case CHANNEL_SCAN_INDEX_INTENSITY:
103 case CHANNEL_SCAN_INDEX_ILLUM: 104 case CHANNEL_SCAN_INDEX_ILLUM:
104 report_id = als_state->als_illum.report_id; 105 report_id = als_state->als_illum.report_id;
105 address = 106 min = als_state->als_illum.logical_minimum;
106 HID_USAGE_SENSOR_LIGHT_ILLUM; 107 address = HID_USAGE_SENSOR_LIGHT_ILLUM;
107 break; 108 break;
108 default: 109 default:
109 report_id = -1; 110 report_id = -1;
@@ -116,7 +117,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
116 als_state->common_attributes.hsdev, 117 als_state->common_attributes.hsdev,
117 HID_USAGE_SENSOR_ALS, address, 118 HID_USAGE_SENSOR_ALS, address,
118 report_id, 119 report_id,
119 SENSOR_HUB_SYNC); 120 SENSOR_HUB_SYNC,
121 min < 0);
120 hid_sensor_power_state(&als_state->common_attributes, 122 hid_sensor_power_state(&als_state->common_attributes,
121 false); 123 false);
122 } else { 124 } else {
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 45107f7537b5..cf5a0c242609 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -73,6 +73,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
73 int report_id = -1; 73 int report_id = -1;
74 u32 address; 74 u32 address;
75 int ret_type; 75 int ret_type;
76 s32 min;
76 77
77 *val = 0; 78 *val = 0;
78 *val2 = 0; 79 *val2 = 0;
@@ -81,8 +82,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
81 switch (chan->scan_index) { 82 switch (chan->scan_index) {
82 case CHANNEL_SCAN_INDEX_PRESENCE: 83 case CHANNEL_SCAN_INDEX_PRESENCE:
83 report_id = prox_state->prox_attr.report_id; 84 report_id = prox_state->prox_attr.report_id;
84 address = 85 min = prox_state->prox_attr.logical_minimum;
85 HID_USAGE_SENSOR_HUMAN_PRESENCE; 86 address = HID_USAGE_SENSOR_HUMAN_PRESENCE;
86 break; 87 break;
87 default: 88 default:
88 report_id = -1; 89 report_id = -1;
@@ -95,7 +96,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
95 prox_state->common_attributes.hsdev, 96 prox_state->common_attributes.hsdev,
96 HID_USAGE_SENSOR_PROX, address, 97 HID_USAGE_SENSOR_PROX, address,
97 report_id, 98 report_id,
98 SENSOR_HUB_SYNC); 99 SENSOR_HUB_SYNC,
100 min < 0);
99 hid_sensor_power_state(&prox_state->common_attributes, 101 hid_sensor_power_state(&prox_state->common_attributes,
100 false); 102 false);
101 } else { 103 } else {
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index d55c4885211a..f3c0d41e5a8c 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -163,21 +163,23 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
163 int report_id = -1; 163 int report_id = -1;
164 u32 address; 164 u32 address;
165 int ret_type; 165 int ret_type;
166 s32 min;
166 167
167 *val = 0; 168 *val = 0;
168 *val2 = 0; 169 *val2 = 0;
169 switch (mask) { 170 switch (mask) {
170 case IIO_CHAN_INFO_RAW: 171 case IIO_CHAN_INFO_RAW:
171 hid_sensor_power_state(&magn_state->magn_flux_attributes, true); 172 hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
172 report_id = 173 report_id = magn_state->magn[chan->address].report_id;
173 magn_state->magn[chan->address].report_id; 174 min = magn_state->magn[chan->address].logical_minimum;
174 address = magn_3d_addresses[chan->address]; 175 address = magn_3d_addresses[chan->address];
175 if (report_id >= 0) 176 if (report_id >= 0)
176 *val = sensor_hub_input_attr_get_raw_value( 177 *val = sensor_hub_input_attr_get_raw_value(
177 magn_state->magn_flux_attributes.hsdev, 178 magn_state->magn_flux_attributes.hsdev,
178 HID_USAGE_SENSOR_COMPASS_3D, address, 179 HID_USAGE_SENSOR_COMPASS_3D, address,
179 report_id, 180 report_id,
180 SENSOR_HUB_SYNC); 181 SENSOR_HUB_SYNC,
182 min < 0);
181 else { 183 else {
182 *val = 0; 184 *val = 0;
183 hid_sensor_power_state( 185 hid_sensor_power_state(
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index 0a9e8fadfa9d..37ab30566464 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
30 return st_sensors_set_dataready_irq(indio_dev, state); 30 return st_sensors_set_dataready_irq(indio_dev, state);
31} 31}
32 32
33static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
34{
35 return st_sensors_set_enable(indio_dev, true);
36}
37
38static int st_magn_buffer_postenable(struct iio_dev *indio_dev) 33static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
39{ 34{
40 int err; 35 int err;
@@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
50 if (err < 0) 45 if (err < 0)
51 goto st_magn_buffer_postenable_error; 46 goto st_magn_buffer_postenable_error;
52 47
53 return err; 48 return st_sensors_set_enable(indio_dev, true);
54 49
55st_magn_buffer_postenable_error: 50st_magn_buffer_postenable_error:
56 kfree(mdata->buffer_data); 51 kfree(mdata->buffer_data);
@@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
63 int err; 58 int err;
64 struct st_sensor_data *mdata = iio_priv(indio_dev); 59 struct st_sensor_data *mdata = iio_priv(indio_dev);
65 60
66 err = iio_triggered_buffer_predisable(indio_dev); 61 err = st_sensors_set_enable(indio_dev, false);
67 if (err < 0) 62 if (err < 0)
68 goto st_magn_buffer_predisable_error; 63 goto st_magn_buffer_predisable_error;
69 64
70 err = st_sensors_set_enable(indio_dev, false); 65 err = iio_triggered_buffer_predisable(indio_dev);
71 66
72st_magn_buffer_predisable_error: 67st_magn_buffer_predisable_error:
73 kfree(mdata->buffer_data); 68 kfree(mdata->buffer_data);
@@ -75,7 +70,6 @@ st_magn_buffer_predisable_error:
75} 70}
76 71
77static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { 72static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
78 .preenable = &st_magn_buffer_preenable,
79 .postenable = &st_magn_buffer_postenable, 73 .postenable = &st_magn_buffer_postenable,
80 .predisable = &st_magn_buffer_predisable, 74 .predisable = &st_magn_buffer_predisable,
81}; 75};
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 1e5451d1ff88..bdc5e4554ee4 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -111,21 +111,23 @@ static int incl_3d_read_raw(struct iio_dev *indio_dev,
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret_type; 113 int ret_type;
114 s32 min;
114 115
115 *val = 0; 116 *val = 0;
116 *val2 = 0; 117 *val2 = 0;
117 switch (mask) { 118 switch (mask) {
118 case IIO_CHAN_INFO_RAW: 119 case IIO_CHAN_INFO_RAW:
119 hid_sensor_power_state(&incl_state->common_attributes, true); 120 hid_sensor_power_state(&incl_state->common_attributes, true);
120 report_id = 121 report_id = incl_state->incl[chan->scan_index].report_id;
121 incl_state->incl[chan->scan_index].report_id; 122 min = incl_state->incl[chan->scan_index].logical_minimum;
122 address = incl_3d_addresses[chan->scan_index]; 123 address = incl_3d_addresses[chan->scan_index];
123 if (report_id >= 0) 124 if (report_id >= 0)
124 *val = sensor_hub_input_attr_get_raw_value( 125 *val = sensor_hub_input_attr_get_raw_value(
125 incl_state->common_attributes.hsdev, 126 incl_state->common_attributes.hsdev,
126 HID_USAGE_SENSOR_INCLINOMETER_3D, address, 127 HID_USAGE_SENSOR_INCLINOMETER_3D, address,
127 report_id, 128 report_id,
128 SENSOR_HUB_SYNC); 129 SENSOR_HUB_SYNC,
130 min < 0);
129 else { 131 else {
130 hid_sensor_power_state(&incl_state->common_attributes, 132 hid_sensor_power_state(&incl_state->common_attributes,
131 false); 133 false);
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 4c437918f1d2..d7b1c00ceb4d 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -77,6 +77,7 @@ static int press_read_raw(struct iio_dev *indio_dev,
77 int report_id = -1; 77 int report_id = -1;
78 u32 address; 78 u32 address;
79 int ret_type; 79 int ret_type;
80 s32 min;
80 81
81 *val = 0; 82 *val = 0;
82 *val2 = 0; 83 *val2 = 0;
@@ -85,8 +86,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
85 switch (chan->scan_index) { 86 switch (chan->scan_index) {
86 case CHANNEL_SCAN_INDEX_PRESSURE: 87 case CHANNEL_SCAN_INDEX_PRESSURE:
87 report_id = press_state->press_attr.report_id; 88 report_id = press_state->press_attr.report_id;
88 address = 89 min = press_state->press_attr.logical_minimum;
89 HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE; 90 address = HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
90 break; 91 break;
91 default: 92 default:
92 report_id = -1; 93 report_id = -1;
@@ -99,7 +100,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
99 press_state->common_attributes.hsdev, 100 press_state->common_attributes.hsdev,
100 HID_USAGE_SENSOR_PRESSURE, address, 101 HID_USAGE_SENSOR_PRESSURE, address,
101 report_id, 102 report_id,
102 SENSOR_HUB_SYNC); 103 SENSOR_HUB_SYNC,
104 min < 0);
103 hid_sensor_power_state(&press_state->common_attributes, 105 hid_sensor_power_state(&press_state->common_attributes,
104 false); 106 false);
105 } else { 107 } else {
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index beaf6fd3e337..b592fc4f007e 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -76,7 +76,8 @@ static int temperature_read_raw(struct iio_dev *indio_dev,
76 HID_USAGE_SENSOR_TEMPERATURE, 76 HID_USAGE_SENSOR_TEMPERATURE,
77 HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE, 77 HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE,
78 temp_st->temperature_attr.report_id, 78 temp_st->temperature_attr.report_id,
79 SENSOR_HUB_SYNC); 79 SENSOR_HUB_SYNC,
80 temp_st->temperature_attr.logical_minimum < 0);
80 hid_sensor_power_state( 81 hid_sensor_power_state(
81 &temp_st->common_attributes, 82 &temp_st->common_attributes,
82 false); 83 false);
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index ee366199b169..25d43c8f1c2a 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -767,8 +767,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
767 767
768 case NETDEV_CHANGEADDR: 768 case NETDEV_CHANGEADDR:
769 cmds[0] = netdev_del_cmd; 769 cmds[0] = netdev_del_cmd;
770 cmds[1] = add_default_gid_cmd; 770 if (ndev->reg_state == NETREG_REGISTERED) {
771 cmds[2] = add_cmd; 771 cmds[1] = add_default_gid_cmd;
772 cmds[2] = add_cmd;
773 }
772 break; 774 break;
773 775
774 case NETDEV_CHANGEUPPER: 776 case NETDEV_CHANGEUPPER:
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2b4c5e7dd5a1..676c1fd1119d 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -137,15 +137,6 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
137 up_read(&per_mm->umem_rwsem); 137 up_read(&per_mm->umem_rwsem);
138} 138}
139 139
140static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
141 u64 end, void *cookie)
142{
143 ib_umem_notifier_start_account(item);
144 item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
145 ib_umem_notifier_end_account(item);
146 return 0;
147}
148
149static int invalidate_range_start_trampoline(struct ib_umem_odp *item, 140static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
150 u64 start, u64 end, void *cookie) 141 u64 start, u64 end, void *cookie)
151{ 142{
@@ -553,12 +544,13 @@ out:
553 put_page(page); 544 put_page(page);
554 545
555 if (remove_existing_mapping && umem->context->invalidate_range) { 546 if (remove_existing_mapping && umem->context->invalidate_range) {
556 invalidate_page_trampoline( 547 ib_umem_notifier_start_account(umem_odp);
548 umem->context->invalidate_range(
557 umem_odp, 549 umem_odp,
558 ib_umem_start(umem) + (page_index >> umem->page_shift), 550 ib_umem_start(umem) + (page_index << umem->page_shift),
559 ib_umem_start(umem) + ((page_index + 1) >> 551 ib_umem_start(umem) +
560 umem->page_shift), 552 ((page_index + 1) << umem->page_shift));
561 NULL); 553 ib_umem_notifier_end_account(umem_odp);
562 ret = -EAGAIN; 554 ret = -EAGAIN;
563 } 555 }
564 556
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index cf2282654210..77f095e5fbe3 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1268,6 +1268,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1268 /* Registered a new RoCE device instance to netdev */ 1268 /* Registered a new RoCE device instance to netdev */
1269 rc = bnxt_re_register_netdev(rdev); 1269 rc = bnxt_re_register_netdev(rdev);
1270 if (rc) { 1270 if (rc) {
1271 rtnl_unlock();
1271 pr_err("Failed to register with netedev: %#x\n", rc); 1272 pr_err("Failed to register with netedev: %#x\n", rc);
1272 return -EINVAL; 1273 return -EINVAL;
1273 } 1274 }
@@ -1466,6 +1467,7 @@ static void bnxt_re_task(struct work_struct *work)
1466 "Failed to register with IB: %#x", rc); 1467 "Failed to register with IB: %#x", rc);
1467 bnxt_re_remove_one(rdev); 1468 bnxt_re_remove_one(rdev);
1468 bnxt_re_dev_unreg(rdev); 1469 bnxt_re_dev_unreg(rdev);
1470 goto exit;
1469 } 1471 }
1470 break; 1472 break;
1471 case NETDEV_UP: 1473 case NETDEV_UP:
@@ -1489,6 +1491,7 @@ static void bnxt_re_task(struct work_struct *work)
1489 } 1491 }
1490 smp_mb__before_atomic(); 1492 smp_mb__before_atomic();
1491 atomic_dec(&rdev->sched_count); 1493 atomic_dec(&rdev->sched_count);
1494exit:
1492 kfree(re_work); 1495 kfree(re_work);
1493} 1496}
1494 1497
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a4c62ae23a9a..3beb1523e17c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1756,10 +1756,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1756 return hns_roce_cmq_send(hr_dev, &desc, 1); 1756 return hns_roce_cmq_send(hr_dev, &desc, 1);
1757} 1757}
1758 1758
1759static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, 1759static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
1760 unsigned long mtpt_idx) 1760 struct hns_roce_mr *mr)
1761{ 1761{
1762 struct hns_roce_v2_mpt_entry *mpt_entry;
1763 struct scatterlist *sg; 1762 struct scatterlist *sg;
1764 u64 page_addr; 1763 u64 page_addr;
1765 u64 *pages; 1764 u64 *pages;
@@ -1767,6 +1766,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1767 int len; 1766 int len;
1768 int entry; 1767 int entry;
1769 1768
1769 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1770 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1771 roce_set_field(mpt_entry->byte_48_mode_ba,
1772 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
1773 upper_32_bits(mr->pbl_ba >> 3));
1774
1775 pages = (u64 *)__get_free_page(GFP_KERNEL);
1776 if (!pages)
1777 return -ENOMEM;
1778
1779 i = 0;
1780 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1781 len = sg_dma_len(sg) >> PAGE_SHIFT;
1782 for (j = 0; j < len; ++j) {
1783 page_addr = sg_dma_address(sg) +
1784 (j << mr->umem->page_shift);
1785 pages[i] = page_addr >> 6;
1786 /* Record the first 2 entry directly to MTPT table */
1787 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1788 goto found;
1789 i++;
1790 }
1791 }
1792found:
1793 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1794 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1795 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
1796
1797 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1798 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1799 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1800 roce_set_field(mpt_entry->byte_64_buf_pa1,
1801 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1802 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1803 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1804
1805 free_page((unsigned long)pages);
1806
1807 return 0;
1808}
1809
1810static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1811 unsigned long mtpt_idx)
1812{
1813 struct hns_roce_v2_mpt_entry *mpt_entry;
1814 int ret;
1815
1770 mpt_entry = mb_buf; 1816 mpt_entry = mb_buf;
1771 memset(mpt_entry, 0, sizeof(*mpt_entry)); 1817 memset(mpt_entry, 0, sizeof(*mpt_entry));
1772 1818
@@ -1781,7 +1827,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1781 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET); 1827 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
1782 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, 1828 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1783 V2_MPT_BYTE_4_PD_S, mr->pd); 1829 V2_MPT_BYTE_4_PD_S, mr->pd);
1784 mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
1785 1830
1786 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0); 1831 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
1787 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); 1832 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
@@ -1796,13 +1841,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1796 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); 1841 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1797 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1842 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1798 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); 1843 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1799 mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
1800 1844
1801 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 1845 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
1802 mr->type == MR_TYPE_MR ? 0 : 1); 1846 mr->type == MR_TYPE_MR ? 0 : 1);
1803 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S, 1847 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1804 1); 1848 1);
1805 mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
1806 1849
1807 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); 1850 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
1808 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); 1851 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -1813,53 +1856,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1813 if (mr->type == MR_TYPE_DMA) 1856 if (mr->type == MR_TYPE_DMA)
1814 return 0; 1857 return 0;
1815 1858
1816 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); 1859 ret = set_mtpt_pbl(mpt_entry, mr);
1817
1818 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1819 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
1820 V2_MPT_BYTE_48_PBL_BA_H_S,
1821 upper_32_bits(mr->pbl_ba >> 3));
1822 mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
1823
1824 pages = (u64 *)__get_free_page(GFP_KERNEL);
1825 if (!pages)
1826 return -ENOMEM;
1827
1828 i = 0;
1829 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1830 len = sg_dma_len(sg) >> PAGE_SHIFT;
1831 for (j = 0; j < len; ++j) {
1832 page_addr = sg_dma_address(sg) +
1833 (j << mr->umem->page_shift);
1834 pages[i] = page_addr >> 6;
1835
1836 /* Record the first 2 entry directly to MTPT table */
1837 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1838 goto found;
1839 i++;
1840 }
1841 }
1842
1843found:
1844 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1845 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1846 V2_MPT_BYTE_56_PA0_H_S,
1847 upper_32_bits(pages[0]));
1848 mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
1849
1850 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1851 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1852 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1853 1860
1854 free_page((unsigned long)pages); 1861 return ret;
1855
1856 roce_set_field(mpt_entry->byte_64_buf_pa1,
1857 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1858 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1859 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1860 mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
1861
1862 return 0;
1863} 1862}
1864 1863
1865static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, 1864static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
@@ -1868,6 +1867,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1868 u64 size, void *mb_buf) 1867 u64 size, void *mb_buf)
1869{ 1868{
1870 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf; 1869 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
1870 int ret = 0;
1871 1871
1872 if (flags & IB_MR_REREG_PD) { 1872 if (flags & IB_MR_REREG_PD) {
1873 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, 1873 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
@@ -1880,14 +1880,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1880 V2_MPT_BYTE_8_BIND_EN_S, 1880 V2_MPT_BYTE_8_BIND_EN_S,
1881 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0)); 1881 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
1882 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, 1882 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1883 V2_MPT_BYTE_8_ATOMIC_EN_S, 1883 V2_MPT_BYTE_8_ATOMIC_EN_S,
1884 (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0)); 1884 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
1885 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, 1885 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1886 (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0)); 1886 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
1887 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, 1887 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1888 (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); 1888 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
1889 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1889 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1890 (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); 1890 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
1891 } 1891 }
1892 1892
1893 if (flags & IB_MR_REREG_TRANS) { 1893 if (flags & IB_MR_REREG_TRANS) {
@@ -1896,21 +1896,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1896 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size)); 1896 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
1897 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size)); 1897 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
1898 1898
1899 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1900 mpt_entry->pbl_ba_l =
1901 cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1902 roce_set_field(mpt_entry->byte_48_mode_ba,
1903 V2_MPT_BYTE_48_PBL_BA_H_M,
1904 V2_MPT_BYTE_48_PBL_BA_H_S,
1905 upper_32_bits(mr->pbl_ba >> 3));
1906 mpt_entry->byte_48_mode_ba =
1907 cpu_to_le32(mpt_entry->byte_48_mode_ba);
1908
1909 mr->iova = iova; 1899 mr->iova = iova;
1910 mr->size = size; 1900 mr->size = size;
1901
1902 ret = set_mtpt_pbl(mpt_entry, mr);
1911 } 1903 }
1912 1904
1913 return 0; 1905 return ret;
1914} 1906}
1915 1907
1916static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) 1908static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e9c428071df3..3569fda07e07 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1094,31 +1094,26 @@ enum mlx5_ib_width {
1094 MLX5_IB_WIDTH_12X = 1 << 4 1094 MLX5_IB_WIDTH_12X = 1 << 4
1095}; 1095};
1096 1096
1097static int translate_active_width(struct ib_device *ibdev, u8 active_width, 1097static void translate_active_width(struct ib_device *ibdev, u8 active_width,
1098 u8 *ib_width) 1098 u8 *ib_width)
1099{ 1099{
1100 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1100 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1101 int err = 0;
1102 1101
1103 if (active_width & MLX5_IB_WIDTH_1X) { 1102 if (active_width & MLX5_IB_WIDTH_1X)
1104 *ib_width = IB_WIDTH_1X; 1103 *ib_width = IB_WIDTH_1X;
1105 } else if (active_width & MLX5_IB_WIDTH_2X) { 1104 else if (active_width & MLX5_IB_WIDTH_4X)
1106 mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
1107 (int)active_width);
1108 err = -EINVAL;
1109 } else if (active_width & MLX5_IB_WIDTH_4X) {
1110 *ib_width = IB_WIDTH_4X; 1105 *ib_width = IB_WIDTH_4X;
1111 } else if (active_width & MLX5_IB_WIDTH_8X) { 1106 else if (active_width & MLX5_IB_WIDTH_8X)
1112 *ib_width = IB_WIDTH_8X; 1107 *ib_width = IB_WIDTH_8X;
1113 } else if (active_width & MLX5_IB_WIDTH_12X) { 1108 else if (active_width & MLX5_IB_WIDTH_12X)
1114 *ib_width = IB_WIDTH_12X; 1109 *ib_width = IB_WIDTH_12X;
1115 } else { 1110 else {
1116 mlx5_ib_dbg(dev, "Invalid active_width %d\n", 1111 mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1117 (int)active_width); 1112 (int)active_width);
1118 err = -EINVAL; 1113 *ib_width = IB_WIDTH_4X;
1119 } 1114 }
1120 1115
1121 return err; 1116 return;
1122} 1117}
1123 1118
1124static int mlx5_mtu_to_ib_mtu(int mtu) 1119static int mlx5_mtu_to_ib_mtu(int mtu)
@@ -1225,10 +1220,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
1225 if (err) 1220 if (err)
1226 goto out; 1221 goto out;
1227 1222
1228 err = translate_active_width(ibdev, ib_link_width_oper, 1223 translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1229 &props->active_width); 1224
1230 if (err)
1231 goto out;
1232 err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); 1225 err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
1233 if (err) 1226 if (err)
1234 goto out; 1227 goto out;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index b04eb6775326..2cc3d69ab6f6 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -674,6 +674,15 @@ next_mr:
674 goto srcu_unlock; 674 goto srcu_unlock;
675 } 675 }
676 676
677 if (!mr->umem->is_odp) {
678 mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
679 key);
680 if (bytes_mapped)
681 *bytes_mapped += bcnt;
682 ret = 0;
683 goto srcu_unlock;
684 }
685
677 ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped); 686 ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped);
678 if (ret < 0) 687 if (ret < 0)
679 goto srcu_unlock; 688 goto srcu_unlock;
@@ -735,6 +744,7 @@ next_mr:
735 head = frame; 744 head = frame;
736 745
737 bcnt -= frame->bcnt; 746 bcnt -= frame->bcnt;
747 offset = 0;
738 } 748 }
739 break; 749 break;
740 750
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6841c0f9237f..3747cc681b18 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2633,8 +2633,7 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
2633 2633
2634 if (access_flags & IB_ACCESS_REMOTE_READ) 2634 if (access_flags & IB_ACCESS_REMOTE_READ)
2635 *hw_access_flags |= MLX5_QP_BIT_RRE; 2635 *hw_access_flags |= MLX5_QP_BIT_RRE;
2636 if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) && 2636 if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
2637 qp->ibqp.qp_type == IB_QPT_RC) {
2638 int atomic_mode; 2637 int atomic_mode;
2639 2638
2640 atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type); 2639 atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
@@ -4678,17 +4677,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
4678 goto out; 4677 goto out;
4679 } 4678 }
4680 4679
4681 if (wr->opcode == IB_WR_LOCAL_INV || 4680 if (wr->opcode == IB_WR_REG_MR) {
4682 wr->opcode == IB_WR_REG_MR) {
4683 fence = dev->umr_fence; 4681 fence = dev->umr_fence;
4684 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 4682 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
4685 } else if (wr->send_flags & IB_SEND_FENCE) { 4683 } else {
4686 if (qp->next_fence) 4684 if (wr->send_flags & IB_SEND_FENCE) {
4687 fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; 4685 if (qp->next_fence)
4688 else 4686 fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
4689 fence = MLX5_FENCE_MODE_FENCE; 4687 else
4690 } else { 4688 fence = MLX5_FENCE_MODE_FENCE;
4691 fence = qp->next_fence; 4689 } else {
4690 fence = qp->next_fence;
4691 }
4692 } 4692 }
4693 4693
4694 switch (ibqp->qp_type) { 4694 switch (ibqp->qp_type) {
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index 89ec0f64abfc..084bb4baebb5 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
91 * rvt_create_ah - create an address handle 91 * rvt_create_ah - create an address handle
92 * @pd: the protection domain 92 * @pd: the protection domain
93 * @ah_attr: the attributes of the AH 93 * @ah_attr: the attributes of the AH
94 * @udata: pointer to user's input output buffer information.
94 * 95 *
95 * This may be called from interrupt context. 96 * This may be called from interrupt context.
96 * 97 *
97 * Return: newly allocated ah 98 * Return: newly allocated ah
98 */ 99 */
99struct ib_ah *rvt_create_ah(struct ib_pd *pd, 100struct ib_ah *rvt_create_ah(struct ib_pd *pd,
100 struct rdma_ah_attr *ah_attr) 101 struct rdma_ah_attr *ah_attr,
102 struct ib_udata *udata)
101{ 103{
102 struct rvt_ah *ah; 104 struct rvt_ah *ah;
103 struct rvt_dev_info *dev = ib_to_rvt(pd->device); 105 struct rvt_dev_info *dev = ib_to_rvt(pd->device);
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index 16105af99189..25271b48a683 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -51,7 +51,8 @@
51#include <rdma/rdma_vt.h> 51#include <rdma/rdma_vt.h>
52 52
53struct ib_ah *rvt_create_ah(struct ib_pd *pd, 53struct ib_ah *rvt_create_ah(struct ib_pd *pd,
54 struct rdma_ah_attr *ah_attr); 54 struct rdma_ah_attr *ah_attr,
55 struct ib_udata *udata);
55int rvt_destroy_ah(struct ib_ah *ibah); 56int rvt_destroy_ah(struct ib_ah *ibah);
56int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); 57int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
57int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); 58int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 946b623ba5eb..4ff3d98fa6a4 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1124,7 +1124,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
1124 IB_MR_CHECK_SIG_STATUS, &mr_status); 1124 IB_MR_CHECK_SIG_STATUS, &mr_status);
1125 if (ret) { 1125 if (ret) {
1126 pr_err("ib_check_mr_status failed, ret %d\n", ret); 1126 pr_err("ib_check_mr_status failed, ret %d\n", ret);
1127 goto err; 1127 /* Not a lot we can do, return ambiguous guard error */
1128 *sector = 0;
1129 return 0x1;
1128 } 1130 }
1129 1131
1130 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1132 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
@@ -1152,9 +1154,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
1152 } 1154 }
1153 1155
1154 return 0; 1156 return 0;
1155err:
1156 /* Not alot we can do here, return ambiguous guard error */
1157 return 0x1;
1158} 1157}
1159 1158
1160void iser_err_comp(struct ib_wc *wc, const char *type) 1159void iser_err_comp(struct ib_wc *wc, const char *type)
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d4b9db487b16..cfc8b94527b9 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -480,18 +480,18 @@ static const u8 xboxone_hori_init[] = {
480}; 480};
481 481
482/* 482/*
483 * This packet is required for some of the PDP pads to start 483 * This packet is required for most (all?) of the PDP pads to start
484 * sending input reports. These pads include: (0x0e6f:0x02ab), 484 * sending input reports. These pads include: (0x0e6f:0x02ab),
485 * (0x0e6f:0x02a4). 485 * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
486 */ 486 */
487static const u8 xboxone_pdp_init1[] = { 487static const u8 xboxone_pdp_init1[] = {
488 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 488 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
489}; 489};
490 490
491/* 491/*
492 * This packet is required for some of the PDP pads to start 492 * This packet is required for most (all?) of the PDP pads to start
493 * sending input reports. These pads include: (0x0e6f:0x02ab), 493 * sending input reports. These pads include: (0x0e6f:0x02ab),
494 * (0x0e6f:0x02a4). 494 * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
495 */ 495 */
496static const u8 xboxone_pdp_init2[] = { 496static const u8 xboxone_pdp_init2[] = {
497 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 497 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
@@ -527,12 +527,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
527 XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), 527 XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
528 XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), 528 XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
529 XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), 529 XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
530 XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1), 530 XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
531 XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), 531 XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
532 XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
533 XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
534 XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
535 XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
536 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), 532 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
537 XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), 533 XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
538 XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), 534 XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 7e75835e220f..850bb259c20e 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -841,7 +841,7 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
841 if (param[0] != 3) { 841 if (param[0] != 3) {
842 param[0] = 2; 842 param[0] = 2;
843 if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET)) 843 if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET))
844 return 2; 844 return 2;
845 } 845 }
846 846
847 ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MBR); 847 ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MBR);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 81be6f781f0b..d56001181598 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -493,7 +493,8 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev)
493 for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) { 493 for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) {
494 const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i]; 494 const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i];
495 495
496 if (buttons & BIT(map->bit)) 496 if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) ||
497 (map->ev_type == EV_SW && (switches & BIT(map->bit))))
497 input_set_capability(idev, map->ev_type, map->code); 498 input_set_capability(idev, map->ev_type, map->code);
498 } 499 }
499 500
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index f51ae09596ef..403452ef00e6 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -407,7 +407,7 @@ matrix_keypad_parse_dt(struct device *dev)
407 struct matrix_keypad_platform_data *pdata; 407 struct matrix_keypad_platform_data *pdata;
408 struct device_node *np = dev->of_node; 408 struct device_node *np = dev->of_node;
409 unsigned int *gpios; 409 unsigned int *gpios;
410 int i, nrow, ncol; 410 int ret, i, nrow, ncol;
411 411
412 if (!np) { 412 if (!np) {
413 dev_err(dev, "device lacks DT data\n"); 413 dev_err(dev, "device lacks DT data\n");
@@ -452,12 +452,19 @@ matrix_keypad_parse_dt(struct device *dev)
452 return ERR_PTR(-ENOMEM); 452 return ERR_PTR(-ENOMEM);
453 } 453 }
454 454
455 for (i = 0; i < pdata->num_row_gpios; i++) 455 for (i = 0; i < nrow; i++) {
456 gpios[i] = of_get_named_gpio(np, "row-gpios", i); 456 ret = of_get_named_gpio(np, "row-gpios", i);
457 if (ret < 0)
458 return ERR_PTR(ret);
459 gpios[i] = ret;
460 }
457 461
458 for (i = 0; i < pdata->num_col_gpios; i++) 462 for (i = 0; i < ncol; i++) {
459 gpios[pdata->num_row_gpios + i] = 463 ret = of_get_named_gpio(np, "col-gpios", i);
460 of_get_named_gpio(np, "col-gpios", i); 464 if (ret < 0)
465 return ERR_PTR(ret);
466 gpios[nrow + i] = ret;
467 }
461 468
462 pdata->row_gpios = gpios; 469 pdata->row_gpios = gpios;
463 pdata->col_gpios = &gpios[pdata->num_row_gpios]; 470 pdata->col_gpios = &gpios[pdata->num_row_gpios];
@@ -484,10 +491,8 @@ static int matrix_keypad_probe(struct platform_device *pdev)
484 pdata = dev_get_platdata(&pdev->dev); 491 pdata = dev_get_platdata(&pdev->dev);
485 if (!pdata) { 492 if (!pdata) {
486 pdata = matrix_keypad_parse_dt(&pdev->dev); 493 pdata = matrix_keypad_parse_dt(&pdev->dev);
487 if (IS_ERR(pdata)) { 494 if (IS_ERR(pdata))
488 dev_err(&pdev->dev, "no platform data defined\n");
489 return PTR_ERR(pdata); 495 return PTR_ERR(pdata);
490 }
491 } else if (!pdata->keymap_data) { 496 } else if (!pdata->keymap_data) {
492 dev_err(&pdev->dev, "no keymap data defined\n"); 497 dev_err(&pdev->dev, "no keymap data defined\n");
493 return -EINVAL; 498 return -EINVAL;
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 46406345742b..a7dc286f406c 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -60,8 +60,18 @@
60 60
61/* OMAP4 values */ 61/* OMAP4 values */
62#define OMAP4_VAL_IRQDISABLE 0x0 62#define OMAP4_VAL_IRQDISABLE 0x0
63#define OMAP4_VAL_DEBOUNCINGTIME 0x7 63
64#define OMAP4_VAL_PVT 0x7 64/*
65 * Errata i689: If a key is released for a time shorter than debounce time,
66 * the keyboard will idle and never detect the key release. The workaround
67 * is to use at least a 12ms debounce time. See omap5432 TRM chapter
68 * "26.4.6.2 Keyboard Controller Timer" for more information.
69 */
70#define OMAP4_KEYPAD_PTV_DIV_128 0x6
71#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \
72 ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
73#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
74 OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
65 75
66enum { 76enum {
67 KBD_REVISION_OMAP4 = 0, 77 KBD_REVISION_OMAP4 = 0,
@@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
181 191
182 kbd_writel(keypad_data, OMAP4_KBD_CTRL, 192 kbd_writel(keypad_data, OMAP4_KBD_CTRL,
183 OMAP4_DEF_CTRL_NOSOFTMODE | 193 OMAP4_DEF_CTRL_NOSOFTMODE |
184 (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT)); 194 (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
185 kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME, 195 kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
186 OMAP4_VAL_DEBOUNCINGTIME); 196 OMAP4_VAL_DEBOUNCINGTIME_16MS);
187 /* clear pending interrupts */ 197 /* clear pending interrupts */
188 kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, 198 kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
189 kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); 199 kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index b0f9d19b3410..a94b6494e71a 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1348,6 +1348,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
1348 { "ELAN0618", 0 }, 1348 { "ELAN0618", 0 },
1349 { "ELAN061C", 0 }, 1349 { "ELAN061C", 0 },
1350 { "ELAN061D", 0 }, 1350 { "ELAN061D", 0 },
1351 { "ELAN061E", 0 },
1352 { "ELAN0620", 0 },
1353 { "ELAN0621", 0 },
1351 { "ELAN0622", 0 }, 1354 { "ELAN0622", 0 },
1352 { "ELAN1000", 0 }, 1355 { "ELAN1000", 0 },
1353 { } 1356 { }
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 5e85f3cca867..2bd5bb11c8ba 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
170 "LEN0048", /* X1 Carbon 3 */ 170 "LEN0048", /* X1 Carbon 3 */
171 "LEN0046", /* X250 */ 171 "LEN0046", /* X250 */
172 "LEN004a", /* W541 */ 172 "LEN004a", /* W541 */
173 "LEN005b", /* P50 */
173 "LEN0071", /* T480 */ 174 "LEN0071", /* T480 */
174 "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ 175 "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
175 "LEN0073", /* X1 Carbon G5 (Elantech) */ 176 "LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -177,6 +178,7 @@ static const char * const smbus_pnp_ids[] = {
177 "LEN0096", /* X280 */ 178 "LEN0096", /* X280 */
178 "LEN0097", /* X280 -> ALPS trackpoint */ 179 "LEN0097", /* X280 -> ALPS trackpoint */
179 "LEN200f", /* T450s */ 180 "LEN200f", /* T450s */
181 "SYN3221", /* HP 15-ay000 */
180 NULL 182 NULL
181}; 183};
182 184
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 47a0e81a2989..a8b9be3e28db 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
177 * state because the Enter-UP can trigger a wakeup at once. 177 * state because the Enter-UP can trigger a wakeup at once.
178 */ 178 */
179 if (!(info & IS_BREAK)) 179 if (!(info & IS_BREAK))
180 pm_wakeup_event(&hv_dev->device, 0); 180 pm_wakeup_hard_event(&hv_dev->device);
181 181
182 break; 182 break;
183 183
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index 02fb11985819..42d3fd7e04d7 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -1,23 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Touch Screen driver for Renesas MIGO-R Platform 3 * Touch Screen driver for Renesas MIGO-R Platform
3 * 4 *
4 * Copyright (c) 2008 Magnus Damm 5 * Copyright (c) 2008 Magnus Damm
5 * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, 6 * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>,
6 * Kenati Technologies Pvt Ltd. 7 * Kenati Technologies Pvt Ltd.
7 *
8 * This file is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This file is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 8 */
22#include <linux/module.h> 9#include <linux/module.h>
23#include <linux/kernel.h> 10#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index b71673911aac..11ff32c68025 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * ST1232 Touchscreen Controller Driver 3 * ST1232 Touchscreen Controller Driver
3 * 4 *
@@ -7,15 +8,6 @@
7 * Using code from: 8 * Using code from:
8 * - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c 9 * - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c
9 * Copyright (C) 2007 Google, Inc. 10 * Copyright (C) 2007 Google, Inc.
10 *
11 * This software is licensed under the terms of the GNU General Public
12 * License version 2, as published by the Free Software Foundation, and
13 * may be copied, distributed, and modified under those terms.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */ 11 */
20 12
21#include <linux/delay.h> 13#include <linux/delay.h>
@@ -295,4 +287,4 @@ module_i2c_driver(st1232_ts_driver);
295 287
296MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>"); 288MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>");
297MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver"); 289MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver");
298MODULE_LICENSE("GPL"); 290MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bb2cd29e1658..d8f7000a466a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
797 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; 797 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
798 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, 798 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
799 &entry, sizeof(entry)); 799 &entry, sizeof(entry));
800 entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL; 800 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
801 (BIT_ULL(52)-1)) & ~7ULL;
801 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, 802 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
802 &entry, sizeof(entry)); 803 &entry, sizeof(entry));
803 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); 804 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f3ccf025108b..41a4b8808802 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3075,7 +3075,7 @@ static int copy_context_table(struct intel_iommu *iommu,
3075 } 3075 }
3076 3076
3077 if (old_ce) 3077 if (old_ce)
3078 iounmap(old_ce); 3078 memunmap(old_ce);
3079 3079
3080 ret = 0; 3080 ret = 0;
3081 if (devfn < 0x80) 3081 if (devfn < 0x80)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index db301efe126d..887150907526 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -595,7 +595,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
595 pr_err("%s: Page request without PASID: %08llx %08llx\n", 595 pr_err("%s: Page request without PASID: %08llx %08llx\n",
596 iommu->name, ((unsigned long long *)req)[0], 596 iommu->name, ((unsigned long long *)req)[0],
597 ((unsigned long long *)req)[1]); 597 ((unsigned long long *)req)[1]);
598 goto bad_req; 598 goto no_pasid;
599 } 599 }
600 600
601 if (!svm || svm->pasid != req->pasid) { 601 if (!svm || svm->pasid != req->pasid) {
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index b98a03189580..ddf3a492e1d5 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -498,6 +498,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
498 498
499static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) 499static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
500{ 500{
501 if (!domain->mmu)
502 return;
503
501 /* 504 /*
502 * Disable the context. Flush the TLB as required when modifying the 505 * Disable the context. Flush the TLB as required when modifying the
503 * context registers. 506 * context registers.
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index ce7acd115dd8..1870cf87afe1 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -75,8 +75,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
75{ 75{
76 struct pattern_trig_data *data = from_timer(data, t, timer); 76 struct pattern_trig_data *data = from_timer(data, t, timer);
77 77
78 mutex_lock(&data->lock);
79
80 for (;;) { 78 for (;;) {
81 if (!data->is_indefinite && !data->repeat) 79 if (!data->is_indefinite && !data->repeat)
82 break; 80 break;
@@ -87,9 +85,10 @@ static void pattern_trig_timer_function(struct timer_list *t)
87 data->curr->brightness); 85 data->curr->brightness);
88 mod_timer(&data->timer, 86 mod_timer(&data->timer,
89 jiffies + msecs_to_jiffies(data->curr->delta_t)); 87 jiffies + msecs_to_jiffies(data->curr->delta_t));
90 88 if (!data->next->delta_t) {
91 /* Skip the tuple with zero duration */ 89 /* Skip the tuple with zero duration */
92 pattern_trig_update_patterns(data); 90 pattern_trig_update_patterns(data);
91 }
93 /* Select next tuple */ 92 /* Select next tuple */
94 pattern_trig_update_patterns(data); 93 pattern_trig_update_patterns(data);
95 } else { 94 } else {
@@ -116,8 +115,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
116 115
117 break; 116 break;
118 } 117 }
119
120 mutex_unlock(&data->lock);
121} 118}
122 119
123static int pattern_trig_start_pattern(struct led_classdev *led_cdev) 120static int pattern_trig_start_pattern(struct led_classdev *led_cdev)
@@ -176,14 +173,10 @@ static ssize_t repeat_store(struct device *dev, struct device_attribute *attr,
176 if (res < -1 || res == 0) 173 if (res < -1 || res == 0)
177 return -EINVAL; 174 return -EINVAL;
178 175
179 /*
180 * Clear previous patterns' performence firstly, and remove the timer
181 * without mutex lock to avoid dead lock.
182 */
183 del_timer_sync(&data->timer);
184
185 mutex_lock(&data->lock); 176 mutex_lock(&data->lock);
186 177
178 del_timer_sync(&data->timer);
179
187 if (data->is_hw_pattern) 180 if (data->is_hw_pattern)
188 led_cdev->pattern_clear(led_cdev); 181 led_cdev->pattern_clear(led_cdev);
189 182
@@ -234,14 +227,10 @@ static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
234 struct pattern_trig_data *data = led_cdev->trigger_data; 227 struct pattern_trig_data *data = led_cdev->trigger_data;
235 int ccount, cr, offset = 0, err = 0; 228 int ccount, cr, offset = 0, err = 0;
236 229
237 /*
238 * Clear previous patterns' performence firstly, and remove the timer
239 * without mutex lock to avoid dead lock.
240 */
241 del_timer_sync(&data->timer);
242
243 mutex_lock(&data->lock); 230 mutex_lock(&data->lock);
244 231
232 del_timer_sync(&data->timer);
233
245 if (data->is_hw_pattern) 234 if (data->is_hw_pattern)
246 led_cdev->pattern_clear(led_cdev); 235 led_cdev->pattern_clear(led_cdev);
247 236
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 31d1f4ab915e..65a933a21e68 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -807,7 +807,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
807 } 807 }
808 808
809 if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) { 809 if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
810 dprintk(1, "%s: transmit queue full\n", __func__); 810 dprintk(2, "%s: transmit queue full\n", __func__);
811 return -EBUSY; 811 return -EBUSY;
812 } 812 }
813 813
@@ -1180,6 +1180,8 @@ static int cec_config_log_addr(struct cec_adapter *adap,
1180{ 1180{
1181 struct cec_log_addrs *las = &adap->log_addrs; 1181 struct cec_log_addrs *las = &adap->log_addrs;
1182 struct cec_msg msg = { }; 1182 struct cec_msg msg = { };
1183 const unsigned int max_retries = 2;
1184 unsigned int i;
1183 int err; 1185 int err;
1184 1186
1185 if (cec_has_log_addr(adap, log_addr)) 1187 if (cec_has_log_addr(adap, log_addr))
@@ -1188,19 +1190,44 @@ static int cec_config_log_addr(struct cec_adapter *adap,
1188 /* Send poll message */ 1190 /* Send poll message */
1189 msg.len = 1; 1191 msg.len = 1;
1190 msg.msg[0] = (log_addr << 4) | log_addr; 1192 msg.msg[0] = (log_addr << 4) | log_addr;
1191 err = cec_transmit_msg_fh(adap, &msg, NULL, true);
1192 1193
1193 /* 1194 for (i = 0; i < max_retries; i++) {
1194 * While trying to poll the physical address was reset 1195 err = cec_transmit_msg_fh(adap, &msg, NULL, true);
1195 * and the adapter was unconfigured, so bail out.
1196 */
1197 if (!adap->is_configuring)
1198 return -EINTR;
1199 1196
1200 if (err) 1197 /*
1201 return err; 1198 * While trying to poll the physical address was reset
1199 * and the adapter was unconfigured, so bail out.
1200 */
1201 if (!adap->is_configuring)
1202 return -EINTR;
1203
1204 if (err)
1205 return err;
1202 1206
1203 if (msg.tx_status & CEC_TX_STATUS_OK) 1207 /*
1208 * The message was aborted due to a disconnect or
1209 * unconfigure, just bail out.
1210 */
1211 if (msg.tx_status & CEC_TX_STATUS_ABORTED)
1212 return -EINTR;
1213 if (msg.tx_status & CEC_TX_STATUS_OK)
1214 return 0;
1215 if (msg.tx_status & CEC_TX_STATUS_NACK)
1216 break;
1217 /*
1218 * Retry up to max_retries times if the message was neither
1219 * OKed or NACKed. This can happen due to e.g. a Lost
1220 * Arbitration condition.
1221 */
1222 }
1223
1224 /*
1225 * If we are unable to get an OK or a NACK after max_retries attempts
1226 * (and note that each attempt already consists of four polls), then
1227 * then we assume that something is really weird and that it is not a
1228 * good idea to try and claim this logical address.
1229 */
1230 if (i == max_retries)
1204 return 0; 1231 return 0;
1205 1232
1206 /* 1233 /*
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 6d4b2eec67b4..29836c1a40e9 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -80,8 +80,8 @@ struct dvb_pll_desc {
80 80
81static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { 81static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
82 .name = "Thomson dtt7579", 82 .name = "Thomson dtt7579",
83 .min = 177000000, 83 .min = 177 * MHz,
84 .max = 858000000, 84 .max = 858 * MHz,
85 .iffreq= 36166667, 85 .iffreq= 36166667,
86 .sleepdata = (u8[]){ 2, 0xb4, 0x03 }, 86 .sleepdata = (u8[]){ 2, 0xb4, 0x03 },
87 .count = 4, 87 .count = 4,
@@ -102,8 +102,8 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf)
102 102
103static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = { 103static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
104 .name = "Thomson dtt759x", 104 .name = "Thomson dtt759x",
105 .min = 177000000, 105 .min = 177 * MHz,
106 .max = 896000000, 106 .max = 896 * MHz,
107 .set = thomson_dtt759x_bw, 107 .set = thomson_dtt759x_bw,
108 .iffreq= 36166667, 108 .iffreq= 36166667,
109 .sleepdata = (u8[]){ 2, 0x84, 0x03 }, 109 .sleepdata = (u8[]){ 2, 0x84, 0x03 },
@@ -126,8 +126,8 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf)
126 126
127static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { 127static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
128 .name = "Thomson dtt7520x", 128 .name = "Thomson dtt7520x",
129 .min = 185000000, 129 .min = 185 * MHz,
130 .max = 900000000, 130 .max = 900 * MHz,
131 .set = thomson_dtt7520x_bw, 131 .set = thomson_dtt7520x_bw,
132 .iffreq = 36166667, 132 .iffreq = 36166667,
133 .count = 7, 133 .count = 7,
@@ -144,8 +144,8 @@ static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
144 144
145static const struct dvb_pll_desc dvb_pll_lg_z201 = { 145static const struct dvb_pll_desc dvb_pll_lg_z201 = {
146 .name = "LG z201", 146 .name = "LG z201",
147 .min = 174000000, 147 .min = 174 * MHz,
148 .max = 862000000, 148 .max = 862 * MHz,
149 .iffreq= 36166667, 149 .iffreq= 36166667,
150 .sleepdata = (u8[]){ 2, 0xbc, 0x03 }, 150 .sleepdata = (u8[]){ 2, 0xbc, 0x03 },
151 .count = 5, 151 .count = 5,
@@ -160,8 +160,8 @@ static const struct dvb_pll_desc dvb_pll_lg_z201 = {
160 160
161static const struct dvb_pll_desc dvb_pll_unknown_1 = { 161static const struct dvb_pll_desc dvb_pll_unknown_1 = {
162 .name = "unknown 1", /* used by dntv live dvb-t */ 162 .name = "unknown 1", /* used by dntv live dvb-t */
163 .min = 174000000, 163 .min = 174 * MHz,
164 .max = 862000000, 164 .max = 862 * MHz,
165 .iffreq= 36166667, 165 .iffreq= 36166667,
166 .count = 9, 166 .count = 9,
167 .entries = { 167 .entries = {
@@ -182,8 +182,8 @@ static const struct dvb_pll_desc dvb_pll_unknown_1 = {
182 */ 182 */
183static const struct dvb_pll_desc dvb_pll_tua6010xs = { 183static const struct dvb_pll_desc dvb_pll_tua6010xs = {
184 .name = "Infineon TUA6010XS", 184 .name = "Infineon TUA6010XS",
185 .min = 44250000, 185 .min = 44250 * kHz,
186 .max = 858000000, 186 .max = 858 * MHz,
187 .iffreq= 36125000, 187 .iffreq= 36125000,
188 .count = 3, 188 .count = 3,
189 .entries = { 189 .entries = {
@@ -196,8 +196,8 @@ static const struct dvb_pll_desc dvb_pll_tua6010xs = {
196/* Panasonic env57h1xd5 (some Philips PLL ?) */ 196/* Panasonic env57h1xd5 (some Philips PLL ?) */
197static const struct dvb_pll_desc dvb_pll_env57h1xd5 = { 197static const struct dvb_pll_desc dvb_pll_env57h1xd5 = {
198 .name = "Panasonic ENV57H1XD5", 198 .name = "Panasonic ENV57H1XD5",
199 .min = 44250000, 199 .min = 44250 * kHz,
200 .max = 858000000, 200 .max = 858 * MHz,
201 .iffreq= 36125000, 201 .iffreq= 36125000,
202 .count = 4, 202 .count = 4,
203 .entries = { 203 .entries = {
@@ -220,8 +220,8 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf)
220 220
221static const struct dvb_pll_desc dvb_pll_tda665x = { 221static const struct dvb_pll_desc dvb_pll_tda665x = {
222 .name = "Philips TDA6650/TDA6651", 222 .name = "Philips TDA6650/TDA6651",
223 .min = 44250000, 223 .min = 44250 * kHz,
224 .max = 858000000, 224 .max = 858 * MHz,
225 .set = tda665x_bw, 225 .set = tda665x_bw,
226 .iffreq= 36166667, 226 .iffreq= 36166667,
227 .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab }, 227 .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab },
@@ -254,8 +254,8 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf)
254 254
255static const struct dvb_pll_desc dvb_pll_tua6034 = { 255static const struct dvb_pll_desc dvb_pll_tua6034 = {
256 .name = "Infineon TUA6034", 256 .name = "Infineon TUA6034",
257 .min = 44250000, 257 .min = 44250 * kHz,
258 .max = 858000000, 258 .max = 858 * MHz,
259 .iffreq= 36166667, 259 .iffreq= 36166667,
260 .count = 3, 260 .count = 3,
261 .set = tua6034_bw, 261 .set = tua6034_bw,
@@ -278,8 +278,8 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf)
278 278
279static const struct dvb_pll_desc dvb_pll_tded4 = { 279static const struct dvb_pll_desc dvb_pll_tded4 = {
280 .name = "ALPS TDED4", 280 .name = "ALPS TDED4",
281 .min = 47000000, 281 .min = 47 * MHz,
282 .max = 863000000, 282 .max = 863 * MHz,
283 .iffreq= 36166667, 283 .iffreq= 36166667,
284 .set = tded4_bw, 284 .set = tded4_bw,
285 .count = 4, 285 .count = 4,
@@ -296,8 +296,8 @@ static const struct dvb_pll_desc dvb_pll_tded4 = {
296 */ 296 */
297static const struct dvb_pll_desc dvb_pll_tdhu2 = { 297static const struct dvb_pll_desc dvb_pll_tdhu2 = {
298 .name = "ALPS TDHU2", 298 .name = "ALPS TDHU2",
299 .min = 54000000, 299 .min = 54 * MHz,
300 .max = 864000000, 300 .max = 864 * MHz,
301 .iffreq= 44000000, 301 .iffreq= 44000000,
302 .count = 4, 302 .count = 4,
303 .entries = { 303 .entries = {
@@ -313,8 +313,8 @@ static const struct dvb_pll_desc dvb_pll_tdhu2 = {
313 */ 313 */
314static const struct dvb_pll_desc dvb_pll_samsung_tbmv = { 314static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
315 .name = "Samsung TBMV30111IN / TBMV30712IN1", 315 .name = "Samsung TBMV30111IN / TBMV30712IN1",
316 .min = 54000000, 316 .min = 54 * MHz,
317 .max = 860000000, 317 .max = 860 * MHz,
318 .iffreq= 44000000, 318 .iffreq= 44000000,
319 .count = 6, 319 .count = 6,
320 .entries = { 320 .entries = {
@@ -332,8 +332,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
332 */ 332 */
333static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = { 333static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
334 .name = "Philips SD1878", 334 .name = "Philips SD1878",
335 .min = 950000, 335 .min = 950 * MHz,
336 .max = 2150000, 336 .max = 2150 * MHz,
337 .iffreq= 249, /* zero-IF, offset 249 is to round up */ 337 .iffreq= 249, /* zero-IF, offset 249 is to round up */
338 .count = 4, 338 .count = 4,
339 .entries = { 339 .entries = {
@@ -398,8 +398,8 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
398 398
399static const struct dvb_pll_desc dvb_pll_opera1 = { 399static const struct dvb_pll_desc dvb_pll_opera1 = {
400 .name = "Opera Tuner", 400 .name = "Opera Tuner",
401 .min = 900000, 401 .min = 900 * MHz,
402 .max = 2250000, 402 .max = 2250 * MHz,
403 .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 }, 403 .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 },
404 .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 }, 404 .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 },
405 .iffreq= 0, 405 .iffreq= 0,
@@ -445,8 +445,8 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
445/* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */ 445/* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */
446static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { 446static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
447 .name = "Samsung DTOS403IH102A", 447 .name = "Samsung DTOS403IH102A",
448 .min = 44250000, 448 .min = 44250 * kHz,
449 .max = 858000000, 449 .max = 858 * MHz,
450 .iffreq = 36125000, 450 .iffreq = 36125000,
451 .count = 8, 451 .count = 8,
452 .set = samsung_dtos403ih102a_set, 452 .set = samsung_dtos403ih102a_set,
@@ -465,8 +465,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
465/* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */ 465/* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */
466static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { 466static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
467 .name = "Samsung TDTC9251DH0", 467 .name = "Samsung TDTC9251DH0",
468 .min = 48000000, 468 .min = 48 * MHz,
469 .max = 863000000, 469 .max = 863 * MHz,
470 .iffreq = 36166667, 470 .iffreq = 36166667,
471 .count = 3, 471 .count = 3,
472 .entries = { 472 .entries = {
@@ -479,8 +479,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
479/* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */ 479/* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */
480static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { 480static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
481 .name = "Samsung TBDU18132", 481 .name = "Samsung TBDU18132",
482 .min = 950000, 482 .min = 950 * MHz,
483 .max = 2150000, /* guesses */ 483 .max = 2150 * MHz, /* guesses */
484 .iffreq = 0, 484 .iffreq = 0,
485 .count = 2, 485 .count = 2,
486 .entries = { 486 .entries = {
@@ -500,8 +500,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
500/* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */ 500/* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */
501static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { 501static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
502 .name = "Samsung TBMU24112", 502 .name = "Samsung TBMU24112",
503 .min = 950000, 503 .min = 950 * MHz,
504 .max = 2150000, /* guesses */ 504 .max = 2150 * MHz, /* guesses */
505 .iffreq = 0, 505 .iffreq = 0,
506 .count = 2, 506 .count = 2,
507 .entries = { 507 .entries = {
@@ -521,8 +521,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
521 * 822 - 862 1 * 0 0 1 0 0 0 0x88 */ 521 * 822 - 862 1 * 0 0 1 0 0 0 0x88 */
522static const struct dvb_pll_desc dvb_pll_alps_tdee4 = { 522static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
523 .name = "ALPS TDEE4", 523 .name = "ALPS TDEE4",
524 .min = 47000000, 524 .min = 47 * MHz,
525 .max = 862000000, 525 .max = 862 * MHz,
526 .iffreq = 36125000, 526 .iffreq = 36125000,
527 .count = 4, 527 .count = 4,
528 .entries = { 528 .entries = {
@@ -537,8 +537,8 @@ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
537/* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */ 537/* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */
538static const struct dvb_pll_desc dvb_pll_tua6034_friio = { 538static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
539 .name = "Infineon TUA6034 ISDB-T (Friio)", 539 .name = "Infineon TUA6034 ISDB-T (Friio)",
540 .min = 90000000, 540 .min = 90 * MHz,
541 .max = 770000000, 541 .max = 770 * MHz,
542 .iffreq = 57000000, 542 .iffreq = 57000000,
543 .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 }, 543 .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 },
544 .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b }, 544 .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b },
@@ -553,8 +553,8 @@ static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
553/* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */ 553/* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */
554static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = { 554static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = {
555 .name = "Philips TDA6651 ISDB-T (EarthSoft PT1)", 555 .name = "Philips TDA6651 ISDB-T (EarthSoft PT1)",
556 .min = 90000000, 556 .min = 90 * MHz,
557 .max = 770000000, 557 .max = 770 * MHz,
558 .iffreq = 57000000, 558 .iffreq = 57000000,
559 .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 }, 559 .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 },
560 .count = 10, 560 .count = 10,
@@ -610,9 +610,6 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
610 u32 div; 610 u32 div;
611 int i; 611 int i;
612 612
613 if (frequency && (frequency < desc->min || frequency > desc->max))
614 return -EINVAL;
615
616 for (i = 0; i < desc->count; i++) { 613 for (i = 0; i < desc->count; i++) {
617 if (frequency > desc->entries[i].limit) 614 if (frequency > desc->entries[i].limit)
618 continue; 615 continue;
@@ -799,7 +796,6 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
799 struct dvb_pll_priv *priv = NULL; 796 struct dvb_pll_priv *priv = NULL;
800 int ret; 797 int ret;
801 const struct dvb_pll_desc *desc; 798 const struct dvb_pll_desc *desc;
802 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
803 799
804 b1 = kmalloc(1, GFP_KERNEL); 800 b1 = kmalloc(1, GFP_KERNEL);
805 if (!b1) 801 if (!b1)
@@ -845,18 +841,12 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
845 841
846 strncpy(fe->ops.tuner_ops.info.name, desc->name, 842 strncpy(fe->ops.tuner_ops.info.name, desc->name,
847 sizeof(fe->ops.tuner_ops.info.name)); 843 sizeof(fe->ops.tuner_ops.info.name));
848 switch (c->delivery_system) { 844
849 case SYS_DVBS: 845 fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
850 case SYS_DVBS2: 846 fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
851 case SYS_TURBO: 847
852 case SYS_ISDBS: 848 dprintk("%s tuner, frequency range: %u...%u\n",
853 fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz; 849 desc->name, desc->min, desc->max);
854 fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz;
855 break;
856 default:
857 fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
858 fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
859 }
860 850
861 if (!desc->initdata) 851 if (!desc->initdata)
862 fe->ops.tuner_ops.init = NULL; 852 fe->ops.tuner_ops.init = NULL;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index ca5d92942820..41d470d9ca94 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1918,7 +1918,6 @@ static int tc358743_probe_of(struct tc358743_state *state)
1918 ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint); 1918 ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint);
1919 if (ret) { 1919 if (ret) {
1920 dev_err(dev, "failed to parse endpoint\n"); 1920 dev_err(dev, "failed to parse endpoint\n");
1921 ret = ret;
1922 goto put_node; 1921 goto put_node;
1923 } 1922 }
1924 1923
diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c
index 4e9db1fed697..c71a34ae6383 100644
--- a/drivers/media/media-request.c
+++ b/drivers/media/media-request.c
@@ -238,6 +238,9 @@ static const struct file_operations request_fops = {
238 .owner = THIS_MODULE, 238 .owner = THIS_MODULE,
239 .poll = media_request_poll, 239 .poll = media_request_poll,
240 .unlocked_ioctl = media_request_ioctl, 240 .unlocked_ioctl = media_request_ioctl,
241#ifdef CONFIG_COMPAT
242 .compat_ioctl = media_request_ioctl,
243#endif /* CONFIG_COMPAT */
241 .release = media_request_close, 244 .release = media_request_close,
242}; 245};
243 246
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 452eb9b42140..447baaebca44 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1844,14 +1844,12 @@ fail_mutex_destroy:
1844static void cio2_pci_remove(struct pci_dev *pci_dev) 1844static void cio2_pci_remove(struct pci_dev *pci_dev)
1845{ 1845{
1846 struct cio2_device *cio2 = pci_get_drvdata(pci_dev); 1846 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1847 unsigned int i;
1848 1847
1848 media_device_unregister(&cio2->media_dev);
1849 cio2_notifier_exit(cio2); 1849 cio2_notifier_exit(cio2);
1850 cio2_queues_exit(cio2);
1850 cio2_fbpt_exit_dummy(cio2); 1851 cio2_fbpt_exit_dummy(cio2);
1851 for (i = 0; i < CIO2_QUEUES; i++)
1852 cio2_queue_exit(cio2, &cio2->queue[i]);
1853 v4l2_device_unregister(&cio2->v4l2_dev); 1852 v4l2_device_unregister(&cio2->v4l2_dev);
1854 media_device_unregister(&cio2->media_dev);
1855 media_device_cleanup(&cio2->media_dev); 1853 media_device_cleanup(&cio2->media_dev);
1856 mutex_destroy(&cio2->lock); 1854 mutex_destroy(&cio2->lock);
1857} 1855}
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 77fb7987b42f..13f2828d880d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev)
1587 1587
1588static void isp_unregister_entities(struct isp_device *isp) 1588static void isp_unregister_entities(struct isp_device *isp)
1589{ 1589{
1590 media_device_unregister(&isp->media_dev);
1591
1590 omap3isp_csi2_unregister_entities(&isp->isp_csi2a); 1592 omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
1591 omap3isp_ccp2_unregister_entities(&isp->isp_ccp2); 1593 omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
1592 omap3isp_ccdc_unregister_entities(&isp->isp_ccdc); 1594 omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
@@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp)
1597 omap3isp_stat_unregister_entities(&isp->isp_hist); 1599 omap3isp_stat_unregister_entities(&isp->isp_hist);
1598 1600
1599 v4l2_device_unregister(&isp->v4l2_dev); 1601 v4l2_device_unregister(&isp->v4l2_dev);
1600 media_device_unregister(&isp->media_dev);
1601 media_device_cleanup(&isp->media_dev); 1602 media_device_cleanup(&isp->media_dev);
1602} 1603}
1603 1604
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 1eb9132bfc85..013cdebecbc4 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info");
42#define MAX_WIDTH 4096U 42#define MAX_WIDTH 4096U
43#define MIN_WIDTH 640U 43#define MIN_WIDTH 640U
44#define MAX_HEIGHT 2160U 44#define MAX_HEIGHT 2160U
45#define MIN_HEIGHT 480U 45#define MIN_HEIGHT 360U
46 46
47#define dprintk(dev, fmt, arg...) \ 47#define dprintk(dev, fmt, arg...) \
48 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) 48 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
@@ -304,7 +304,8 @@ restart:
304 for (; p < p_out + sz; p++) { 304 for (; p < p_out + sz; p++) {
305 u32 copy; 305 u32 copy;
306 306
307 p = memchr(p, magic[ctx->comp_magic_cnt], sz); 307 p = memchr(p, magic[ctx->comp_magic_cnt],
308 p_out + sz - p);
308 if (!p) { 309 if (!p) {
309 ctx->comp_magic_cnt = 0; 310 ctx->comp_magic_cnt = 0;
310 break; 311 break;
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index af150a0395df..d82db738f174 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1009,7 +1009,7 @@ static const struct v4l2_m2m_ops m2m_ops = {
1009 1009
1010static const struct media_device_ops m2m_media_ops = { 1010static const struct media_device_ops m2m_media_ops = {
1011 .req_validate = vb2_request_validate, 1011 .req_validate = vb2_request_validate,
1012 .req_queue = vb2_m2m_request_queue, 1012 .req_queue = v4l2_m2m_request_queue,
1013}; 1013};
1014 1014
1015static int vim2m_probe(struct platform_device *pdev) 1015static int vim2m_probe(struct platform_device *pdev)
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index fce9d6f4b7c9..3137f5d89d80 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -426,10 +426,10 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
426 426
427 /* append the packet to the frame buffer */ 427 /* append the packet to the frame buffer */
428 if (len > 0) { 428 if (len > 0) {
429 if (gspca_dev->image_len + len > gspca_dev->pixfmt.sizeimage) { 429 if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) {
430 gspca_err(gspca_dev, "frame overflow %d > %d\n", 430 gspca_err(gspca_dev, "frame overflow %d > %d\n",
431 gspca_dev->image_len + len, 431 gspca_dev->image_len + len,
432 gspca_dev->pixfmt.sizeimage); 432 PAGE_ALIGN(gspca_dev->pixfmt.sizeimage));
433 packet_type = DISCARD_PACKET; 433 packet_type = DISCARD_PACKET;
434 } else { 434 } else {
435/* !! image is NULL only when last pkt is LAST or DISCARD 435/* !! image is NULL only when last pkt is LAST or DISCARD
@@ -1297,18 +1297,19 @@ static int gspca_queue_setup(struct vb2_queue *vq,
1297 unsigned int sizes[], struct device *alloc_devs[]) 1297 unsigned int sizes[], struct device *alloc_devs[])
1298{ 1298{
1299 struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq); 1299 struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq);
1300 unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
1300 1301
1301 if (*nplanes) 1302 if (*nplanes)
1302 return sizes[0] < gspca_dev->pixfmt.sizeimage ? -EINVAL : 0; 1303 return sizes[0] < size ? -EINVAL : 0;
1303 *nplanes = 1; 1304 *nplanes = 1;
1304 sizes[0] = gspca_dev->pixfmt.sizeimage; 1305 sizes[0] = size;
1305 return 0; 1306 return 0;
1306} 1307}
1307 1308
1308static int gspca_buffer_prepare(struct vb2_buffer *vb) 1309static int gspca_buffer_prepare(struct vb2_buffer *vb)
1309{ 1310{
1310 struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue); 1311 struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue);
1311 unsigned long size = gspca_dev->pixfmt.sizeimage; 1312 unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
1312 1313
1313 if (vb2_plane_size(vb, 0) < size) { 1314 if (vb2_plane_size(vb, 0) < size) {
1314 gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n", 1315 gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n",
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 6e37950292cd..5f2b033a7a42 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1664,6 +1664,11 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
1664 p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME) 1664 p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
1665 return -EINVAL; 1665 return -EINVAL;
1666 1666
1667 if (p_mpeg2_slice_params->pad ||
1668 p_mpeg2_slice_params->picture.pad ||
1669 p_mpeg2_slice_params->sequence.pad)
1670 return -EINVAL;
1671
1667 return 0; 1672 return 0;
1668 1673
1669 case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: 1674 case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index a3ef1f50a4b3..481e3c65cf97 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
193} 193}
194EXPORT_SYMBOL_GPL(v4l2_event_pending); 194EXPORT_SYMBOL_GPL(v4l2_event_pending);
195 195
196static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
197{
198 struct v4l2_fh *fh = sev->fh;
199 unsigned int i;
200
201 lockdep_assert_held(&fh->subscribe_lock);
202 assert_spin_locked(&fh->vdev->fh_lock);
203
204 /* Remove any pending events for this subscription */
205 for (i = 0; i < sev->in_use; i++) {
206 list_del(&sev->events[sev_pos(sev, i)].list);
207 fh->navailable--;
208 }
209 list_del(&sev->list);
210}
211
196int v4l2_event_subscribe(struct v4l2_fh *fh, 212int v4l2_event_subscribe(struct v4l2_fh *fh,
197 const struct v4l2_event_subscription *sub, unsigned elems, 213 const struct v4l2_event_subscription *sub, unsigned elems,
198 const struct v4l2_subscribed_event_ops *ops) 214 const struct v4l2_subscribed_event_ops *ops)
@@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
224 240
225 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 241 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
226 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); 242 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
243 if (!found_ev)
244 list_add(&sev->list, &fh->subscribed);
227 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 245 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
228 246
229 if (found_ev) { 247 if (found_ev) {
230 /* Already listening */ 248 /* Already listening */
231 kvfree(sev); 249 kvfree(sev);
232 goto out_unlock; 250 } else if (sev->ops && sev->ops->add) {
233 }
234
235 if (sev->ops && sev->ops->add) {
236 ret = sev->ops->add(sev, elems); 251 ret = sev->ops->add(sev, elems);
237 if (ret) { 252 if (ret) {
253 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
254 __v4l2_event_unsubscribe(sev);
255 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
238 kvfree(sev); 256 kvfree(sev);
239 goto out_unlock;
240 } 257 }
241 } 258 }
242 259
243 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
244 list_add(&sev->list, &fh->subscribed);
245 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
246
247out_unlock:
248 mutex_unlock(&fh->subscribe_lock); 260 mutex_unlock(&fh->subscribe_lock);
249 261
250 return ret; 262 return ret;
@@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
279{ 291{
280 struct v4l2_subscribed_event *sev; 292 struct v4l2_subscribed_event *sev;
281 unsigned long flags; 293 unsigned long flags;
282 int i;
283 294
284 if (sub->type == V4L2_EVENT_ALL) { 295 if (sub->type == V4L2_EVENT_ALL) {
285 v4l2_event_unsubscribe_all(fh); 296 v4l2_event_unsubscribe_all(fh);
@@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
291 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 302 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
292 303
293 sev = v4l2_event_subscribed(fh, sub->type, sub->id); 304 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
294 if (sev != NULL) { 305 if (sev != NULL)
295 /* Remove any pending events for this subscription */ 306 __v4l2_event_unsubscribe(sev);
296 for (i = 0; i < sev->in_use; i++) {
297 list_del(&sev->events[sev_pos(sev, i)].list);
298 fh->navailable--;
299 }
300 list_del(&sev->list);
301 }
302 307
303 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 308 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
304 309
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index d7806db222d8..1ed2465972ac 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -953,7 +953,7 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
953} 953}
954EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); 954EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
955 955
956void vb2_m2m_request_queue(struct media_request *req) 956void v4l2_m2m_request_queue(struct media_request *req)
957{ 957{
958 struct media_request_object *obj, *obj_safe; 958 struct media_request_object *obj, *obj_safe;
959 struct v4l2_m2m_ctx *m2m_ctx = NULL; 959 struct v4l2_m2m_ctx *m2m_ctx = NULL;
@@ -997,7 +997,7 @@ void vb2_m2m_request_queue(struct media_request *req)
997 if (m2m_ctx) 997 if (m2m_ctx)
998 v4l2_m2m_try_schedule(m2m_ctx); 998 v4l2_m2m_try_schedule(m2m_ctx);
999} 999}
1000EXPORT_SYMBOL_GPL(vb2_m2m_request_queue); 1000EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
1001 1001
1002/* Videobuf2 ioctl helpers */ 1002/* Videobuf2 ioctl helpers */
1003 1003
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 8f9d6964173e..b99a194ce5a4 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -263,6 +263,11 @@ static const struct file_operations fops = {
263#endif 263#endif
264}; 264};
265 265
266static void cros_ec_class_release(struct device *dev)
267{
268 kfree(to_cros_ec_dev(dev));
269}
270
266static void cros_ec_sensors_register(struct cros_ec_dev *ec) 271static void cros_ec_sensors_register(struct cros_ec_dev *ec)
267{ 272{
268 /* 273 /*
@@ -395,7 +400,7 @@ static int ec_device_probe(struct platform_device *pdev)
395 int retval = -ENOMEM; 400 int retval = -ENOMEM;
396 struct device *dev = &pdev->dev; 401 struct device *dev = &pdev->dev;
397 struct cros_ec_platform *ec_platform = dev_get_platdata(dev); 402 struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
398 struct cros_ec_dev *ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL); 403 struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
399 404
400 if (!ec) 405 if (!ec)
401 return retval; 406 return retval;
@@ -417,6 +422,7 @@ static int ec_device_probe(struct platform_device *pdev)
417 ec->class_dev.devt = MKDEV(ec_major, pdev->id); 422 ec->class_dev.devt = MKDEV(ec_major, pdev->id);
418 ec->class_dev.class = &cros_class; 423 ec->class_dev.class = &cros_class;
419 ec->class_dev.parent = dev; 424 ec->class_dev.parent = dev;
425 ec->class_dev.release = cros_ec_class_release;
420 426
421 retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name); 427 retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name);
422 if (retval) { 428 if (retval) {
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index b2a0340f277e..d8e3cc2dc747 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
132MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids); 132MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
133#endif 133#endif
134 134
135static inline const struct atmel_ssc_platform_data * __init 135static inline const struct atmel_ssc_platform_data *
136 atmel_ssc_get_driver_data(struct platform_device *pdev) 136 atmel_ssc_get_driver_data(struct platform_device *pdev)
137{ 137{
138 if (pdev->dev.of_node) { 138 if (pdev->dev.of_node) {
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index c824329f7012..0e4193cb08cf 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -416,7 +416,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
416 if (err) 416 if (err)
417 goto error_window; 417 goto error_window;
418 err = scif_map_page(&window->num_pages_lookup.lookup[j], 418 err = scif_map_page(&window->num_pages_lookup.lookup[j],
419 vmalloc_dma_phys ? 419 vmalloc_num_pages ?
420 vmalloc_to_page(&window->num_pages[i]) : 420 vmalloc_to_page(&window->num_pages[i]) :
421 virt_to_page(&window->num_pages[i]), 421 virt_to_page(&window->num_pages[i]),
422 remote_dev); 422 remote_dev);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 313da3150262..1540a7785e14 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -27,6 +27,9 @@
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <asm/uv/uv_hub.h> 29#include <asm/uv/uv_hub.h>
30
31#include <linux/nospec.h>
32
30#include "gru.h" 33#include "gru.h"
31#include "grutables.h" 34#include "grutables.h"
32#include "gruhandles.h" 35#include "gruhandles.h"
@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
196 /* Currently, only dump by gid is implemented */ 199 /* Currently, only dump by gid is implemented */
197 if (req.gid >= gru_max_gids) 200 if (req.gid >= gru_max_gids)
198 return -EINVAL; 201 return -EINVAL;
202 req.gid = array_index_nospec(req.gid, gru_max_gids);
199 203
200 gru = GID_TO_GRU(req.gid); 204 gru = GID_TO_GRU(req.gid);
201 ubuf = req.buf; 205 ubuf = req.buf;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 7bfd366d970d..c4115bae5db1 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -12,6 +12,7 @@
12 * - JMicron (hardware and technical support) 12 * - JMicron (hardware and technical support)
13 */ 13 */
14 14
15#include <linux/bitfield.h>
15#include <linux/string.h> 16#include <linux/string.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/highmem.h> 18#include <linux/highmem.h>
@@ -462,6 +463,9 @@ struct intel_host {
462 u32 dsm_fns; 463 u32 dsm_fns;
463 int drv_strength; 464 int drv_strength;
464 bool d3_retune; 465 bool d3_retune;
466 bool rpm_retune_ok;
467 u32 glk_rx_ctrl1;
468 u32 glk_tun_val;
465}; 469};
466 470
467static const guid_t intel_dsm_guid = 471static const guid_t intel_dsm_guid =
@@ -791,6 +795,77 @@ cleanup:
791 return ret; 795 return ret;
792} 796}
793 797
798#ifdef CONFIG_PM
799#define GLK_RX_CTRL1 0x834
800#define GLK_TUN_VAL 0x840
801#define GLK_PATH_PLL GENMASK(13, 8)
802#define GLK_DLY GENMASK(6, 0)
803/* Workaround firmware failing to restore the tuning value */
804static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
805{
806 struct sdhci_pci_slot *slot = chip->slots[0];
807 struct intel_host *intel_host = sdhci_pci_priv(slot);
808 struct sdhci_host *host = slot->host;
809 u32 glk_rx_ctrl1;
810 u32 glk_tun_val;
811 u32 dly;
812
813 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
814 return;
815
816 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
817 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
818
819 if (susp) {
820 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
821 intel_host->glk_tun_val = glk_tun_val;
822 return;
823 }
824
825 if (!intel_host->glk_tun_val)
826 return;
827
828 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
829 intel_host->rpm_retune_ok = true;
830 return;
831 }
832
833 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
834 (intel_host->glk_tun_val << 1));
835 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
836 return;
837
838 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
839 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
840
841 intel_host->rpm_retune_ok = true;
842 chip->rpm_retune = true;
843 mmc_retune_needed(host->mmc);
844 pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
845}
846
847static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
848{
849 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
850 !chip->rpm_retune)
851 glk_rpm_retune_wa(chip, susp);
852}
853
854static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
855{
856 glk_rpm_retune_chk(chip, true);
857
858 return sdhci_cqhci_runtime_suspend(chip);
859}
860
861static int glk_runtime_resume(struct sdhci_pci_chip *chip)
862{
863 glk_rpm_retune_chk(chip, false);
864
865 return sdhci_cqhci_runtime_resume(chip);
866}
867#endif
868
794#ifdef CONFIG_ACPI 869#ifdef CONFIG_ACPI
795static int ni_set_max_freq(struct sdhci_pci_slot *slot) 870static int ni_set_max_freq(struct sdhci_pci_slot *slot)
796{ 871{
@@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
879 .resume = sdhci_cqhci_resume, 954 .resume = sdhci_cqhci_resume,
880#endif 955#endif
881#ifdef CONFIG_PM 956#ifdef CONFIG_PM
882 .runtime_suspend = sdhci_cqhci_runtime_suspend, 957 .runtime_suspend = glk_runtime_suspend,
883 .runtime_resume = sdhci_cqhci_runtime_resume, 958 .runtime_resume = glk_runtime_resume,
884#endif 959#endif
885 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 960 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
886 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 961 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
@@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1762 device_init_wakeup(&pdev->dev, true); 1837 device_init_wakeup(&pdev->dev, true);
1763 1838
1764 if (slot->cd_idx >= 0) { 1839 if (slot->cd_idx >= 0) {
1765 ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, 1840 ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
1766 slot->cd_override_level, 0, NULL); 1841 slot->cd_override_level, 0, NULL);
1842 if (ret && ret != -EPROBE_DEFER)
1843 ret = mmc_gpiod_request_cd(host->mmc, NULL,
1844 slot->cd_idx,
1845 slot->cd_override_level,
1846 0, NULL);
1767 if (ret == -EPROBE_DEFER) 1847 if (ret == -EPROBE_DEFER)
1768 goto remove; 1848 goto remove;
1769 1849
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index e514d57a0419..aa983422aa97 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
207config MTD_DOCG3 207config MTD_DOCG3
208 tristate "M-Systems Disk-On-Chip G3" 208 tristate "M-Systems Disk-On-Chip G3"
209 select BCH 209 select BCH
210 select BCH_CONST_PARAMS 210 select BCH_CONST_PARAMS if !MTD_NAND_BCH
211 select BITREVERSE 211 select BITREVERSE
212 help 212 help
213 This provides an MTD device driver for the M-Systems DiskOnChip 213 This provides an MTD device driver for the M-Systems DiskOnChip
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 784c6e1a0391..fd5fe12d7461 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -221,7 +221,14 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
221 info->mtd = info->subdev[0].mtd; 221 info->mtd = info->subdev[0].mtd;
222 ret = 0; 222 ret = 0;
223 } else if (info->num_subdev > 1) { 223 } else if (info->num_subdev > 1) {
224 struct mtd_info *cdev[nr]; 224 struct mtd_info **cdev;
225
226 cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
227 if (!cdev) {
228 ret = -ENOMEM;
229 goto err;
230 }
231
225 /* 232 /*
226 * We detected multiple devices. Concatenate them together. 233 * We detected multiple devices. Concatenate them together.
227 */ 234 */
@@ -230,6 +237,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
230 237
231 info->mtd = mtd_concat_create(cdev, info->num_subdev, 238 info->mtd = mtd_concat_create(cdev, info->num_subdev,
232 plat->name); 239 plat->name);
240 kfree(cdev);
233 if (info->mtd == NULL) { 241 if (info->mtd == NULL) {
234 ret = -ENXIO; 242 ret = -ENXIO;
235 goto err; 243 goto err;
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
index 56cde38b92c0..044adf913854 100644
--- a/drivers/mtd/nand/bbt.c
+++ b/drivers/mtd/nand/bbt.c
@@ -27,7 +27,8 @@ int nanddev_bbt_init(struct nand_device *nand)
27 unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block, 27 unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
28 BITS_PER_LONG); 28 BITS_PER_LONG);
29 29
30 nand->bbt.cache = kzalloc(nwords, GFP_KERNEL); 30 nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
31 GFP_KERNEL);
31 if (!nand->bbt.cache) 32 if (!nand->bbt.cache)
32 return -ENOMEM; 33 return -ENOMEM;
33 34
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index fb33f6be7c4f..ad720494e8f7 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -2032,8 +2032,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2032 int ret; 2032 int ret;
2033 2033
2034 nand_np = dev->of_node; 2034 nand_np = dev->of_node;
2035 nfc_np = of_find_compatible_node(dev->of_node, NULL, 2035 nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
2036 "atmel,sama5d3-nfc");
2037 if (!nfc_np) { 2036 if (!nfc_np) {
2038 dev_err(dev, "Could not find device node for sama5d3-nfc\n"); 2037 dev_err(dev, "Could not find device node for sama5d3-nfc\n");
2039 return -ENODEV; 2038 return -ENODEV;
@@ -2447,15 +2446,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
2447 } 2446 }
2448 2447
2449 if (caps->legacy_of_bindings) { 2448 if (caps->legacy_of_bindings) {
2449 struct device_node *nfc_node;
2450 u32 ale_offs = 21; 2450 u32 ale_offs = 21;
2451 2451
2452 /* 2452 /*
2453 * If we are parsing legacy DT props and the DT contains a 2453 * If we are parsing legacy DT props and the DT contains a
2454 * valid NFC node, forward the request to the sama5 logic. 2454 * valid NFC node, forward the request to the sama5 logic.
2455 */ 2455 */
2456 if (of_find_compatible_node(pdev->dev.of_node, NULL, 2456 nfc_node = of_get_compatible_child(pdev->dev.of_node,
2457 "atmel,sama5d3-nfc")) 2457 "atmel,sama5d3-nfc");
2458 if (nfc_node) {
2458 caps = &atmel_sama5_nand_caps; 2459 caps = &atmel_sama5_nand_caps;
2460 of_node_put(nfc_node);
2461 }
2459 2462
2460 /* 2463 /*
2461 * Even if the compatible says we are dealing with an 2464 * Even if the compatible says we are dealing with an
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 05bd0779fe9b..71050a0b31df 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -590,7 +590,6 @@ retry:
590 590
591/** 591/**
592 * panic_nand_wait - [GENERIC] wait until the command is done 592 * panic_nand_wait - [GENERIC] wait until the command is done
593 * @mtd: MTD device structure
594 * @chip: NAND chip structure 593 * @chip: NAND chip structure
595 * @timeo: timeout 594 * @timeo: timeout
596 * 595 *
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index ef75dfa62a4f..699d3cf49c6d 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -150,15 +150,15 @@
150#define NAND_VERSION_MINOR_SHIFT 16 150#define NAND_VERSION_MINOR_SHIFT 16
151 151
152/* NAND OP_CMDs */ 152/* NAND OP_CMDs */
153#define PAGE_READ 0x2 153#define OP_PAGE_READ 0x2
154#define PAGE_READ_WITH_ECC 0x3 154#define OP_PAGE_READ_WITH_ECC 0x3
155#define PAGE_READ_WITH_ECC_SPARE 0x4 155#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
156#define PROGRAM_PAGE 0x6 156#define OP_PROGRAM_PAGE 0x6
157#define PAGE_PROGRAM_WITH_ECC 0x7 157#define OP_PAGE_PROGRAM_WITH_ECC 0x7
158#define PROGRAM_PAGE_SPARE 0x9 158#define OP_PROGRAM_PAGE_SPARE 0x9
159#define BLOCK_ERASE 0xa 159#define OP_BLOCK_ERASE 0xa
160#define FETCH_ID 0xb 160#define OP_FETCH_ID 0xb
161#define RESET_DEVICE 0xd 161#define OP_RESET_DEVICE 0xd
162 162
163/* Default Value for NAND_DEV_CMD_VLD */ 163/* Default Value for NAND_DEV_CMD_VLD */
164#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ 164#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
@@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
692 692
693 if (read) { 693 if (read) {
694 if (host->use_ecc) 694 if (host->use_ecc)
695 cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; 695 cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
696 else 696 else
697 cmd = PAGE_READ | PAGE_ACC | LAST_PAGE; 697 cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
698 } else { 698 } else {
699 cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; 699 cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
700 } 700 }
701 701
702 if (host->use_ecc) { 702 if (host->use_ecc) {
@@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host)
1170 * in use. we configure the controller to perform a raw read of 512 1170 * in use. we configure the controller to perform a raw read of 512
1171 * bytes to read onfi params 1171 * bytes to read onfi params
1172 */ 1172 */
1173 nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE); 1173 nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
1174 nandc_set_reg(nandc, NAND_ADDR0, 0); 1174 nandc_set_reg(nandc, NAND_ADDR0, 0);
1175 nandc_set_reg(nandc, NAND_ADDR1, 0); 1175 nandc_set_reg(nandc, NAND_ADDR1, 0);
1176 nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE 1176 nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
@@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
1224 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1224 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1225 1225
1226 nandc_set_reg(nandc, NAND_FLASH_CMD, 1226 nandc_set_reg(nandc, NAND_FLASH_CMD,
1227 BLOCK_ERASE | PAGE_ACC | LAST_PAGE); 1227 OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
1228 nandc_set_reg(nandc, NAND_ADDR0, page_addr); 1228 nandc_set_reg(nandc, NAND_ADDR0, page_addr);
1229 nandc_set_reg(nandc, NAND_ADDR1, 0); 1229 nandc_set_reg(nandc, NAND_ADDR1, 0);
1230 nandc_set_reg(nandc, NAND_DEV0_CFG0, 1230 nandc_set_reg(nandc, NAND_DEV0_CFG0,
@@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column)
1255 if (column == -1) 1255 if (column == -1)
1256 return 0; 1256 return 0;
1257 1257
1258 nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); 1258 nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
1259 nandc_set_reg(nandc, NAND_ADDR0, column); 1259 nandc_set_reg(nandc, NAND_ADDR0, column);
1260 nandc_set_reg(nandc, NAND_ADDR1, 0); 1260 nandc_set_reg(nandc, NAND_ADDR1, 0);
1261 nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, 1261 nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
@@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host)
1276 struct nand_chip *chip = &host->chip; 1276 struct nand_chip *chip = &host->chip;
1277 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1277 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1278 1278
1279 nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); 1279 nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
1280 nandc_set_reg(nandc, NAND_EXEC_CMD, 1); 1280 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
1281 1281
1282 write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); 1282 write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index e24db817154e..04cedd3a2bf6 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
644 ndelay(cqspi->wr_delay); 644 ndelay(cqspi->wr_delay);
645 645
646 while (remaining > 0) { 646 while (remaining > 0) {
647 size_t write_words, mod_bytes;
648
647 write_bytes = remaining > page_size ? page_size : remaining; 649 write_bytes = remaining > page_size ? page_size : remaining;
648 iowrite32_rep(cqspi->ahb_base, txbuf, 650 write_words = write_bytes / 4;
649 DIV_ROUND_UP(write_bytes, 4)); 651 mod_bytes = write_bytes % 4;
652 /* Write 4 bytes at a time then single bytes. */
653 if (write_words) {
654 iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
655 txbuf += (write_words * 4);
656 }
657 if (mod_bytes) {
658 unsigned int temp = 0xFFFFFFFF;
659
660 memcpy(&temp, txbuf, mod_bytes);
661 iowrite32(temp, cqspi->ahb_base);
662 txbuf += mod_bytes;
663 }
650 664
651 if (!wait_for_completion_timeout(&cqspi->transfer_complete, 665 if (!wait_for_completion_timeout(&cqspi->transfer_complete,
652 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { 666 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
@@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
655 goto failwr; 669 goto failwr;
656 } 670 }
657 671
658 txbuf += write_bytes;
659 remaining -= write_bytes; 672 remaining -= write_bytes;
660 673
661 if (remaining > 0) 674 if (remaining > 0)
@@ -996,7 +1009,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
996err_unmap: 1009err_unmap:
997 dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE); 1010 dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
998 1011
999 return 0; 1012 return ret;
1000} 1013}
1001 1014
1002static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, 1015static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 9407ca5f9443..1fdd2834fbcb 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2156 * @nor: pointer to a 'struct spi_nor' 2156 * @nor: pointer to a 'struct spi_nor'
2157 * @addr: offset in the serial flash memory 2157 * @addr: offset in the serial flash memory
2158 * @len: number of bytes to read 2158 * @len: number of bytes to read
2159 * @buf: buffer where the data is copied into 2159 * @buf: buffer where the data is copied into (dma-safe memory)
2160 * 2160 *
2161 * Return: 0 on success, -errno otherwise. 2161 * Return: 0 on success, -errno otherwise.
2162 */ 2162 */
@@ -2522,6 +2522,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
2522} 2522}
2523 2523
2524/** 2524/**
2525 * spi_nor_sort_erase_mask() - sort erase mask
2526 * @map: the erase map of the SPI NOR
2527 * @erase_mask: the erase type mask to be sorted
2528 *
2529 * Replicate the sort done for the map's erase types in BFPT: sort the erase
2530 * mask in ascending order with the smallest erase type size starting from
2531 * BIT(0) in the sorted erase mask.
2532 *
2533 * Return: sorted erase mask.
2534 */
2535static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
2536{
2537 struct spi_nor_erase_type *erase_type = map->erase_type;
2538 int i;
2539 u8 sorted_erase_mask = 0;
2540
2541 if (!erase_mask)
2542 return 0;
2543
2544 /* Replicate the sort done for the map's erase types. */
2545 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
2546 if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
2547 sorted_erase_mask |= BIT(i);
2548
2549 return sorted_erase_mask;
2550}
2551
2552/**
2525 * spi_nor_regions_sort_erase_types() - sort erase types in each region 2553 * spi_nor_regions_sort_erase_types() - sort erase types in each region
2526 * @map: the erase map of the SPI NOR 2554 * @map: the erase map of the SPI NOR
2527 * 2555 *
@@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
2536static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map) 2564static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
2537{ 2565{
2538 struct spi_nor_erase_region *region = map->regions; 2566 struct spi_nor_erase_region *region = map->regions;
2539 struct spi_nor_erase_type *erase_type = map->erase_type;
2540 int i;
2541 u8 region_erase_mask, sorted_erase_mask; 2567 u8 region_erase_mask, sorted_erase_mask;
2542 2568
2543 while (region) { 2569 while (region) {
2544 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; 2570 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
2545 2571
2546 /* Replicate the sort done for the map's erase types. */ 2572 sorted_erase_mask = spi_nor_sort_erase_mask(map,
2547 sorted_erase_mask = 0; 2573 region_erase_mask);
2548 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
2549 if (erase_type[i].size &&
2550 region_erase_mask & BIT(erase_type[i].idx))
2551 sorted_erase_mask |= BIT(i);
2552 2574
2553 /* Overwrite erase mask. */ 2575 /* Overwrite erase mask. */
2554 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) | 2576 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
@@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
2855 * spi_nor_get_map_in_use() - get the configuration map in use 2877 * spi_nor_get_map_in_use() - get the configuration map in use
2856 * @nor: pointer to a 'struct spi_nor' 2878 * @nor: pointer to a 'struct spi_nor'
2857 * @smpt: pointer to the sector map parameter table 2879 * @smpt: pointer to the sector map parameter table
2880 * @smpt_len: sector map parameter table length
2881 *
2882 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
2858 */ 2883 */
2859static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt) 2884static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
2885 u8 smpt_len)
2860{ 2886{
2861 const u32 *ret = NULL; 2887 const u32 *ret;
2862 u32 i, addr; 2888 u8 *buf;
2889 u32 addr;
2863 int err; 2890 int err;
2891 u8 i;
2864 u8 addr_width, read_opcode, read_dummy; 2892 u8 addr_width, read_opcode, read_dummy;
2865 u8 read_data_mask, data_byte, map_id; 2893 u8 read_data_mask, map_id;
2894
2895 /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
2896 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
2897 if (!buf)
2898 return ERR_PTR(-ENOMEM);
2866 2899
2867 addr_width = nor->addr_width; 2900 addr_width = nor->addr_width;
2868 read_dummy = nor->read_dummy; 2901 read_dummy = nor->read_dummy;
2869 read_opcode = nor->read_opcode; 2902 read_opcode = nor->read_opcode;
2870 2903
2871 map_id = 0; 2904 map_id = 0;
2872 i = 0;
2873 /* Determine if there are any optional Detection Command Descriptors */ 2905 /* Determine if there are any optional Detection Command Descriptors */
2874 while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) { 2906 for (i = 0; i < smpt_len; i += 2) {
2907 if (smpt[i] & SMPT_DESC_TYPE_MAP)
2908 break;
2909
2875 read_data_mask = SMPT_CMD_READ_DATA(smpt[i]); 2910 read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
2876 nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]); 2911 nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
2877 nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]); 2912 nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
2878 nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]); 2913 nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
2879 addr = smpt[i + 1]; 2914 addr = smpt[i + 1];
2880 2915
2881 err = spi_nor_read_raw(nor, addr, 1, &data_byte); 2916 err = spi_nor_read_raw(nor, addr, 1, buf);
2882 if (err) 2917 if (err) {
2918 ret = ERR_PTR(err);
2883 goto out; 2919 goto out;
2920 }
2884 2921
2885 /* 2922 /*
2886 * Build an index value that is used to select the Sector Map 2923 * Build an index value that is used to select the Sector Map
2887 * Configuration that is currently in use. 2924 * Configuration that is currently in use.
2888 */ 2925 */
2889 map_id = map_id << 1 | !!(data_byte & read_data_mask); 2926 map_id = map_id << 1 | !!(*buf & read_data_mask);
2890 i = i + 2;
2891 } 2927 }
2892 2928
2893 /* Find the matching configuration map */ 2929 /*
2894 while (SMPT_MAP_ID(smpt[i]) != map_id) { 2930 * If command descriptors are provided, they always precede map
2931 * descriptors in the table. There is no need to start the iteration
2932 * over smpt array all over again.
2933 *
2934 * Find the matching configuration map.
2935 */
2936 ret = ERR_PTR(-EINVAL);
2937 while (i < smpt_len) {
2938 if (SMPT_MAP_ID(smpt[i]) == map_id) {
2939 ret = smpt + i;
2940 break;
2941 }
2942
2943 /*
2944 * If there are no more configuration map descriptors and no
2945 * configuration ID matched the configuration identifier, the
2946 * sector address map is unknown.
2947 */
2895 if (smpt[i] & SMPT_DESC_END) 2948 if (smpt[i] & SMPT_DESC_END)
2896 goto out; 2949 break;
2950
2897 /* increment the table index to the next map */ 2951 /* increment the table index to the next map */
2898 i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1; 2952 i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
2899 } 2953 }
2900 2954
2901 ret = smpt + i;
2902 /* fall through */ 2955 /* fall through */
2903out: 2956out:
2957 kfree(buf);
2904 nor->addr_width = addr_width; 2958 nor->addr_width = addr_width;
2905 nor->read_dummy = read_dummy; 2959 nor->read_dummy = read_dummy;
2906 nor->read_opcode = read_opcode; 2960 nor->read_opcode = read_opcode;
@@ -2941,12 +2995,13 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
2941 const u32 *smpt) 2995 const u32 *smpt)
2942{ 2996{
2943 struct spi_nor_erase_map *map = &nor->erase_map; 2997 struct spi_nor_erase_map *map = &nor->erase_map;
2944 const struct spi_nor_erase_type *erase = map->erase_type; 2998 struct spi_nor_erase_type *erase = map->erase_type;
2945 struct spi_nor_erase_region *region; 2999 struct spi_nor_erase_region *region;
2946 u64 offset; 3000 u64 offset;
2947 u32 region_count; 3001 u32 region_count;
2948 int i, j; 3002 int i, j;
2949 u8 erase_type; 3003 u8 uniform_erase_type, save_uniform_erase_type;
3004 u8 erase_type, regions_erase_type;
2950 3005
2951 region_count = SMPT_MAP_REGION_COUNT(*smpt); 3006 region_count = SMPT_MAP_REGION_COUNT(*smpt);
2952 /* 3007 /*
@@ -2959,7 +3014,8 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
2959 return -ENOMEM; 3014 return -ENOMEM;
2960 map->regions = region; 3015 map->regions = region;
2961 3016
2962 map->uniform_erase_type = 0xff; 3017 uniform_erase_type = 0xff;
3018 regions_erase_type = 0;
2963 offset = 0; 3019 offset = 0;
2964 /* Populate regions. */ 3020 /* Populate regions. */
2965 for (i = 0; i < region_count; i++) { 3021 for (i = 0; i < region_count; i++) {
@@ -2974,12 +3030,40 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
2974 * Save the erase types that are supported in all regions and 3030 * Save the erase types that are supported in all regions and
2975 * can erase the entire flash memory. 3031 * can erase the entire flash memory.
2976 */ 3032 */
2977 map->uniform_erase_type &= erase_type; 3033 uniform_erase_type &= erase_type;
3034
3035 /*
3036 * regions_erase_type mask will indicate all the erase types
3037 * supported in this configuration map.
3038 */
3039 regions_erase_type |= erase_type;
2978 3040
2979 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) + 3041 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
2980 region[i].size; 3042 region[i].size;
2981 } 3043 }
2982 3044
3045 save_uniform_erase_type = map->uniform_erase_type;
3046 map->uniform_erase_type = spi_nor_sort_erase_mask(map,
3047 uniform_erase_type);
3048
3049 if (!regions_erase_type) {
3050 /*
3051 * Roll back to the previous uniform_erase_type mask, SMPT is
3052 * broken.
3053 */
3054 map->uniform_erase_type = save_uniform_erase_type;
3055 return -EINVAL;
3056 }
3057
3058 /*
3059 * BFPT advertises all the erase types supported by all the possible
3060 * map configurations. Mask out the erase types that are not supported
3061 * by the current map configuration.
3062 */
3063 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3064 if (!(regions_erase_type & BIT(erase[i].idx)))
3065 spi_nor_set_erase_type(&erase[i], 0, 0xFF);
3066
2983 spi_nor_region_mark_end(&region[i - 1]); 3067 spi_nor_region_mark_end(&region[i - 1]);
2984 3068
2985 return 0; 3069 return 0;
@@ -3020,9 +3104,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
3020 for (i = 0; i < smpt_header->length; i++) 3104 for (i = 0; i < smpt_header->length; i++)
3021 smpt[i] = le32_to_cpu(smpt[i]); 3105 smpt[i] = le32_to_cpu(smpt[i]);
3022 3106
3023 sector_map = spi_nor_get_map_in_use(nor, smpt); 3107 sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
3024 if (!sector_map) { 3108 if (IS_ERR(sector_map)) {
3025 ret = -EINVAL; 3109 ret = PTR_ERR(sector_map);
3026 goto out; 3110 goto out;
3027 } 3111 }
3028 3112
@@ -3125,7 +3209,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
3125 if (err) 3209 if (err)
3126 goto exit; 3210 goto exit;
3127 3211
3128 /* Parse other parameter headers. */ 3212 /* Parse optional parameter tables. */
3129 for (i = 0; i < header.nph; i++) { 3213 for (i = 0; i < header.nph; i++) {
3130 param_header = &param_headers[i]; 3214 param_header = &param_headers[i];
3131 3215
@@ -3138,8 +3222,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
3138 break; 3222 break;
3139 } 3223 }
3140 3224
3141 if (err) 3225 if (err) {
3142 goto exit; 3226 dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
3227 SFDP_PARAM_HEADER_ID(param_header));
3228 /*
3229 * Let's not drop all information we extracted so far
3230 * if optional table parsers fail. In case of failing,
3231 * each optional parser is responsible to roll back to
3232 * the previously known spi_nor data.
3233 */
3234 err = 0;
3235 }
3143 } 3236 }
3144 3237
3145exit: 3238exit:
@@ -3250,12 +3343,14 @@ static int spi_nor_init_params(struct spi_nor *nor,
3250 memcpy(&sfdp_params, params, sizeof(sfdp_params)); 3343 memcpy(&sfdp_params, params, sizeof(sfdp_params));
3251 memcpy(&prev_map, &nor->erase_map, sizeof(prev_map)); 3344 memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
3252 3345
3253 if (spi_nor_parse_sfdp(nor, &sfdp_params)) 3346 if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
3347 nor->addr_width = 0;
3254 /* restore previous erase map */ 3348 /* restore previous erase map */
3255 memcpy(&nor->erase_map, &prev_map, 3349 memcpy(&nor->erase_map, &prev_map,
3256 sizeof(nor->erase_map)); 3350 sizeof(nor->erase_map));
3257 else 3351 } else {
3258 memcpy(params, &sfdp_params, sizeof(*params)); 3352 memcpy(params, &sfdp_params, sizeof(*params));
3353 }
3259 } 3354 }
3260 3355
3261 return 0; 3356 return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ffa37adb7681..333387f1f1fe 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3112,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event,
3112 case NETDEV_CHANGE: 3112 case NETDEV_CHANGE:
3113 /* For 802.3ad mode only: 3113 /* For 802.3ad mode only:
3114 * Getting invalid Speed/Duplex values here will put slave 3114 * Getting invalid Speed/Duplex values here will put slave
3115 * in weird state. So mark it as link-down for the time 3115 * in weird state. So mark it as link-fail for the time
3116 * being and let link-monitoring (miimon) set it right when 3116 * being and let link-monitoring (miimon) set it right when
3117 * correct speeds/duplex are available. 3117 * correct speeds/duplex are available.
3118 */ 3118 */
3119 if (bond_update_speed_duplex(slave) && 3119 if (bond_update_speed_duplex(slave) &&
3120 BOND_MODE(bond) == BOND_MODE_8023AD) 3120 BOND_MODE(bond) == BOND_MODE_8023AD)
3121 slave->link = BOND_LINK_DOWN; 3121 slave->link = BOND_LINK_FAIL;
3122 3122
3123 if (BOND_MODE(bond) == BOND_MODE_8023AD) 3123 if (BOND_MODE(bond) == BOND_MODE_8023AD)
3124 bond_3ad_adapter_speed_duplex_changed(slave); 3124 bond_3ad_adapter_speed_duplex_changed(slave);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 49163570a63a..3b3f88ffab53 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -477,6 +477,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
477} 477}
478EXPORT_SYMBOL_GPL(can_put_echo_skb); 478EXPORT_SYMBOL_GPL(can_put_echo_skb);
479 479
480struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
481{
482 struct can_priv *priv = netdev_priv(dev);
483 struct sk_buff *skb = priv->echo_skb[idx];
484 struct canfd_frame *cf;
485
486 if (idx >= priv->echo_skb_max) {
487 netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
488 __func__, idx, priv->echo_skb_max);
489 return NULL;
490 }
491
492 if (!skb) {
493 netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
494 __func__, idx);
495 return NULL;
496 }
497
498 /* Using "struct canfd_frame::len" for the frame
499 * length is supported on both CAN and CANFD frames.
500 */
501 cf = (struct canfd_frame *)skb->data;
502 *len_ptr = cf->len;
503 priv->echo_skb[idx] = NULL;
504
505 return skb;
506}
507
480/* 508/*
481 * Get the skb from the stack and loop it back locally 509 * Get the skb from the stack and loop it back locally
482 * 510 *
@@ -486,22 +514,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
486 */ 514 */
487unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) 515unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
488{ 516{
489 struct can_priv *priv = netdev_priv(dev); 517 struct sk_buff *skb;
490 518 u8 len;
491 BUG_ON(idx >= priv->echo_skb_max);
492
493 if (priv->echo_skb[idx]) {
494 struct sk_buff *skb = priv->echo_skb[idx];
495 struct can_frame *cf = (struct can_frame *)skb->data;
496 u8 dlc = cf->can_dlc;
497 519
498 netif_rx(priv->echo_skb[idx]); 520 skb = __can_get_echo_skb(dev, idx, &len);
499 priv->echo_skb[idx] = NULL; 521 if (!skb)
522 return 0;
500 523
501 return dlc; 524 netif_rx(skb);
502 }
503 525
504 return 0; 526 return len;
505} 527}
506EXPORT_SYMBOL_GPL(can_get_echo_skb); 528EXPORT_SYMBOL_GPL(can_get_echo_skb);
507 529
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 8e972ef08637..75ce11395ee8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -135,13 +135,12 @@
135 135
136/* FLEXCAN interrupt flag register (IFLAG) bits */ 136/* FLEXCAN interrupt flag register (IFLAG) bits */
137/* Errata ERR005829 step7: Reserve first valid MB */ 137/* Errata ERR005829 step7: Reserve first valid MB */
138#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 138#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
139#define FLEXCAN_TX_MB_OFF_FIFO 9
140#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 139#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
141#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 140#define FLEXCAN_TX_MB 63
142#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) 141#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
143#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 142#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1)
144#define FLEXCAN_IFLAG_MB(x) BIT(x) 143#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f)
145#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) 144#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
146#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) 145#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
147#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) 146#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
@@ -259,9 +258,7 @@ struct flexcan_priv {
259 struct can_rx_offload offload; 258 struct can_rx_offload offload;
260 259
261 struct flexcan_regs __iomem *regs; 260 struct flexcan_regs __iomem *regs;
262 struct flexcan_mb __iomem *tx_mb;
263 struct flexcan_mb __iomem *tx_mb_reserved; 261 struct flexcan_mb __iomem *tx_mb_reserved;
264 u8 tx_mb_idx;
265 u32 reg_ctrl_default; 262 u32 reg_ctrl_default;
266 u32 reg_imask1_default; 263 u32 reg_imask1_default;
267 u32 reg_imask2_default; 264 u32 reg_imask2_default;
@@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
515static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) 512static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
516{ 513{
517 const struct flexcan_priv *priv = netdev_priv(dev); 514 const struct flexcan_priv *priv = netdev_priv(dev);
515 struct flexcan_regs __iomem *regs = priv->regs;
518 struct can_frame *cf = (struct can_frame *)skb->data; 516 struct can_frame *cf = (struct can_frame *)skb->data;
519 u32 can_id; 517 u32 can_id;
520 u32 data; 518 u32 data;
@@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
537 535
538 if (cf->can_dlc > 0) { 536 if (cf->can_dlc > 0) {
539 data = be32_to_cpup((__be32 *)&cf->data[0]); 537 data = be32_to_cpup((__be32 *)&cf->data[0]);
540 priv->write(data, &priv->tx_mb->data[0]); 538 priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
541 } 539 }
542 if (cf->can_dlc > 4) { 540 if (cf->can_dlc > 4) {
543 data = be32_to_cpup((__be32 *)&cf->data[4]); 541 data = be32_to_cpup((__be32 *)&cf->data[4]);
544 priv->write(data, &priv->tx_mb->data[1]); 542 priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
545 } 543 }
546 544
547 can_put_echo_skb(skb, dev, 0); 545 can_put_echo_skb(skb, dev, 0);
548 546
549 priv->write(can_id, &priv->tx_mb->can_id); 547 priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
550 priv->write(ctrl, &priv->tx_mb->can_ctrl); 548 priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
551 549
552 /* Errata ERR005829 step8: 550 /* Errata ERR005829 step8:
553 * Write twice INACTIVE(0x8) code to first MB. 551 * Write twice INACTIVE(0x8) code to first MB.
@@ -563,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
563static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) 561static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
564{ 562{
565 struct flexcan_priv *priv = netdev_priv(dev); 563 struct flexcan_priv *priv = netdev_priv(dev);
564 struct flexcan_regs __iomem *regs = priv->regs;
566 struct sk_buff *skb; 565 struct sk_buff *skb;
567 struct can_frame *cf; 566 struct can_frame *cf;
568 bool rx_errors = false, tx_errors = false; 567 bool rx_errors = false, tx_errors = false;
568 u32 timestamp;
569
570 timestamp = priv->read(&regs->timer) << 16;
569 571
570 skb = alloc_can_err_skb(dev, &cf); 572 skb = alloc_can_err_skb(dev, &cf);
571 if (unlikely(!skb)) 573 if (unlikely(!skb))
@@ -612,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
612 if (tx_errors) 614 if (tx_errors)
613 dev->stats.tx_errors++; 615 dev->stats.tx_errors++;
614 616
615 can_rx_offload_irq_queue_err_skb(&priv->offload, skb); 617 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
616} 618}
617 619
618static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) 620static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
619{ 621{
620 struct flexcan_priv *priv = netdev_priv(dev); 622 struct flexcan_priv *priv = netdev_priv(dev);
623 struct flexcan_regs __iomem *regs = priv->regs;
621 struct sk_buff *skb; 624 struct sk_buff *skb;
622 struct can_frame *cf; 625 struct can_frame *cf;
623 enum can_state new_state, rx_state, tx_state; 626 enum can_state new_state, rx_state, tx_state;
624 int flt; 627 int flt;
625 struct can_berr_counter bec; 628 struct can_berr_counter bec;
629 u32 timestamp;
630
631 timestamp = priv->read(&regs->timer) << 16;
626 632
627 flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; 633 flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
628 if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { 634 if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
@@ -652,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
652 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 658 if (unlikely(new_state == CAN_STATE_BUS_OFF))
653 can_bus_off(dev); 659 can_bus_off(dev);
654 660
655 can_rx_offload_irq_queue_err_skb(&priv->offload, skb); 661 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
656} 662}
657 663
658static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) 664static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -720,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
720 priv->write(BIT(n - 32), &regs->iflag2); 726 priv->write(BIT(n - 32), &regs->iflag2);
721 } else { 727 } else {
722 priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); 728 priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
723 priv->read(&regs->timer);
724 } 729 }
725 730
731 /* Read the Free Running Timer. It is optional but recommended
732 * to unlock Mailbox as soon as possible and make it available
733 * for reception.
734 */
735 priv->read(&regs->timer);
736
726 return 1; 737 return 1;
727} 738}
728 739
@@ -732,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
732 struct flexcan_regs __iomem *regs = priv->regs; 743 struct flexcan_regs __iomem *regs = priv->regs;
733 u32 iflag1, iflag2; 744 u32 iflag1, iflag2;
734 745
735 iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default; 746 iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
736 iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default & 747 ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
737 ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 748 iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
738 749
739 return (u64)iflag2 << 32 | iflag1; 750 return (u64)iflag2 << 32 | iflag1;
740} 751}
@@ -746,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
746 struct flexcan_priv *priv = netdev_priv(dev); 757 struct flexcan_priv *priv = netdev_priv(dev);
747 struct flexcan_regs __iomem *regs = priv->regs; 758 struct flexcan_regs __iomem *regs = priv->regs;
748 irqreturn_t handled = IRQ_NONE; 759 irqreturn_t handled = IRQ_NONE;
749 u32 reg_iflag1, reg_esr; 760 u32 reg_iflag2, reg_esr;
750 enum can_state last_state = priv->can.state; 761 enum can_state last_state = priv->can.state;
751 762
752 reg_iflag1 = priv->read(&regs->iflag1);
753
754 /* reception interrupt */ 763 /* reception interrupt */
755 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 764 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
756 u64 reg_iflag; 765 u64 reg_iflag;
@@ -764,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
764 break; 773 break;
765 } 774 }
766 } else { 775 } else {
776 u32 reg_iflag1;
777
778 reg_iflag1 = priv->read(&regs->iflag1);
767 if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { 779 if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
768 handled = IRQ_HANDLED; 780 handled = IRQ_HANDLED;
769 can_rx_offload_irq_offload_fifo(&priv->offload); 781 can_rx_offload_irq_offload_fifo(&priv->offload);
@@ -779,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
779 } 791 }
780 } 792 }
781 793
794 reg_iflag2 = priv->read(&regs->iflag2);
795
782 /* transmission complete interrupt */ 796 /* transmission complete interrupt */
783 if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { 797 if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
798 u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
799
784 handled = IRQ_HANDLED; 800 handled = IRQ_HANDLED;
785 stats->tx_bytes += can_get_echo_skb(dev, 0); 801 stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
802 0, reg_ctrl << 16);
786 stats->tx_packets++; 803 stats->tx_packets++;
787 can_led_event(dev, CAN_LED_EVENT_TX); 804 can_led_event(dev, CAN_LED_EVENT_TX);
788 805
789 /* after sending a RTR frame MB is in RX mode */ 806 /* after sending a RTR frame MB is in RX mode */
790 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, 807 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
791 &priv->tx_mb->can_ctrl); 808 &regs->mb[FLEXCAN_TX_MB].can_ctrl);
792 priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1); 809 priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
793 netif_wake_queue(dev); 810 netif_wake_queue(dev);
794 } 811 }
795 812
@@ -931,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev)
931 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); 948 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
932 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | 949 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
933 FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | 950 FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
934 FLEXCAN_MCR_IDAM_C; 951 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
935 952
936 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 953 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
937 reg_mcr &= ~FLEXCAN_MCR_FEN; 954 reg_mcr &= ~FLEXCAN_MCR_FEN;
938 reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); 955 else
939 } else { 956 reg_mcr |= FLEXCAN_MCR_FEN;
940 reg_mcr |= FLEXCAN_MCR_FEN | 957
941 FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
942 }
943 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); 958 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
944 priv->write(reg_mcr, &regs->mcr); 959 priv->write(reg_mcr, &regs->mcr);
945 960
@@ -982,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev)
982 priv->write(reg_ctrl2, &regs->ctrl2); 997 priv->write(reg_ctrl2, &regs->ctrl2);
983 } 998 }
984 999
985 /* clear and invalidate all mailboxes first */
986 for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
987 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
988 &regs->mb[i].can_ctrl);
989 }
990
991 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 1000 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
992 for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) 1001 for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
993 priv->write(FLEXCAN_MB_CODE_RX_EMPTY, 1002 priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
994 &regs->mb[i].can_ctrl); 1003 &regs->mb[i].can_ctrl);
1004 }
1005 } else {
1006 /* clear and invalidate unused mailboxes first */
1007 for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
1008 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
1009 &regs->mb[i].can_ctrl);
1010 }
995 } 1011 }
996 1012
997 /* Errata ERR005829: mark first TX mailbox as INACTIVE */ 1013 /* Errata ERR005829: mark first TX mailbox as INACTIVE */
@@ -1000,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev)
1000 1016
1001 /* mark TX mailbox as INACTIVE */ 1017 /* mark TX mailbox as INACTIVE */
1002 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, 1018 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
1003 &priv->tx_mb->can_ctrl); 1019 &regs->mb[FLEXCAN_TX_MB].can_ctrl);
1004 1020
1005 /* acceptance mask/acceptance code (accept everything) */ 1021 /* acceptance mask/acceptance code (accept everything) */
1006 priv->write(0x0, &regs->rxgmask); 1022 priv->write(0x0, &regs->rxgmask);
@@ -1355,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev)
1355 priv->devtype_data = devtype_data; 1371 priv->devtype_data = devtype_data;
1356 priv->reg_xceiver = reg_xceiver; 1372 priv->reg_xceiver = reg_xceiver;
1357 1373
1358 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 1374 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
1359 priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
1360 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; 1375 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
1361 } else { 1376 else
1362 priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
1363 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; 1377 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
1364 }
1365 priv->tx_mb = &regs->mb[priv->tx_mb_idx];
1366 1378
1367 priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 1379 priv->reg_imask1_default = 0;
1368 priv->reg_imask2_default = 0; 1380 priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
1369 1381
1370 priv->offload.mailbox_read = flexcan_mailbox_read; 1382 priv->offload.mailbox_read = flexcan_mailbox_read;
1371 1383
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 11662f479e76..771a46083739 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -24,6 +24,9 @@
24 24
25#define RCAR_CAN_DRV_NAME "rcar_can" 25#define RCAR_CAN_DRV_NAME "rcar_can"
26 26
27#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
28 BIT(CLKR_CLKEXT))
29
27/* Mailbox configuration: 30/* Mailbox configuration:
28 * mailbox 60 - 63 - Rx FIFO mailboxes 31 * mailbox 60 - 63 - Rx FIFO mailboxes
29 * mailbox 56 - 59 - Tx FIFO mailboxes 32 * mailbox 56 - 59 - Tx FIFO mailboxes
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
789 goto fail_clk; 792 goto fail_clk;
790 } 793 }
791 794
792 if (clock_select >= ARRAY_SIZE(clock_names)) { 795 if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
793 err = -EINVAL; 796 err = -EINVAL;
794 dev_err(&pdev->dev, "invalid CAN clock selected\n"); 797 dev_err(&pdev->dev, "invalid CAN clock selected\n");
795 goto fail_clk; 798 goto fail_clk;
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index c7d05027a7a0..2ce4fa8698c7 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -211,7 +211,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
211} 211}
212EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); 212EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
213 213
214int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) 214int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
215 struct sk_buff *skb, u32 timestamp)
216{
217 struct can_rx_offload_cb *cb;
218 unsigned long flags;
219
220 if (skb_queue_len(&offload->skb_queue) >
221 offload->skb_queue_len_max)
222 return -ENOMEM;
223
224 cb = can_rx_offload_get_cb(skb);
225 cb->timestamp = timestamp;
226
227 spin_lock_irqsave(&offload->skb_queue.lock, flags);
228 __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
229 spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
230
231 can_rx_offload_schedule(offload);
232
233 return 0;
234}
235EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
236
237unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
238 unsigned int idx, u32 timestamp)
239{
240 struct net_device *dev = offload->dev;
241 struct net_device_stats *stats = &dev->stats;
242 struct sk_buff *skb;
243 u8 len;
244 int err;
245
246 skb = __can_get_echo_skb(dev, idx, &len);
247 if (!skb)
248 return 0;
249
250 err = can_rx_offload_queue_sorted(offload, skb, timestamp);
251 if (err) {
252 stats->rx_errors++;
253 stats->tx_fifo_errors++;
254 }
255
256 return len;
257}
258EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
259
260int can_rx_offload_queue_tail(struct can_rx_offload *offload,
261 struct sk_buff *skb)
215{ 262{
216 if (skb_queue_len(&offload->skb_queue) > 263 if (skb_queue_len(&offload->skb_queue) >
217 offload->skb_queue_len_max) 264 offload->skb_queue_len_max)
@@ -222,7 +269,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b
222 269
223 return 0; 270 return 0;
224} 271}
225EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); 272EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
226 273
227static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) 274static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
228{ 275{
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 53e320c92a8b..ddaf46239e39 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net)
760{ 760{
761 struct hi3110_priv *priv = netdev_priv(net); 761 struct hi3110_priv *priv = netdev_priv(net);
762 struct spi_device *spi = priv->spi; 762 struct spi_device *spi = priv->spi;
763 unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING; 763 unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;
764 int ret; 764 int ret;
765 765
766 ret = open_candev(net); 766 ret = open_candev(net);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index b939a4c10b84..c89c7d4900d7 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
528 context = &priv->tx_contexts[i]; 528 context = &priv->tx_contexts[i];
529 529
530 context->echo_index = i; 530 context->echo_index = i;
531 can_put_echo_skb(skb, netdev, context->echo_index);
532 ++priv->active_tx_contexts; 531 ++priv->active_tx_contexts;
533 if (priv->active_tx_contexts >= (int)dev->max_tx_urbs) 532 if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
534 netif_stop_queue(netdev); 533 netif_stop_queue(netdev);
@@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
553 dev_kfree_skb(skb); 552 dev_kfree_skb(skb);
554 spin_lock_irqsave(&priv->tx_contexts_lock, flags); 553 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
555 554
556 can_free_echo_skb(netdev, context->echo_index);
557 context->echo_index = dev->max_tx_urbs; 555 context->echo_index = dev->max_tx_urbs;
558 --priv->active_tx_contexts; 556 --priv->active_tx_contexts;
559 netif_wake_queue(netdev); 557 netif_wake_queue(netdev);
@@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
564 562
565 context->priv = priv; 563 context->priv = priv;
566 564
565 can_put_echo_skb(skb, netdev, context->echo_index);
566
567 usb_fill_bulk_urb(urb, dev->udev, 567 usb_fill_bulk_urb(urb, dev->udev,
568 usb_sndbulkpipe(dev->udev, 568 usb_sndbulkpipe(dev->udev,
569 dev->bulk_out->bEndpointAddress), 569 dev->bulk_out->bEndpointAddress),
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index c084bae5ec0a..5fc0be564274 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
1019 new_state : CAN_STATE_ERROR_ACTIVE; 1019 new_state : CAN_STATE_ERROR_ACTIVE;
1020 1020
1021 can_change_state(netdev, cf, tx_state, rx_state); 1021 can_change_state(netdev, cf, tx_state, rx_state);
1022
1023 if (priv->can.restart_ms &&
1024 old_state >= CAN_STATE_BUS_OFF &&
1025 new_state < CAN_STATE_BUS_OFF)
1026 cf->can_id |= CAN_ERR_RESTARTED;
1022 } 1027 }
1023 1028
1024 if (new_state == CAN_STATE_BUS_OFF) { 1029 if (new_state == CAN_STATE_BUS_OFF) {
@@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
1028 1033
1029 can_bus_off(netdev); 1034 can_bus_off(netdev);
1030 } 1035 }
1031
1032 if (priv->can.restart_ms &&
1033 old_state >= CAN_STATE_BUS_OFF &&
1034 new_state < CAN_STATE_BUS_OFF)
1035 cf->can_id |= CAN_ERR_RESTARTED;
1036 } 1036 }
1037 1037
1038 if (!skb) { 1038 if (!skb) {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 0678a38b1af4..f3d5bda012a1 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -35,10 +35,6 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/usb.h> 36#include <linux/usb.h>
37 37
38#include <linux/can.h>
39#include <linux/can/dev.h>
40#include <linux/can/error.h>
41
42#define UCAN_DRIVER_NAME "ucan" 38#define UCAN_DRIVER_NAME "ucan"
43#define UCAN_MAX_RX_URBS 8 39#define UCAN_MAX_RX_URBS 8
44/* the CAN controller needs a while to enable/disable the bus */ 40/* the CAN controller needs a while to enable/disable the bus */
@@ -1575,11 +1571,8 @@ err_firmware_needs_update:
1575/* disconnect the device */ 1571/* disconnect the device */
1576static void ucan_disconnect(struct usb_interface *intf) 1572static void ucan_disconnect(struct usb_interface *intf)
1577{ 1573{
1578 struct usb_device *udev;
1579 struct ucan_priv *up = usb_get_intfdata(intf); 1574 struct ucan_priv *up = usb_get_intfdata(intf);
1580 1575
1581 udev = interface_to_usbdev(intf);
1582
1583 usb_set_intfdata(intf, NULL); 1576 usb_set_intfdata(intf, NULL);
1584 1577
1585 if (up) { 1578 if (up) {
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 54e0ca6ed730..86b6464b4525 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev)
1117{ 1117{
1118 int i; 1118 int i;
1119 1119
1120 mutex_init(&dev->reg_mutex);
1121 mutex_init(&dev->stats_mutex);
1122 mutex_init(&dev->alu_mutex);
1123 mutex_init(&dev->vlan_mutex);
1124
1125 dev->ds->ops = &ksz_switch_ops; 1120 dev->ds->ops = &ksz_switch_ops;
1126 1121
1127 for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { 1122 for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
1206 if (dev->pdata) 1201 if (dev->pdata)
1207 dev->chip_id = dev->pdata->chip_id; 1202 dev->chip_id = dev->pdata->chip_id;
1208 1203
1204 mutex_init(&dev->reg_mutex);
1205 mutex_init(&dev->stats_mutex);
1206 mutex_init(&dev->alu_mutex);
1207 mutex_init(&dev->vlan_mutex);
1208
1209 if (ksz_switch_detect(dev)) 1209 if (ksz_switch_detect(dev))
1210 return -EINVAL; 1210 return -EINVAL;
1211 1211
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d721ccf7d8be..38e399e0f30e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
567 if (err) 567 if (err)
568 return err; 568 return err;
569 569
570 /* Keep the histogram mode bits */
571 val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
570 val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL; 572 val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
571 573
572 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val); 574 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 18956e7604a3..a70bb1bb90e7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
1848 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 1848 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
1849 if (rc) 1849 if (rc)
1850 dev_err(&adapter->pdev->dev, "Device reset failed\n"); 1850 dev_err(&adapter->pdev->dev, "Device reset failed\n");
1851 /* stop submitting admin commands on a device that was reset */
1852 ena_com_set_admin_running_state(adapter->ena_dev, false);
1851 } 1853 }
1852 1854
1853 ena_destroy_all_io_queues(adapter); 1855 ena_destroy_all_io_queues(adapter);
@@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
1914 1916
1915 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); 1917 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
1916 1918
1919 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
1920 return 0;
1921
1917 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 1922 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1918 ena_down(adapter); 1923 ena_down(adapter);
1919 1924
@@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2613 ena_down(adapter); 2618 ena_down(adapter);
2614 2619
2615 /* Stop the device from sending AENQ events (in case reset flag is set 2620 /* Stop the device from sending AENQ events (in case reset flag is set
2616 * and device is up, ena_close already reset the device 2621 * and device is up, ena_down() already reset the device.
2617 * In case the reset flag is set and the device is up, ena_down()
2618 * already perform the reset, so it can be skipped.
2619 */ 2622 */
2620 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2623 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2621 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 2624 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@@ -2694,8 +2697,8 @@ err_device_destroy:
2694 ena_com_abort_admin_commands(ena_dev); 2697 ena_com_abort_admin_commands(ena_dev);
2695 ena_com_wait_for_abort_completion(ena_dev); 2698 ena_com_wait_for_abort_completion(ena_dev);
2696 ena_com_admin_destroy(ena_dev); 2699 ena_com_admin_destroy(ena_dev);
2697 ena_com_mmio_reg_read_request_destroy(ena_dev);
2698 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 2700 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
2701 ena_com_mmio_reg_read_request_destroy(ena_dev);
2699err: 2702err:
2700 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2703 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2701 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2704 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3452,6 +3455,8 @@ err_rss:
3452 ena_com_rss_destroy(ena_dev); 3455 ena_com_rss_destroy(ena_dev);
3453err_free_msix: 3456err_free_msix:
3454 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); 3457 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3458 /* stop submitting admin commands on a device that was reset */
3459 ena_com_set_admin_running_state(ena_dev, false);
3455 ena_free_mgmnt_irq(adapter); 3460 ena_free_mgmnt_irq(adapter);
3456 ena_disable_msix(adapter); 3461 ena_disable_msix(adapter);
3457err_worker_destroy: 3462err_worker_destroy:
@@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev)
3498 3503
3499 cancel_work_sync(&adapter->reset_task); 3504 cancel_work_sync(&adapter->reset_task);
3500 3505
3501 unregister_netdev(netdev);
3502
3503 /* If the device is running then we want to make sure the device will be
3504 * reset to make sure no more events will be issued by the device.
3505 */
3506 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3507 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3508
3509 rtnl_lock(); 3506 rtnl_lock();
3510 ena_destroy_device(adapter, true); 3507 ena_destroy_device(adapter, true);
3511 rtnl_unlock(); 3508 rtnl_unlock();
3512 3509
3510 unregister_netdev(netdev);
3511
3513 free_netdev(netdev); 3512 free_netdev(netdev);
3514 3513
3515 ena_com_rss_destroy(ena_dev); 3514 ena_com_rss_destroy(ena_dev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 521873642339..dc8b6173d8d8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
45 45
46#define DRV_MODULE_VER_MAJOR 2 46#define DRV_MODULE_VER_MAJOR 2
47#define DRV_MODULE_VER_MINOR 0 47#define DRV_MODULE_VER_MINOR 0
48#define DRV_MODULE_VER_SUBMINOR 1 48#define DRV_MODULE_VER_SUBMINOR 2
49 49
50#define DRV_MODULE_NAME "ena" 50#define DRV_MODULE_NAME "ena"
51#ifndef DRV_MODULE_VERSION 51#ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index b4fc0ed5bce8..9d4899826823 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
1419 1419
1420 prop = of_get_property(nd, "tpe-link-test?", NULL); 1420 prop = of_get_property(nd, "tpe-link-test?", NULL);
1421 if (!prop) 1421 if (!prop)
1422 goto no_link_test; 1422 goto node_put;
1423 1423
1424 if (strcmp(prop, "true")) { 1424 if (strcmp(prop, "true")) {
1425 printk(KERN_NOTICE "SunLance: warning: overriding option " 1425 printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
1428 "to ecd@skynet.be\n"); 1428 "to ecd@skynet.be\n");
1429 auxio_set_lte(AUXIO_LTE_ON); 1429 auxio_set_lte(AUXIO_LTE_ON);
1430 } 1430 }
1431node_put:
1432 of_node_put(nd);
1431no_link_test: 1433no_link_test:
1432 lp->auto_select = 1; 1434 lp->auto_select = 1;
1433 lp->tpe = 0; 1435 lp->tpe = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 6a633c70f603..99ef1daaa4d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
407 struct ethtool_pauseparam *pause) 407 struct ethtool_pauseparam *pause)
408{ 408{
409 struct aq_nic_s *aq_nic = netdev_priv(ndev); 409 struct aq_nic_s *aq_nic = netdev_priv(ndev);
410 u32 fc = aq_nic->aq_nic_cfg.flow_control;
410 411
411 pause->autoneg = 0; 412 pause->autoneg = 0;
412 413
413 if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) 414 pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
414 pause->rx_pause = 1; 415 pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
415 if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) 416
416 pause->tx_pause = 1;
417} 417}
418 418
419static int aq_ethtool_set_pauseparam(struct net_device *ndev, 419static int aq_ethtool_set_pauseparam(struct net_device *ndev,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index e8689241204e..a1e70da358ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -204,6 +204,10 @@ struct aq_hw_ops {
204 204
205 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); 205 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
206 206
207 int (*hw_set_offload)(struct aq_hw_s *self,
208 struct aq_nic_cfg_s *aq_nic_cfg);
209
210 int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
207}; 211};
208 212
209struct aq_fw_ops { 213struct aq_fw_ops {
@@ -226,6 +230,8 @@ struct aq_fw_ops {
226 230
227 int (*update_stats)(struct aq_hw_s *self); 231 int (*update_stats)(struct aq_hw_s *self);
228 232
233 u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
234
229 int (*set_flow_control)(struct aq_hw_s *self); 235 int (*set_flow_control)(struct aq_hw_s *self);
230 236
231 int (*set_power)(struct aq_hw_s *self, unsigned int power_state, 237 int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e3ae29e523f0..7c07eef275eb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
99 struct aq_nic_s *aq_nic = netdev_priv(ndev); 99 struct aq_nic_s *aq_nic = netdev_priv(ndev);
100 struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); 100 struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
101 bool is_lro = false; 101 bool is_lro = false;
102 int err = 0;
103
104 aq_cfg->features = features;
102 105
103 if (aq_cfg->hw_features & NETIF_F_LRO) { 106 if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
104 is_lro = features & NETIF_F_LRO; 107 is_lro = features & NETIF_F_LRO;
105 108
106 if (aq_cfg->is_lro != is_lro) { 109 if (aq_cfg->is_lro != is_lro) {
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
112 } 115 }
113 } 116 }
114 } 117 }
118 if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
119 err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
120 aq_cfg);
115 121
116 return 0; 122 return err;
117} 123}
118 124
119static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) 125static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 5fed24446687..7abdc0952425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
118 } 118 }
119 119
120 cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; 120 cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
121 cfg->hw_features = cfg->aq_hw_caps->hw_features; 121 cfg->features = cfg->aq_hw_caps->hw_features;
122} 122}
123 123
124static int aq_nic_update_link_status(struct aq_nic_s *self) 124static int aq_nic_update_link_status(struct aq_nic_s *self)
125{ 125{
126 int err = self->aq_fw_ops->update_link_status(self->aq_hw); 126 int err = self->aq_fw_ops->update_link_status(self->aq_hw);
127 u32 fc = 0;
127 128
128 if (err) 129 if (err)
129 return err; 130 return err;
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
133 AQ_CFG_DRV_NAME, self->link_status.mbps, 134 AQ_CFG_DRV_NAME, self->link_status.mbps,
134 self->aq_hw->aq_link_status.mbps); 135 self->aq_hw->aq_link_status.mbps);
135 aq_nic_update_interrupt_moderation_settings(self); 136 aq_nic_update_interrupt_moderation_settings(self);
137
138 /* Driver has to update flow control settings on RX block
139 * on any link event.
140 * We should query FW whether it negotiated FC.
141 */
142 if (self->aq_fw_ops->get_flow_control)
143 self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
144 if (self->aq_hw_ops->hw_set_fc)
145 self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
136 } 146 }
137 147
138 self->link_status = self->aq_hw->aq_link_status; 148 self->link_status = self->aq_hw->aq_link_status;
@@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
590 } 600 }
591 } 601 }
592 602
593 if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) { 603 if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
594 packet_filter |= IFF_MULTICAST; 604 packet_filter |= IFF_MULTICAST;
595 self->mc_list.count = i; 605 self->mc_list.count = i;
596 self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, 606 self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
@@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
772 ethtool_link_ksettings_add_link_mode(cmd, advertising, 782 ethtool_link_ksettings_add_link_mode(cmd, advertising,
773 Pause); 783 Pause);
774 784
775 if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) 785 /* Asym is when either RX or TX, but not both */
786 if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
787 !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
776 ethtool_link_ksettings_add_link_mode(cmd, advertising, 788 ethtool_link_ksettings_add_link_mode(cmd, advertising,
777 Asym_Pause); 789 Asym_Pause);
778 790
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index c1582f4e8e1b..44ec47a3d60a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -23,7 +23,7 @@ struct aq_vec_s;
23 23
24struct aq_nic_cfg_s { 24struct aq_nic_cfg_s {
25 const struct aq_hw_caps_s *aq_hw_caps; 25 const struct aq_hw_caps_s *aq_hw_caps;
26 u64 hw_features; 26 u64 features;
27 u32 rxds; /* rx ring size, descriptors # */ 27 u32 rxds; /* rx ring size, descriptors # */
28 u32 txds; /* tx ring size, descriptors # */ 28 u32 txds; /* tx ring size, descriptors # */
29 u32 vecs; /* vecs==allocated irqs */ 29 u32 vecs; /* vecs==allocated irqs */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3db91446cc67..74550ccc7a20 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
172 return !!budget; 172 return !!budget;
173} 173}
174 174
175static void aq_rx_checksum(struct aq_ring_s *self,
176 struct aq_ring_buff_s *buff,
177 struct sk_buff *skb)
178{
179 if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
180 return;
181
182 if (unlikely(buff->is_cso_err)) {
183 ++self->stats.rx.errors;
184 skb->ip_summed = CHECKSUM_NONE;
185 return;
186 }
187 if (buff->is_ip_cso) {
188 __skb_incr_checksum_unnecessary(skb);
189 if (buff->is_udp_cso || buff->is_tcp_cso)
190 __skb_incr_checksum_unnecessary(skb);
191 } else {
192 skb->ip_summed = CHECKSUM_NONE;
193 }
194}
195
175#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 196#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
176int aq_ring_rx_clean(struct aq_ring_s *self, 197int aq_ring_rx_clean(struct aq_ring_s *self,
177 struct napi_struct *napi, 198 struct napi_struct *napi,
@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
267 } 288 }
268 289
269 skb->protocol = eth_type_trans(skb, ndev); 290 skb->protocol = eth_type_trans(skb, ndev);
270 if (unlikely(buff->is_cso_err)) { 291
271 ++self->stats.rx.errors; 292 aq_rx_checksum(self, buff, skb);
272 skb->ip_summed = CHECKSUM_NONE;
273 } else {
274 if (buff->is_ip_cso) {
275 __skb_incr_checksum_unnecessary(skb);
276 if (buff->is_udp_cso || buff->is_tcp_cso)
277 __skb_incr_checksum_unnecessary(skb);
278 } else {
279 skb->ip_summed = CHECKSUM_NONE;
280 }
281 }
282 293
283 skb_set_hash(skb, buff->rss_hash, 294 skb_set_hash(skb, buff->rss_hash,
284 buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : 295 buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 76d25d594a0f..f02592f43fe3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
100 return err; 100 return err;
101} 101}
102 102
103static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
104{
105 hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
106 return 0;
107}
108
103static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) 109static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
104{ 110{
105 u32 tc = 0U; 111 u32 tc = 0U;
106 u32 buff_size = 0U; 112 u32 buff_size = 0U;
107 unsigned int i_priority = 0U; 113 unsigned int i_priority = 0U;
108 bool is_rx_flow_control = false;
109 114
110 /* TPS Descriptor rate init */ 115 /* TPS Descriptor rate init */
111 hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); 116 hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
138 143
139 /* QoS Rx buf size per TC */ 144 /* QoS Rx buf size per TC */
140 tc = 0; 145 tc = 0;
141 is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
142 buff_size = HW_ATL_B0_RXBUF_MAX; 146 buff_size = HW_ATL_B0_RXBUF_MAX;
143 147
144 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); 148 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
150 (buff_size * 154 (buff_size *
151 (1024U / 32U) * 50U) / 155 (1024U / 32U) * 50U) /
152 100U, tc); 156 100U, tc);
153 hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); 157
158 hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
154 159
155 /* QoS 802.1p priority -> TC mapping */ 160 /* QoS 802.1p priority -> TC mapping */
156 for (i_priority = 8U; i_priority--;) 161 for (i_priority = 8U; i_priority--;)
@@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
229 hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); 234 hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
230 235
231 /* RX checksums offloads*/ 236 /* RX checksums offloads*/
232 hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1); 237 hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
233 hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1); 238 NETIF_F_RXCSUM));
239 hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
240 NETIF_F_RXCSUM));
234 241
235 /* LSO offloads*/ 242 /* LSO offloads*/
236 hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 243 hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
655 struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) 662 struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
656 &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; 663 &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
657 664
658 unsigned int is_err = 1U;
659 unsigned int is_rx_check_sum_enabled = 0U; 665 unsigned int is_rx_check_sum_enabled = 0U;
660 unsigned int pkt_type = 0U; 666 unsigned int pkt_type = 0U;
667 u8 rx_stat = 0U;
661 668
662 if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ 669 if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
663 break; 670 break;
@@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
665 672
666 buff = &ring->buff_ring[ring->hw_head]; 673 buff = &ring->buff_ring[ring->hw_head];
667 674
668 is_err = (0x0000003CU & rxd_wb->status); 675 rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
669 676
670 is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); 677 is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
671 is_err &= ~0x20U; /* exclude validity bit */
672 678
673 pkt_type = 0xFFU & (rxd_wb->type >> 4); 679 pkt_type = 0xFFU & (rxd_wb->type >> 4);
674 680
675 if (is_rx_check_sum_enabled) { 681 if (is_rx_check_sum_enabled & BIT(0) &&
676 if (0x0U == (pkt_type & 0x3U)) 682 (0x0U == (pkt_type & 0x3U)))
677 buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; 683 buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
678 684
685 if (is_rx_check_sum_enabled & BIT(1)) {
679 if (0x4U == (pkt_type & 0x1CU)) 686 if (0x4U == (pkt_type & 0x1CU))
680 buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; 687 buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
688 !!(rx_stat & BIT(3));
681 else if (0x0U == (pkt_type & 0x1CU)) 689 else if (0x0U == (pkt_type & 0x1CU))
682 buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; 690 buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
683 691 !!(rx_stat & BIT(3));
684 /* Checksum offload workaround for small packets */ 692 }
685 if (rxd_wb->pkt_len <= 60) { 693 buff->is_cso_err = !!(rx_stat & 0x6);
686 buff->is_ip_cso = 0U; 694 /* Checksum offload workaround for small packets */
687 buff->is_cso_err = 0U; 695 if (unlikely(rxd_wb->pkt_len <= 60)) {
688 } 696 buff->is_ip_cso = 0U;
697 buff->is_cso_err = 0U;
689 } 698 }
690
691 is_err &= ~0x18U;
692 699
693 dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); 700 dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
694 701
695 if (is_err || rxd_wb->type & 0x1000U) { 702 if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
696 /* status error or DMA error */ 703 /* MAC error or DMA error */
697 buff->is_error = 1U; 704 buff->is_error = 1U;
698 } else { 705 } else {
699 if (self->aq_nic_cfg->is_rss) { 706 if (self->aq_nic_cfg->is_rss) {
@@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
915static int hw_atl_b0_hw_stop(struct aq_hw_s *self) 922static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
916{ 923{
917 hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); 924 hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
925
926 /* Invalidate Descriptor Cache to prevent writing to the cached
927 * descriptors and to the data pointer of those descriptors
928 */
929 hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
930
918 return aq_hw_err_from_flags(self); 931 return aq_hw_err_from_flags(self);
919} 932}
920 933
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
963 .hw_get_regs = hw_atl_utils_hw_get_regs, 976 .hw_get_regs = hw_atl_utils_hw_get_regs,
964 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 977 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
965 .hw_get_fw_version = hw_atl_utils_get_fw_version, 978 .hw_get_fw_version = hw_atl_utils_get_fw_version,
979 .hw_set_offload = hw_atl_b0_hw_offload_set,
980 .hw_set_fc = hw_atl_b0_set_fc,
966}; 981};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index be0a3a90dfad..5502ec5f0f69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
619 HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); 619 HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
620} 620}
621 621
622void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
623{
624 aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
625 HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
626 HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
627 init);
628}
629
622void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, 630void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
623 u32 rx_pkt_buff_size_per_tc, u32 buffer) 631 u32 rx_pkt_buff_size_per_tc, u32 buffer)
624{ 632{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 7056c7342afc..41f239928c15 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
325 u32 rx_pkt_buff_size_per_tc, 325 u32 rx_pkt_buff_size_per_tc,
326 u32 buffer); 326 u32 buffer);
327 327
328/* set rdm rx dma descriptor cache init */
329void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
330
328/* set rx xoff enable (per tc) */ 331/* set rx xoff enable (per tc) */
329void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, 332void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
330 u32 buffer); 333 u32 buffer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 716674a9b729..a715fa317b1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -293,6 +293,24 @@
293/* default value of bitfield desc{d}_reset */ 293/* default value of bitfield desc{d}_reset */
294#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0 294#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
295 295
296/* rdm_desc_init_i bitfield definitions
297 * preprocessor definitions for the bitfield rdm_desc_init_i.
298 * port="pif_rdm_desc_init_i"
299 */
300
301/* register address for bitfield rdm_desc_init_i */
302#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
303/* bitmask for bitfield rdm_desc_init_i */
304#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
305/* inverted bitmask for bitfield rdm_desc_init_i */
306#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
307/* lower bit position of bitfield rdm_desc_init_i */
308#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
309/* width of bitfield rdm_desc_init_i */
310#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
311/* default value of bitfield rdm_desc_init_i */
312#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
313
296/* rx int_desc_wrb_en bitfield definitions 314/* rx int_desc_wrb_en bitfield definitions
297 * preprocessor definitions for the bitfield "int_desc_wrb_en". 315 * preprocessor definitions for the bitfield "int_desc_wrb_en".
298 * port="pif_rdm_int_desc_wrb_en_i" 316 * port="pif_rdm_int_desc_wrb_en_i"
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 096ca5730887..7de3220d9cab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -30,6 +30,8 @@
30#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 30#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
31#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 31#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
32 32
33#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
34#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
33#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) 35#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
34#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) 36#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
35 37
@@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
451 return 0; 453 return 0;
452} 454}
453 455
456static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
457{
458 u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
459
460 if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
461 if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
462 *fcmode = AQ_NIC_FC_RX;
463 else
464 *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
465 else
466 if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
467 *fcmode = AQ_NIC_FC_TX;
468 else
469 *fcmode = 0;
470
471 return 0;
472}
473
454const struct aq_fw_ops aq_fw_2x_ops = { 474const struct aq_fw_ops aq_fw_2x_ops = {
455 .init = aq_fw2x_init, 475 .init = aq_fw2x_init,
456 .deinit = aq_fw2x_deinit, 476 .deinit = aq_fw2x_deinit,
@@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
465 .set_eee_rate = aq_fw2x_set_eee_rate, 485 .set_eee_rate = aq_fw2x_set_eee_rate,
466 .get_eee_rate = aq_fw2x_get_eee_rate, 486 .get_eee_rate = aq_fw2x_get_eee_rate,
467 .set_flow_control = aq_fw2x_set_flow_control, 487 .set_flow_control = aq_fw2x_set_flow_control,
488 .get_flow_control = aq_fw2x_get_flow_control
468}; 489};
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 78c5de467426..9d0e74f6b089 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -140,6 +140,5 @@ struct alx_priv {
140}; 140};
141 141
142extern const struct ethtool_ops alx_ethtool_ops; 142extern const struct ethtool_ops alx_ethtool_ops;
143extern const char alx_drv_name[];
144 143
145#endif 144#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 7968c644ad86..c131cfc1b79d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -49,7 +49,7 @@
49#include "hw.h" 49#include "hw.h"
50#include "reg.h" 50#include "reg.h"
51 51
52const char alx_drv_name[] = "alx"; 52static const char alx_drv_name[] = "alx";
53 53
54static void alx_free_txbuf(struct alx_tx_queue *txq, int entry) 54static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
55{ 55{
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4122553e224b..0e2d99c737e3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1902,9 +1902,6 @@ static void bcm_sysport_netif_start(struct net_device *dev)
1902 intrl2_1_mask_clear(priv, 0xffffffff); 1902 intrl2_1_mask_clear(priv, 0xffffffff);
1903 else 1903 else
1904 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); 1904 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
1905
1906 /* Last call before we start the real business */
1907 netif_tx_start_all_queues(dev);
1908} 1905}
1909 1906
1910static void rbuf_init(struct bcm_sysport_priv *priv) 1907static void rbuf_init(struct bcm_sysport_priv *priv)
@@ -2048,6 +2045,8 @@ static int bcm_sysport_open(struct net_device *dev)
2048 2045
2049 bcm_sysport_netif_start(dev); 2046 bcm_sysport_netif_start(dev);
2050 2047
2048 netif_tx_start_all_queues(dev);
2049
2051 return 0; 2050 return 0;
2052 2051
2053out_clear_rx_int: 2052out_clear_rx_int:
@@ -2071,7 +2070,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
2071 struct bcm_sysport_priv *priv = netdev_priv(dev); 2070 struct bcm_sysport_priv *priv = netdev_priv(dev);
2072 2071
2073 /* stop all software from updating hardware */ 2072 /* stop all software from updating hardware */
2074 netif_tx_stop_all_queues(dev); 2073 netif_tx_disable(dev);
2075 napi_disable(&priv->napi); 2074 napi_disable(&priv->napi);
2076 cancel_work_sync(&priv->dim.dim.work); 2075 cancel_work_sync(&priv->dim.dim.work);
2077 phy_stop(dev->phydev); 2076 phy_stop(dev->phydev);
@@ -2658,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
2658 if (!netif_running(dev)) 2657 if (!netif_running(dev))
2659 return 0; 2658 return 0;
2660 2659
2660 netif_device_detach(dev);
2661
2661 bcm_sysport_netif_stop(dev); 2662 bcm_sysport_netif_stop(dev);
2662 2663
2663 phy_suspend(dev->phydev); 2664 phy_suspend(dev->phydev);
2664 2665
2665 netif_device_detach(dev);
2666
2667 /* Disable UniMAC RX */ 2666 /* Disable UniMAC RX */
2668 umac_enable_set(priv, CMD_RX_EN, 0); 2667 umac_enable_set(priv, CMD_RX_EN, 0);
2669 2668
@@ -2746,8 +2745,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
2746 goto out_free_rx_ring; 2745 goto out_free_rx_ring;
2747 } 2746 }
2748 2747
2749 netif_device_attach(dev);
2750
2751 /* RX pipe enable */ 2748 /* RX pipe enable */
2752 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 2749 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2753 2750
@@ -2788,6 +2785,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
2788 2785
2789 bcm_sysport_netif_start(dev); 2786 bcm_sysport_netif_start(dev);
2790 2787
2788 netif_device_attach(dev);
2789
2791 return 0; 2790 return 0;
2792 2791
2793out_free_rx_ring: 2792out_free_rx_ring:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index be1506169076..0de487a8f0eb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2191#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 2191#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
2192 E1HVN_MAX) 2192 E1HVN_MAX)
2193 2193
2194/* Following is the DMAE channel number allocation for the clients.
2195 * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
2196 * Driver: 0-3 and 8-11 (for PF dmae operations)
2197 * 4 and 12 (for stats requests)
2198 */
2199#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */
2200
2194/* PCIE link and speed */ 2201/* PCIE link and speed */
2195#define PCICFG_LINK_WIDTH 0x1f00000 2202#define PCICFG_LINK_WIDTH 0x1f00000
2196#define PCICFG_LINK_WIDTH_SHIFT 20 2203#define PCICFG_LINK_WIDTH_SHIFT 20
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 3f4d2c8da21a..a9eaaf3e73a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
6149 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 6149 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
6150 rdata->path_id = BP_PATH(bp); 6150 rdata->path_id = BP_PATH(bp);
6151 rdata->network_cos_mode = start_params->network_cos_mode; 6151 rdata->network_cos_mode = start_params->network_cos_mode;
6152 rdata->dmae_cmd_id = BNX2X_FW_DMAE_C;
6152 6153
6153 rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); 6154 rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
6154 rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); 6155 rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dd85d790f638..d4c300117529 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1675,7 +1675,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1675 } else { 1675 } else {
1676 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1676 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1677 if (dev->features & NETIF_F_RXCSUM) 1677 if (dev->features & NETIF_F_RXCSUM)
1678 cpr->rx_l4_csum_errors++; 1678 bnapi->cp_ring.rx_l4_csum_errors++;
1679 } 1679 }
1680 } 1680 }
1681 1681
@@ -8714,6 +8714,26 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
8714 return rc; 8714 return rc;
8715} 8715}
8716 8716
8717static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
8718 u32 ring_id, u32 *prod, u32 *cons)
8719{
8720 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
8721 struct hwrm_dbg_ring_info_get_input req = {0};
8722 int rc;
8723
8724 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
8725 req.ring_type = ring_type;
8726 req.fw_ring_id = cpu_to_le32(ring_id);
8727 mutex_lock(&bp->hwrm_cmd_lock);
8728 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8729 if (!rc) {
8730 *prod = le32_to_cpu(resp->producer_index);
8731 *cons = le32_to_cpu(resp->consumer_index);
8732 }
8733 mutex_unlock(&bp->hwrm_cmd_lock);
8734 return rc;
8735}
8736
8717static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 8737static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
8718{ 8738{
8719 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 8739 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -8821,6 +8841,11 @@ static void bnxt_timer(struct timer_list *t)
8821 bnxt_queue_sp_work(bp); 8841 bnxt_queue_sp_work(bp);
8822 } 8842 }
8823 } 8843 }
8844
8845 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
8846 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
8847 bnxt_queue_sp_work(bp);
8848 }
8824bnxt_restart_timer: 8849bnxt_restart_timer:
8825 mod_timer(&bp->timer, jiffies + bp->current_interval); 8850 mod_timer(&bp->timer, jiffies + bp->current_interval);
8826} 8851}
@@ -8851,6 +8876,44 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
8851 bnxt_rtnl_unlock_sp(bp); 8876 bnxt_rtnl_unlock_sp(bp);
8852} 8877}
8853 8878
8879static void bnxt_chk_missed_irq(struct bnxt *bp)
8880{
8881 int i;
8882
8883 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8884 return;
8885
8886 for (i = 0; i < bp->cp_nr_rings; i++) {
8887 struct bnxt_napi *bnapi = bp->bnapi[i];
8888 struct bnxt_cp_ring_info *cpr;
8889 u32 fw_ring_id;
8890 int j;
8891
8892 if (!bnapi)
8893 continue;
8894
8895 cpr = &bnapi->cp_ring;
8896 for (j = 0; j < 2; j++) {
8897 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
8898 u32 val[2];
8899
8900 if (!cpr2 || cpr2->has_more_work ||
8901 !bnxt_has_work(bp, cpr2))
8902 continue;
8903
8904 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
8905 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
8906 continue;
8907 }
8908 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
8909 bnxt_dbg_hwrm_ring_info_get(bp,
8910 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
8911 fw_ring_id, &val[0], &val[1]);
8912 cpr->missed_irqs++;
8913 }
8914 }
8915}
8916
8854static void bnxt_cfg_ntp_filters(struct bnxt *); 8917static void bnxt_cfg_ntp_filters(struct bnxt *);
8855 8918
8856static void bnxt_sp_task(struct work_struct *work) 8919static void bnxt_sp_task(struct work_struct *work)
@@ -8930,6 +8993,9 @@ static void bnxt_sp_task(struct work_struct *work)
8930 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 8993 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
8931 bnxt_tc_flow_stats_work(bp); 8994 bnxt_tc_flow_stats_work(bp);
8932 8995
8996 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
8997 bnxt_chk_missed_irq(bp);
8998
8933 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 8999 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
8934 * must be the last functions to be called before exiting. 9000 * must be the last functions to be called before exiting.
8935 */ 9001 */
@@ -10087,6 +10153,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10087 } 10153 }
10088 10154
10089 bnxt_hwrm_func_qcfg(bp); 10155 bnxt_hwrm_func_qcfg(bp);
10156 bnxt_hwrm_vnic_qcaps(bp);
10090 bnxt_hwrm_port_led_qcaps(bp); 10157 bnxt_hwrm_port_led_qcaps(bp);
10091 bnxt_ethtool_init(bp); 10158 bnxt_ethtool_init(bp);
10092 bnxt_dcb_init(bp); 10159 bnxt_dcb_init(bp);
@@ -10120,7 +10187,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10120 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 10187 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10121 } 10188 }
10122 10189
10123 bnxt_hwrm_vnic_qcaps(bp);
10124 if (bnxt_rfs_supported(bp)) { 10190 if (bnxt_rfs_supported(bp)) {
10125 dev->hw_features |= NETIF_F_NTUPLE; 10191 dev->hw_features |= NETIF_F_NTUPLE;
10126 if (bnxt_rfs_capable(bp)) { 10192 if (bnxt_rfs_capable(bp)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 498b373c992d..9e99d4ab3e06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -798,6 +798,8 @@ struct bnxt_cp_ring_info {
798 u8 had_work_done:1; 798 u8 had_work_done:1;
799 u8 has_more_work:1; 799 u8 has_more_work:1;
800 800
801 u32 last_cp_raw_cons;
802
801 struct bnxt_coal rx_ring_coal; 803 struct bnxt_coal rx_ring_coal;
802 u64 rx_packets; 804 u64 rx_packets;
803 u64 rx_bytes; 805 u64 rx_bytes;
@@ -816,6 +818,7 @@ struct bnxt_cp_ring_info {
816 dma_addr_t hw_stats_map; 818 dma_addr_t hw_stats_map;
817 u32 hw_stats_ctx_id; 819 u32 hw_stats_ctx_id;
818 u64 rx_l4_csum_errors; 820 u64 rx_l4_csum_errors;
821 u64 missed_irqs;
819 822
820 struct bnxt_ring_struct cp_ring_struct; 823 struct bnxt_ring_struct cp_ring_struct;
821 824
@@ -1527,6 +1530,7 @@ struct bnxt {
1527#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 1530#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
1528#define BNXT_FLOW_STATS_SP_EVENT 15 1531#define BNXT_FLOW_STATS_SP_EVENT 15
1529#define BNXT_UPDATE_PHY_SP_EVENT 16 1532#define BNXT_UPDATE_PHY_SP_EVENT 16
1533#define BNXT_RING_COAL_NOW_SP_EVENT 17
1530 1534
1531 struct bnxt_hw_resc hw_resc; 1535 struct bnxt_hw_resc hw_resc;
1532 struct bnxt_pf_info pf; 1536 struct bnxt_pf_info pf;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 48078564f025..6cc69a58478a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,7 @@ reset_coalesce:
137 return rc; 137 return rc;
138} 138}
139 139
140#define BNXT_NUM_STATS 21 140#define BNXT_NUM_STATS 22
141 141
142#define BNXT_RX_STATS_ENTRY(counter) \ 142#define BNXT_RX_STATS_ENTRY(counter) \
143 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } 143 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -384,6 +384,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
384 for (k = 0; k < stat_fields; j++, k++) 384 for (k = 0; k < stat_fields; j++, k++)
385 buf[j] = le64_to_cpu(hw_stats[k]); 385 buf[j] = le64_to_cpu(hw_stats[k]);
386 buf[j++] = cpr->rx_l4_csum_errors; 386 buf[j++] = cpr->rx_l4_csum_errors;
387 buf[j++] = cpr->missed_irqs;
387 388
388 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += 389 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
389 le64_to_cpu(cpr->hw_stats->rx_discard_pkts); 390 le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -468,6 +469,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
468 buf += ETH_GSTRING_LEN; 469 buf += ETH_GSTRING_LEN;
469 sprintf(buf, "[%d]: rx_l4_csum_errors", i); 470 sprintf(buf, "[%d]: rx_l4_csum_errors", i);
470 buf += ETH_GSTRING_LEN; 471 buf += ETH_GSTRING_LEN;
472 sprintf(buf, "[%d]: missed_irqs", i);
473 buf += ETH_GSTRING_LEN;
471 } 474 }
472 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { 475 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
473 strcpy(buf, bnxt_sw_func_stats[i].string); 476 strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -2942,8 +2945,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
2942 record->asic_state = 0; 2945 record->asic_state = 0;
2943 strlcpy(record->system_name, utsname()->nodename, 2946 strlcpy(record->system_name, utsname()->nodename,
2944 sizeof(record->system_name)); 2947 sizeof(record->system_name));
2945 record->year = cpu_to_le16(tm.tm_year); 2948 record->year = cpu_to_le16(tm.tm_year + 1900);
2946 record->month = cpu_to_le16(tm.tm_mon); 2949 record->month = cpu_to_le16(tm.tm_mon + 1);
2947 record->day = cpu_to_le16(tm.tm_mday); 2950 record->day = cpu_to_le16(tm.tm_mday);
2948 record->hour = cpu_to_le16(tm.tm_hour); 2951 record->hour = cpu_to_le16(tm.tm_hour);
2949 record->minute = cpu_to_le16(tm.tm_min); 2952 record->minute = cpu_to_le16(tm.tm_min);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index beee61292d5e..b59b382d34f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -43,6 +43,9 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
43 if (ulp_id == BNXT_ROCE_ULP) { 43 if (ulp_id == BNXT_ROCE_ULP) {
44 unsigned int max_stat_ctxs; 44 unsigned int max_stat_ctxs;
45 45
46 if (bp->flags & BNXT_FLAG_CHIP_P5)
47 return -EOPNOTSUPP;
48
46 max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); 49 max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
47 if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS || 50 if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
48 bp->num_stat_ctxs == max_stat_ctxs) 51 bp->num_stat_ctxs == max_stat_ctxs)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 20c1681bb1af..2d6f090bf644 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
2855 2855
2856 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); 2856 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
2857 2857
2858 netif_tx_start_all_queues(dev);
2859 bcmgenet_enable_tx_napi(priv); 2858 bcmgenet_enable_tx_napi(priv);
2860 2859
2861 /* Monitor link interrupts now */ 2860 /* Monitor link interrupts now */
@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev)
2937 2936
2938 bcmgenet_netif_start(dev); 2937 bcmgenet_netif_start(dev);
2939 2938
2939 netif_tx_start_all_queues(dev);
2940
2940 return 0; 2941 return 0;
2941 2942
2942err_irq1: 2943err_irq1:
@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
2958 struct bcmgenet_priv *priv = netdev_priv(dev); 2959 struct bcmgenet_priv *priv = netdev_priv(dev);
2959 2960
2960 bcmgenet_disable_tx_napi(priv); 2961 bcmgenet_disable_tx_napi(priv);
2961 netif_tx_stop_all_queues(dev); 2962 netif_tx_disable(dev);
2962 2963
2963 /* Disable MAC receive */ 2964 /* Disable MAC receive */
2964 umac_enable_set(priv, CMD_RX_EN, false); 2965 umac_enable_set(priv, CMD_RX_EN, false);
@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d)
3620 if (!netif_running(dev)) 3621 if (!netif_running(dev))
3621 return 0; 3622 return 0;
3622 3623
3624 netif_device_detach(dev);
3625
3623 bcmgenet_netif_stop(dev); 3626 bcmgenet_netif_stop(dev);
3624 3627
3625 if (!device_may_wakeup(d)) 3628 if (!device_may_wakeup(d))
3626 phy_suspend(dev->phydev); 3629 phy_suspend(dev->phydev);
3627 3630
3628 netif_device_detach(dev);
3629
3630 /* Prepare the device for Wake-on-LAN and switch to the slow clock */ 3631 /* Prepare the device for Wake-on-LAN and switch to the slow clock */
3631 if (device_may_wakeup(d) && priv->wolopts) { 3632 if (device_may_wakeup(d) && priv->wolopts) {
3632 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); 3633 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d)
3700 /* Always enable ring 16 - descriptor ring */ 3701 /* Always enable ring 16 - descriptor ring */
3701 bcmgenet_enable_dma(priv, dma_ctrl); 3702 bcmgenet_enable_dma(priv, dma_ctrl);
3702 3703
3703 netif_device_attach(dev);
3704
3705 if (!device_may_wakeup(d)) 3704 if (!device_may_wakeup(d))
3706 phy_resume(dev->phydev); 3705 phy_resume(dev->phydev);
3707 3706
@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d)
3710 3709
3711 bcmgenet_netif_start(dev); 3710 bcmgenet_netif_start(dev);
3712 3711
3712 netif_device_attach(dev);
3713
3713 return 0; 3714 return 0;
3714 3715
3715out_clk_disable: 3716out_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 89295306f161..432c3b867084 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12422,6 +12422,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
12422{ 12422{
12423 struct tg3 *tp = netdev_priv(dev); 12423 struct tg3 *tp = netdev_priv(dev);
12424 int i, irq_sync = 0, err = 0; 12424 int i, irq_sync = 0, err = 0;
12425 bool reset_phy = false;
12425 12426
12426 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12427 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12427 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12428 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@@ -12453,7 +12454,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
12453 12454
12454 if (netif_running(dev)) { 12455 if (netif_running(dev)) {
12455 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12456 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12456 err = tg3_restart_hw(tp, false); 12457 /* Reset PHY to avoid PHY lock up */
12458 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12459 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12460 tg3_asic_rev(tp) == ASIC_REV_5720)
12461 reset_phy = true;
12462
12463 err = tg3_restart_hw(tp, reset_phy);
12457 if (!err) 12464 if (!err)
12458 tg3_netif_start(tp); 12465 tg3_netif_start(tp);
12459 } 12466 }
@@ -12487,6 +12494,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
12487{ 12494{
12488 struct tg3 *tp = netdev_priv(dev); 12495 struct tg3 *tp = netdev_priv(dev);
12489 int err = 0; 12496 int err = 0;
12497 bool reset_phy = false;
12490 12498
12491 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12499 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12492 tg3_warn_mgmt_link_flap(tp); 12500 tg3_warn_mgmt_link_flap(tp);
@@ -12556,7 +12564,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
12556 12564
12557 if (netif_running(dev)) { 12565 if (netif_running(dev)) {
12558 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12566 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12559 err = tg3_restart_hw(tp, false); 12567 /* Reset PHY to avoid PHY lock up */
12568 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12569 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12570 tg3_asic_rev(tp) == ASIC_REV_5720)
12571 reset_phy = true;
12572
12573 err = tg3_restart_hw(tp, reset_phy);
12560 if (!err) 12574 if (!err)
12561 tg3_netif_start(tp); 12575 tg3_netif_start(tp);
12562 } 12576 }
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 55af04fa03a7..6c8dcb65ff03 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev)
1441{ 1441{
1442 struct nicpf *nic = pci_get_drvdata(pdev); 1442 struct nicpf *nic = pci_get_drvdata(pdev);
1443 1443
1444 if (!nic)
1445 return;
1446
1444 if (nic->flags & NIC_SRIOV_ENABLED) 1447 if (nic->flags & NIC_SRIOV_ENABLED)
1445 pci_disable_sriov(pdev); 1448 pci_disable_sriov(pdev);
1446 1449
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 768f584f8392..88f8a8fa93cd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1784 bool if_up = netif_running(nic->netdev); 1784 bool if_up = netif_running(nic->netdev);
1785 struct bpf_prog *old_prog; 1785 struct bpf_prog *old_prog;
1786 bool bpf_attached = false; 1786 bool bpf_attached = false;
1787 int ret = 0;
1787 1788
1788 /* For now just support only the usual MTU sized frames */ 1789 /* For now just support only the usual MTU sized frames */
1789 if (prog && (dev->mtu > 1500)) { 1790 if (prog && (dev->mtu > 1500)) {
@@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1817 if (nic->xdp_prog) { 1818 if (nic->xdp_prog) {
1818 /* Attach BPF program */ 1819 /* Attach BPF program */
1819 nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); 1820 nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
1820 if (!IS_ERR(nic->xdp_prog)) 1821 if (!IS_ERR(nic->xdp_prog)) {
1821 bpf_attached = true; 1822 bpf_attached = true;
1823 } else {
1824 ret = PTR_ERR(nic->xdp_prog);
1825 nic->xdp_prog = NULL;
1826 }
1822 } 1827 }
1823 1828
1824 /* Calculate Tx queues needed for XDP and network stack */ 1829 /* Calculate Tx queues needed for XDP and network stack */
@@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1830 netif_trans_update(nic->netdev); 1835 netif_trans_update(nic->netdev);
1831 } 1836 }
1832 1837
1833 return 0; 1838 return ret;
1834} 1839}
1835 1840
1836static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) 1841static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 187a249ff2d1..fcaf18fa3904 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
585 if (!sq->dmem.base) 585 if (!sq->dmem.base)
586 return; 586 return;
587 587
588 if (sq->tso_hdrs) 588 if (sq->tso_hdrs) {
589 dma_free_coherent(&nic->pdev->dev, 589 dma_free_coherent(&nic->pdev->dev,
590 sq->dmem.q_len * TSO_HEADER_SIZE, 590 sq->dmem.q_len * TSO_HEADER_SIZE,
591 sq->tso_hdrs, sq->tso_hdrs_phys); 591 sq->tso_hdrs, sq->tso_hdrs_phys);
592 sq->tso_hdrs = NULL;
593 }
592 594
593 /* Free pending skbs in the queue */ 595 /* Free pending skbs in the queue */
594 smp_rmb(); 596 smp_rmb();
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 75c1c5ed2387..e2cdfa75673f 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,7 +67,6 @@ config CHELSIO_T3
67config CHELSIO_T4 67config CHELSIO_T4
68 tristate "Chelsio Communications T4/T5/T6 Ethernet support" 68 tristate "Chelsio Communications T4/T5/T6 Ethernet support"
69 depends on PCI && (IPV6 || IPV6=n) 69 depends on PCI && (IPV6 || IPV6=n)
70 depends on THERMAL || !THERMAL
71 select FW_LOADER 70 select FW_LOADER
72 select MDIO 71 select MDIO
73 select ZLIB_DEFLATE 72 select ZLIB_DEFLATE
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 78e5d17a1d5f..91d8a885deba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -12,6 +12,4 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
12cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o 12cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
13cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o 13cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
14cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o 14cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
15ifdef CONFIG_THERMAL 15cxgb4-$(CONFIG_THERMAL) += cxgb4_thermal.o
16cxgb4-objs += cxgb4_thermal.o
17endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05a46926016a..d49db46254cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5863,7 +5863,7 @@ fw_attach_fail:
5863 if (!is_t4(adapter->params.chip)) 5863 if (!is_t4(adapter->params.chip))
5864 cxgb4_ptp_init(adapter); 5864 cxgb4_ptp_init(adapter);
5865 5865
5866 if (IS_ENABLED(CONFIG_THERMAL) && 5866 if (IS_REACHABLE(CONFIG_THERMAL) &&
5867 !is_t4(adapter->params.chip) && (adapter->flags & FW_OK)) 5867 !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
5868 cxgb4_thermal_init(adapter); 5868 cxgb4_thermal_init(adapter);
5869 5869
@@ -5932,7 +5932,7 @@ static void remove_one(struct pci_dev *pdev)
5932 5932
5933 if (!is_t4(adapter->params.chip)) 5933 if (!is_t4(adapter->params.chip))
5934 cxgb4_ptp_stop(adapter); 5934 cxgb4_ptp_stop(adapter);
5935 if (IS_ENABLED(CONFIG_THERMAL)) 5935 if (IS_REACHABLE(CONFIG_THERMAL))
5936 cxgb4_thermal_remove(adapter); 5936 cxgb4_thermal_remove(adapter);
5937 5937
5938 /* If we allocated filters, free up state associated with any 5938 /* If we allocated filters, free up state associated with any
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index ceec467f590d..949103db8a8a 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
660 660
661 u64_stats_update_begin(&port->tx_stats_syncp); 661 u64_stats_update_begin(&port->tx_stats_syncp);
662 port->tx_frag_stats[nfrags]++; 662 port->tx_frag_stats[nfrags]++;
663 u64_stats_update_end(&port->ir_stats_syncp); 663 u64_stats_update_end(&port->tx_stats_syncp);
664 } 664 }
665 } 665 }
666 666
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 570caeb8ee9e..084f24daf2b5 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
872 struct net_device *netdev = dev_id; 872 struct net_device *netdev = dev_id;
873 struct ftmac100 *priv = netdev_priv(netdev); 873 struct ftmac100 *priv = netdev_priv(netdev);
874 874
875 if (likely(netif_running(netdev))) { 875 /* Disable interrupts for polling */
876 /* Disable interrupts for polling */ 876 ftmac100_disable_all_int(priv);
877 ftmac100_disable_all_int(priv); 877 if (likely(netif_running(netdev)))
878 napi_schedule(&priv->napi); 878 napi_schedule(&priv->napi);
879 }
880 879
881 return IRQ_HANDLED; 880 return IRQ_HANDLED;
882} 881}
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index be268dcde8fa..f9a4e76c5a8b 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -915,10 +915,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
915 } 915 }
916 916
917 ret = register_netdev(ndev); 917 ret = register_netdev(ndev);
918 if (ret) { 918 if (ret)
919 free_netdev(ndev);
920 goto alloc_fail; 919 goto alloc_fail;
921 }
922 920
923 return 0; 921 return 0;
924 922
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3f96aa30068e..20fcf0d1c2ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3760 /* Hardware table is only clear when pf resets */ 3760 /* Hardware table is only clear when pf resets */
3761 if (!(handle->flags & HNAE3_SUPPORT_VF)) { 3761 if (!(handle->flags & HNAE3_SUPPORT_VF)) {
3762 ret = hns3_restore_vlan(netdev); 3762 ret = hns3_restore_vlan(netdev);
3763 return ret; 3763 if (ret)
3764 return ret;
3764 } 3765 }
3765 3766
3766 ret = hns3_restore_fd_rules(netdev); 3767 ret = hns3_restore_fd_rules(netdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index aa5cb9834d73..494e562fe8c7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1168,14 +1168,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1168 */ 1168 */
1169static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) 1169static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1170{ 1170{
1171 struct hclge_vport *vport = hdev->vport; 1171 int i;
1172 u32 i, k, qs_bitmap;
1173 int ret;
1174 1172
1175 for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { 1173 for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
1176 qs_bitmap = 0; 1174 u32 qs_bitmap = 0;
1175 int k, ret;
1177 1176
1178 for (k = 0; k < hdev->num_alloc_vport; k++) { 1177 for (k = 0; k < hdev->num_alloc_vport; k++) {
1178 struct hclge_vport *vport = &hdev->vport[k];
1179 u16 qs_id = vport->qs_offset + tc; 1179 u16 qs_id = vport->qs_offset + tc;
1180 u8 grp, sub_grp; 1180 u8 grp, sub_grp;
1181 1181
@@ -1185,8 +1185,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1185 HCLGE_BP_SUB_GRP_ID_S); 1185 HCLGE_BP_SUB_GRP_ID_S);
1186 if (i == grp) 1186 if (i == grp)
1187 qs_bitmap |= (1 << sub_grp); 1187 qs_bitmap |= (1 << sub_grp);
1188
1189 vport++;
1190 } 1188 }
1191 1189
1192 ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); 1190 ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7893beffcc71..c0203a0d5e3b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
485 485
486 for (j = 0; j < rx_pool->size; j++) { 486 for (j = 0; j < rx_pool->size; j++) {
487 if (rx_pool->rx_buff[j].skb) { 487 if (rx_pool->rx_buff[j].skb) {
488 dev_kfree_skb_any(rx_pool->rx_buff[i].skb); 488 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
489 rx_pool->rx_buff[i].skb = NULL; 489 rx_pool->rx_buff[j].skb = NULL;
490 } 490 }
491 } 491 }
492 492
@@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
1103 return 0; 1103 return 0;
1104 } 1104 }
1105 1105
1106 mutex_lock(&adapter->reset_lock);
1107
1108 if (adapter->state != VNIC_CLOSED) { 1106 if (adapter->state != VNIC_CLOSED) {
1109 rc = ibmvnic_login(netdev); 1107 rc = ibmvnic_login(netdev);
1110 if (rc) { 1108 if (rc)
1111 mutex_unlock(&adapter->reset_lock);
1112 return rc; 1109 return rc;
1113 }
1114 1110
1115 rc = init_resources(adapter); 1111 rc = init_resources(adapter);
1116 if (rc) { 1112 if (rc) {
1117 netdev_err(netdev, "failed to initialize resources\n"); 1113 netdev_err(netdev, "failed to initialize resources\n");
1118 release_resources(adapter); 1114 release_resources(adapter);
1119 mutex_unlock(&adapter->reset_lock);
1120 return rc; 1115 return rc;
1121 } 1116 }
1122 } 1117 }
@@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
1124 rc = __ibmvnic_open(netdev); 1119 rc = __ibmvnic_open(netdev);
1125 netif_carrier_on(netdev); 1120 netif_carrier_on(netdev);
1126 1121
1127 mutex_unlock(&adapter->reset_lock);
1128
1129 return rc; 1122 return rc;
1130} 1123}
1131 1124
@@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
1269 return 0; 1262 return 0;
1270 } 1263 }
1271 1264
1272 mutex_lock(&adapter->reset_lock);
1273 rc = __ibmvnic_close(netdev); 1265 rc = __ibmvnic_close(netdev);
1274 ibmvnic_cleanup(netdev); 1266 ibmvnic_cleanup(netdev);
1275 mutex_unlock(&adapter->reset_lock);
1276 1267
1277 return rc; 1268 return rc;
1278} 1269}
@@ -1545,7 +1536,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1545 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 1536 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1546 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 1537 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1547 1538
1548 if (adapter->vlan_header_insertion) { 1539 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1549 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 1540 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1550 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 1541 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1551 } 1542 }
@@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1746 struct ibmvnic_rwi *rwi, u32 reset_state) 1737 struct ibmvnic_rwi *rwi, u32 reset_state)
1747{ 1738{
1748 u64 old_num_rx_queues, old_num_tx_queues; 1739 u64 old_num_rx_queues, old_num_tx_queues;
1740 u64 old_num_rx_slots, old_num_tx_slots;
1749 struct net_device *netdev = adapter->netdev; 1741 struct net_device *netdev = adapter->netdev;
1750 int i, rc; 1742 int i, rc;
1751 1743
@@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1757 1749
1758 old_num_rx_queues = adapter->req_rx_queues; 1750 old_num_rx_queues = adapter->req_rx_queues;
1759 old_num_tx_queues = adapter->req_tx_queues; 1751 old_num_tx_queues = adapter->req_tx_queues;
1752 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1753 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1760 1754
1761 ibmvnic_cleanup(netdev); 1755 ibmvnic_cleanup(netdev);
1762 1756
@@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1819 if (rc) 1813 if (rc)
1820 return rc; 1814 return rc;
1821 } else if (adapter->req_rx_queues != old_num_rx_queues || 1815 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1822 adapter->req_tx_queues != old_num_tx_queues) { 1816 adapter->req_tx_queues != old_num_tx_queues ||
1823 adapter->map_id = 1; 1817 adapter->req_rx_add_entries_per_subcrq !=
1818 old_num_rx_slots ||
1819 adapter->req_tx_entries_per_subcrq !=
1820 old_num_tx_slots) {
1824 release_rx_pools(adapter); 1821 release_rx_pools(adapter);
1825 release_tx_pools(adapter); 1822 release_tx_pools(adapter);
1826 rc = init_rx_pools(netdev);
1827 if (rc)
1828 return rc;
1829 rc = init_tx_pools(netdev);
1830 if (rc)
1831 return rc;
1832
1833 release_napi(adapter); 1823 release_napi(adapter);
1834 rc = init_napi(adapter); 1824 release_vpd_data(adapter);
1825
1826 rc = init_resources(adapter);
1835 if (rc) 1827 if (rc)
1836 return rc; 1828 return rc;
1829
1837 } else { 1830 } else {
1838 rc = reset_tx_pools(adapter); 1831 rc = reset_tx_pools(adapter);
1839 if (rc) 1832 if (rc)
@@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
1917 adapter->state = VNIC_PROBED; 1910 adapter->state = VNIC_PROBED;
1918 return 0; 1911 return 0;
1919 } 1912 }
1920 /* netif_set_real_num_xx_queues needs to take rtnl lock here 1913
1921 * unless wait_for_reset is set, in which case the rtnl lock 1914 rc = init_resources(adapter);
1922 * has already been taken before initializing the reset
1923 */
1924 if (!adapter->wait_for_reset) {
1925 rtnl_lock();
1926 rc = init_resources(adapter);
1927 rtnl_unlock();
1928 } else {
1929 rc = init_resources(adapter);
1930 }
1931 if (rc) 1915 if (rc)
1932 return rc; 1916 return rc;
1933 1917
@@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
1986 struct ibmvnic_rwi *rwi; 1970 struct ibmvnic_rwi *rwi;
1987 struct ibmvnic_adapter *adapter; 1971 struct ibmvnic_adapter *adapter;
1988 struct net_device *netdev; 1972 struct net_device *netdev;
1973 bool we_lock_rtnl = false;
1989 u32 reset_state; 1974 u32 reset_state;
1990 int rc = 0; 1975 int rc = 0;
1991 1976
1992 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 1977 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1993 netdev = adapter->netdev; 1978 netdev = adapter->netdev;
1994 1979
1995 mutex_lock(&adapter->reset_lock); 1980 /* netif_set_real_num_xx_queues needs to take rtnl lock here
1981 * unless wait_for_reset is set, in which case the rtnl lock
1982 * has already been taken before initializing the reset
1983 */
1984 if (!adapter->wait_for_reset) {
1985 rtnl_lock();
1986 we_lock_rtnl = true;
1987 }
1996 reset_state = adapter->state; 1988 reset_state = adapter->state;
1997 1989
1998 rwi = get_next_rwi(adapter); 1990 rwi = get_next_rwi(adapter);
@@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
2020 if (rc) { 2012 if (rc) {
2021 netdev_dbg(adapter->netdev, "Reset failed\n"); 2013 netdev_dbg(adapter->netdev, "Reset failed\n");
2022 free_all_rwi(adapter); 2014 free_all_rwi(adapter);
2023 mutex_unlock(&adapter->reset_lock);
2024 return;
2025 } 2015 }
2026 2016
2027 adapter->resetting = false; 2017 adapter->resetting = false;
2028 mutex_unlock(&adapter->reset_lock); 2018 if (we_lock_rtnl)
2019 rtnl_unlock();
2029} 2020}
2030 2021
2031static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 2022static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4768 4759
4769 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4760 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4770 INIT_LIST_HEAD(&adapter->rwi_list); 4761 INIT_LIST_HEAD(&adapter->rwi_list);
4771 mutex_init(&adapter->reset_lock);
4772 mutex_init(&adapter->rwi_lock); 4762 mutex_init(&adapter->rwi_lock);
4773 adapter->resetting = false; 4763 adapter->resetting = false;
4774 4764
@@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
4840 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4830 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4841 4831
4842 adapter->state = VNIC_REMOVING; 4832 adapter->state = VNIC_REMOVING;
4843 unregister_netdev(netdev); 4833 rtnl_lock();
4844 mutex_lock(&adapter->reset_lock); 4834 unregister_netdevice(netdev);
4845 4835
4846 release_resources(adapter); 4836 release_resources(adapter);
4847 release_sub_crqs(adapter, 1); 4837 release_sub_crqs(adapter, 1);
@@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
4852 4842
4853 adapter->state = VNIC_REMOVED; 4843 adapter->state = VNIC_REMOVED;
4854 4844
4855 mutex_unlock(&adapter->reset_lock); 4845 rtnl_unlock();
4856 device_remove_file(&dev->dev, &dev_attr_failover); 4846 device_remove_file(&dev->dev, &dev_attr_failover);
4857 free_netdev(netdev); 4847 free_netdev(netdev);
4858 dev_set_drvdata(&dev->dev, NULL); 4848 dev_set_drvdata(&dev->dev, NULL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 18103b811d4d..99c4f8d331ce 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
1075 struct tasklet_struct tasklet; 1075 struct tasklet_struct tasklet;
1076 enum vnic_state state; 1076 enum vnic_state state;
1077 enum ibmvnic_reset_reason reset_reason; 1077 enum ibmvnic_reset_reason reset_reason;
1078 struct mutex reset_lock, rwi_lock; 1078 struct mutex rwi_lock;
1079 struct list_head rwi_list; 1079 struct list_head rwi_list;
1080 struct work_struct ibmvnic_reset; 1080 struct work_struct ibmvnic_reset;
1081 bool resetting; 1081 bool resetting;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index bc71a21c1dc2..a3f45335437c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1413,7 +1413,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1413 } 1413 }
1414 1414
1415 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 1415 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1416 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state); 1416 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1417} 1417}
1418 1418
1419/** 1419/**
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
12249 NETIF_F_GSO_GRE | 12249 NETIF_F_GSO_GRE |
12250 NETIF_F_GSO_GRE_CSUM | 12250 NETIF_F_GSO_GRE_CSUM |
12251 NETIF_F_GSO_PARTIAL | 12251 NETIF_F_GSO_PARTIAL |
12252 NETIF_F_GSO_IPXIP4 |
12253 NETIF_F_GSO_IPXIP6 |
12252 NETIF_F_GSO_UDP_TUNNEL | 12254 NETIF_F_GSO_UDP_TUNNEL |
12253 NETIF_F_GSO_UDP_TUNNEL_CSUM | 12255 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12254 NETIF_F_SCTP_CRC | 12256 NETIF_F_SCTP_CRC |
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
12266 /* record features VLANs can make use of */ 12268 /* record features VLANs can make use of */
12267 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 12269 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12268 12270
12269 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12270 netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12271
12272 hw_features = hw_enc_features | 12271 hw_features = hw_enc_features |
12273 NETIF_F_HW_VLAN_CTAG_TX | 12272 NETIF_F_HW_VLAN_CTAG_TX |
12274 NETIF_F_HW_VLAN_CTAG_RX; 12273 NETIF_F_HW_VLAN_CTAG_RX;
12275 12274
12275 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12276 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12277
12276 netdev->hw_features |= hw_features; 12278 netdev->hw_features |= hw_features;
12277 12279
12278 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; 12280 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index add1e457886d..433c8e688c78 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -33,7 +33,7 @@ static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
33} 33}
34 34
35/** 35/**
36 * i40e_add_xsk_umem - Store an UMEM for a certain ring/qid 36 * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
37 * @vsi: Current VSI 37 * @vsi: Current VSI
38 * @umem: UMEM to store 38 * @umem: UMEM to store
39 * @qid: Ring/qid to associate with the UMEM 39 * @qid: Ring/qid to associate with the UMEM
@@ -56,7 +56,7 @@ static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
56} 56}
57 57
58/** 58/**
59 * i40e_remove_xsk_umem - Remove an UMEM for a certain ring/qid 59 * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
60 * @vsi: Current VSI 60 * @vsi: Current VSI
61 * @qid: Ring/qid associated with the UMEM 61 * @qid: Ring/qid associated with the UMEM
62 **/ 62 **/
@@ -130,7 +130,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
130} 130}
131 131
132/** 132/**
133 * i40e_xsk_umem_enable - Enable/associate an UMEM to a certain ring/qid 133 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
134 * @vsi: Current VSI 134 * @vsi: Current VSI
135 * @umem: UMEM 135 * @umem: UMEM
136 * @qid: Rx ring to associate UMEM to 136 * @qid: Rx ring to associate UMEM to
@@ -189,7 +189,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
189} 189}
190 190
191/** 191/**
192 * i40e_xsk_umem_disable - Diassociate an UMEM from a certain ring/qid 192 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
193 * @vsi: Current VSI 193 * @vsi: Current VSI
194 * @qid: Rx ring to associate UMEM to 194 * @qid: Rx ring to associate UMEM to
195 * 195 *
@@ -255,12 +255,12 @@ int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
255} 255}
256 256
257/** 257/**
258 * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM 258 * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
259 * @vsi: Current VSI 259 * @vsi: Current VSI
260 * @umem: UMEM to enable/associate to a ring, or NULL to disable 260 * @umem: UMEM to enable/associate to a ring, or NULL to disable
261 * @qid: Rx ring to (dis)associate UMEM (from)to 261 * @qid: Rx ring to (dis)associate UMEM (from)to
262 * 262 *
263 * This function enables or disables an UMEM to a certain ring. 263 * This function enables or disables a UMEM to a certain ring.
264 * 264 *
265 * Returns 0 on success, <0 on failure 265 * Returns 0 on success, <0 on failure
266 **/ 266 **/
@@ -276,7 +276,7 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
276 * @rx_ring: Rx ring 276 * @rx_ring: Rx ring
277 * @xdp: xdp_buff used as input to the XDP program 277 * @xdp: xdp_buff used as input to the XDP program
278 * 278 *
279 * This function enables or disables an UMEM to a certain ring. 279 * This function enables or disables a UMEM to a certain ring.
280 * 280 *
281 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR} 281 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
282 **/ 282 **/
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4c4b5717a627..b8548370f1c7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
76#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) 76#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
77#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) 77#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
78 78
79#define ICE_MAX_RESET_WAIT 20
80
79#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) 81#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
80 82
81#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 83#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -189,7 +191,6 @@ struct ice_vsi {
189 u64 tx_linearize; 191 u64 tx_linearize;
190 DECLARE_BITMAP(state, __ICE_STATE_NBITS); 192 DECLARE_BITMAP(state, __ICE_STATE_NBITS);
191 DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS); 193 DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
192 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
193 unsigned int current_netdev_flags; 194 unsigned int current_netdev_flags;
194 u32 tx_restart; 195 u32 tx_restart;
195 u32 tx_busy; 196 u32 tx_busy;
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
369int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); 370int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
370void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); 371void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
371void ice_print_link_msg(struct ice_vsi *vsi, bool isup); 372void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
373void ice_napi_del(struct ice_vsi *vsi);
372 374
373#endif /* _ICE_H_ */ 375#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 8cd6a2401fd9..554fd707a6d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
811 /* Attempt to disable FW logging before shutting down control queues */ 811 /* Attempt to disable FW logging before shutting down control queues */
812 ice_cfg_fw_log(hw, false); 812 ice_cfg_fw_log(hw, false);
813 ice_shutdown_all_ctrlq(hw); 813 ice_shutdown_all_ctrlq(hw);
814
815 /* Clear VSI contexts if not already cleared */
816 ice_clear_all_vsi_ctx(hw);
814} 817}
815 818
816/** 819/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 96923580f2a6..648acdb4c644 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1517 } 1517 }
1518 1518
1519 if (!test_bit(__ICE_DOWN, pf->state)) { 1519 if (!test_bit(__ICE_DOWN, pf->state)) {
1520 /* Give it a little more time to try to come back */ 1520 /* Give it a little more time to try to come back. If still
1521 * down, restart autoneg link or reinitialize the interface.
1522 */
1521 msleep(75); 1523 msleep(75);
1522 if (!test_bit(__ICE_DOWN, pf->state)) 1524 if (!test_bit(__ICE_DOWN, pf->state))
1523 return ice_nway_reset(netdev); 1525 return ice_nway_reset(netdev);
1526
1527 ice_down(vsi);
1528 ice_up(vsi);
1524 } 1529 }
1525 1530
1526 return err; 1531 return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 5fdea6ec7675..596b9fb1c510 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -242,6 +242,8 @@
242#define GLNVM_ULD 0x000B6008 242#define GLNVM_ULD 0x000B6008
243#define GLNVM_ULD_CORER_DONE_M BIT(3) 243#define GLNVM_ULD_CORER_DONE_M BIT(3)
244#define GLNVM_ULD_GLOBR_DONE_M BIT(4) 244#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
245#define GLPCI_CNF2 0x000BE004
246#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
245#define PF_FUNC_RID 0x0009E880 247#define PF_FUNC_RID 0x0009E880
246#define PF_FUNC_RID_FUNC_NUM_S 0 248#define PF_FUNC_RID_FUNC_NUM_S 0
247#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) 249#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5bacad01f0c9..1041fa2a7767 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
1997 status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); 1997 status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
1998 if (status) { 1998 if (status) {
1999 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", 1999 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
2000 ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status, 2000 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
2001 vsi->back->hw.adminq.sq_last_status); 2001 vsi->back->hw.adminq.sq_last_status);
2002 goto err_out; 2002 goto err_out;
2003 } 2003 }
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
2458 * on this wq 2458 * on this wq
2459 */ 2459 */
2460 if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { 2460 if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
2461 ice_napi_del(vsi);
2461 unregister_netdev(vsi->netdev); 2462 unregister_netdev(vsi->netdev);
2462 free_netdev(vsi->netdev); 2463 free_netdev(vsi->netdev);
2463 vsi->netdev = NULL; 2464 vsi->netdev = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 05993451147a..333312a1d595 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1465,7 +1465,7 @@ skip_req_irq:
1465 * ice_napi_del - Remove NAPI handler for the VSI 1465 * ice_napi_del - Remove NAPI handler for the VSI
1466 * @vsi: VSI for which NAPI handler is to be removed 1466 * @vsi: VSI for which NAPI handler is to be removed
1467 */ 1467 */
1468static void ice_napi_del(struct ice_vsi *vsi) 1468void ice_napi_del(struct ice_vsi *vsi)
1469{ 1469{
1470 int v_idx; 1470 int v_idx;
1471 1471
@@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
1622{ 1622{
1623 struct ice_netdev_priv *np = netdev_priv(netdev); 1623 struct ice_netdev_priv *np = netdev_priv(netdev);
1624 struct ice_vsi *vsi = np->vsi; 1624 struct ice_vsi *vsi = np->vsi;
1625 int ret;
1626 1625
1627 if (vid >= VLAN_N_VID) { 1626 if (vid >= VLAN_N_VID) {
1628 netdev_err(netdev, "VLAN id requested %d is out of range %d\n", 1627 netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
1635 1634
1636 /* Enable VLAN pruning when VLAN 0 is added */ 1635 /* Enable VLAN pruning when VLAN 0 is added */
1637 if (unlikely(!vid)) { 1636 if (unlikely(!vid)) {
1638 ret = ice_cfg_vlan_pruning(vsi, true); 1637 int ret = ice_cfg_vlan_pruning(vsi, true);
1638
1639 if (ret) 1639 if (ret)
1640 return ret; 1640 return ret;
1641 } 1641 }
@@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
1644 * needed to continue allowing all untagged packets since VLAN prune 1644 * needed to continue allowing all untagged packets since VLAN prune
1645 * list is applied to all packets by the switch 1645 * list is applied to all packets by the switch
1646 */ 1646 */
1647 ret = ice_vsi_add_vlan(vsi, vid); 1647 return ice_vsi_add_vlan(vsi, vid);
1648
1649 if (!ret)
1650 set_bit(vid, vsi->active_vlans);
1651
1652 return ret;
1653} 1648}
1654 1649
1655/** 1650/**
@@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
1677 if (status) 1672 if (status)
1678 return status; 1673 return status;
1679 1674
1680 clear_bit(vid, vsi->active_vlans);
1681
1682 /* Disable VLAN pruning when VLAN 0 is removed */ 1675 /* Disable VLAN pruning when VLAN 0 is removed */
1683 if (unlikely(!vid)) 1676 if (unlikely(!vid))
1684 status = ice_cfg_vlan_pruning(vsi, false); 1677 status = ice_cfg_vlan_pruning(vsi, false);
@@ -2002,6 +1995,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
2002} 1995}
2003 1996
2004/** 1997/**
1998 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
1999 * @pf: pointer to the PF structure
2000 *
2001 * There is no error returned here because the driver should be able to handle
2002 * 128 Byte cache lines, so we only print a warning in case issues are seen,
2003 * specifically with Tx.
2004 */
2005static void ice_verify_cacheline_size(struct ice_pf *pf)
2006{
2007 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
2008 dev_warn(&pf->pdev->dev,
2009 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
2010 ICE_CACHE_LINE_BYTES);
2011}
2012
2013/**
2005 * ice_probe - Device initialization routine 2014 * ice_probe - Device initialization routine
2006 * @pdev: PCI device information struct 2015 * @pdev: PCI device information struct
2007 * @ent: entry in ice_pci_tbl 2016 * @ent: entry in ice_pci_tbl
@@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev,
2151 /* since everything is good, start the service timer */ 2160 /* since everything is good, start the service timer */
2152 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 2161 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
2153 2162
2163 ice_verify_cacheline_size(pf);
2164
2154 return 0; 2165 return 0;
2155 2166
2156err_alloc_sw_unroll: 2167err_alloc_sw_unroll:
@@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev)
2182 if (!pf) 2193 if (!pf)
2183 return; 2194 return;
2184 2195
2196 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
2197 if (!ice_is_reset_in_progress(pf->state))
2198 break;
2199 msleep(100);
2200 }
2201
2185 set_bit(__ICE_DOWN, pf->state); 2202 set_bit(__ICE_DOWN, pf->state);
2186 ice_service_task_stop(pf); 2203 ice_service_task_stop(pf);
2187 2204
@@ -2510,31 +2527,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
2510} 2527}
2511 2528
2512/** 2529/**
2513 * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
2514 * @vsi: the VSI being brought back up
2515 */
2516static int ice_restore_vlan(struct ice_vsi *vsi)
2517{
2518 int err;
2519 u16 vid;
2520
2521 if (!vsi->netdev)
2522 return -EINVAL;
2523
2524 err = ice_vsi_vlan_setup(vsi);
2525 if (err)
2526 return err;
2527
2528 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
2529 err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
2530 if (err)
2531 break;
2532 }
2533
2534 return err;
2535}
2536
2537/**
2538 * ice_vsi_cfg - Setup the VSI 2530 * ice_vsi_cfg - Setup the VSI
2539 * @vsi: the VSI being configured 2531 * @vsi: the VSI being configured
2540 * 2532 *
@@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
2546 2538
2547 if (vsi->netdev) { 2539 if (vsi->netdev) {
2548 ice_set_rx_mode(vsi->netdev); 2540 ice_set_rx_mode(vsi->netdev);
2549 err = ice_restore_vlan(vsi); 2541
2542 err = ice_vsi_vlan_setup(vsi);
2543
2550 if (err) 2544 if (err)
2551 return err; 2545 return err;
2552 } 2546 }
@@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf)
3296 struct device *dev = &pf->pdev->dev; 3290 struct device *dev = &pf->pdev->dev;
3297 struct ice_hw *hw = &pf->hw; 3291 struct ice_hw *hw = &pf->hw;
3298 enum ice_status ret; 3292 enum ice_status ret;
3299 int err; 3293 int err, i;
3300 3294
3301 if (test_bit(__ICE_DOWN, pf->state)) 3295 if (test_bit(__ICE_DOWN, pf->state))
3302 goto clear_recovery; 3296 goto clear_recovery;
@@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf)
3370 } 3364 }
3371 3365
3372 ice_reset_all_vfs(pf, true); 3366 ice_reset_all_vfs(pf, true);
3367
3368 for (i = 0; i < pf->num_alloc_vsi; i++) {
3369 bool link_up;
3370
3371 if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
3372 continue;
3373 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
3374 if (link_up) {
3375 netif_carrier_on(pf->vsi[i]->netdev);
3376 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
3377 } else {
3378 netif_carrier_off(pf->vsi[i]->netdev);
3379 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
3380 }
3381 }
3382
3373 /* if we get here, reset flow is successful */ 3383 /* if we get here, reset flow is successful */
3374 clear_bit(__ICE_RESET_FAILED, pf->state); 3384 clear_bit(__ICE_RESET_FAILED, pf->state);
3375 return; 3385 return;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 33403f39f1b3..40c9c6558956 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -348,6 +348,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
348} 348}
349 349
350/** 350/**
351 * ice_clear_all_vsi_ctx - clear all the VSI context entries
352 * @hw: pointer to the hw struct
353 */
354void ice_clear_all_vsi_ctx(struct ice_hw *hw)
355{
356 u16 i;
357
358 for (i = 0; i < ICE_MAX_VSI; i++)
359 ice_clear_vsi_ctx(hw, i);
360}
361
362/**
351 * ice_add_vsi - add VSI context to the hardware and VSI handle list 363 * ice_add_vsi - add VSI context to the hardware and VSI handle list
352 * @hw: pointer to the hw struct 364 * @hw: pointer to the hw struct
353 * @vsi_handle: unique VSI handle provided by drivers 365 * @vsi_handle: unique VSI handle provided by drivers
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index b88d96a1ef69..d5ef0bd58bf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
190 struct ice_sq_cd *cd); 190 struct ice_sq_cd *cd);
191bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); 191bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
192struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); 192struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
193void ice_clear_all_vsi_ctx(struct ice_hw *hw);
194/* Switch config */
193enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); 195enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
194 196
195/* Switch/bridge related commands */ 197/* Switch/bridge related commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5dae968d853e..fe5bbabbb41e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1520 1520
1521 /* update gso_segs and bytecount */ 1521 /* update gso_segs and bytecount */
1522 first->gso_segs = skb_shinfo(skb)->gso_segs; 1522 first->gso_segs = skb_shinfo(skb)->gso_segs;
1523 first->bytecount = (first->gso_segs - 1) * off->header_len; 1523 first->bytecount += (first->gso_segs - 1) * off->header_len;
1524 1524
1525 cd_tso_len = skb->len - off->header_len; 1525 cd_tso_len = skb->len - off->header_len;
1526 cd_mss = skb_shinfo(skb)->gso_size; 1526 cd_mss = skb_shinfo(skb)->gso_size;
@@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1556 * magnitude greater than our largest possible GSO size. 1556 * magnitude greater than our largest possible GSO size.
1557 * 1557 *
1558 * This would then be implemented as: 1558 * This would then be implemented as:
1559 * return (((size >> 12) * 85) >> 8) + 1; 1559 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1560 * 1560 *
1561 * Since multiplication and division are commutative, we can reorder 1561 * Since multiplication and division are commutative, we can reorder
1562 * operations into: 1562 * operations into:
1563 * return ((size * 85) >> 20) + 1; 1563 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1564 */ 1564 */
1565static unsigned int ice_txd_use_count(unsigned int size) 1565static unsigned int ice_txd_use_count(unsigned int size)
1566{ 1566{
1567 return ((size * 85) >> 20) + 1; 1567 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1568} 1568}
1569 1569
1570/** 1570/**
@@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
1706 * + 1 desc for context descriptor, 1706 * + 1 desc for context descriptor,
1707 * otherwise try next time 1707 * otherwise try next time
1708 */ 1708 */
1709 if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) { 1709 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
1710 ICE_DESCS_FOR_CTX_DESC)) {
1710 tx_ring->tx_stats.tx_busy++; 1711 tx_ring->tx_stats.tx_busy++;
1711 return NETDEV_TX_BUSY; 1712 return NETDEV_TX_BUSY;
1712 } 1713 }
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 1d0f58bd389b..75d0eaf6c9dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -22,8 +22,21 @@
22#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */ 22#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
23#define ICE_MAX_TXQ_PER_TXQG 128 23#define ICE_MAX_TXQ_PER_TXQG 128
24 24
25/* Tx Descriptors needed, worst case */ 25/* We are assuming that the cache line is always 64 Bytes here for ice.
26#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 26 * In order to make sure that is a correct assumption there is a check in probe
27 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
28 * size is 128 bytes. We do it this way because we do not want to read the
29 * GLPCI_CNF2 register or a variable containing the value on every pass through
30 * the Tx path.
31 */
32#define ICE_CACHE_LINE_BYTES 64
33#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
34 sizeof(struct ice_tx_desc))
35#define ICE_DESCS_FOR_CTX_DESC 1
36#define ICE_DESCS_FOR_SKB_DATA_PTR 1
37/* Tx descriptors needed, worst case */
38#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
39 ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
27#define ICE_DESC_UNUSED(R) \ 40#define ICE_DESC_UNUSED(R) \
28 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ 41 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
29 (R)->next_to_clean - (R)->next_to_use - 1) 42 (R)->next_to_clean - (R)->next_to_use - 1)
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 12f9432abf11..f4dbc81c1988 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -92,12 +92,12 @@ struct ice_link_status {
92 u64 phy_type_low; 92 u64 phy_type_low;
93 u16 max_frame_size; 93 u16 max_frame_size;
94 u16 link_speed; 94 u16 link_speed;
95 u16 req_speeds;
95 u8 lse_ena; /* Link Status Event notification */ 96 u8 lse_ena; /* Link Status Event notification */
96 u8 link_info; 97 u8 link_info;
97 u8 an_info; 98 u8 an_info;
98 u8 ext_info; 99 u8 ext_info;
99 u8 pacing; 100 u8 pacing;
100 u8 req_speeds;
101 /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of 101 /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
102 * ice_aqc_get_phy_caps structure 102 * ice_aqc_get_phy_caps structure
103 */ 103 */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 45f10f8f01dc..e71065f9d391 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
348 struct ice_vsi_ctx ctxt = { 0 }; 348 struct ice_vsi_ctx ctxt = { 0 };
349 enum ice_status status; 349 enum ice_status status;
350 350
351 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED | 351 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
352 ICE_AQ_VSI_PVLAN_INSERT_PVID | 352 ICE_AQ_VSI_PVLAN_INSERT_PVID |
353 ICE_AQ_VSI_VLAN_EMOD_STR; 353 ICE_AQ_VSI_VLAN_EMOD_STR;
354 ctxt.info.pvid = cpu_to_le16(vid); 354 ctxt.info.pvid = cpu_to_le16(vid);
@@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2171 2171
2172 if (!ice_vsi_add_vlan(vsi, vid)) { 2172 if (!ice_vsi_add_vlan(vsi, vid)) {
2173 vf->num_vlan++; 2173 vf->num_vlan++;
2174 set_bit(vid, vsi->active_vlans);
2175 2174
2176 /* Enable VLAN pruning when VLAN 0 is added */ 2175 /* Enable VLAN pruning when VLAN 0 is added */
2177 if (unlikely(!vid)) 2176 if (unlikely(!vid))
@@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2190 */ 2189 */
2191 if (!ice_vsi_kill_vlan(vsi, vid)) { 2190 if (!ice_vsi_kill_vlan(vsi, vid)) {
2192 vf->num_vlan--; 2191 vf->num_vlan--;
2193 clear_bit(vid, vsi->active_vlans);
2194 2192
2195 /* Disable VLAN pruning when removing VLAN 0 */ 2193 /* Disable VLAN pruning when removing VLAN 0 */
2196 if (unlikely(!vid)) 2194 if (unlikely(!vid))
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index c54ebedca6da..c393cb2c0f16 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
842 nvm_word = E1000_INVM_DEFAULT_AL; 842 nvm_word = E1000_INVM_DEFAULT_AL;
843 tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; 843 tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
844 igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); 844 igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
845 phy_word = E1000_PHY_PLL_UNCONF;
845 for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { 846 for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
846 /* check current state directly from internal PHY */ 847 /* check current state directly from internal PHY */
847 igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); 848 igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 29ced6b74d36..2b95dc9c7a6a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -53,13 +53,15 @@
53 * 2^40 * 10^-9 / 60 = 18.3 minutes. 53 * 2^40 * 10^-9 / 60 = 18.3 minutes.
54 * 54 *
55 * SYSTIM is converted to real time using a timecounter. As 55 * SYSTIM is converted to real time using a timecounter. As
56 * timecounter_cyc2time() allows old timestamps, the timecounter 56 * timecounter_cyc2time() allows old timestamps, the timecounter needs
57 * needs to be updated at least once per half of the SYSTIM interval. 57 * to be updated at least once per half of the SYSTIM interval.
58 * Scheduling of delayed work is not very accurate, so we aim for 8 58 * Scheduling of delayed work is not very accurate, and also the NIC
59 * minutes to be sure the actual interval is shorter than 9.16 minutes. 59 * clock can be adjusted to run up to 6% faster and the system clock
60 * up to 10% slower, so we aim for 6 minutes to be sure the actual
61 * interval in the NIC time is shorter than 9.16 minutes.
60 */ 62 */
61 63
62#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8) 64#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6)
63#define IGB_PTP_TX_TIMEOUT (HZ * 15) 65#define IGB_PTP_TX_TIMEOUT (HZ * 15)
64#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) 66#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
65#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) 67#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 10dbaf4f6e80..9c42f741ed5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -2262,7 +2262,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2262 *autoneg = false; 2262 *autoneg = false;
2263 2263
2264 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || 2264 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2265 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { 2265 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
2266 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2267 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2266 *speed = IXGBE_LINK_SPEED_1GB_FULL; 2268 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2267 return 0; 2269 return 0;
2268 } 2270 }
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8c5ba4b81fb7..2d4d10a017e5 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -512,7 +512,8 @@ static int xrx200_probe(struct platform_device *pdev)
512 err = register_netdev(net_dev); 512 err = register_netdev(net_dev);
513 if (err) 513 if (err)
514 goto err_unprepare_clk; 514 goto err_unprepare_clk;
515 return err; 515
516 return 0;
516 517
517err_unprepare_clk: 518err_unprepare_clk:
518 clk_disable_unprepare(priv->clk); 519 clk_disable_unprepare(priv->clk);
@@ -520,7 +521,7 @@ err_unprepare_clk:
520err_uninit_dma: 521err_uninit_dma:
521 xrx200_hw_cleanup(priv); 522 xrx200_hw_cleanup(priv);
522 523
523 return 0; 524 return err;
524} 525}
525 526
526static int xrx200_remove(struct platform_device *pdev) 527static int xrx200_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bfd349bf41a..e5397c8197b9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -494,7 +494,7 @@ struct mvneta_port {
494#if defined(__LITTLE_ENDIAN) 494#if defined(__LITTLE_ENDIAN)
495struct mvneta_tx_desc { 495struct mvneta_tx_desc {
496 u32 command; /* Options used by HW for packet transmitting.*/ 496 u32 command; /* Options used by HW for packet transmitting.*/
497 u16 reserverd1; /* csum_l4 (for future use) */ 497 u16 reserved1; /* csum_l4 (for future use) */
498 u16 data_size; /* Data size of transmitted packet in bytes */ 498 u16 data_size; /* Data size of transmitted packet in bytes */
499 u32 buf_phys_addr; /* Physical addr of transmitted buffer */ 499 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
500 u32 reserved2; /* hw_cmd - (for future use, PMT) */ 500 u32 reserved2; /* hw_cmd - (for future use, PMT) */
@@ -519,7 +519,7 @@ struct mvneta_rx_desc {
519#else 519#else
520struct mvneta_tx_desc { 520struct mvneta_tx_desc {
521 u16 data_size; /* Data size of transmitted packet in bytes */ 521 u16 data_size; /* Data size of transmitted packet in bytes */
522 u16 reserverd1; /* csum_l4 (for future use) */ 522 u16 reserved1; /* csum_l4 (for future use) */
523 u32 command; /* Options used by HW for packet transmitting.*/ 523 u32 command; /* Options used by HW for packet transmitting.*/
524 u32 reserved2; /* hw_cmd - (for future use, PMT) */ 524 u32 reserved2; /* hw_cmd - (for future use, PMT) */
525 u32 buf_phys_addr; /* Physical addr of transmitted buffer */ 525 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
@@ -3343,7 +3343,6 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
3343 if (state->interface != PHY_INTERFACE_MODE_NA && 3343 if (state->interface != PHY_INTERFACE_MODE_NA &&
3344 state->interface != PHY_INTERFACE_MODE_QSGMII && 3344 state->interface != PHY_INTERFACE_MODE_QSGMII &&
3345 state->interface != PHY_INTERFACE_MODE_SGMII && 3345 state->interface != PHY_INTERFACE_MODE_SGMII &&
3346 state->interface != PHY_INTERFACE_MODE_2500BASEX &&
3347 !phy_interface_mode_is_8023z(state->interface) && 3346 !phy_interface_mode_is_8023z(state->interface) &&
3348 !phy_interface_mode_is_rgmii(state->interface)) { 3347 !phy_interface_mode_is_rgmii(state->interface)) {
3349 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 3348 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -3357,14 +3356,9 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
3357 /* Asymmetric pause is unsupported */ 3356 /* Asymmetric pause is unsupported */
3358 phylink_set(mask, Pause); 3357 phylink_set(mask, Pause);
3359 3358
3360 /* We cannot use 1Gbps when using the 2.5G interface. */ 3359 /* Half-duplex at speeds higher than 100Mbit is unsupported */
3361 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { 3360 phylink_set(mask, 1000baseT_Full);
3362 phylink_set(mask, 2500baseT_Full); 3361 phylink_set(mask, 1000baseX_Full);
3363 phylink_set(mask, 2500baseX_Full);
3364 } else {
3365 phylink_set(mask, 1000baseT_Full);
3366 phylink_set(mask, 1000baseX_Full);
3367 }
3368 3362
3369 if (!phy_interface_mode_is_8023z(state->interface)) { 3363 if (!phy_interface_mode_is_8023z(state->interface)) {
3370 /* 10M and 100M are only supported in non-802.3z mode */ 3364 /* 10M and 100M are only supported in non-802.3z mode */
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index deef5a998985..9af34e03892c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
337static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, 337static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
338 int align, u32 skip_mask, u32 *puid) 338 int align, u32 skip_mask, u32 *puid)
339{ 339{
340 u32 uid; 340 u32 uid = 0;
341 u32 res; 341 u32 res;
342 struct mlx4_zone_allocator *zone_alloc = zone->allocator; 342 struct mlx4_zone_allocator *zone_alloc = zone->allocator;
343 struct mlx4_zone_entry *curr_node; 343 struct mlx4_zone_entry *curr_node;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1857ee0f0871..6f5153afcab4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
1006 ring->packets++; 1006 ring->packets++;
1007 } 1007 }
1008 ring->bytes += tx_info->nr_bytes; 1008 ring->bytes += tx_info->nr_bytes;
1009 netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
1010 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); 1009 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
1011 1010
1012 if (tx_info->inl) 1011 if (tx_info->inl)
@@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
1044 netif_tx_stop_queue(ring->tx_queue); 1043 netif_tx_stop_queue(ring->tx_queue);
1045 ring->queue_stopped++; 1044 ring->queue_stopped++;
1046 } 1045 }
1047 send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue); 1046
1047 send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
1048 tx_info->nr_bytes,
1049 skb->xmit_more);
1048 1050
1049 real_size = (real_size / 16) & 0x3f; 1051 real_size = (real_size / 16) & 0x3f;
1050 1052
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ebcd2778eeb3..23f1b5b512c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -540,8 +540,8 @@ struct slave_list {
540struct resource_allocator { 540struct resource_allocator {
541 spinlock_t alloc_lock; /* protect quotas */ 541 spinlock_t alloc_lock; /* protect quotas */
542 union { 542 union {
543 int res_reserved; 543 unsigned int res_reserved;
544 int res_port_rsvd[MLX4_MAX_PORTS]; 544 unsigned int res_port_rsvd[MLX4_MAX_PORTS];
545 }; 545 };
546 union { 546 union {
547 int res_free; 547 int res_free;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 2e84f10f59ba..1a11bc0e1612 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
363 container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, 363 container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
364 buf); 364 buf);
365 365
366 (*mpt_entry)->lkey = 0;
366 err = mlx4_SW2HW_MPT(dev, mailbox, key); 367 err = mlx4_SW2HW_MPT(dev, mailbox, key);
367 } 368 }
368 369
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d7fbd5b6ac95..118324802926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -569,6 +569,7 @@ struct mlx5e_rq {
569 569
570 unsigned long state; 570 unsigned long state;
571 int ix; 571 int ix;
572 unsigned int hw_mtu;
572 573
573 struct net_dim dim; /* Dynamic Interrupt Moderation */ 574 struct net_dim dim; /* Dynamic Interrupt Moderation */
574 575
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 023dc4bccd28..4a37713023be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
88 88
89 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); 89 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
90 *speed = mlx5e_port_ptys2speed(eth_proto_oper); 90 *speed = mlx5e_port_ptys2speed(eth_proto_oper);
91 if (!(*speed)) { 91 if (!(*speed))
92 mlx5_core_warn(mdev, "cannot get port speed\n");
93 err = -EINVAL; 92 err = -EINVAL;
94 }
95 93
96 return err; 94 return err;
97} 95}
@@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
258 case 40000: 256 case 40000:
259 if (!write) 257 if (!write)
260 *fec_policy = MLX5_GET(pplm_reg, pplm, 258 *fec_policy = MLX5_GET(pplm_reg, pplm,
261 fec_override_cap_10g_40g); 259 fec_override_admin_10g_40g);
262 else 260 else
263 MLX5_SET(pplm_reg, pplm, 261 MLX5_SET(pplm_reg, pplm,
264 fec_override_admin_10g_40g, *fec_policy); 262 fec_override_admin_10g_40g, *fec_policy);
@@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
310 case 10000: 308 case 10000:
311 case 40000: 309 case 40000:
312 *fec_cap = MLX5_GET(pplm_reg, pplm, 310 *fec_cap = MLX5_GET(pplm_reg, pplm,
313 fec_override_admin_10g_40g); 311 fec_override_cap_10g_40g);
314 break; 312 break;
315 case 25000: 313 case 25000:
316 *fec_cap = MLX5_GET(pplm_reg, pplm, 314 *fec_cap = MLX5_GET(pplm_reg, pplm,
@@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
394 392
395int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy) 393int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
396{ 394{
395 u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
397 bool fec_mode_not_supp_in_speed = false; 396 bool fec_mode_not_supp_in_speed = false;
398 u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
399 u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; 397 u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
400 u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; 398 u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
401 int sz = MLX5_ST_SZ_BYTES(pplm_reg); 399 int sz = MLX5_ST_SZ_BYTES(pplm_reg);
402 u32 current_fec_speed; 400 u8 fec_policy_auto = 0;
403 u8 fec_caps = 0; 401 u8 fec_caps = 0;
404 int err; 402 int err;
405 int i; 403 int i;
@@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
415 if (err) 413 if (err)
416 return err; 414 return err;
417 415
418 err = mlx5e_port_linkspeed(dev, &current_fec_speed); 416 MLX5_SET(pplm_reg, out, local_port, 1);
419 if (err)
420 return err;
421 417
422 memset(in, 0, sz); 418 for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
423 MLX5_SET(pplm_reg, in, local_port, 1);
424 for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
425 mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]); 419 mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
426 /* policy supported for link speed */ 420 /* policy supported for link speed, or policy is auto */
427 if (!!(fec_caps & fec_policy)) { 421 if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
428 mlx5e_fec_admin_field(in, &fec_policy, 1, 422 mlx5e_fec_admin_field(out, &fec_policy, 1,
429 fec_supported_speeds[i]); 423 fec_supported_speeds[i]);
430 } else { 424 } else {
431 if (fec_supported_speeds[i] == current_fec_speed) 425 /* turn off FEC if supported. Else, leave it the same */
432 return -EOPNOTSUPP; 426 if (fec_caps & fec_policy_nofec)
433 mlx5e_fec_admin_field(in, &no_fec_policy, 1, 427 mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
434 fec_supported_speeds[i]); 428 fec_supported_speeds[i]);
435 fec_mode_not_supp_in_speed = true; 429 fec_mode_not_supp_in_speed = true;
436 } 430 }
437 } 431 }
@@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
441 "FEC policy 0x%x is not supported for some speeds", 435 "FEC policy 0x%x is not supported for some speeds",
442 fec_policy); 436 fec_policy);
443 437
444 return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1); 438 return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
445} 439}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index c047da8752da..eac245a93f91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
130 int err; 130 int err;
131 131
132 err = mlx5e_port_linkspeed(priv->mdev, &speed); 132 err = mlx5e_port_linkspeed(priv->mdev, &speed);
133 if (err) 133 if (err) {
134 mlx5_core_warn(priv->mdev, "cannot get port speed\n");
134 return 0; 135 return 0;
136 }
135 137
136 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; 138 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
137 139
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3e770abfd802..25c1c4f96841 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
843 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, 843 ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
844 Autoneg); 844 Autoneg);
845 845
846 err = get_fec_supported_advertised(mdev, link_ksettings); 846 if (get_fec_supported_advertised(mdev, link_ksettings))
847 if (err)
848 netdev_dbg(netdev, "%s: FEC caps query failed: %d\n", 847 netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
849 __func__, err); 848 __func__, err);
850 849
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1243edbedc9e..871313d6b34d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
502 rq->channel = c; 502 rq->channel = c;
503 rq->ix = c->ix; 503 rq->ix = c->ix;
504 rq->mdev = mdev; 504 rq->mdev = mdev;
505 rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
505 rq->stats = &c->priv->channel_stats[c->ix].rq; 506 rq->stats = &c->priv->channel_stats[c->ix].rq;
506 507
507 rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; 508 rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1623 int err; 1624 int err;
1624 u32 i; 1625 u32 i;
1625 1626
1627 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1628 if (err)
1629 return err;
1630
1626 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, 1631 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1627 &cq->wq_ctrl); 1632 &cq->wq_ctrl);
1628 if (err) 1633 if (err)
1629 return err; 1634 return err;
1630 1635
1631 mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1632
1633 mcq->cqe_sz = 64; 1636 mcq->cqe_sz = 64;
1634 mcq->set_ci_db = cq->wq_ctrl.db.db; 1637 mcq->set_ci_db = cq->wq_ctrl.db.db;
1635 mcq->arm_db = cq->wq_ctrl.db.db + 1; 1638 mcq->arm_db = cq->wq_ctrl.db.db + 1;
@@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1687 int eqn; 1690 int eqn;
1688 int err; 1691 int err;
1689 1692
1693 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1694 if (err)
1695 return err;
1696
1690 inlen = MLX5_ST_SZ_BYTES(create_cq_in) + 1697 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1691 sizeof(u64) * cq->wq_ctrl.buf.npages; 1698 sizeof(u64) * cq->wq_ctrl.buf.npages;
1692 in = kvzalloc(inlen, GFP_KERNEL); 1699 in = kvzalloc(inlen, GFP_KERNEL);
@@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1700 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, 1707 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1701 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); 1708 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1702 1709
1703 mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1704
1705 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); 1710 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
1706 MLX5_SET(cqc, cqc, c_eqn, eqn); 1711 MLX5_SET(cqc, cqc, c_eqn, eqn);
1707 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); 1712 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
@@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1921 int err; 1926 int err;
1922 int eqn; 1927 int eqn;
1923 1928
1929 err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1930 if (err)
1931 return err;
1932
1924 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); 1933 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1925 if (!c) 1934 if (!c)
1926 return -ENOMEM; 1935 return -ENOMEM;
@@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1937 c->xdp = !!params->xdp_prog; 1946 c->xdp = !!params->xdp_prog;
1938 c->stats = &priv->channel_stats[ix].ch; 1947 c->stats = &priv->channel_stats[ix].ch;
1939 1948
1940 mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1941 c->irq_desc = irq_to_desc(irq); 1949 c->irq_desc = irq_to_desc(irq);
1942 1950
1943 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); 1951 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3574 return 0; 3582 return 0;
3575} 3583}
3576 3584
3585#ifdef CONFIG_MLX5_ESWITCH
3577static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) 3586static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3578{ 3587{
3579 struct mlx5e_priv *priv = netdev_priv(netdev); 3588 struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3586 3595
3587 return 0; 3596 return 0;
3588} 3597}
3598#endif
3589 3599
3590static int set_feature_rx_all(struct net_device *netdev, bool enable) 3600static int set_feature_rx_all(struct net_device *netdev, bool enable)
3591{ 3601{
@@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
3684 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); 3694 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3685 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, 3695 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
3686 set_feature_cvlan_filter); 3696 set_feature_cvlan_filter);
3697#ifdef CONFIG_MLX5_ESWITCH
3687 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); 3698 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
3699#endif
3688 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); 3700 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3689 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); 3701 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3690 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); 3702 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
3755 } 3767 }
3756 3768
3757 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { 3769 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
3770 bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
3758 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); 3771 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
3759 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); 3772 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
3760 3773
3761 reset = reset && (ppw_old != ppw_new); 3774 reset = reset && (is_linear || (ppw_old != ppw_new));
3762 } 3775 }
3763 3776
3764 if (!reset) { 3777 if (!reset) {
@@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
4678 FT_CAP(modify_root) && 4691 FT_CAP(modify_root) &&
4679 FT_CAP(identified_miss_table_mode) && 4692 FT_CAP(identified_miss_table_mode) &&
4680 FT_CAP(flow_table_modify)) { 4693 FT_CAP(flow_table_modify)) {
4694#ifdef CONFIG_MLX5_ESWITCH
4681 netdev->hw_features |= NETIF_F_HW_TC; 4695 netdev->hw_features |= NETIF_F_HW_TC;
4696#endif
4682#ifdef CONFIG_MLX5_EN_ARFS 4697#ifdef CONFIG_MLX5_EN_ARFS
4683 netdev->hw_features |= NETIF_F_NTUPLE; 4698 netdev->hw_features |= NETIF_F_NTUPLE;
4684#endif 4699#endif
@@ -5004,11 +5019,21 @@ err_free_netdev:
5004int mlx5e_attach_netdev(struct mlx5e_priv *priv) 5019int mlx5e_attach_netdev(struct mlx5e_priv *priv)
5005{ 5020{
5006 const struct mlx5e_profile *profile; 5021 const struct mlx5e_profile *profile;
5022 int max_nch;
5007 int err; 5023 int err;
5008 5024
5009 profile = priv->profile; 5025 profile = priv->profile;
5010 clear_bit(MLX5E_STATE_DESTROYING, &priv->state); 5026 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
5011 5027
5028 /* max number of channels may have changed */
5029 max_nch = mlx5e_get_max_num_channels(priv->mdev);
5030 if (priv->channels.params.num_channels > max_nch) {
5031 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
5032 priv->channels.params.num_channels = max_nch;
5033 mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
5034 MLX5E_INDIR_RQT_SIZE, max_nch);
5035 }
5036
5012 err = profile->init_tx(priv); 5037 err = profile->init_tx(priv);
5013 if (err) 5038 if (err)
5014 goto out; 5039 goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 79638dcbae78..16985ca3248d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1104 u32 frag_size; 1104 u32 frag_size;
1105 bool consumed; 1105 bool consumed;
1106 1106
1107 /* Check packet size. Note LRO doesn't use linear SKB */
1108 if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1109 rq->stats->oversize_pkts_sw_drop++;
1110 return NULL;
1111 }
1112
1107 va = page_address(di->page) + head_offset; 1113 va = page_address(di->page) + head_offset;
1108 data = va + rx_headroom; 1114 data = va + rx_headroom;
1109 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); 1115 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 35ded91203f5..4382ef85488c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
98 return 1; 98 return 1;
99} 99}
100 100
101#ifdef CONFIG_INET
102/* loopback test */
103#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
104static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
105#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
106
107struct mlx5ehdr { 101struct mlx5ehdr {
108 __be32 version; 102 __be32 version;
109 __be64 magic; 103 __be64 magic;
110 char text[ETH_GSTRING_LEN];
111}; 104};
112 105
106#ifdef CONFIG_INET
107/* loopback test */
108#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
109 sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
110#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
111
113static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) 112static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
114{ 113{
115 struct sk_buff *skb = NULL; 114 struct sk_buff *skb = NULL;
@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
117 struct ethhdr *ethh; 116 struct ethhdr *ethh;
118 struct udphdr *udph; 117 struct udphdr *udph;
119 struct iphdr *iph; 118 struct iphdr *iph;
120 int datalen, iplen; 119 int iplen;
121
122 datalen = MLX5E_TEST_PKT_SIZE -
123 (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
124 120
125 skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE); 121 skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
126 if (!skb) { 122 if (!skb) {
@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
149 /* Fill UDP header */ 145 /* Fill UDP header */
150 udph->source = htons(9); 146 udph->source = htons(9);
151 udph->dest = htons(9); /* Discard Protocol */ 147 udph->dest = htons(9); /* Discard Protocol */
152 udph->len = htons(datalen + sizeof(struct udphdr)); 148 udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
153 udph->check = 0; 149 udph->check = 0;
154 150
155 /* Fill IP header */ 151 /* Fill IP header */
@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
157 iph->ttl = 32; 153 iph->ttl = 32;
158 iph->version = 4; 154 iph->version = 4;
159 iph->protocol = IPPROTO_UDP; 155 iph->protocol = IPPROTO_UDP;
160 iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen; 156 iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
157 sizeof(struct mlx5ehdr);
161 iph->tot_len = htons(iplen); 158 iph->tot_len = htons(iplen);
162 iph->frag_off = 0; 159 iph->frag_off = 0;
163 iph->saddr = 0; 160 iph->saddr = 0;
@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
170 mlxh = skb_put(skb, sizeof(*mlxh)); 167 mlxh = skb_put(skb, sizeof(*mlxh));
171 mlxh->version = 0; 168 mlxh->version = 0;
172 mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC); 169 mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
173 strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
174 datalen -= sizeof(*mlxh);
175 skb_put_zero(skb, datalen);
176 170
177 skb->csum = 0; 171 skb->csum = 0;
178 skb->ip_summed = CHECKSUM_PARTIAL; 172 skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1e55b9c27ffc..3e99d0728b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
83 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, 83 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
84 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, 84 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
85 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, 85 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
86 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
86 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, 87 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
87 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, 88 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
88 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, 89 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
161 s->rx_wqe_err += rq_stats->wqe_err; 162 s->rx_wqe_err += rq_stats->wqe_err;
162 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; 163 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
163 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; 164 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
165 s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
164 s->rx_buff_alloc_err += rq_stats->buff_alloc_err; 166 s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
165 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; 167 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
166 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; 168 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
1189 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, 1191 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
1190 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, 1192 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1191 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, 1193 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1194 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1192 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, 1195 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1193 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, 1196 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1194 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, 1197 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 77f74ce11280..3f8e870ef4c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
96 u64 rx_wqe_err; 96 u64 rx_wqe_err;
97 u64 rx_mpwqe_filler_cqes; 97 u64 rx_mpwqe_filler_cqes;
98 u64 rx_mpwqe_filler_strides; 98 u64 rx_mpwqe_filler_strides;
99 u64 rx_oversize_pkts_sw_drop;
99 u64 rx_buff_alloc_err; 100 u64 rx_buff_alloc_err;
100 u64 rx_cqe_compress_blks; 101 u64 rx_cqe_compress_blks;
101 u64 rx_cqe_compress_pkts; 102 u64 rx_cqe_compress_pkts;
@@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
193 u64 wqe_err; 194 u64 wqe_err;
194 u64 mpwqe_filler_cqes; 195 u64 mpwqe_filler_cqes;
195 u64 mpwqe_filler_strides; 196 u64 mpwqe_filler_strides;
197 u64 oversize_pkts_sw_drop;
196 u64 buff_alloc_err; 198 u64 buff_alloc_err;
197 u64 cqe_compress_blks; 199 u64 cqe_compress_blks;
198 u64 cqe_compress_pkts; 200 u64 cqe_compress_pkts;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 608025ca5c04..fca6f4132c91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1447 inner_headers); 1447 inner_headers);
1448 } 1448 }
1449 1449
1450 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 1450 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1451 struct flow_dissector_key_eth_addrs *key = 1451 struct flow_dissector_key_basic *key =
1452 skb_flow_dissector_target(f->dissector, 1452 skb_flow_dissector_target(f->dissector,
1453 FLOW_DISSECTOR_KEY_ETH_ADDRS, 1453 FLOW_DISSECTOR_KEY_BASIC,
1454 f->key); 1454 f->key);
1455 struct flow_dissector_key_eth_addrs *mask = 1455 struct flow_dissector_key_basic *mask =
1456 skb_flow_dissector_target(f->dissector, 1456 skb_flow_dissector_target(f->dissector,
1457 FLOW_DISSECTOR_KEY_ETH_ADDRS, 1457 FLOW_DISSECTOR_KEY_BASIC,
1458 f->mask); 1458 f->mask);
1459 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1460 ntohs(mask->n_proto));
1461 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1462 ntohs(key->n_proto));
1459 1463
1460 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1464 if (mask->n_proto)
1461 dmac_47_16),
1462 mask->dst);
1463 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1464 dmac_47_16),
1465 key->dst);
1466
1467 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1468 smac_47_16),
1469 mask->src);
1470 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1471 smac_47_16),
1472 key->src);
1473
1474 if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
1475 *match_level = MLX5_MATCH_L2; 1465 *match_level = MLX5_MATCH_L2;
1476 } 1466 }
1477 1467
@@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1505 1495
1506 *match_level = MLX5_MATCH_L2; 1496 *match_level = MLX5_MATCH_L2;
1507 } 1497 }
1508 } else { 1498 } else if (*match_level != MLX5_MATCH_NONE) {
1509 MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); 1499 MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
1510 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); 1500 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1501 *match_level = MLX5_MATCH_L2;
1511 } 1502 }
1512 1503
1513 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { 1504 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1545 } 1536 }
1546 } 1537 }
1547 1538
1548 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { 1539 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1549 struct flow_dissector_key_basic *key = 1540 struct flow_dissector_key_eth_addrs *key =
1550 skb_flow_dissector_target(f->dissector, 1541 skb_flow_dissector_target(f->dissector,
1551 FLOW_DISSECTOR_KEY_BASIC, 1542 FLOW_DISSECTOR_KEY_ETH_ADDRS,
1552 f->key); 1543 f->key);
1553 struct flow_dissector_key_basic *mask = 1544 struct flow_dissector_key_eth_addrs *mask =
1554 skb_flow_dissector_target(f->dissector, 1545 skb_flow_dissector_target(f->dissector,
1555 FLOW_DISSECTOR_KEY_BASIC, 1546 FLOW_DISSECTOR_KEY_ETH_ADDRS,
1556 f->mask); 1547 f->mask);
1557 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1558 ntohs(mask->n_proto));
1559 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1560 ntohs(key->n_proto));
1561 1548
1562 if (mask->n_proto) 1549 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1550 dmac_47_16),
1551 mask->dst);
1552 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1553 dmac_47_16),
1554 key->dst);
1555
1556 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1557 smac_47_16),
1558 mask->src);
1559 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1560 smac_47_16),
1561 key->src);
1562
1563 if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
1563 *match_level = MLX5_MATCH_L2; 1564 *match_level = MLX5_MATCH_L2;
1564 } 1565 }
1565 1566
@@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1586 1587
1587 /* the HW doesn't need L3 inline to match on frag=no */ 1588 /* the HW doesn't need L3 inline to match on frag=no */
1588 if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) 1589 if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
1589 *match_level = MLX5_INLINE_MODE_L2; 1590 *match_level = MLX5_MATCH_L2;
1590 /* *** L2 attributes parsing up to here *** */ 1591 /* *** L2 attributes parsing up to here *** */
1591 else 1592 else
1592 *match_level = MLX5_INLINE_MODE_IP; 1593 *match_level = MLX5_MATCH_L3;
1593 } 1594 }
1594 } 1595 }
1595 1596
@@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2979 if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) 2980 if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
2980 return -EOPNOTSUPP; 2981 return -EOPNOTSUPP;
2981 2982
2982 if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { 2983 if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
2983 NL_SET_ERR_MSG_MOD(extack, 2984 NL_SET_ERR_MSG_MOD(extack,
2984 "current firmware doesn't support split rule for port mirroring"); 2985 "current firmware doesn't support split rule for port mirroring");
2985 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); 2986 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 515e3d6de051..5a22c5874f3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
83}; 83};
84 84
85static const struct rhashtable_params rhash_sa = { 85static const struct rhashtable_params rhash_sa = {
86 .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), 86 /* Keep out "cmd" field from the key as it's
87 .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), 87 * value is not constant during the lifetime
88 * of the key object.
89 */
90 .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
91 FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
92 .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
93 FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
88 .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), 94 .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
89 .automatic_shrinking = true, 95 .automatic_shrinking = true,
90 .min_size = 1, 96 .min_size = 1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index b59953daf8b4..11dabd62e2c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev)
560 560
561 netif_carrier_off(epriv->netdev); 561 netif_carrier_off(epriv->netdev);
562 mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); 562 mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
563 mlx5i_uninit_underlay_qp(epriv);
564 mlx5e_deactivate_priv_channels(epriv); 563 mlx5e_deactivate_priv_channels(epriv);
565 mlx5e_close_channels(&epriv->channels); 564 mlx5e_close_channels(&epriv->channels);
565 mlx5i_uninit_underlay_qp(epriv);
566unlock: 566unlock:
567 mutex_unlock(&epriv->state_lock); 567 mutex_unlock(&epriv->state_lock);
568 return 0; 568 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a2df12b79f8e..9bec940330a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3568,7 +3568,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3568 burst_size = 7; 3568 burst_size = 7;
3569 break; 3569 break;
3570 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3570 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3571 is_bytes = true;
3572 rate = 4 * 1024; 3571 rate = 4 * 1024;
3573 burst_size = 4; 3572 burst_size = 4;
3574 break; 3573 break;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 867cddba840f..e8ca98c070f6 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1672,7 +1672,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
1672 netif_wake_queue(adapter->netdev); 1672 netif_wake_queue(adapter->netdev);
1673 } 1673 }
1674 1674
1675 if (!napi_complete_done(napi, weight)) 1675 if (!napi_complete(napi))
1676 goto done; 1676 goto done;
1677 1677
1678 /* enable isr */ 1678 /* enable isr */
@@ -1681,7 +1681,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
1681 lan743x_csr_read(adapter, INT_STS); 1681 lan743x_csr_read(adapter, INT_STS);
1682 1682
1683done: 1683done:
1684 return weight; 1684 return 0;
1685} 1685}
1686 1686
1687static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) 1687static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
@@ -1870,9 +1870,9 @@ static int lan743x_tx_open(struct lan743x_tx *tx)
1870 tx->vector_flags = lan743x_intr_get_vector_flags(adapter, 1870 tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
1871 INT_BIT_DMA_TX_ 1871 INT_BIT_DMA_TX_
1872 (tx->channel_number)); 1872 (tx->channel_number));
1873 netif_napi_add(adapter->netdev, 1873 netif_tx_napi_add(adapter->netdev,
1874 &tx->napi, lan743x_tx_napi_poll, 1874 &tx->napi, lan743x_tx_napi_poll,
1875 tx->ring_size - 1); 1875 tx->ring_size - 1);
1876 napi_enable(&tx->napi); 1876 napi_enable(&tx->napi);
1877 1877
1878 data = 0; 1878 data = 0;
@@ -3017,6 +3017,7 @@ static const struct dev_pm_ops lan743x_pm_ops = {
3017 3017
3018static const struct pci_device_id lan743x_pcidev_tbl[] = { 3018static const struct pci_device_id lan743x_pcidev_tbl[] = {
3019 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, 3019 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
3020 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
3020 { 0, } 3021 { 0, }
3021}; 3022};
3022 3023
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 0e82b6368798..2d6eea18973e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -548,6 +548,7 @@ struct lan743x_adapter;
548/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */ 548/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */
549#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR 549#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR
550#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430) 550#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430)
551#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431)
551 552
552#define PCI_CONFIG_LENGTH (0x1000) 553#define PCI_CONFIG_LENGTH (0x1000)
553 554
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8e8fa823d611..69966dfc6e3d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
191static void 191static void
192qed_dcbx_set_params(struct qed_dcbx_results *p_data, 192qed_dcbx_set_params(struct qed_dcbx_results *p_data,
193 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 193 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
194 bool enable, u8 prio, u8 tc, 194 bool app_tlv, bool enable, u8 prio, u8 tc,
195 enum dcbx_protocol_type type, 195 enum dcbx_protocol_type type,
196 enum qed_pci_personality personality) 196 enum qed_pci_personality personality)
197{ 197{
@@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
210 p_data->arr[type].dont_add_vlan0 = true; 210 p_data->arr[type].dont_add_vlan0 = true;
211 211
212 /* QM reconf data */ 212 /* QM reconf data */
213 if (p_hwfn->hw_info.personality == personality) 213 if (app_tlv && p_hwfn->hw_info.personality == personality)
214 qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); 214 qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
215 215
216 /* Configure dcbx vlan priority in doorbell block for roce EDPM */ 216 /* Configure dcbx vlan priority in doorbell block for roce EDPM */
@@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
225static void 225static void
226qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, 226qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
227 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 227 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
228 bool enable, u8 prio, u8 tc, 228 bool app_tlv, bool enable, u8 prio, u8 tc,
229 enum dcbx_protocol_type type) 229 enum dcbx_protocol_type type)
230{ 230{
231 enum qed_pci_personality personality; 231 enum qed_pci_personality personality;
@@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
240 240
241 personality = qed_dcbx_app_update[i].personality; 241 personality = qed_dcbx_app_update[i].personality;
242 242
243 qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, 243 qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
244 prio, tc, type, personality); 244 prio, tc, type, personality);
245 } 245 }
246} 246}
@@ -319,8 +319,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
319 enable = true; 319 enable = true;
320 } 320 }
321 321
322 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, 322 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
323 priority, tc, type); 323 enable, priority, tc, type);
324 } 324 }
325 } 325 }
326 326
@@ -341,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
341 continue; 341 continue;
342 342
343 enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; 343 enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
344 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, 344 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable,
345 priority, tc, type); 345 priority, tc, type);
346 } 346 }
347 347
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 78a638ec7c0a..979f1e4bc18b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -6071,7 +6071,7 @@ static const char * const s_igu_fifo_error_strs[] = {
6071 "no error", 6071 "no error",
6072 "length error", 6072 "length error",
6073 "function disabled", 6073 "function disabled",
6074 "VF sent command to attnetion address", 6074 "VF sent command to attention address",
6075 "host sent prod update command", 6075 "host sent prod update command",
6076 "read of during interrupt register while in MIMD mode", 6076 "read of during interrupt register while in MIMD mode",
6077 "access to PXP BAR reserved address", 6077 "access to PXP BAR reserved address",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 7ceb2b97538d..88a8576ca9ce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev)
185 qed_iscsi_free(p_hwfn); 185 qed_iscsi_free(p_hwfn);
186 qed_ooo_free(p_hwfn); 186 qed_ooo_free(p_hwfn);
187 } 187 }
188
189 if (QED_IS_RDMA_PERSONALITY(p_hwfn))
190 qed_rdma_info_free(p_hwfn);
191
188 qed_iov_free(p_hwfn); 192 qed_iov_free(p_hwfn);
189 qed_l2_free(p_hwfn); 193 qed_l2_free(p_hwfn);
190 qed_dmae_info_free(p_hwfn); 194 qed_dmae_info_free(p_hwfn);
@@ -481,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
481 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 485 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
482 486
483 /* Can't have multiple flags set here */ 487 /* Can't have multiple flags set here */
484 if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 488 if (bitmap_weight((unsigned long *)&pq_flags,
489 sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
490 DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
491 goto err;
492 }
493
494 if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
495 DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
485 goto err; 496 goto err;
497 }
486 498
487 switch (pq_flags) { 499 switch (pq_flags) {
488 case PQ_FLAGS_RLS: 500 case PQ_FLAGS_RLS:
@@ -506,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
506 } 518 }
507 519
508err: 520err:
509 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 521 return &qm_info->start_pq;
510 return NULL;
511} 522}
512 523
513/* save pq index in qm info */ 524/* save pq index in qm info */
@@ -531,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
531{ 542{
532 u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); 543 u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
533 544
545 if (max_tc == 0) {
546 DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
547 PQ_FLAGS_MCOS);
548 return p_hwfn->qm_info.start_pq;
549 }
550
534 if (tc > max_tc) 551 if (tc > max_tc)
535 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 552 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
536 553
537 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 554 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
538} 555}
539 556
540u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) 557u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
541{ 558{
542 u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); 559 u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
543 560
561 if (max_vf == 0) {
562 DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
563 PQ_FLAGS_VFS);
564 return p_hwfn->qm_info.start_pq;
565 }
566
544 if (vf > max_vf) 567 if (vf > max_vf)
545 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 568 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
546 569
547 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 570 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
548} 571}
549 572
550u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) 573u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
@@ -1081,6 +1104,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
1081 goto alloc_err; 1104 goto alloc_err;
1082 } 1105 }
1083 1106
1107 if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
1108 rc = qed_rdma_info_alloc(p_hwfn);
1109 if (rc)
1110 goto alloc_err;
1111 }
1112
1084 /* DMA info initialization */ 1113 /* DMA info initialization */
1085 rc = qed_dmae_info_alloc(p_hwfn); 1114 rc = qed_dmae_info_alloc(p_hwfn);
1086 if (rc) 1115 if (rc)
@@ -2102,11 +2131,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
2102 if (!p_ptt) 2131 if (!p_ptt)
2103 return -EAGAIN; 2132 return -EAGAIN;
2104 2133
2105 /* If roce info is allocated it means roce is initialized and should
2106 * be enabled in searcher.
2107 */
2108 if (p_hwfn->p_rdma_info && 2134 if (p_hwfn->p_rdma_info &&
2109 p_hwfn->b_rdma_enabled_in_prs) 2135 p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
2110 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); 2136 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
2111 2137
2112 /* Re-open incoming traffic */ 2138 /* Re-open incoming traffic */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cc1b373c0ace..46dc93d3b9b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
147 "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n", 147 "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
148 fcoe_pf_params->num_cqs, 148 fcoe_pf_params->num_cqs,
149 p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); 149 p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
150 return -EINVAL; 150 rc = -EINVAL;
151 goto err;
151 } 152 }
152 153
153 p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); 154 p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
156 157
157 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); 158 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
158 if (rc) 159 if (rc)
159 return rc; 160 goto err;
160 161
161 cxt_info.iid = dummy_cid; 162 cxt_info.iid = dummy_cid;
162 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); 163 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
163 if (rc) { 164 if (rc) {
164 DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", 165 DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
165 dummy_cid); 166 dummy_cid);
166 return rc; 167 goto err;
167 } 168 }
168 p_cxt = cxt_info.p_cxt; 169 p_cxt = cxt_info.p_cxt;
169 SET_FIELD(p_cxt->tstorm_ag_context.flags3, 170 SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
240 rc = qed_spq_post(p_hwfn, p_ent, NULL); 241 rc = qed_spq_post(p_hwfn, p_ent, NULL);
241 242
242 return rc; 243 return rc;
244
245err:
246 qed_sp_destroy_request(p_hwfn, p_ent);
247 return rc;
243} 248}
244 249
245static int 250static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 0f0aba793352..b22f464ea3fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
992 */ 992 */
993 do { 993 do {
994 index = p_sb_attn->sb_index; 994 index = p_sb_attn->sb_index;
995 /* finish reading index before the loop condition */
996 dma_rmb();
995 attn_bits = le32_to_cpu(p_sb_attn->atten_bits); 997 attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
996 attn_acks = le32_to_cpu(p_sb_attn->atten_ack); 998 attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
997 } while (index != p_sb_attn->sb_index); 999 } while (index != p_sb_attn->sb_index);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 1135387bd99d..4f8a685d1a55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
200 "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", 200 "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
201 p_params->num_queues, 201 p_params->num_queues,
202 p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); 202 p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
203 qed_sp_destroy_request(p_hwfn, p_ent);
203 return -EINVAL; 204 return -EINVAL;
204 } 205 }
205 206
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 82a1bd1f8a8c..67c02ea93906 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
740 740
741 rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); 741 rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
742 if (rc) { 742 if (rc) {
743 /* Return spq entry which is taken in qed_sp_init_request()*/ 743 qed_sp_destroy_request(p_hwfn, p_ent);
744 qed_spq_return_entry(p_hwfn, p_ent);
745 return rc; 744 return rc;
746 } 745 }
747 746
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
1355 DP_NOTICE(p_hwfn, 1354 DP_NOTICE(p_hwfn,
1356 "%d is not supported yet\n", 1355 "%d is not supported yet\n",
1357 p_filter_cmd->opcode); 1356 p_filter_cmd->opcode);
1357 qed_sp_destroy_request(p_hwfn, *pp_ent);
1358 return -EINVAL; 1358 return -EINVAL;
1359 } 1359 }
1360 1360
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2056 } else { 2056 } else {
2057 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 2057 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2058 if (rc) 2058 if (rc)
2059 return rc; 2059 goto err;
2060 2060
2061 if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { 2061 if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2062 rc = qed_fw_l2_queue(p_hwfn, p_params->qid, 2062 rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
2063 &abs_rx_q_id); 2063 &abs_rx_q_id);
2064 if (rc) 2064 if (rc)
2065 return rc; 2065 goto err;
2066 2066
2067 p_ramrod->rx_qid_valid = 1; 2067 p_ramrod->rx_qid_valid = 1;
2068 p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); 2068 p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2083 (u64)p_params->addr, p_params->length); 2083 (u64)p_params->addr, p_params->length);
2084 2084
2085 return qed_spq_post(p_hwfn, p_ent, NULL); 2085 return qed_spq_post(p_hwfn, p_ent, NULL);
2086
2087err:
2088 qed_sp_destroy_request(p_hwfn, p_ent);
2089 return rc;
2086} 2090}
2087 2091
2088int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, 2092int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 35fd0db6a677..fff7f04d4525 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1782,9 +1782,9 @@ static int qed_drain(struct qed_dev *cdev)
1782 return -EBUSY; 1782 return -EBUSY;
1783 } 1783 }
1784 rc = qed_mcp_drain(hwfn, ptt); 1784 rc = qed_mcp_drain(hwfn, ptt);
1785 qed_ptt_release(hwfn, ptt);
1785 if (rc) 1786 if (rc)
1786 return rc; 1787 return rc;
1787 qed_ptt_release(hwfn, ptt);
1788 } 1788 }
1789 1789
1790 return 0; 1790 return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f40f654398a0..a96364df4320 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
1944 struct qed_ptt *p_ptt, u32 *p_speed_mask) 1944 struct qed_ptt *p_ptt, u32 *p_speed_mask)
1945{ 1945{
1946 u32 transceiver_type, transceiver_state; 1946 u32 transceiver_type, transceiver_state;
1947 int ret;
1947 1948
1948 qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, 1949 ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
1949 &transceiver_type); 1950 &transceiver_type);
1951 if (ret)
1952 return ret;
1950 1953
1951 if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == 1954 if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
1952 false) 1955 false)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c71391b9c757..7873d6dfd91f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
140 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; 140 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
141} 141}
142 142
143static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, 143int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
144 struct qed_ptt *p_ptt,
145 struct qed_rdma_start_in_params *params)
146{ 144{
147 struct qed_rdma_info *p_rdma_info; 145 struct qed_rdma_info *p_rdma_info;
148 u32 num_cons, num_tasks;
149 int rc = -ENOMEM;
150 146
151 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
152
153 /* Allocate a struct with current pf rdma info */
154 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); 147 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
155 if (!p_rdma_info) 148 if (!p_rdma_info)
156 return rc; 149 return -ENOMEM;
150
151 spin_lock_init(&p_rdma_info->lock);
157 152
158 p_hwfn->p_rdma_info = p_rdma_info; 153 p_hwfn->p_rdma_info = p_rdma_info;
154 return 0;
155}
156
157void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
158{
159 kfree(p_hwfn->p_rdma_info);
160 p_hwfn->p_rdma_info = NULL;
161}
162
163static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
164{
165 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
166 u32 num_cons, num_tasks;
167 int rc = -ENOMEM;
168
169 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
170
159 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 171 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
160 p_rdma_info->proto = PROTOCOLID_IWARP; 172 p_rdma_info->proto = PROTOCOLID_IWARP;
161 else 173 else
@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
183 /* Allocate a struct with device params and fill it */ 195 /* Allocate a struct with device params and fill it */
184 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); 196 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
185 if (!p_rdma_info->dev) 197 if (!p_rdma_info->dev)
186 goto free_rdma_info; 198 return rc;
187 199
188 /* Allocate a struct with port params and fill it */ 200 /* Allocate a struct with port params and fill it */
189 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); 201 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
@@ -298,8 +310,6 @@ free_rdma_port:
298 kfree(p_rdma_info->port); 310 kfree(p_rdma_info->port);
299free_rdma_dev: 311free_rdma_dev:
300 kfree(p_rdma_info->dev); 312 kfree(p_rdma_info->dev);
301free_rdma_info:
302 kfree(p_rdma_info);
303 313
304 return rc; 314 return rc;
305} 315}
@@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
370 380
371 kfree(p_rdma_info->port); 381 kfree(p_rdma_info->port);
372 kfree(p_rdma_info->dev); 382 kfree(p_rdma_info->dev);
373
374 kfree(p_rdma_info);
375} 383}
376 384
377static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) 385static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
@@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
679 687
680 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); 688 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
681 689
682 spin_lock_init(&p_hwfn->p_rdma_info->lock);
683
684 qed_rdma_init_devinfo(p_hwfn, params); 690 qed_rdma_init_devinfo(p_hwfn, params);
685 qed_rdma_init_port(p_hwfn); 691 qed_rdma_init_port(p_hwfn);
686 qed_rdma_init_events(p_hwfn, params); 692 qed_rdma_init_events(p_hwfn, params);
@@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt)
727 /* Disable RoCE search */ 733 /* Disable RoCE search */
728 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); 734 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
729 p_hwfn->b_rdma_enabled_in_prs = false; 735 p_hwfn->b_rdma_enabled_in_prs = false;
730 736 p_hwfn->p_rdma_info->active = 0;
731 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); 737 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
732 738
733 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); 739 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
@@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
1236 u8 max_stats_queues; 1242 u8 max_stats_queues;
1237 int rc; 1243 int rc;
1238 1244
1239 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { 1245 if (!rdma_cxt || !in_params || !out_params ||
1246 !p_hwfn->p_rdma_info->active) {
1240 DP_ERR(p_hwfn->cdev, 1247 DP_ERR(p_hwfn->cdev,
1241 "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", 1248 "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
1242 rdma_cxt, in_params, out_params); 1249 rdma_cxt, in_params, out_params);
@@ -1514,6 +1521,7 @@ qed_rdma_register_tid(void *rdma_cxt,
1514 default: 1521 default:
1515 rc = -EINVAL; 1522 rc = -EINVAL;
1516 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 1523 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1524 qed_sp_destroy_request(p_hwfn, p_ent);
1517 return rc; 1525 return rc;
1518 } 1526 }
1519 SET_FIELD(p_ramrod->flags1, 1527 SET_FIELD(p_ramrod->flags1,
@@ -1801,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
1801{ 1809{
1802 bool result; 1810 bool result;
1803 1811
1804 /* if rdma info has not been allocated, naturally there are no qps */ 1812 /* if rdma wasn't activated yet, naturally there are no qps */
1805 if (!p_hwfn->p_rdma_info) 1813 if (!p_hwfn->p_rdma_info->active)
1806 return false; 1814 return false;
1807 1815
1808 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1816 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
@@ -1848,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
1848 if (!p_ptt) 1856 if (!p_ptt)
1849 goto err; 1857 goto err;
1850 1858
1851 rc = qed_rdma_alloc(p_hwfn, p_ptt, params); 1859 rc = qed_rdma_alloc(p_hwfn);
1852 if (rc) 1860 if (rc)
1853 goto err1; 1861 goto err1;
1854 1862
@@ -1857,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
1857 goto err2; 1865 goto err2;
1858 1866
1859 qed_ptt_release(p_hwfn, p_ptt); 1867 qed_ptt_release(p_hwfn, p_ptt);
1868 p_hwfn->p_rdma_info->active = 1;
1860 1869
1861 return rc; 1870 return rc;
1862 1871
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 6f722ee8ee94..3689fe3e5935 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -102,6 +102,7 @@ struct qed_rdma_info {
102 u16 max_queue_zones; 102 u16 max_queue_zones;
103 enum protocol_type proto; 103 enum protocol_type proto;
104 struct qed_iwarp_info iwarp; 104 struct qed_iwarp_info iwarp;
105 u8 active:1;
105}; 106};
106 107
107struct qed_rdma_qp { 108struct qed_rdma_qp {
@@ -176,10 +177,14 @@ struct qed_rdma_qp {
176#if IS_ENABLED(CONFIG_QED_RDMA) 177#if IS_ENABLED(CONFIG_QED_RDMA)
177void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 178void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
178void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 179void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
180int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
181void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
179#else 182#else
180static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} 183static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
181static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, 184static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
182 struct qed_ptt *p_ptt) {} 185 struct qed_ptt *p_ptt) {}
186static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
187static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
183#endif 188#endif
184 189
185int 190int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f9167d1354bb..e49fada85410 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
745 DP_NOTICE(p_hwfn, 745 DP_NOTICE(p_hwfn,
746 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", 746 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
747 rc); 747 rc);
748 qed_sp_destroy_request(p_hwfn, p_ent);
748 return rc; 749 return rc;
749 } 750 }
750 751
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index e95431f6acd4..3157c0d99441 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -167,6 +167,9 @@ struct qed_spq_entry {
167 enum spq_mode comp_mode; 167 enum spq_mode comp_mode;
168 struct qed_spq_comp_cb comp_cb; 168 struct qed_spq_comp_cb comp_cb;
169 struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ 169 struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
170
171 /* Posted entry for unlimited list entry in EBLOCK mode */
172 struct qed_spq_entry *post_ent;
170}; 173};
171 174
172struct qed_eq { 175struct qed_eq {
@@ -396,6 +399,17 @@ struct qed_sp_init_data {
396 struct qed_spq_comp_cb *p_comp_data; 399 struct qed_spq_comp_cb *p_comp_data;
397}; 400};
398 401
402/**
403 * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
404 * Should be called on in error flows after initializing the SPQ entry
405 * and before posting it.
406 *
407 * @param p_hwfn
408 * @param p_ent
409 */
410void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
411 struct qed_spq_entry *p_ent);
412
399int qed_sp_init_request(struct qed_hwfn *p_hwfn, 413int qed_sp_init_request(struct qed_hwfn *p_hwfn,
400 struct qed_spq_entry **pp_ent, 414 struct qed_spq_entry **pp_ent,
401 u8 cmd, 415 u8 cmd,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 77b6248ad3b9..888274fa208b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -47,6 +47,19 @@
47#include "qed_sp.h" 47#include "qed_sp.h"
48#include "qed_sriov.h" 48#include "qed_sriov.h"
49 49
50void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
51 struct qed_spq_entry *p_ent)
52{
53 /* qed_spq_get_entry() can either get an entry from the free_pool,
54 * or, if no entries are left, allocate a new entry and add it to
55 * the unlimited_pending list.
56 */
57 if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
58 kfree(p_ent);
59 else
60 qed_spq_return_entry(p_hwfn, p_ent);
61}
62
50int qed_sp_init_request(struct qed_hwfn *p_hwfn, 63int qed_sp_init_request(struct qed_hwfn *p_hwfn,
51 struct qed_spq_entry **pp_ent, 64 struct qed_spq_entry **pp_ent,
52 u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) 65 u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
80 93
81 case QED_SPQ_MODE_BLOCK: 94 case QED_SPQ_MODE_BLOCK:
82 if (!p_data->p_comp_data) 95 if (!p_data->p_comp_data)
83 return -EINVAL; 96 goto err;
84 97
85 p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; 98 p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
86 break; 99 break;
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
95 default: 108 default:
96 DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", 109 DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
97 p_ent->comp_mode); 110 p_ent->comp_mode);
98 return -EINVAL; 111 goto err;
99 } 112 }
100 113
101 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, 114 DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
109 memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); 122 memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
110 123
111 return 0; 124 return 0;
125
126err:
127 qed_sp_destroy_request(p_hwfn, p_ent);
128
129 return -EINVAL;
112} 130}
113 131
114static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) 132static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index c4a6274dd625..0a9c5bb0fa48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
142 142
143 DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); 143 DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
144 rc = qed_mcp_drain(p_hwfn, p_ptt); 144 rc = qed_mcp_drain(p_hwfn, p_ptt);
145 qed_ptt_release(p_hwfn, p_ptt);
145 if (rc) { 146 if (rc) {
146 DP_NOTICE(p_hwfn, "MCP drain failed\n"); 147 DP_NOTICE(p_hwfn, "MCP drain failed\n");
147 goto err; 148 goto err;
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
150 /* Retry after drain */ 151 /* Retry after drain */
151 rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); 152 rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
152 if (!rc) 153 if (!rc)
153 goto out; 154 return 0;
154 155
155 comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; 156 comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
156 if (comp_done->done == 1) 157 if (comp_done->done == 1) {
157 if (p_fw_ret) 158 if (p_fw_ret)
158 *p_fw_ret = comp_done->fw_return_code; 159 *p_fw_ret = comp_done->fw_return_code;
159out: 160 return 0;
160 qed_ptt_release(p_hwfn, p_ptt); 161 }
161 return 0;
162
163err: 162err:
164 qed_ptt_release(p_hwfn, p_ptt);
165 DP_NOTICE(p_hwfn, 163 DP_NOTICE(p_hwfn,
166 "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", 164 "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
167 le32_to_cpu(p_ent->elem.hdr.cid), 165 le32_to_cpu(p_ent->elem.hdr.cid),
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
685 /* EBLOCK responsible to free the allocated p_ent */ 683 /* EBLOCK responsible to free the allocated p_ent */
686 if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) 684 if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
687 kfree(p_ent); 685 kfree(p_ent);
686 else
687 p_ent->post_ent = p_en2;
688 688
689 p_ent = p_en2; 689 p_ent = p_en2;
690 } 690 }
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
767 SPQ_HIGH_PRI_RESERVE_DEFAULT); 767 SPQ_HIGH_PRI_RESERVE_DEFAULT);
768} 768}
769 769
770/* Avoid overriding of SPQ entries when getting out-of-order completions, by
771 * marking the completions in a bitmap and increasing the chain consumer only
772 * for the first successive completed entries.
773 */
774static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
775{
776 u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
777 struct qed_spq *p_spq = p_hwfn->p_spq;
778
779 __set_bit(pos, p_spq->p_comp_bitmap);
780 while (test_bit(p_spq->comp_bitmap_idx,
781 p_spq->p_comp_bitmap)) {
782 __clear_bit(p_spq->comp_bitmap_idx,
783 p_spq->p_comp_bitmap);
784 p_spq->comp_bitmap_idx++;
785 qed_chain_return_produced(&p_spq->chain);
786 }
787}
788
770int qed_spq_post(struct qed_hwfn *p_hwfn, 789int qed_spq_post(struct qed_hwfn *p_hwfn,
771 struct qed_spq_entry *p_ent, u8 *fw_return_code) 790 struct qed_spq_entry *p_ent, u8 *fw_return_code)
772{ 791{
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
824 p_ent->queue == &p_spq->unlimited_pending); 843 p_ent->queue == &p_spq->unlimited_pending);
825 844
826 if (p_ent->queue == &p_spq->unlimited_pending) { 845 if (p_ent->queue == &p_spq->unlimited_pending) {
827 /* This is an allocated p_ent which does not need to 846 struct qed_spq_entry *p_post_ent = p_ent->post_ent;
828 * return to pool. 847
829 */
830 kfree(p_ent); 848 kfree(p_ent);
831 return rc; 849
850 /* Return the entry which was actually posted */
851 p_ent = p_post_ent;
832 } 852 }
833 853
834 if (rc) 854 if (rc)
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
842spq_post_fail2: 862spq_post_fail2:
843 spin_lock_bh(&p_spq->lock); 863 spin_lock_bh(&p_spq->lock);
844 list_del(&p_ent->list); 864 list_del(&p_ent->list);
845 qed_chain_return_produced(&p_spq->chain); 865 qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
846 866
847spq_post_fail: 867spq_post_fail:
848 /* return to the free pool */ 868 /* return to the free pool */
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
874 spin_lock_bh(&p_spq->lock); 894 spin_lock_bh(&p_spq->lock);
875 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { 895 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
876 if (p_ent->elem.hdr.echo == echo) { 896 if (p_ent->elem.hdr.echo == echo) {
877 u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
878
879 list_del(&p_ent->list); 897 list_del(&p_ent->list);
880 898 qed_spq_comp_bmap_update(p_hwfn, echo);
881 /* Avoid overriding of SPQ entries when getting
882 * out-of-order completions, by marking the completions
883 * in a bitmap and increasing the chain consumer only
884 * for the first successive completed entries.
885 */
886 __set_bit(pos, p_spq->p_comp_bitmap);
887
888 while (test_bit(p_spq->comp_bitmap_idx,
889 p_spq->p_comp_bitmap)) {
890 __clear_bit(p_spq->comp_bitmap_idx,
891 p_spq->p_comp_bitmap);
892 p_spq->comp_bitmap_idx++;
893 qed_chain_return_produced(&p_spq->chain);
894 }
895
896 p_spq->comp_count++; 899 p_spq->comp_count++;
897 found = p_ent; 900 found = p_ent;
898 break; 901 break;
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
931 QED_MSG_SPQ, 934 QED_MSG_SPQ,
932 "Got a completion without a callback function\n"); 935 "Got a completion without a callback function\n");
933 936
934 if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || 937 if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
935 (found->queue == &p_spq->unlimited_pending))
936 /* EBLOCK is responsible for returning its own entry into the 938 /* EBLOCK is responsible for returning its own entry into the
937 * free list, unless it originally added the entry into the 939 * free list.
938 * unlimited pending list.
939 */ 940 */
940 qed_spq_return_entry(p_hwfn, found); 941 qed_spq_return_entry(p_hwfn, found);
941 942
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9b08a9d9e151..ca6290fa0f30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
101 default: 101 default:
102 DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", 102 DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
103 p_hwfn->hw_info.personality); 103 p_hwfn->hw_info.personality);
104 qed_sp_destroy_request(p_hwfn, p_ent);
104 return -EINVAL; 105 return -EINVAL;
105 } 106 }
106 107
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 9647578cbe6a..14f26bf3b388 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
459 struct cmd_desc_type0 *first_desc, struct sk_buff *skb, 459 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
460 struct qlcnic_host_tx_ring *tx_ring) 460 struct qlcnic_host_tx_ring *tx_ring)
461{ 461{
462 u8 l4proto, opcode = 0, hdr_len = 0; 462 u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
463 u16 flags = 0, vlan_tci = 0; 463 u16 flags = 0, vlan_tci = 0;
464 int copied, offset, copy_len, size; 464 int copied, offset, copy_len, size;
465 struct cmd_desc_type0 *hwdesc; 465 struct cmd_desc_type0 *hwdesc;
@@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
472 flags = QLCNIC_FLAGS_VLAN_TAGGED; 472 flags = QLCNIC_FLAGS_VLAN_TAGGED;
473 vlan_tci = ntohs(vh->h_vlan_TCI); 473 vlan_tci = ntohs(vh->h_vlan_TCI);
474 protocol = ntohs(vh->h_vlan_encapsulated_proto); 474 protocol = ntohs(vh->h_vlan_encapsulated_proto);
475 tag_vlan = 1;
475 } else if (skb_vlan_tag_present(skb)) { 476 } else if (skb_vlan_tag_present(skb)) {
476 flags = QLCNIC_FLAGS_VLAN_OOB; 477 flags = QLCNIC_FLAGS_VLAN_OOB;
477 vlan_tci = skb_vlan_tag_get(skb); 478 vlan_tci = skb_vlan_tag_get(skb);
479 tag_vlan = 1;
478 } 480 }
479 if (unlikely(adapter->tx_pvid)) { 481 if (unlikely(adapter->tx_pvid)) {
480 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) 482 if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
481 return -EIO; 483 return -EIO;
482 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) 484 if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
483 goto set_flags; 485 goto set_flags;
484 486
485 flags = QLCNIC_FLAGS_VLAN_OOB; 487 flags = QLCNIC_FLAGS_VLAN_OOB;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 0afc3d335d56..d11c16aeb19a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
234 struct net_device *real_dev, 234 struct net_device *real_dev,
235 struct rmnet_endpoint *ep) 235 struct rmnet_endpoint *ep)
236{ 236{
237 struct rmnet_priv *priv; 237 struct rmnet_priv *priv = netdev_priv(rmnet_dev);
238 int rc; 238 int rc;
239 239
240 if (ep->egress_dev) 240 if (ep->egress_dev)
@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
247 rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 247 rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
248 rmnet_dev->hw_features |= NETIF_F_SG; 248 rmnet_dev->hw_features |= NETIF_F_SG;
249 249
250 priv->real_dev = real_dev;
251
250 rc = register_netdevice(rmnet_dev); 252 rc = register_netdevice(rmnet_dev);
251 if (!rc) { 253 if (!rc) {
252 ep->egress_dev = rmnet_dev; 254 ep->egress_dev = rmnet_dev;
@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
255 257
256 rmnet_dev->rtnl_link_ops = &rmnet_link_ops; 258 rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
257 259
258 priv = netdev_priv(rmnet_dev);
259 priv->mux_id = id; 260 priv->mux_id = id;
260 priv->real_dev = real_dev;
261 261
262 netdev_dbg(rmnet_dev, "rmnet dev created\n"); 262 netdev_dbg(rmnet_dev, "rmnet dev created\n");
263 } 263 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b1b305f8f414..272b9ca66314 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -365,7 +365,8 @@ struct dma_features {
365 365
366/* GMAC TX FIFO is 8K, Rx FIFO is 16K */ 366/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
367#define BUF_SIZE_16KiB 16384 367#define BUF_SIZE_16KiB 16384
368#define BUF_SIZE_8KiB 8192 368/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
369#define BUF_SIZE_8KiB 8188
369#define BUF_SIZE_4KiB 4096 370#define BUF_SIZE_4KiB 4096
370#define BUF_SIZE_2KiB 2048 371#define BUF_SIZE_2KiB 2048
371 372
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index ca9d7e48034c..40d6356a7e73 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -31,7 +31,7 @@
31/* Enhanced descriptors */ 31/* Enhanced descriptors */
32static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) 32static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
33{ 33{
34 p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1) 34 p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
35 << ERDES1_BUFFER2_SIZE_SHIFT) 35 << ERDES1_BUFFER2_SIZE_SHIFT)
36 & ERDES1_BUFFER2_SIZE_MASK); 36 & ERDES1_BUFFER2_SIZE_MASK);
37 37
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 77914c89d749..5ef91a790f9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
262 int mode, int end) 262 int mode, int end)
263{ 263{
264 p->des0 |= cpu_to_le32(RDES0_OWN); 264 p->des0 |= cpu_to_le32(RDES0_OWN);
265 p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); 265 p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
266 266
267 if (mode == STMMAC_CHAIN_MODE) 267 if (mode == STMMAC_CHAIN_MODE)
268 ehn_desc_rx_set_on_chain(p); 268 ehn_desc_rx_set_on_chain(p);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index abc3f85270cd..d8c5bc412219 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
140static int set_16kib_bfsize(int mtu) 140static int set_16kib_bfsize(int mtu)
141{ 141{
142 int ret = 0; 142 int ret = 0;
143 if (unlikely(mtu >= BUF_SIZE_8KiB)) 143 if (unlikely(mtu > BUF_SIZE_8KiB))
144 ret = BUF_SIZE_16KiB; 144 ret = BUF_SIZE_16KiB;
145 return ret; 145 return ret;
146} 146}
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ef9538ee53d0..82412691ee66 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3605,7 +3605,7 @@ static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3605 "tx_jumbo", 3605 "tx_jumbo",
3606 "rx_mac_control_frames", 3606 "rx_mac_control_frames",
3607 "tx_mac_control_frames", 3607 "tx_mac_control_frames",
3608 "rx_frame_alignement_errors", 3608 "rx_frame_alignment_errors",
3609 "rx_long_ok", 3609 "rx_long_ok",
3610 "rx_long_err", 3610 "rx_long_err",
3611 "tx_sqe_errors", 3611 "tx_sqe_errors",
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 3b7f10a5f06a..c5cae8e74dc4 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices. 2/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
3 * 3 *
4 * Copyright (c) 2018 Maciej W. Rozycki 4 * Copyright (c) 2018 Maciej W. Rozycki
@@ -56,7 +56,7 @@
56#define DRV_VERSION "v.1.1.4" 56#define DRV_VERSION "v.1.1.4"
57#define DRV_RELDATE "Oct 6 2018" 57#define DRV_RELDATE "Oct 6 2018"
58 58
59static char version[] = 59static const char version[] =
60 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n"; 60 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
61 61
62MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>"); 62MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
@@ -784,7 +784,7 @@ err_rx:
784static void fza_tx_smt(struct net_device *dev) 784static void fza_tx_smt(struct net_device *dev)
785{ 785{
786 struct fza_private *fp = netdev_priv(dev); 786 struct fza_private *fp = netdev_priv(dev);
787 struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr; 787 struct fza_buffer_tx __iomem *smt_tx_ptr;
788 int i, len; 788 int i, len;
789 u32 own; 789 u32 own;
790 790
@@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev)
799 799
800 if (!netif_queue_stopped(dev)) { 800 if (!netif_queue_stopped(dev)) {
801 if (dev_nit_active(dev)) { 801 if (dev_nit_active(dev)) {
802 struct fza_buffer_tx *skb_data_ptr;
802 struct sk_buff *skb; 803 struct sk_buff *skb;
803 804
804 /* Length must be a multiple of 4 as only word 805 /* Length must be a multiple of 4 as only word
diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h
index b06acf32738e..93bda61be8e3 100644
--- a/drivers/net/fddi/defza.h
+++ b/drivers/net/fddi/defza.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0+ */
2/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices. 2/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
3 * 3 *
4 * Copyright (c) 2018 Maciej W. Rozycki 4 * Copyright (c) 2018 Maciej W. Rozycki
@@ -235,6 +235,7 @@ struct fza_ring_cmd {
235#define FZA_RING_CMD 0x200400 /* command ring address */ 235#define FZA_RING_CMD 0x200400 /* command ring address */
236#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring 236#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring
237 * size 237 * size
238 */
238/* Command constants. */ 239/* Command constants. */
239#define FZA_RING_CMD_MASK 0x7fffffff 240#define FZA_RING_CMD_MASK 0x7fffffff
240#define FZA_RING_CMD_NOP 0x00000000 /* nop */ 241#define FZA_RING_CMD_NOP 0x00000000 /* nop */
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index e86ea105c802..704537010453 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
92 return 0; 92 return 0;
93} 93}
94 94
95static int bcm5481x_config(struct phy_device *phydev) 95static int bcm54xx_config_clock_delay(struct phy_device *phydev)
96{ 96{
97 int rc, val; 97 int rc, val;
98 98
@@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
429 ret = genphy_config_aneg(phydev); 429 ret = genphy_config_aneg(phydev);
430 430
431 /* Then we can set up the delay. */ 431 /* Then we can set up the delay. */
432 bcm5481x_config(phydev); 432 bcm54xx_config_clock_delay(phydev);
433 433
434 if (of_property_read_bool(np, "enet-phy-lane-swap")) { 434 if (of_property_read_bool(np, "enet-phy-lane-swap")) {
435 /* Lane Swap - Undocumented register...magic! */ 435 /* Lane Swap - Undocumented register...magic! */
@@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
442 return ret; 442 return ret;
443} 443}
444 444
445static int bcm54616s_config_aneg(struct phy_device *phydev)
446{
447 int ret;
448
449 /* Aneg firsly. */
450 ret = genphy_config_aneg(phydev);
451
452 /* Then we can set up the delay. */
453 bcm54xx_config_clock_delay(phydev);
454
455 return ret;
456}
457
445static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set) 458static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
446{ 459{
447 int val; 460 int val;
@@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = {
636 .features = PHY_GBIT_FEATURES, 649 .features = PHY_GBIT_FEATURES,
637 .flags = PHY_HAS_INTERRUPT, 650 .flags = PHY_HAS_INTERRUPT,
638 .config_init = bcm54xx_config_init, 651 .config_init = bcm54xx_config_init,
652 .config_aneg = bcm54616s_config_aneg,
639 .ack_interrupt = bcm_phy_ack_intr, 653 .ack_interrupt = bcm_phy_ack_intr,
640 .config_intr = bcm_phy_config_intr, 654 .config_intr = bcm_phy_config_intr,
641}, { 655}, {
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 33265747bf39..0fbcedcdf6e2 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
63 * assume the pin serves as pull-up. If direction is 63 * assume the pin serves as pull-up. If direction is
64 * output, the default value is high. 64 * output, the default value is high.
65 */ 65 */
66 gpiod_set_value(bitbang->mdo, 1); 66 gpiod_set_value_cansleep(bitbang->mdo, 1);
67 return; 67 return;
68 } 68 }
69 69
@@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
78 struct mdio_gpio_info *bitbang = 78 struct mdio_gpio_info *bitbang =
79 container_of(ctrl, struct mdio_gpio_info, ctrl); 79 container_of(ctrl, struct mdio_gpio_info, ctrl);
80 80
81 return gpiod_get_value(bitbang->mdio); 81 return gpiod_get_value_cansleep(bitbang->mdio);
82} 82}
83 83
84static void mdio_set(struct mdiobb_ctrl *ctrl, int what) 84static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
87 container_of(ctrl, struct mdio_gpio_info, ctrl); 87 container_of(ctrl, struct mdio_gpio_info, ctrl);
88 88
89 if (bitbang->mdo) 89 if (bitbang->mdo)
90 gpiod_set_value(bitbang->mdo, what); 90 gpiod_set_value_cansleep(bitbang->mdo, what);
91 else 91 else
92 gpiod_set_value(bitbang->mdio, what); 92 gpiod_set_value_cansleep(bitbang->mdio, what);
93} 93}
94 94
95static void mdc_set(struct mdiobb_ctrl *ctrl, int what) 95static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
97 struct mdio_gpio_info *bitbang = 97 struct mdio_gpio_info *bitbang =
98 container_of(ctrl, struct mdio_gpio_info, ctrl); 98 container_of(ctrl, struct mdio_gpio_info, ctrl);
99 99
100 gpiod_set_value(bitbang->mdc, what); 100 gpiod_set_value_cansleep(bitbang->mdc, what);
101} 101}
102 102
103static const struct mdiobb_ops mdio_gpio_ops = { 103static const struct mdiobb_ops mdio_gpio_ops = {
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index a2e59f4f6f01..7cae17517744 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -810,17 +810,13 @@ static int vsc85xx_default_config(struct phy_device *phydev)
810 810
811 phydev->mdix_ctrl = ETH_TP_MDI_AUTO; 811 phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
812 mutex_lock(&phydev->lock); 812 mutex_lock(&phydev->lock);
813 rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
814 if (rc < 0)
815 goto out_unlock;
816 813
817 reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL); 814 reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
818 reg_val &= ~(RGMII_RX_CLK_DELAY_MASK); 815
819 reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS); 816 rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
820 phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val); 817 MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK,
818 reg_val);
821 819
822out_unlock:
823 rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
824 mutex_unlock(&phydev->lock); 820 mutex_unlock(&phydev->lock);
825 821
826 return rc; 822 return rc;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ab33d1777132..23ee3967c166 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -2197,6 +2197,14 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
2197 new_driver->mdiodrv.driver.remove = phy_remove; 2197 new_driver->mdiodrv.driver.remove = phy_remove;
2198 new_driver->mdiodrv.driver.owner = owner; 2198 new_driver->mdiodrv.driver.owner = owner;
2199 2199
2200 /* The following works around an issue where the PHY driver doesn't bind
2201 * to the device, resulting in the genphy driver being used instead of
2202 * the dedicated driver. The root cause of the issue isn't known yet
2203 * and seems to be in the base driver core. Once this is fixed we may
2204 * remove this workaround.
2205 */
2206 new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
2207
2200 retval = driver_register(&new_driver->mdiodrv.driver); 2208 retval = driver_register(&new_driver->mdiodrv.driver);
2201 if (retval) { 2209 if (retval) {
2202 pr_err("%s: Error %d in registering driver\n", 2210 pr_err("%s: Error %d in registering driver\n",
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 7fc8508b5231..271e8adc39f1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = {
220 .flags = PHY_HAS_INTERRUPT, 220 .flags = PHY_HAS_INTERRUPT,
221 }, { 221 }, {
222 .phy_id = 0x001cc816, 222 .phy_id = 0x001cc816,
223 .name = "RTL8201F 10/100Mbps Ethernet", 223 .name = "RTL8201F Fast Ethernet",
224 .phy_id_mask = 0x001fffff, 224 .phy_id_mask = 0x001fffff,
225 .features = PHY_BASIC_FEATURES, 225 .features = PHY_BASIC_FEATURES,
226 .flags = PHY_HAS_INTERRUPT, 226 .flags = PHY_HAS_INTERRUPT,
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index e9f101c9bae2..bfbb39f93554 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
216 * it just report sending a packet to the target 216 * it just report sending a packet to the target
217 * (without actual packet transfer). 217 * (without actual packet transfer).
218 */ 218 */
219 dev_kfree_skb_any(skb);
220 ndev->stats.tx_packets++; 219 ndev->stats.tx_packets++;
221 ndev->stats.tx_bytes += skb->len; 220 ndev->stats.tx_bytes += skb->len;
221 dev_kfree_skb_any(skb);
222 } 222 }
223 } 223 }
224 224
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index db633ae9f784..364f514d56d8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -985,8 +985,6 @@ static void team_port_disable(struct team *team,
985 team->en_port_count--; 985 team->en_port_count--;
986 team_queue_override_port_del(team, port); 986 team_queue_override_port_del(team, port);
987 team_adjust_ops(team); 987 team_adjust_ops(team);
988 team_notify_peers(team);
989 team_mcast_rejoin(team);
990 team_lower_state_changed(port); 988 team_lower_state_changed(port);
991} 989}
992 990
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 060135ceaf0e..e244f5d7512a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1536,6 +1536,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1536 1536
1537 if (!rx_batched || (!more && skb_queue_empty(queue))) { 1537 if (!rx_batched || (!more && skb_queue_empty(queue))) {
1538 local_bh_disable(); 1538 local_bh_disable();
1539 skb_record_rx_queue(skb, tfile->queue_index);
1539 netif_receive_skb(skb); 1540 netif_receive_skb(skb);
1540 local_bh_enable(); 1541 local_bh_enable();
1541 return; 1542 return;
@@ -1555,8 +1556,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1555 struct sk_buff *nskb; 1556 struct sk_buff *nskb;
1556 1557
1557 local_bh_disable(); 1558 local_bh_disable();
1558 while ((nskb = __skb_dequeue(&process_queue))) 1559 while ((nskb = __skb_dequeue(&process_queue))) {
1560 skb_record_rx_queue(nskb, tfile->queue_index);
1559 netif_receive_skb(nskb); 1561 netif_receive_skb(nskb);
1562 }
1563 skb_record_rx_queue(skb, tfile->queue_index);
1560 netif_receive_skb(skb); 1564 netif_receive_skb(skb);
1561 local_bh_enable(); 1565 local_bh_enable();
1562 } 1566 }
@@ -2451,6 +2455,7 @@ build:
2451 if (!rcu_dereference(tun->steering_prog)) 2455 if (!rcu_dereference(tun->steering_prog))
2452 rxhash = __skb_get_hash_symmetric(skb); 2456 rxhash = __skb_get_hash_symmetric(skb);
2453 2457
2458 skb_record_rx_queue(skb, tfile->queue_index);
2454 netif_receive_skb(skb); 2459 netif_receive_skb(skb);
2455 2460
2456 stats = get_cpu_ptr(tun->pcpu_stats); 2461 stats = get_cpu_ptr(tun->pcpu_stats);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 7275761a1177..3d8a70d3ea9b 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -140,7 +140,6 @@ struct ipheth_device {
140 struct usb_device *udev; 140 struct usb_device *udev;
141 struct usb_interface *intf; 141 struct usb_interface *intf;
142 struct net_device *net; 142 struct net_device *net;
143 struct sk_buff *tx_skb;
144 struct urb *tx_urb; 143 struct urb *tx_urb;
145 struct urb *rx_urb; 144 struct urb *rx_urb;
146 unsigned char *tx_buf; 145 unsigned char *tx_buf;
@@ -230,6 +229,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
230 case -ENOENT: 229 case -ENOENT:
231 case -ECONNRESET: 230 case -ECONNRESET:
232 case -ESHUTDOWN: 231 case -ESHUTDOWN:
232 case -EPROTO:
233 return; 233 return;
234 case 0: 234 case 0:
235 break; 235 break;
@@ -281,7 +281,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
281 dev_err(&dev->intf->dev, "%s: urb status: %d\n", 281 dev_err(&dev->intf->dev, "%s: urb status: %d\n",
282 __func__, status); 282 __func__, status);
283 283
284 dev_kfree_skb_irq(dev->tx_skb);
285 if (status == 0) 284 if (status == 0)
286 netif_wake_queue(dev->net); 285 netif_wake_queue(dev->net);
287 else 286 else
@@ -423,7 +422,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
423 if (skb->len > IPHETH_BUF_SIZE) { 422 if (skb->len > IPHETH_BUF_SIZE) {
424 WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len); 423 WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
425 dev->net->stats.tx_dropped++; 424 dev->net->stats.tx_dropped++;
426 dev_kfree_skb_irq(skb); 425 dev_kfree_skb_any(skb);
427 return NETDEV_TX_OK; 426 return NETDEV_TX_OK;
428 } 427 }
429 428
@@ -443,12 +442,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
443 dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n", 442 dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
444 __func__, retval); 443 __func__, retval);
445 dev->net->stats.tx_errors++; 444 dev->net->stats.tx_errors++;
446 dev_kfree_skb_irq(skb); 445 dev_kfree_skb_any(skb);
447 } else { 446 } else {
448 dev->tx_skb = skb;
449
450 dev->net->stats.tx_packets++; 447 dev->net->stats.tx_packets++;
451 dev->net->stats.tx_bytes += skb->len; 448 dev->net->stats.tx_bytes += skb->len;
449 dev_consume_skb_any(skb);
452 netif_stop_queue(net); 450 netif_stop_queue(net);
453 } 451 }
454 452
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 262e7a3c23cb..f2d01cb6f958 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1321 dev->net->ethtool_ops = &smsc95xx_ethtool_ops; 1321 dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
1322 dev->net->flags |= IFF_MULTICAST; 1322 dev->net->flags |= IFF_MULTICAST;
1323 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; 1323 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
1324 dev->net->min_mtu = ETH_MIN_MTU;
1325 dev->net->max_mtu = ETH_DATA_LEN;
1324 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 1326 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1325 1327
1326 pdata->dev = dev; 1328 pdata->dev = dev;
@@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
1598 return ret; 1600 return ret;
1599 } 1601 }
1600 1602
1603 cancel_delayed_work_sync(&pdata->carrier_check);
1604
1601 if (pdata->suspend_flags) { 1605 if (pdata->suspend_flags) {
1602 netdev_warn(dev->net, "error during last resume\n"); 1606 netdev_warn(dev->net, "error during last resume\n");
1603 pdata->suspend_flags = 0; 1607 pdata->suspend_flags = 0;
@@ -1840,6 +1844,11 @@ done:
1840 */ 1844 */
1841 if (ret && PMSG_IS_AUTO(message)) 1845 if (ret && PMSG_IS_AUTO(message))
1842 usbnet_resume(intf); 1846 usbnet_resume(intf);
1847
1848 if (ret)
1849 schedule_delayed_work(&pdata->carrier_check,
1850 CARRIER_CHECK_DELAY);
1851
1843 return ret; 1852 return ret;
1844} 1853}
1845 1854
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3e2c041d76ac..cecfd77c9f3c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,7 +70,8 @@ static const unsigned long guest_offloads[] = {
70 VIRTIO_NET_F_GUEST_TSO4, 70 VIRTIO_NET_F_GUEST_TSO4,
71 VIRTIO_NET_F_GUEST_TSO6, 71 VIRTIO_NET_F_GUEST_TSO6,
72 VIRTIO_NET_F_GUEST_ECN, 72 VIRTIO_NET_F_GUEST_ECN,
73 VIRTIO_NET_F_GUEST_UFO 73 VIRTIO_NET_F_GUEST_UFO,
74 VIRTIO_NET_F_GUEST_CSUM
74}; 75};
75 76
76struct virtnet_stat_desc { 77struct virtnet_stat_desc {
@@ -2334,9 +2335,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
2334 if (!vi->guest_offloads) 2335 if (!vi->guest_offloads)
2335 return 0; 2336 return 0;
2336 2337
2337 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
2338 offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
2339
2340 return virtnet_set_guest_offloads(vi, offloads); 2338 return virtnet_set_guest_offloads(vi, offloads);
2341} 2339}
2342 2340
@@ -2346,8 +2344,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
2346 2344
2347 if (!vi->guest_offloads) 2345 if (!vi->guest_offloads)
2348 return 0; 2346 return 0;
2349 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
2350 offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
2351 2347
2352 return virtnet_set_guest_offloads(vi, offloads); 2348 return virtnet_set_guest_offloads(vi, offloads);
2353} 2349}
@@ -2365,8 +2361,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2365 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 2361 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2366 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 2362 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2367 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 2363 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2368 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { 2364 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
2369 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); 2365 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
2366 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
2370 return -EOPNOTSUPP; 2367 return -EOPNOTSUPP;
2371 } 2368 }
2372 2369
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a1c2801ded10..7e49342bae38 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6867,7 +6867,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6867 u32 bitmap; 6867 u32 bitmap;
6868 6868
6869 if (drop) { 6869 if (drop) {
6870 if (vif->type == NL80211_IFTYPE_STATION) { 6870 if (vif && vif->type == NL80211_IFTYPE_STATION) {
6871 bitmap = ~(1 << WMI_MGMT_TID); 6871 bitmap = ~(1 << WMI_MGMT_TID);
6872 list_for_each_entry(arvif, &ar->arvifs, list) { 6872 list_for_each_entry(arvif, &ar->arvifs, list) {
6873 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 6873 if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1e3b5f4a4cf9..f23cb2f3d296 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1251,6 +1251,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1251 struct ath_vif *avp = (void *)vif->drv_priv; 1251 struct ath_vif *avp = (void *)vif->drv_priv;
1252 struct ath_node *an = &avp->mcast_node; 1252 struct ath_node *an = &avp->mcast_node;
1253 1253
1254 mutex_lock(&sc->mutex);
1254 if (IS_ENABLED(CONFIG_ATH9K_TX99)) { 1255 if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
1255 if (sc->cur_chan->nvifs >= 1) { 1256 if (sc->cur_chan->nvifs >= 1) {
1256 mutex_unlock(&sc->mutex); 1257 mutex_unlock(&sc->mutex);
@@ -1259,8 +1260,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1259 sc->tx99_vif = vif; 1260 sc->tx99_vif = vif;
1260 } 1261 }
1261 1262
1262 mutex_lock(&sc->mutex);
1263
1264 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 1263 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
1265 sc->cur_chan->nvifs++; 1264 sc->cur_chan->nvifs++;
1266 1265
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 230a378c26fc..7f0a5bade70a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -6005,7 +6005,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
6005 * for subsequent chanspecs. 6005 * for subsequent chanspecs.
6006 */ 6006 */
6007 channel->flags = IEEE80211_CHAN_NO_HT40 | 6007 channel->flags = IEEE80211_CHAN_NO_HT40 |
6008 IEEE80211_CHAN_NO_80MHZ; 6008 IEEE80211_CHAN_NO_80MHZ |
6009 IEEE80211_CHAN_NO_160MHZ;
6009 ch.bw = BRCMU_CHAN_BW_20; 6010 ch.bw = BRCMU_CHAN_BW_20;
6010 cfg->d11inf.encchspec(&ch); 6011 cfg->d11inf.encchspec(&ch);
6011 chaninfo = ch.chspec; 6012 chaninfo = ch.chspec;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index e7584b842dce..eb5db94f5745 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
193 } 193 }
194 break; 194 break;
195 case BRCMU_CHSPEC_D11AC_BW_160: 195 case BRCMU_CHSPEC_D11AC_BW_160:
196 ch->bw = BRCMU_CHAN_BW_160;
197 ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
198 BRCMU_CHSPEC_D11AC_SB_SHIFT);
196 switch (ch->sb) { 199 switch (ch->sb) {
197 case BRCMU_CHAN_SB_LLL: 200 case BRCMU_CHAN_SB_LLL:
198 ch->control_ch_num -= CH_70MHZ_APART; 201 ch->control_ch_num -= CH_70MHZ_APART;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 2439e98431ee..7492dfb6729b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2017 Intel Deutschland GmbH 8 * Copyright(c) 2017 Intel Deutschland GmbH
9 * Copyright(c) 2018 Intel Corporation
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
26 * BSD LICENSE 27 * BSD LICENSE
27 * 28 *
28 * Copyright(c) 2017 Intel Deutschland GmbH 29 * Copyright(c) 2017 Intel Deutschland GmbH
30 * Copyright(c) 2018 Intel Corporation
29 * All rights reserved. 31 * All rights reserved.
30 * 32 *
31 * Redistribution and use in source and binary forms, with or without 33 * Redistribution and use in source and binary forms, with or without
@@ -81,7 +83,7 @@
81#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) 83#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2)
82#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ 84#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \
83 ACPI_SAR_TABLE_SIZE + 3) 85 ACPI_SAR_TABLE_SIZE + 3)
84#define ACPI_WGDS_WIFI_DATA_SIZE 18 86#define ACPI_WGDS_WIFI_DATA_SIZE 19
85#define ACPI_WRDD_WIFI_DATA_SIZE 2 87#define ACPI_WRDD_WIFI_DATA_SIZE 2
86#define ACPI_SPLC_WIFI_DATA_SIZE 2 88#define ACPI_SPLC_WIFI_DATA_SIZE 2
87 89
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 6b95d0e75889..2b8b50a77990 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -154,7 +154,11 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
154 const struct iwl_fw_runtime_ops *ops, void *ops_ctx, 154 const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
155 struct dentry *dbgfs_dir); 155 struct dentry *dbgfs_dir);
156 156
157void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); 157static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
158{
159 kfree(fwrt->dump.d3_debug_data);
160 fwrt->dump.d3_debug_data = NULL;
161}
158 162
159void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); 163void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
160 164
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index dade206d5511..2ba890445c35 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -893,7 +893,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
893 IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); 893 IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
894 894
895 BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * 895 BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
896 ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); 896 ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
897 897
898 BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); 898 BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
899 899
@@ -928,6 +928,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
928 return -ENOENT; 928 return -ENOENT;
929} 929}
930 930
931static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
932{
933 return -ENOENT;
934}
935
931static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) 936static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
932{ 937{
933 return 0; 938 return 0;
@@ -954,8 +959,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
954 IWL_DEBUG_RADIO(mvm, 959 IWL_DEBUG_RADIO(mvm,
955 "WRDS SAR BIOS table invalid or unavailable. (%d)\n", 960 "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
956 ret); 961 ret);
957 /* if not available, don't fail and don't bother with EWRD */ 962 /*
958 return 0; 963 * If not available, don't fail and don't bother with EWRD.
964 * Return 1 to tell that we can't use WGDS either.
965 */
966 return 1;
959 } 967 }
960 968
961 ret = iwl_mvm_sar_get_ewrd_table(mvm); 969 ret = iwl_mvm_sar_get_ewrd_table(mvm);
@@ -968,9 +976,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
968 /* choose profile 1 (WRDS) as default for both chains */ 976 /* choose profile 1 (WRDS) as default for both chains */
969 ret = iwl_mvm_sar_select_profile(mvm, 1, 1); 977 ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
970 978
971 /* if we don't have profile 0 from BIOS, just skip it */ 979 /*
980 * If we don't have profile 0 from BIOS, just skip it. This
981 * means that SAR Geo will not be enabled either, even if we
982 * have other valid profiles.
983 */
972 if (ret == -ENOENT) 984 if (ret == -ENOENT)
973 return 0; 985 return 1;
974 986
975 return ret; 987 return ret;
976} 988}
@@ -1168,11 +1180,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
1168 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); 1180 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1169 1181
1170 ret = iwl_mvm_sar_init(mvm); 1182 ret = iwl_mvm_sar_init(mvm);
1171 if (ret) 1183 if (ret == 0) {
1172 goto error; 1184 ret = iwl_mvm_sar_geo_init(mvm);
1185 } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
1186 /*
1187 * If basic SAR is not available, we check for WGDS,
1188 * which should *not* be available either. If it is
1189 * available, issue an error, because we can't use SAR
1190 * Geo without basic SAR.
1191 */
1192 IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
1193 }
1173 1194
1174 ret = iwl_mvm_sar_geo_init(mvm); 1195 if (ret < 0)
1175 if (ret)
1176 goto error; 1196 goto error;
1177 1197
1178 iwl_mvm_leds_sync(mvm); 1198 iwl_mvm_leds_sync(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 505b0385d800..00f831d88366 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -301,8 +301,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
301 goto out; 301 goto out;
302 } 302 }
303 303
304 if (changed) 304 if (changed) {
305 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); 305 u32 status = le32_to_cpu(resp->status);
306
307 *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
308 status == MCC_RESP_ILLEGAL);
309 }
306 310
307 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 311 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
308 __le32_to_cpu(resp->n_channels), 312 __le32_to_cpu(resp->n_channels),
@@ -4444,10 +4448,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4444 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 4448 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
4445 } 4449 }
4446 4450
4447 if (!fw_has_capa(&mvm->fw->ucode_capa,
4448 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4449 return;
4450
4451 /* if beacon filtering isn't on mac80211 does it anyway */ 4451 /* if beacon filtering isn't on mac80211 does it anyway */
4452 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) 4452 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4453 return; 4453 return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 3633f27d048a..6fc5cc1f2b5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -539,9 +539,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
539 } 539 }
540 540
541 IWL_DEBUG_LAR(mvm, 541 IWL_DEBUG_LAR(mvm,
542 "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", 542 "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
543 status, mcc, mcc >> 8, mcc & 0xff, 543 status, mcc, mcc >> 8, mcc & 0xff, n_channels);
544 !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
545 544
546exit: 545exit:
547 iwl_free_resp(&cmd); 546 iwl_free_resp(&cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 0e2092526fae..af3fba10abc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -858,6 +858,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
858 iwl_mvm_thermal_exit(mvm); 858 iwl_mvm_thermal_exit(mvm);
859 out_free: 859 out_free:
860 iwl_fw_flush_dump(&mvm->fwrt); 860 iwl_fw_flush_dump(&mvm->fwrt);
861 iwl_fw_runtime_free(&mvm->fwrt);
861 862
862 if (iwlmvm_mod_params.init_dbg) 863 if (iwlmvm_mod_params.init_dbg)
863 return op_mode; 864 return op_mode;
@@ -910,6 +911,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
910 911
911 iwl_mvm_tof_clean(mvm); 912 iwl_mvm_tof_clean(mvm);
912 913
914 iwl_fw_runtime_free(&mvm->fwrt);
913 mutex_destroy(&mvm->mutex); 915 mutex_destroy(&mvm->mutex);
914 mutex_destroy(&mvm->d0i3_suspend_mutex); 916 mutex_destroy(&mvm->d0i3_suspend_mutex);
915 917
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index 0ccbcd7e887d..c30d8f5bbf2a 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,6 +1,12 @@
1config MT76_CORE 1config MT76_CORE
2 tristate 2 tristate
3 3
4config MT76_LEDS
5 bool
6 depends on MT76_CORE
7 depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
8 default y
9
4config MT76_USB 10config MT76_USB
5 tristate 11 tristate
6 depends on MT76_CORE 12 depends on MT76_CORE
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 2a699e8b79bf..7d219ff2d480 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -345,9 +345,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
345 mt76_check_sband(dev, NL80211_BAND_2GHZ); 345 mt76_check_sband(dev, NL80211_BAND_2GHZ);
346 mt76_check_sband(dev, NL80211_BAND_5GHZ); 346 mt76_check_sband(dev, NL80211_BAND_5GHZ);
347 347
348 ret = mt76_led_init(dev); 348 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
349 if (ret) 349 ret = mt76_led_init(dev);
350 return ret; 350 if (ret)
351 return ret;
352 }
351 353
352 return ieee80211_register_hw(hw); 354 return ieee80211_register_hw(hw);
353} 355}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 47c42c607964..7806963b1905 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -71,7 +71,6 @@ struct mt76x02_dev {
71 struct mac_address macaddr_list[8]; 71 struct mac_address macaddr_list[8];
72 72
73 struct mutex phy_mutex; 73 struct mutex phy_mutex;
74 struct mutex mutex;
75 74
76 u8 txdone_seq; 75 u8 txdone_seq;
77 DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); 76 DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 3824290b219d..fd125722d1fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -507,8 +507,10 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
507 mt76x2_dfs_init_detector(dev); 507 mt76x2_dfs_init_detector(dev);
508 508
509 /* init led callbacks */ 509 /* init led callbacks */
510 dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; 510 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
511 dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; 511 dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
512 dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
513 }
512 514
513 ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, 515 ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
514 ARRAY_SIZE(mt76x02_rates)); 516 ARRAY_SIZE(mt76x02_rates));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 034a06295668..3f001bd6806c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -272,9 +272,9 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
272 if (val != ~0 && val > 0xffff) 272 if (val != ~0 && val > 0xffff)
273 return -EINVAL; 273 return -EINVAL;
274 274
275 mutex_lock(&dev->mutex); 275 mutex_lock(&dev->mt76.mutex);
276 mt76x2_mac_set_tx_protection(dev, val); 276 mt76x2_mac_set_tx_protection(dev, val);
277 mutex_unlock(&dev->mutex); 277 mutex_unlock(&dev->mt76.mutex);
278 278
279 return 0; 279 return 0;
280} 280}
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 4c2154b9e6a3..bd10165d7eec 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -285,7 +285,7 @@ static int wl1271_probe(struct sdio_func *func,
285 struct resource res[2]; 285 struct resource res[2];
286 mmc_pm_flag_t mmcflags; 286 mmc_pm_flag_t mmcflags;
287 int ret = -ENOMEM; 287 int ret = -ENOMEM;
288 int irq, wakeirq; 288 int irq, wakeirq, num_irqs;
289 const char *chip_family; 289 const char *chip_family;
290 290
291 /* We are only able to handle the wlan function */ 291 /* We are only able to handle the wlan function */
@@ -353,12 +353,17 @@ static int wl1271_probe(struct sdio_func *func,
353 irqd_get_trigger_type(irq_get_irq_data(irq)); 353 irqd_get_trigger_type(irq_get_irq_data(irq));
354 res[0].name = "irq"; 354 res[0].name = "irq";
355 355
356 res[1].start = wakeirq;
357 res[1].flags = IORESOURCE_IRQ |
358 irqd_get_trigger_type(irq_get_irq_data(wakeirq));
359 res[1].name = "wakeirq";
360 356
361 ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); 357 if (wakeirq > 0) {
358 res[1].start = wakeirq;
359 res[1].flags = IORESOURCE_IRQ |
360 irqd_get_trigger_type(irq_get_irq_data(wakeirq));
361 res[1].name = "wakeirq";
362 num_irqs = 2;
363 } else {
364 num_irqs = 1;
365 }
366 ret = platform_device_add_resources(glue->core, res, num_irqs);
362 if (ret) { 367 if (ret) {
363 dev_err(glue->dev, "can't add resources\n"); 368 dev_err(glue->dev, "can't add resources\n");
364 goto out_dev_put; 369 goto out_dev_put;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2e65be8b1387..3cf1b773158e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1519 if (ns->ndev) 1519 if (ns->ndev)
1520 nvme_nvm_update_nvm_info(ns); 1520 nvme_nvm_update_nvm_info(ns);
1521#ifdef CONFIG_NVME_MULTIPATH 1521#ifdef CONFIG_NVME_MULTIPATH
1522 if (ns->head->disk) 1522 if (ns->head->disk) {
1523 nvme_update_disk_info(ns->head->disk, ns, id); 1523 nvme_update_disk_info(ns->head->disk, ns, id);
1524 blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1525 }
1524#endif 1526#endif
1525} 1527}
1526 1528
@@ -3312,6 +3314,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3312 struct nvme_ns *ns, *next; 3314 struct nvme_ns *ns, *next;
3313 LIST_HEAD(ns_list); 3315 LIST_HEAD(ns_list);
3314 3316
3317 /* prevent racing with ns scanning */
3318 flush_work(&ctrl->scan_work);
3319
3315 /* 3320 /*
3316 * The dead states indicates the controller was not gracefully 3321 * The dead states indicates the controller was not gracefully
3317 * disconnected. In that case, we won't be able to flush any data while 3322 * disconnected. In that case, we won't be able to flush any data while
@@ -3474,7 +3479,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
3474 nvme_mpath_stop(ctrl); 3479 nvme_mpath_stop(ctrl);
3475 nvme_stop_keep_alive(ctrl); 3480 nvme_stop_keep_alive(ctrl);
3476 flush_work(&ctrl->async_event_work); 3481 flush_work(&ctrl->async_event_work);
3477 flush_work(&ctrl->scan_work);
3478 cancel_work_sync(&ctrl->fw_act_work); 3482 cancel_work_sync(&ctrl->fw_act_work);
3479 if (ctrl->ops->stop_ctrl) 3483 if (ctrl->ops->stop_ctrl)
3480 ctrl->ops->stop_ctrl(ctrl); 3484 ctrl->ops->stop_ctrl(ctrl);
@@ -3583,7 +3587,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3583 3587
3584 return 0; 3588 return 0;
3585out_free_name: 3589out_free_name:
3586 kfree_const(dev->kobj.name); 3590 kfree_const(ctrl->device->kobj.name);
3587out_release_instance: 3591out_release_instance:
3588 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3592 ida_simple_remove(&nvme_instance_ida, ctrl->instance);
3589out: 3593out:
@@ -3605,7 +3609,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
3605 down_read(&ctrl->namespaces_rwsem); 3609 down_read(&ctrl->namespaces_rwsem);
3606 3610
3607 /* Forcibly unquiesce queues to avoid blocking dispatch */ 3611 /* Forcibly unquiesce queues to avoid blocking dispatch */
3608 if (ctrl->admin_q) 3612 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
3609 blk_mq_unquiesce_queue(ctrl->admin_q); 3613 blk_mq_unquiesce_queue(ctrl->admin_q);
3610 3614
3611 list_for_each_entry(ns, &ctrl->namespaces, list) 3615 list_for_each_entry(ns, &ctrl->namespaces, list)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0b70c8bab045..feb86b59170e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -152,6 +152,7 @@ struct nvme_fc_ctrl {
152 152
153 bool ioq_live; 153 bool ioq_live;
154 bool assoc_active; 154 bool assoc_active;
155 atomic_t err_work_active;
155 u64 association_id; 156 u64 association_id;
156 157
157 struct list_head ctrl_list; /* rport->ctrl_list */ 158 struct list_head ctrl_list; /* rport->ctrl_list */
@@ -160,6 +161,7 @@ struct nvme_fc_ctrl {
160 struct blk_mq_tag_set tag_set; 161 struct blk_mq_tag_set tag_set;
161 162
162 struct delayed_work connect_work; 163 struct delayed_work connect_work;
164 struct work_struct err_work;
163 165
164 struct kref ref; 166 struct kref ref;
165 u32 flags; 167 u32 flags;
@@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1531 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; 1533 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1532 int i; 1534 int i;
1533 1535
1536 /* ensure we've initialized the ops once */
1537 if (!(aen_op->flags & FCOP_FLAGS_AEN))
1538 return;
1539
1534 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) 1540 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1535 __nvme_fc_abort_op(ctrl, aen_op); 1541 __nvme_fc_abort_op(ctrl, aen_op);
1536} 1542}
@@ -1746,12 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1746 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; 1752 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1747 int res; 1753 int res;
1748 1754
1749 nvme_req(rq)->ctrl = &ctrl->ctrl;
1750 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); 1755 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
1751 if (res) 1756 if (res)
1752 return res; 1757 return res;
1753 op->op.fcp_req.first_sgl = &op->sgl[0]; 1758 op->op.fcp_req.first_sgl = &op->sgl[0];
1754 op->op.fcp_req.private = &op->priv[0]; 1759 op->op.fcp_req.private = &op->priv[0];
1760 nvme_req(rq)->ctrl = &ctrl->ctrl;
1755 return res; 1761 return res;
1756} 1762}
1757 1763
@@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2049static void 2055static void
2050nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) 2056nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2051{ 2057{
2052 /* only proceed if in LIVE state - e.g. on first error */ 2058 int active;
2059
2060 /*
2061 * if an error (io timeout, etc) while (re)connecting,
2062 * it's an error on creating the new association.
2063 * Start the error recovery thread if it hasn't already
2064 * been started. It is expected there could be multiple
2065 * ios hitting this path before things are cleaned up.
2066 */
2067 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2068 active = atomic_xchg(&ctrl->err_work_active, 1);
2069 if (!active && !schedule_work(&ctrl->err_work)) {
2070 atomic_set(&ctrl->err_work_active, 0);
2071 WARN_ON(1);
2072 }
2073 return;
2074 }
2075
2076 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
2053 if (ctrl->ctrl.state != NVME_CTRL_LIVE) 2077 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2054 return; 2078 return;
2055 2079
@@ -2814,6 +2838,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2814{ 2838{
2815 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2839 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2816 2840
2841 cancel_work_sync(&ctrl->err_work);
2817 cancel_delayed_work_sync(&ctrl->connect_work); 2842 cancel_delayed_work_sync(&ctrl->connect_work);
2818 /* 2843 /*
2819 * kill the association on the link side. this will block 2844 * kill the association on the link side. this will block
@@ -2866,23 +2891,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2866} 2891}
2867 2892
2868static void 2893static void
2869nvme_fc_reset_ctrl_work(struct work_struct *work) 2894__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
2870{ 2895{
2871 struct nvme_fc_ctrl *ctrl = 2896 nvme_stop_keep_alive(&ctrl->ctrl);
2872 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2873 int ret;
2874
2875 nvme_stop_ctrl(&ctrl->ctrl);
2876 2897
2877 /* will block will waiting for io to terminate */ 2898 /* will block will waiting for io to terminate */
2878 nvme_fc_delete_association(ctrl); 2899 nvme_fc_delete_association(ctrl);
2879 2900
2880 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 2901 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
2902 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
2881 dev_err(ctrl->ctrl.device, 2903 dev_err(ctrl->ctrl.device,
2882 "NVME-FC{%d}: error_recovery: Couldn't change state " 2904 "NVME-FC{%d}: error_recovery: Couldn't change state "
2883 "to CONNECTING\n", ctrl->cnum); 2905 "to CONNECTING\n", ctrl->cnum);
2884 return; 2906}
2885 } 2907
2908static void
2909nvme_fc_reset_ctrl_work(struct work_struct *work)
2910{
2911 struct nvme_fc_ctrl *ctrl =
2912 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2913 int ret;
2914
2915 __nvme_fc_terminate_io(ctrl);
2916
2917 nvme_stop_ctrl(&ctrl->ctrl);
2886 2918
2887 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) 2919 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
2888 ret = nvme_fc_create_association(ctrl); 2920 ret = nvme_fc_create_association(ctrl);
@@ -2897,6 +2929,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
2897 ctrl->cnum); 2929 ctrl->cnum);
2898} 2930}
2899 2931
2932static void
2933nvme_fc_connect_err_work(struct work_struct *work)
2934{
2935 struct nvme_fc_ctrl *ctrl =
2936 container_of(work, struct nvme_fc_ctrl, err_work);
2937
2938 __nvme_fc_terminate_io(ctrl);
2939
2940 atomic_set(&ctrl->err_work_active, 0);
2941
2942 /*
2943 * Rescheduling the connection after recovering
2944 * from the io error is left to the reconnect work
2945 * item, which is what should have stalled waiting on
2946 * the io that had the error that scheduled this work.
2947 */
2948}
2949
2900static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 2950static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2901 .name = "fc", 2951 .name = "fc",
2902 .module = THIS_MODULE, 2952 .module = THIS_MODULE,
@@ -3007,6 +3057,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3007 ctrl->cnum = idx; 3057 ctrl->cnum = idx;
3008 ctrl->ioq_live = false; 3058 ctrl->ioq_live = false;
3009 ctrl->assoc_active = false; 3059 ctrl->assoc_active = false;
3060 atomic_set(&ctrl->err_work_active, 0);
3010 init_waitqueue_head(&ctrl->ioabort_wait); 3061 init_waitqueue_head(&ctrl->ioabort_wait);
3011 3062
3012 get_device(ctrl->dev); 3063 get_device(ctrl->dev);
@@ -3014,6 +3065,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3014 3065
3015 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); 3066 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3016 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 3067 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3068 INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
3017 spin_lock_init(&ctrl->lock); 3069 spin_lock_init(&ctrl->lock);
3018 3070
3019 /* io queue count */ 3071 /* io queue count */
@@ -3103,6 +3155,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3103fail_ctrl: 3155fail_ctrl:
3104 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); 3156 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3105 cancel_work_sync(&ctrl->ctrl.reset_work); 3157 cancel_work_sync(&ctrl->ctrl.reset_work);
3158 cancel_work_sync(&ctrl->err_work);
3106 cancel_delayed_work_sync(&ctrl->connect_work); 3159 cancel_delayed_work_sync(&ctrl->connect_work);
3107 3160
3108 ctrl->ctrl.opts = NULL; 3161 ctrl->ctrl.opts = NULL;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5e3cc8c59a39..9901afd804ce 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
285 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 285 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
286 /* set to a default value for 512 until disk is validated */ 286 /* set to a default value for 512 until disk is validated */
287 blk_queue_logical_block_size(q, 512); 287 blk_queue_logical_block_size(q, 512);
288 blk_set_stacking_limits(&q->limits);
288 289
289 /* we need to propagate up the VMC settings */ 290 /* we need to propagate up the VMC settings */
290 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) 291 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cee79cb388af..081cbdcce880 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -531,6 +531,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
531static inline int nvme_mpath_init(struct nvme_ctrl *ctrl, 531static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
532 struct nvme_id_ctrl *id) 532 struct nvme_id_ctrl *id)
533{ 533{
534 if (ctrl->subsys->cmic & (1 << 3))
535 dev_warn(ctrl->device,
536"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
534 return 0; 537 return 0;
535} 538}
536static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) 539static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d181cafedc58..ab6ec7295bf9 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
184 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); 184 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
185 if (ib_dma_mapping_error(ibdev, qe->dma)) { 185 if (ib_dma_mapping_error(ibdev, qe->dma)) {
186 kfree(qe->data); 186 kfree(qe->data);
187 qe->data = NULL;
187 return -ENOMEM; 188 return -ENOMEM;
188 } 189 }
189 190
@@ -823,6 +824,7 @@ out_free_tagset:
823out_free_async_qe: 824out_free_async_qe:
824 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, 825 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
825 sizeof(struct nvme_command), DMA_TO_DEVICE); 826 sizeof(struct nvme_command), DMA_TO_DEVICE);
827 ctrl->async_event_sqe.data = NULL;
826out_free_queue: 828out_free_queue:
827 nvme_rdma_free_queue(&ctrl->queues[0]); 829 nvme_rdma_free_queue(&ctrl->queues[0]);
828 return error; 830 return error;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f4efe289dc7b..a5f9bbce863f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
420 struct pci_dev *p2p_dev; 420 struct pci_dev *p2p_dev;
421 int ret; 421 int ret;
422 422
423 if (!ctrl->p2p_client) 423 if (!ctrl->p2p_client || !ns->use_p2pmem)
424 return; 424 return;
425 425
426 if (ns->p2p_dev) { 426 if (ns->p2p_dev) {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ddce100be57a..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
122 int inline_page_count; 122 int inline_page_count;
123}; 123};
124 124
125static struct workqueue_struct *nvmet_rdma_delete_wq;
126static bool nvmet_rdma_use_srq; 125static bool nvmet_rdma_use_srq;
127module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444); 126module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
128MODULE_PARM_DESC(use_srq, "Use shared receive queue."); 127MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1274 1273
1275 if (queue->host_qid == 0) { 1274 if (queue->host_qid == 0) {
1276 /* Let inflight controller teardown complete */ 1275 /* Let inflight controller teardown complete */
1277 flush_workqueue(nvmet_rdma_delete_wq); 1276 flush_scheduled_work();
1278 } 1277 }
1279 1278
1280 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); 1279 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1281 if (ret) { 1280 if (ret) {
1282 queue_work(nvmet_rdma_delete_wq, &queue->release_work); 1281 schedule_work(&queue->release_work);
1283 /* Destroying rdma_cm id is not needed here */ 1282 /* Destroying rdma_cm id is not needed here */
1284 return 0; 1283 return 0;
1285 } 1284 }
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1344 1343
1345 if (disconnect) { 1344 if (disconnect) {
1346 rdma_disconnect(queue->cm_id); 1345 rdma_disconnect(queue->cm_id);
1347 queue_work(nvmet_rdma_delete_wq, &queue->release_work); 1346 schedule_work(&queue->release_work);
1348 } 1347 }
1349} 1348}
1350 1349
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1374 mutex_unlock(&nvmet_rdma_queue_mutex); 1373 mutex_unlock(&nvmet_rdma_queue_mutex);
1375 1374
1376 pr_err("failed to connect queue %d\n", queue->idx); 1375 pr_err("failed to connect queue %d\n", queue->idx);
1377 queue_work(nvmet_rdma_delete_wq, &queue->release_work); 1376 schedule_work(&queue->release_work);
1378} 1377}
1379 1378
1380/** 1379/**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
1656 if (ret) 1655 if (ret)
1657 goto err_ib_client; 1656 goto err_ib_client;
1658 1657
1659 nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
1660 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1661 if (!nvmet_rdma_delete_wq) {
1662 ret = -ENOMEM;
1663 goto err_unreg_transport;
1664 }
1665
1666 return 0; 1658 return 0;
1667 1659
1668err_unreg_transport:
1669 nvmet_unregister_transport(&nvmet_rdma_ops);
1670err_ib_client: 1660err_ib_client:
1671 ib_unregister_client(&nvmet_rdma_ib_client); 1661 ib_unregister_client(&nvmet_rdma_ib_client);
1672 return ret; 1662 return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
1674 1664
1675static void __exit nvmet_rdma_exit(void) 1665static void __exit nvmet_rdma_exit(void)
1676{ 1666{
1677 destroy_workqueue(nvmet_rdma_delete_wq);
1678 nvmet_unregister_transport(&nvmet_rdma_ops); 1667 nvmet_unregister_transport(&nvmet_rdma_ops);
1679 ib_unregister_client(&nvmet_rdma_ib_client); 1668 ib_unregister_client(&nvmet_rdma_ib_client);
1680 WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); 1669 WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9b18ce90f907..27f67dfa649d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -44,6 +44,7 @@ struct nvmem_cell {
44 int bytes; 44 int bytes;
45 int bit_offset; 45 int bit_offset;
46 int nbits; 46 int nbits;
47 struct device_node *np;
47 struct nvmem_device *nvmem; 48 struct nvmem_device *nvmem;
48 struct list_head node; 49 struct list_head node;
49}; 50};
@@ -298,6 +299,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell)
298 mutex_lock(&nvmem_mutex); 299 mutex_lock(&nvmem_mutex);
299 list_del(&cell->node); 300 list_del(&cell->node);
300 mutex_unlock(&nvmem_mutex); 301 mutex_unlock(&nvmem_mutex);
302 of_node_put(cell->np);
301 kfree(cell->name); 303 kfree(cell->name);
302 kfree(cell); 304 kfree(cell);
303} 305}
@@ -530,6 +532,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
530 return -ENOMEM; 532 return -ENOMEM;
531 533
532 cell->nvmem = nvmem; 534 cell->nvmem = nvmem;
535 cell->np = of_node_get(child);
533 cell->offset = be32_to_cpup(addr++); 536 cell->offset = be32_to_cpup(addr++);
534 cell->bytes = be32_to_cpup(addr); 537 cell->bytes = be32_to_cpup(addr);
535 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child); 538 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -960,14 +963,13 @@ out:
960 963
961#if IS_ENABLED(CONFIG_OF) 964#if IS_ENABLED(CONFIG_OF)
962static struct nvmem_cell * 965static struct nvmem_cell *
963nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index) 966nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
964{ 967{
965 struct nvmem_cell *cell = NULL; 968 struct nvmem_cell *cell = NULL;
966 int i = 0;
967 969
968 mutex_lock(&nvmem_mutex); 970 mutex_lock(&nvmem_mutex);
969 list_for_each_entry(cell, &nvmem->cells, node) { 971 list_for_each_entry(cell, &nvmem->cells, node) {
970 if (index == i++) 972 if (np == cell->np)
971 break; 973 break;
972 } 974 }
973 mutex_unlock(&nvmem_mutex); 975 mutex_unlock(&nvmem_mutex);
@@ -1011,7 +1013,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1011 if (IS_ERR(nvmem)) 1013 if (IS_ERR(nvmem))
1012 return ERR_CAST(nvmem); 1014 return ERR_CAST(nvmem);
1013 1015
1014 cell = nvmem_find_cell_by_index(nvmem, index); 1016 cell = nvmem_find_cell_by_node(nvmem, cell_np);
1015 if (!cell) { 1017 if (!cell) {
1016 __nvmem_device_put(nvmem); 1018 __nvmem_device_put(nvmem);
1017 return ERR_PTR(-ENOENT); 1019 return ERR_PTR(-ENOENT);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 0f27fad9fe94..5592437bb3d1 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
149 * set by the driver. 149 * set by the driver.
150 */ 150 */
151 mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1); 151 mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
152 dev->bus_dma_mask = mask;
153 dev->coherent_dma_mask &= mask; 152 dev->coherent_dma_mask &= mask;
154 *dev->dma_mask &= mask; 153 *dev->dma_mask &= mask;
154 /* ...but only set bus mask if we found valid dma-ranges earlier */
155 if (!ret)
156 dev->bus_dma_mask = mask;
155 157
156 coherent = of_dma_is_coherent(np); 158 coherent = of_dma_is_coherent(np);
157 dev_dbg(dev, "device is%sdma coherent\n", 159 dev_dbg(dev, "device is%sdma coherent\n",
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 35c64a4295e0..fe6b13608e51 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -104,9 +104,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
104 distance = of_read_number(matrix, 1); 104 distance = of_read_number(matrix, 1);
105 matrix++; 105 matrix++;
106 106
107 if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
108 (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
109 pr_err("Invalid distance[node%d -> node%d] = %d\n",
110 nodea, nodeb, distance);
111 return -EINVAL;
112 }
113
107 numa_set_distance(nodea, nodeb, distance); 114 numa_set_distance(nodea, nodeb, distance);
108 pr_debug("distance[node%d -> node%d] = %d\n",
109 nodea, nodeb, distance);
110 115
111 /* Set default distance of node B->A same as A->B */ 116 /* Set default distance of node B->A same as A->B */
112 if (nodeb > nodea) 117 if (nodeb > nodea)
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 5a4b47958073..38a08805a30c 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -579,10 +579,8 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
579 */ 579 */
580 count = of_count_phandle_with_args(dev->of_node, 580 count = of_count_phandle_with_args(dev->of_node,
581 "operating-points-v2", NULL); 581 "operating-points-v2", NULL);
582 if (count != 1) 582 if (count == 1)
583 return -ENODEV; 583 index = 0;
584
585 index = 0;
586 } 584 }
587 585
588 opp_table = dev_pm_opp_get_opp_table_indexed(dev, index); 586 opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 9e5a9a3112c9..1c69c404df11 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
288 int ret; 288 int ret;
289 289
290 vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data, 290 vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
291 new_supply_vbb->u_volt); 291 new_supply_vdd->u_volt);
292
293 if (new_supply_vdd->u_volt_min < vdd_uv)
294 new_supply_vdd->u_volt_min = vdd_uv;
292 295
293 /* Scaling up? Scale voltage before frequency */ 296 /* Scaling up? Scale voltage before frequency */
294 if (freq > old_freq) { 297 if (freq > old_freq) {
@@ -414,7 +417,6 @@ static struct platform_driver ti_opp_supply_driver = {
414 .probe = ti_opp_supply_probe, 417 .probe = ti_opp_supply_probe,
415 .driver = { 418 .driver = {
416 .name = "ti_opp_supply", 419 .name = "ti_opp_supply",
417 .owner = THIS_MODULE,
418 .of_match_table = of_match_ptr(ti_opp_supply_of_match), 420 .of_match_table = of_match_ptr(ti_opp_supply_of_match),
419 }, 421 },
420}; 422};
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 2cbef2d7c207..88af6bff945f 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -81,8 +81,6 @@ struct imx6_pcie {
81#define PCIE_PL_PFLR_FORCE_LINK (1 << 15) 81#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
82#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) 82#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
83#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) 83#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
84#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
85#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
86 84
87#define PCIE_PHY_CTRL (PL_OFFSET + 0x114) 85#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
88#define PCIE_PHY_CTRL_DATA_LOC 0 86#define PCIE_PHY_CTRL_DATA_LOC 0
@@ -711,12 +709,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
711 return 0; 709 return 0;
712} 710}
713 711
714static int imx6_pcie_link_up(struct dw_pcie *pci)
715{
716 return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
717 PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
718}
719
720static const struct dw_pcie_host_ops imx6_pcie_host_ops = { 712static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
721 .host_init = imx6_pcie_host_init, 713 .host_init = imx6_pcie_host_init,
722}; 714};
@@ -749,7 +741,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
749} 741}
750 742
751static const struct dw_pcie_ops dw_pcie_ops = { 743static const struct dw_pcie_ops dw_pcie_ops = {
752 .link_up = imx6_pcie_link_up, 744 /* No special ops needed, but pcie-designware still expects this struct */
753}; 745};
754 746
755#ifdef CONFIG_PM_SLEEP 747#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 3724d3ef7008..7aa9a82b7ebd 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -88,7 +88,7 @@ static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
88 int i; 88 int i;
89 89
90 for (i = 0; i < PCIE_IATU_NUM; i++) 90 for (i = 0; i < PCIE_IATU_NUM; i++)
91 dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i); 91 dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
92} 92}
93 93
94static int ls1021_pcie_link_up(struct dw_pcie *pci) 94static int ls1021_pcie_link_up(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1e7b02221eac..de8635af4cde 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -440,7 +440,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
440 tbl_offset = dw_pcie_readl_dbi(pci, reg); 440 tbl_offset = dw_pcie_readl_dbi(pci, reg);
441 bir = (tbl_offset & PCI_MSIX_TABLE_BIR); 441 bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
442 tbl_offset &= PCI_MSIX_TABLE_OFFSET; 442 tbl_offset &= PCI_MSIX_TABLE_OFFSET;
443 tbl_offset >>= 3;
444 443
445 reg = PCI_BASE_ADDRESS_0 + (4 * bir); 444 reg = PCI_BASE_ADDRESS_0 + (4 * bir);
446 bar_addr_upper = 0; 445 bar_addr_upper = 0;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 2a4aa6468579..921db6f80340 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -793,15 +793,10 @@ static void pci_acpi_setup(struct device *dev)
793{ 793{
794 struct pci_dev *pci_dev = to_pci_dev(dev); 794 struct pci_dev *pci_dev = to_pci_dev(dev);
795 struct acpi_device *adev = ACPI_COMPANION(dev); 795 struct acpi_device *adev = ACPI_COMPANION(dev);
796 int node;
797 796
798 if (!adev) 797 if (!adev)
799 return; 798 return;
800 799
801 node = acpi_get_node(adev->handle);
802 if (node != NUMA_NO_NODE)
803 set_dev_node(dev, node);
804
805 pci_acpi_optimize_delay(pci_dev, adev->handle); 800 pci_acpi_optimize_delay(pci_dev, adev->handle);
806 801
807 pci_acpi_add_pm_notifier(adev, pci_dev); 802 pci_acpi_add_pm_notifier(adev, pci_dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d068f11d08a7..c9d8e3c837de 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5556,9 +5556,13 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5556 u32 lnkcap2, lnkcap; 5556 u32 lnkcap2, lnkcap;
5557 5557
5558 /* 5558 /*
5559 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link 5559 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
5560 * Speeds Vector in Link Capabilities 2 when supported, falling 5560 * implementation note there recommends using the Supported Link
5561 * back to Max Link Speed in Link Capabilities otherwise. 5561 * Speeds Vector in Link Capabilities 2 when supported.
5562 *
5563 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5564 * should use the Supported Link Speeds field in Link Capabilities,
5565 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5562 */ 5566 */
5563 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); 5567 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5564 if (lnkcap2) { /* PCIe r3.0-compliant */ 5568 if (lnkcap2) { /* PCIe r3.0-compliant */
@@ -5574,16 +5578,10 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5574 } 5578 }
5575 5579
5576 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); 5580 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5577 if (lnkcap) { 5581 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5578 if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) 5582 return PCIE_SPEED_5_0GT;
5579 return PCIE_SPEED_16_0GT; 5583 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5580 else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) 5584 return PCIE_SPEED_2_5GT;
5581 return PCIE_SPEED_8_0GT;
5582 else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
5583 return PCIE_SPEED_5_0GT;
5584 else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
5585 return PCIE_SPEED_2_5GT;
5586 }
5587 5585
5588 return PCI_SPEED_UNKNOWN; 5586 return PCI_SPEED_UNKNOWN;
5589} 5587}
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 9ce531194f8a..6d4b44b569bc 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = {
231 .mask_core_ready = CORE_READY_STATUS, 231 .mask_core_ready = CORE_READY_STATUS,
232 .has_pll_override = true, 232 .has_pll_override = true,
233 .autoresume_en = BIT(0), 233 .autoresume_en = BIT(0),
234 .update_tune1_with_efuse = true,
234}; 235};
235 236
236static const char * const qusb2_phy_vreg_names[] = { 237static const char * const qusb2_phy_vreg_names[] = {
@@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
402 403
403 /* 404 /*
404 * Read efuse register having TUNE2/1 parameter's high nibble. 405 * Read efuse register having TUNE2/1 parameter's high nibble.
405 * If efuse register shows value as 0x0, or if we fail to find 406 * If efuse register shows value as 0x0 (indicating value is not
406 * a valid efuse register settings, then use default value 407 * fused), or if we fail to find a valid efuse register setting,
407 * as 0xB for high nibble that we have already set while 408 * then use default value for high nibble that we have already
408 * configuring phy. 409 * set while configuring the phy.
409 */ 410 */
410 val = nvmem_cell_read(qphy->cell, NULL); 411 val = nvmem_cell_read(qphy->cell, NULL);
411 if (IS_ERR(val) || !val[0]) { 412 if (IS_ERR(val) || !val[0]) {
@@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
415 416
416 /* Fused TUNE1/2 value is the higher nibble only */ 417 /* Fused TUNE1/2 value is the higher nibble only */
417 if (cfg->update_tune1_with_efuse) 418 if (cfg->update_tune1_with_efuse)
418 qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], 419 qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
419 val[0] << 0x4); 420 val[0] << HSTX_TRIM_SHIFT,
421 HSTX_TRIM_MASK);
420 else 422 else
421 qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2], 423 qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
422 val[0] << 0x4); 424 val[0] << HSTX_TRIM_SHIFT,
423 425 HSTX_TRIM_MASK);
424} 426}
425 427
426static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode) 428static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode)
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
index 467e8147972b..9c85231a6dbc 100644
--- a/drivers/phy/socionext/Kconfig
+++ b/drivers/phy/socionext/Kconfig
@@ -26,7 +26,8 @@ config PHY_UNIPHIER_USB3
26 26
27config PHY_UNIPHIER_PCIE 27config PHY_UNIPHIER_PCIE
28 tristate "Uniphier PHY driver for PCIe controller" 28 tristate "Uniphier PHY driver for PCIe controller"
29 depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF 29 depends on ARCH_UNIPHIER || COMPILE_TEST
30 depends on OF && HAS_IOMEM
30 default PCIE_UNIPHIER 31 default PCIE_UNIPHIER
31 select GENERIC_PHY 32 select GENERIC_PHY
32 help 33 help
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 4ceb06f8a33c..4edeb4cae72a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -830,7 +830,7 @@ static struct meson_bank meson_gxbb_periphs_banks[] = {
830 830
831static struct meson_bank meson_gxbb_aobus_banks[] = { 831static struct meson_bank meson_gxbb_aobus_banks[] = {
832 /* name first last irq pullen pull dir out in */ 832 /* name first last irq pullen pull dir out in */
833 BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), 833 BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
834}; 834};
835 835
836static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = { 836static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 7dae1d7bf6b0..158f618f1695 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -807,7 +807,7 @@ static struct meson_bank meson_gxl_periphs_banks[] = {
807 807
808static struct meson_bank meson_gxl_aobus_banks[] = { 808static struct meson_bank meson_gxl_aobus_banks[] = {
809 /* name first last irq pullen pull dir out in */ 809 /* name first last irq pullen pull dir out in */
810 BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), 810 BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
811}; 811};
812 812
813static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = { 813static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index f8b778a7d471..53d449076dee 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -192,7 +192,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
192 dev_dbg(pc->dev, "pin %u: disable bias\n", pin); 192 dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
193 193
194 meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit); 194 meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
195 ret = regmap_update_bits(pc->reg_pull, reg, 195 ret = regmap_update_bits(pc->reg_pullen, reg,
196 BIT(bit), 0); 196 BIT(bit), 0);
197 if (ret) 197 if (ret)
198 return ret; 198 return ret;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index c6d79315218f..86466173114d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1053,7 +1053,7 @@ static struct meson_bank meson8_cbus_banks[] = {
1053 1053
1054static struct meson_bank meson8_aobus_banks[] = { 1054static struct meson_bank meson8_aobus_banks[] = {
1055 /* name first last irq pullen pull dir out in */ 1055 /* name first last irq pullen pull dir out in */
1056 BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), 1056 BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
1057}; 1057};
1058 1058
1059static struct meson_pinctrl_data meson8_cbus_pinctrl_data = { 1059static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index bb2a30964fc6..647ad15d5c3c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -906,7 +906,7 @@ static struct meson_bank meson8b_cbus_banks[] = {
906 906
907static struct meson_bank meson8b_aobus_banks[] = { 907static struct meson_bank meson8b_aobus_banks[] = {
908 /* name first lastc irq pullen pull dir out in */ 908 /* name first lastc irq pullen pull dir out in */
909 BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), 909 BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
910}; 910};
911 911
912static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = { 912static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index e79f2a181ad2..b9ec4a16db1f 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
50 tv64.tv_sec = rtc_tm_to_time64(&tm); 50 tv64.tv_sec = rtc_tm_to_time64(&tm);
51 51
52#if BITS_PER_LONG == 32 52#if BITS_PER_LONG == 32
53 if (tv64.tv_sec > INT_MAX) 53 if (tv64.tv_sec > INT_MAX) {
54 err = -ERANGE;
54 goto err_read; 55 goto err_read;
56 }
55#endif 57#endif
56 58
57 err = do_settimeofday64(&tv64); 59 err = do_settimeofday64(&tv64);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index df0c5776d49b..a5a19ff10535 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -257,6 +257,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
257 struct cmos_rtc *cmos = dev_get_drvdata(dev); 257 struct cmos_rtc *cmos = dev_get_drvdata(dev);
258 unsigned char rtc_control; 258 unsigned char rtc_control;
259 259
260 /* This not only a rtc_op, but also called directly */
260 if (!is_valid_irq(cmos->irq)) 261 if (!is_valid_irq(cmos->irq))
261 return -EIO; 262 return -EIO;
262 263
@@ -452,6 +453,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
452 unsigned char mon, mday, hrs, min, sec, rtc_control; 453 unsigned char mon, mday, hrs, min, sec, rtc_control;
453 int ret; 454 int ret;
454 455
456 /* This not only a rtc_op, but also called directly */
455 if (!is_valid_irq(cmos->irq)) 457 if (!is_valid_irq(cmos->irq))
456 return -EIO; 458 return -EIO;
457 459
@@ -516,9 +518,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
516 struct cmos_rtc *cmos = dev_get_drvdata(dev); 518 struct cmos_rtc *cmos = dev_get_drvdata(dev);
517 unsigned long flags; 519 unsigned long flags;
518 520
519 if (!is_valid_irq(cmos->irq))
520 return -EINVAL;
521
522 spin_lock_irqsave(&rtc_lock, flags); 521 spin_lock_irqsave(&rtc_lock, flags);
523 522
524 if (enabled) 523 if (enabled)
@@ -579,6 +578,12 @@ static const struct rtc_class_ops cmos_rtc_ops = {
579 .alarm_irq_enable = cmos_alarm_irq_enable, 578 .alarm_irq_enable = cmos_alarm_irq_enable,
580}; 579};
581 580
581static const struct rtc_class_ops cmos_rtc_ops_no_alarm = {
582 .read_time = cmos_read_time,
583 .set_time = cmos_set_time,
584 .proc = cmos_procfs,
585};
586
582/*----------------------------------------------------------------*/ 587/*----------------------------------------------------------------*/
583 588
584/* 589/*
@@ -855,9 +860,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
855 dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq); 860 dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
856 goto cleanup1; 861 goto cleanup1;
857 } 862 }
863
864 cmos_rtc.rtc->ops = &cmos_rtc_ops;
865 } else {
866 cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
858 } 867 }
859 868
860 cmos_rtc.rtc->ops = &cmos_rtc_ops;
861 cmos_rtc.rtc->nvram_old_abi = true; 869 cmos_rtc.rtc->nvram_old_abi = true;
862 retval = rtc_register_device(cmos_rtc.rtc); 870 retval = rtc_register_device(cmos_rtc.rtc);
863 if (retval) 871 if (retval)
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 2751dba850c6..3e1abb455472 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -213,7 +213,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
213 /* get a report with all values through requesting one value */ 213 /* get a report with all values through requesting one value */
214 sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev, 214 sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
215 HID_USAGE_SENSOR_TIME, hid_time_addresses[0], 215 HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
216 time_state->info[0].report_id, SENSOR_HUB_SYNC); 216 time_state->info[0].report_id, SENSOR_HUB_SYNC, false);
217 /* wait for all values (event) */ 217 /* wait for all values (event) */
218 ret = wait_for_completion_killable_timeout( 218 ret = wait_for_completion_killable_timeout(
219 &time_state->comp_last_time, HZ*6); 219 &time_state->comp_last_time, HZ*6);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 9f99a0966550..7cb786d76e3c 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -303,6 +303,9 @@ static int pcf2127_i2c_gather_write(void *context,
303 memcpy(buf + 1, val, val_size); 303 memcpy(buf + 1, val, val_size);
304 304
305 ret = i2c_master_send(client, buf, val_size + 1); 305 ret = i2c_master_send(client, buf, val_size + 1);
306
307 kfree(buf);
308
306 if (ret != val_size + 1) 309 if (ret != val_size + 1)
307 return ret < 0 ? ret : -EIO; 310 return ret < 0 ? ret : -EIO;
308 311
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index fd77e46eb3b2..70a006ba4d05 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
387 * orb specified one of the unsupported formats, we defer 387 * orb specified one of the unsupported formats, we defer
388 * checking for IDAWs in unsupported formats to here. 388 * checking for IDAWs in unsupported formats to here.
389 */ 389 */
390 if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) 390 if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
391 kfree(p);
391 return -EOPNOTSUPP; 392 return -EOPNOTSUPP;
393 }
392 394
393 if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) 395 if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
394 break; 396 break;
@@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
528 530
529 ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count); 531 ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
530 if (ret < 0) 532 if (ret < 0)
531 goto out_init; 533 goto out_unpin;
532 534
533 /* Translate this direct ccw to a idal ccw. */ 535 /* Translate this direct ccw to a idal ccw. */
534 idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL); 536 idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index f47d16b5810b..a10cec0e86eb 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -22,7 +22,7 @@
22#include "vfio_ccw_private.h" 22#include "vfio_ccw_private.h"
23 23
24struct workqueue_struct *vfio_ccw_work_q; 24struct workqueue_struct *vfio_ccw_work_q;
25struct kmem_cache *vfio_ccw_io_region; 25static struct kmem_cache *vfio_ccw_io_region;
26 26
27/* 27/*
28 * Helpers 28 * Helpers
@@ -134,14 +134,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
134 if (ret) 134 if (ret)
135 goto out_free; 135 goto out_free;
136 136
137 ret = vfio_ccw_mdev_reg(sch);
138 if (ret)
139 goto out_disable;
140
141 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); 137 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
142 atomic_set(&private->avail, 1); 138 atomic_set(&private->avail, 1);
143 private->state = VFIO_CCW_STATE_STANDBY; 139 private->state = VFIO_CCW_STATE_STANDBY;
144 140
141 ret = vfio_ccw_mdev_reg(sch);
142 if (ret)
143 goto out_disable;
144
145 return 0; 145 return 0;
146 146
147out_disable: 147out_disable:
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 048665e4f13d..9f5a201c4c87 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -775,6 +775,8 @@ static int ap_device_probe(struct device *dev)
775 drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; 775 drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
776 if (!!devres != !!drvres) 776 if (!!devres != !!drvres)
777 return -ENODEV; 777 return -ENODEV;
778 /* (re-)init queue's state machine */
779 ap_queue_reinit_state(to_ap_queue(dev));
778 } 780 }
779 781
780 /* Add queue/card to list of active queues/cards */ 782 /* Add queue/card to list of active queues/cards */
@@ -807,6 +809,8 @@ static int ap_device_remove(struct device *dev)
807 struct ap_device *ap_dev = to_ap_dev(dev); 809 struct ap_device *ap_dev = to_ap_dev(dev);
808 struct ap_driver *ap_drv = ap_dev->drv; 810 struct ap_driver *ap_drv = ap_dev->drv;
809 811
812 if (is_queue_dev(dev))
813 ap_queue_remove(to_ap_queue(dev));
810 if (ap_drv->remove) 814 if (ap_drv->remove)
811 ap_drv->remove(ap_dev); 815 ap_drv->remove(ap_dev);
812 816
@@ -1444,10 +1448,6 @@ static void ap_scan_bus(struct work_struct *unused)
1444 aq->ap_dev.device.parent = &ac->ap_dev.device; 1448 aq->ap_dev.device.parent = &ac->ap_dev.device;
1445 dev_set_name(&aq->ap_dev.device, 1449 dev_set_name(&aq->ap_dev.device,
1446 "%02x.%04x", id, dom); 1450 "%02x.%04x", id, dom);
1447 /* Start with a device reset */
1448 spin_lock_bh(&aq->lock);
1449 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
1450 spin_unlock_bh(&aq->lock);
1451 /* Register device */ 1451 /* Register device */
1452 rc = device_register(&aq->ap_dev.device); 1452 rc = device_register(&aq->ap_dev.device);
1453 if (rc) { 1453 if (rc) {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 3eed1b36c876..bfc66e4a9de1 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -254,6 +254,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
254void ap_queue_remove(struct ap_queue *aq); 254void ap_queue_remove(struct ap_queue *aq);
255void ap_queue_suspend(struct ap_device *ap_dev); 255void ap_queue_suspend(struct ap_device *ap_dev);
256void ap_queue_resume(struct ap_device *ap_dev); 256void ap_queue_resume(struct ap_device *ap_dev);
257void ap_queue_reinit_state(struct ap_queue *aq);
257 258
258struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, 259struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
259 int comp_device_type, unsigned int functions); 260 int comp_device_type, unsigned int functions);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 66f7334bcb03..0aa4b3ccc948 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -718,5 +718,20 @@ void ap_queue_remove(struct ap_queue *aq)
718{ 718{
719 ap_flush_queue(aq); 719 ap_flush_queue(aq);
720 del_timer_sync(&aq->timeout); 720 del_timer_sync(&aq->timeout);
721
722 /* reset with zero, also clears irq registration */
723 spin_lock_bh(&aq->lock);
724 ap_zapq(aq->qid);
725 aq->state = AP_STATE_BORKED;
726 spin_unlock_bh(&aq->lock);
721} 727}
722EXPORT_SYMBOL(ap_queue_remove); 728EXPORT_SYMBOL(ap_queue_remove);
729
730void ap_queue_reinit_state(struct ap_queue *aq)
731{
732 spin_lock_bh(&aq->lock);
733 aq->state = AP_STATE_RESET_START;
734 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
735 spin_unlock_bh(&aq->lock);
736}
737EXPORT_SYMBOL(ap_queue_reinit_state);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 146f54f5cbb8..c50f3e86cc74 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -196,7 +196,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
196 struct ap_queue *aq = to_ap_queue(&ap_dev->device); 196 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
197 struct zcrypt_queue *zq = aq->private; 197 struct zcrypt_queue *zq = aq->private;
198 198
199 ap_queue_remove(aq);
200 if (zq) 199 if (zq)
201 zcrypt_queue_unregister(zq); 200 zcrypt_queue_unregister(zq);
202} 201}
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 546f67676734..35c7c6672713 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -251,7 +251,6 @@ static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
251 struct ap_queue *aq = to_ap_queue(&ap_dev->device); 251 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
252 struct zcrypt_queue *zq = aq->private; 252 struct zcrypt_queue *zq = aq->private;
253 253
254 ap_queue_remove(aq);
255 if (zq) 254 if (zq)
256 zcrypt_queue_unregister(zq); 255 zcrypt_queue_unregister(zq);
257} 256}
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f9d4c6c7521d..582ffa7e0f18 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -275,7 +275,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
275 struct ap_queue *aq = to_ap_queue(&ap_dev->device); 275 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
276 struct zcrypt_queue *zq = aq->private; 276 struct zcrypt_queue *zq = aq->private;
277 277
278 ap_queue_remove(aq);
279 if (zq) 278 if (zq)
280 zcrypt_queue_unregister(zq); 279 zcrypt_queue_unregister(zq);
281} 280}
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index f96ec68af2e5..dcbf5c857743 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -415,9 +415,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
415 break; 415 break;
416 416
417 clear_bit_inv(bit, bv); 417 clear_bit_inv(bit, bv);
418 ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
418 barrier(); 419 barrier();
419 smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); 420 smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
420 ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
421 } 421 }
422 422
423 if (ism->sba->e) { 423 if (ism->sba->e) {
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6843bc7ee9f2..04e294d1d16d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -87,6 +87,18 @@ struct qeth_dbf_info {
87#define SENSE_RESETTING_EVENT_BYTE 1 87#define SENSE_RESETTING_EVENT_BYTE 1
88#define SENSE_RESETTING_EVENT_FLAG 0x80 88#define SENSE_RESETTING_EVENT_FLAG 0x80
89 89
90static inline u32 qeth_get_device_id(struct ccw_device *cdev)
91{
92 struct ccw_dev_id dev_id;
93 u32 id;
94
95 ccw_device_get_id(cdev, &dev_id);
96 id = dev_id.devno;
97 id |= (u32) (dev_id.ssid << 16);
98
99 return id;
100}
101
90/* 102/*
91 * Common IO related definitions 103 * Common IO related definitions
92 */ 104 */
@@ -97,7 +109,8 @@ struct qeth_dbf_info {
97#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev) 109#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
98#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev) 110#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
99#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev) 111#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
100#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev) 112#define CCW_DEVID(cdev) (qeth_get_device_id(cdev))
113#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card)))
101 114
102/** 115/**
103 * card stuff 116 * card stuff
@@ -830,6 +843,11 @@ struct qeth_trap_id {
830/*some helper functions*/ 843/*some helper functions*/
831#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") 844#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
832 845
846static inline bool qeth_netdev_is_registered(struct net_device *dev)
847{
848 return dev->netdev_ops != NULL;
849}
850
833static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, 851static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
834 unsigned int elements) 852 unsigned int elements)
835{ 853{
@@ -973,7 +991,7 @@ int qeth_wait_for_threads(struct qeth_card *, unsigned long);
973int qeth_do_run_thread(struct qeth_card *, unsigned long); 991int qeth_do_run_thread(struct qeth_card *, unsigned long);
974void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long); 992void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
975void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); 993void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
976int qeth_core_hardsetup_card(struct qeth_card *); 994int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
977void qeth_print_status_message(struct qeth_card *); 995void qeth_print_status_message(struct qeth_card *);
978int qeth_init_qdio_queues(struct qeth_card *); 996int qeth_init_qdio_queues(struct qeth_card *);
979int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, 997int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
@@ -1028,11 +1046,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
1028int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); 1046int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
1029void qeth_trace_features(struct qeth_card *); 1047void qeth_trace_features(struct qeth_card *);
1030void qeth_close_dev(struct qeth_card *); 1048void qeth_close_dev(struct qeth_card *);
1031int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
1032 long,
1033 int (*reply_cb)(struct qeth_card *,
1034 struct qeth_reply *, unsigned long),
1035 void *);
1036int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long); 1049int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
1037struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, 1050struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
1038 enum qeth_ipa_funcs, 1051 enum qeth_ipa_funcs,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3274f13aad57..254065271867 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -167,6 +167,8 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
167 return "OSD_1000"; 167 return "OSD_1000";
168 case QETH_LINK_TYPE_10GBIT_ETH: 168 case QETH_LINK_TYPE_10GBIT_ETH:
169 return "OSD_10GIG"; 169 return "OSD_10GIG";
170 case QETH_LINK_TYPE_25GBIT_ETH:
171 return "OSD_25GIG";
170 case QETH_LINK_TYPE_LANE_ETH100: 172 case QETH_LINK_TYPE_LANE_ETH100:
171 return "OSD_FE_LANE"; 173 return "OSD_FE_LANE";
172 case QETH_LINK_TYPE_LANE_TR: 174 case QETH_LINK_TYPE_LANE_TR:
@@ -554,8 +556,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
554 if (!iob) { 556 if (!iob) {
555 dev_warn(&card->gdev->dev, "The qeth device driver " 557 dev_warn(&card->gdev->dev, "The qeth device driver "
556 "failed to recover an error on the device\n"); 558 "failed to recover an error on the device\n");
557 QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " 559 QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
558 "available\n", dev_name(&card->gdev->dev)); 560 CARD_DEVID(card));
559 return -ENOMEM; 561 return -ENOMEM;
560 } 562 }
561 qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); 563 qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
@@ -563,8 +565,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
563 rc = ccw_device_start(channel->ccwdev, channel->ccw, 565 rc = ccw_device_start(channel->ccwdev, channel->ccw,
564 (addr_t) iob, 0, 0); 566 (addr_t) iob, 0, 0);
565 if (rc) { 567 if (rc) {
566 QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " 568 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
567 "rc=%i\n", dev_name(&card->gdev->dev), rc); 569 rc, CARD_DEVID(card));
568 atomic_set(&channel->irq_pending, 0); 570 atomic_set(&channel->irq_pending, 0);
569 card->read_or_write_problem = 1; 571 card->read_or_write_problem = 1;
570 qeth_schedule_recovery(card); 572 qeth_schedule_recovery(card);
@@ -613,16 +615,14 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
613 const char *ipa_name; 615 const char *ipa_name;
614 int com = cmd->hdr.command; 616 int com = cmd->hdr.command;
615 ipa_name = qeth_get_ipa_cmd_name(com); 617 ipa_name = qeth_get_ipa_cmd_name(com);
618
616 if (rc) 619 if (rc)
617 QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned " 620 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
618 "x%X \"%s\"\n", 621 ipa_name, com, CARD_DEVID(card), rc,
619 ipa_name, com, dev_name(&card->gdev->dev), 622 qeth_get_ipa_msg(rc));
620 QETH_CARD_IFNAME(card), rc,
621 qeth_get_ipa_msg(rc));
622 else 623 else
623 QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n", 624 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
624 ipa_name, com, dev_name(&card->gdev->dev), 625 ipa_name, com, CARD_DEVID(card));
625 QETH_CARD_IFNAME(card));
626} 626}
627 627
628static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, 628static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -711,7 +711,7 @@ static int qeth_check_idx_response(struct qeth_card *card,
711 711
712 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); 712 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
713 if ((buffer[2] & 0xc0) == 0xc0) { 713 if ((buffer[2] & 0xc0) == 0xc0) {
714 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n", 714 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
715 buffer[4]); 715 buffer[4]);
716 QETH_CARD_TEXT(card, 2, "ckidxres"); 716 QETH_CARD_TEXT(card, 2, "ckidxres");
717 QETH_CARD_TEXT(card, 2, " idxterm"); 717 QETH_CARD_TEXT(card, 2, " idxterm");
@@ -972,8 +972,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
972 QETH_CARD_TEXT(card, 2, "CGENCHK"); 972 QETH_CARD_TEXT(card, 2, "CGENCHK");
973 dev_warn(&cdev->dev, "The qeth device driver " 973 dev_warn(&cdev->dev, "The qeth device driver "
974 "failed to recover an error on the device\n"); 974 "failed to recover an error on the device\n");
975 QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", 975 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
976 dev_name(&cdev->dev), dstat, cstat); 976 CCW_DEVID(cdev), dstat, cstat);
977 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 977 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
978 16, 1, irb, 64, 1); 978 16, 1, irb, 64, 1);
979 return 1; 979 return 1;
@@ -1013,8 +1013,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
1013 1013
1014 switch (PTR_ERR(irb)) { 1014 switch (PTR_ERR(irb)) {
1015 case -EIO: 1015 case -EIO:
1016 QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", 1016 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
1017 dev_name(&cdev->dev)); 1017 CCW_DEVID(cdev));
1018 QETH_CARD_TEXT(card, 2, "ckirberr"); 1018 QETH_CARD_TEXT(card, 2, "ckirberr");
1019 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); 1019 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
1020 break; 1020 break;
@@ -1031,8 +1031,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
1031 } 1031 }
1032 break; 1032 break;
1033 default: 1033 default:
1034 QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", 1034 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
1035 dev_name(&cdev->dev), PTR_ERR(irb)); 1035 PTR_ERR(irb), CCW_DEVID(cdev));
1036 QETH_CARD_TEXT(card, 2, "ckirberr"); 1036 QETH_CARD_TEXT(card, 2, "ckirberr");
1037 QETH_CARD_TEXT(card, 2, " rc???"); 1037 QETH_CARD_TEXT(card, 2, " rc???");
1038 } 1038 }
@@ -1114,9 +1114,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1114 dev_warn(&channel->ccwdev->dev, 1114 dev_warn(&channel->ccwdev->dev,
1115 "The qeth device driver failed to recover " 1115 "The qeth device driver failed to recover "
1116 "an error on the device\n"); 1116 "an error on the device\n");
1117 QETH_DBF_MESSAGE(2, "%s sense data available. cstat " 1117 QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1118 "0x%X dstat 0x%X\n", 1118 CCW_DEVID(channel->ccwdev), cstat,
1119 dev_name(&channel->ccwdev->dev), cstat, dstat); 1119 dstat);
1120 print_hex_dump(KERN_WARNING, "qeth: irb ", 1120 print_hex_dump(KERN_WARNING, "qeth: irb ",
1121 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); 1121 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1122 print_hex_dump(KERN_WARNING, "qeth: sense data ", 1122 print_hex_dump(KERN_WARNING, "qeth: sense data ",
@@ -1890,8 +1890,8 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
1890 if (channel->state != CH_STATE_ACTIVATING) { 1890 if (channel->state != CH_STATE_ACTIVATING) {
1891 dev_warn(&channel->ccwdev->dev, "The qeth device driver" 1891 dev_warn(&channel->ccwdev->dev, "The qeth device driver"
1892 " failed to recover an error on the device\n"); 1892 " failed to recover an error on the device\n");
1893 QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", 1893 QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
1894 dev_name(&channel->ccwdev->dev)); 1894 CCW_DEVID(channel->ccwdev));
1895 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); 1895 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
1896 return -ETIME; 1896 return -ETIME;
1897 } 1897 }
@@ -1926,17 +1926,15 @@ static void qeth_idx_write_cb(struct qeth_card *card,
1926 "The adapter is used exclusively by another " 1926 "The adapter is used exclusively by another "
1927 "host\n"); 1927 "host\n");
1928 else 1928 else
1929 QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" 1929 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1930 " negative reply\n", 1930 CCW_DEVID(channel->ccwdev));
1931 dev_name(&channel->ccwdev->dev));
1932 goto out; 1931 goto out;
1933 } 1932 }
1934 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 1933 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1935 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { 1934 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1936 QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " 1935 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1937 "function level mismatch (sent: 0x%x, received: " 1936 CCW_DEVID(channel->ccwdev),
1938 "0x%x)\n", dev_name(&channel->ccwdev->dev), 1937 card->info.func_level, temp);
1939 card->info.func_level, temp);
1940 goto out; 1938 goto out;
1941 } 1939 }
1942 channel->state = CH_STATE_UP; 1940 channel->state = CH_STATE_UP;
@@ -1973,9 +1971,8 @@ static void qeth_idx_read_cb(struct qeth_card *card,
1973 "insufficient authorization\n"); 1971 "insufficient authorization\n");
1974 break; 1972 break;
1975 default: 1973 default:
1976 QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" 1974 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1977 " negative reply\n", 1975 CCW_DEVID(channel->ccwdev));
1978 dev_name(&channel->ccwdev->dev));
1979 } 1976 }
1980 QETH_CARD_TEXT_(card, 2, "idxread%c", 1977 QETH_CARD_TEXT_(card, 2, "idxread%c",
1981 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 1978 QETH_IDX_ACT_CAUSE_CODE(iob->data));
@@ -1984,10 +1981,9 @@ static void qeth_idx_read_cb(struct qeth_card *card,
1984 1981
1985 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 1982 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1986 if (temp != qeth_peer_func_level(card->info.func_level)) { 1983 if (temp != qeth_peer_func_level(card->info.func_level)) {
1987 QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " 1984 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1988 "level mismatch (sent: 0x%x, received: 0x%x)\n", 1985 CCW_DEVID(channel->ccwdev),
1989 dev_name(&channel->ccwdev->dev), 1986 card->info.func_level, temp);
1990 card->info.func_level, temp);
1991 goto out; 1987 goto out;
1992 } 1988 }
1993 memcpy(&card->token.issuer_rm_r, 1989 memcpy(&card->token.issuer_rm_r,
@@ -2096,9 +2092,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2096 (addr_t) iob, 0, 0, event_timeout); 2092 (addr_t) iob, 0, 0, event_timeout);
2097 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 2093 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2098 if (rc) { 2094 if (rc) {
2099 QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " 2095 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2100 "ccw_device_start rc = %i\n", 2096 CARD_DEVID(card), rc);
2101 dev_name(&channel->ccwdev->dev), rc);
2102 QETH_CARD_TEXT_(card, 2, " err%d", rc); 2097 QETH_CARD_TEXT_(card, 2, " err%d", rc);
2103 spin_lock_irq(&card->lock); 2098 spin_lock_irq(&card->lock);
2104 list_del_init(&reply->list); 2099 list_del_init(&reply->list);
@@ -2853,8 +2848,8 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2853 } else { 2848 } else {
2854 dev_warn(&card->gdev->dev, 2849 dev_warn(&card->gdev->dev,
2855 "The qeth driver ran out of channel command buffers\n"); 2850 "The qeth driver ran out of channel command buffers\n");
2856 QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", 2851 QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
2857 dev_name(&card->gdev->dev)); 2852 CARD_DEVID(card));
2858 } 2853 }
2859 2854
2860 return iob; 2855 return iob;
@@ -2989,10 +2984,9 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
2989 return 0; 2984 return 0;
2990 default: 2985 default:
2991 if (cmd->hdr.return_code) { 2986 if (cmd->hdr.return_code) {
2992 QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " 2987 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2993 "rc=%d\n", 2988 CARD_DEVID(card),
2994 dev_name(&card->gdev->dev), 2989 cmd->hdr.return_code);
2995 cmd->hdr.return_code);
2996 return 0; 2990 return 0;
2997 } 2991 }
2998 } 2992 }
@@ -3004,8 +2998,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
3004 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; 2998 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
3005 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 2999 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
3006 } else 3000 } else
3007 QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" 3001 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3008 "\n", dev_name(&card->gdev->dev)); 3002 CARD_DEVID(card));
3009 return 0; 3003 return 0;
3010} 3004}
3011 3005
@@ -4297,10 +4291,9 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4297 cmd->data.setadapterparms.hdr.return_code); 4291 cmd->data.setadapterparms.hdr.return_code);
4298 if (cmd->data.setadapterparms.hdr.return_code != 4292 if (cmd->data.setadapterparms.hdr.return_code !=
4299 SET_ACCESS_CTRL_RC_SUCCESS) 4293 SET_ACCESS_CTRL_RC_SUCCESS)
4300 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n", 4294 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4301 card->gdev->dev.kobj.name, 4295 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4302 access_ctrl_req->subcmd_code, 4296 cmd->data.setadapterparms.hdr.return_code);
4303 cmd->data.setadapterparms.hdr.return_code);
4304 switch (cmd->data.setadapterparms.hdr.return_code) { 4297 switch (cmd->data.setadapterparms.hdr.return_code) {
4305 case SET_ACCESS_CTRL_RC_SUCCESS: 4298 case SET_ACCESS_CTRL_RC_SUCCESS:
4306 if (card->options.isolation == ISOLATION_MODE_NONE) { 4299 if (card->options.isolation == ISOLATION_MODE_NONE) {
@@ -4312,14 +4305,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4312 } 4305 }
4313 break; 4306 break;
4314 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4307 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4315 QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already " 4308 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4316 "deactivated\n", dev_name(&card->gdev->dev)); 4309 CARD_DEVID(card));
4317 if (fallback) 4310 if (fallback)
4318 card->options.isolation = card->options.prev_isolation; 4311 card->options.isolation = card->options.prev_isolation;
4319 break; 4312 break;
4320 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4313 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4321 QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already" 4314 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4322 " activated\n", dev_name(&card->gdev->dev)); 4315 CARD_DEVID(card));
4323 if (fallback) 4316 if (fallback)
4324 card->options.isolation = card->options.prev_isolation; 4317 card->options.isolation = card->options.prev_isolation;
4325 break; 4318 break;
@@ -4405,10 +4398,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4405 rc = qeth_setadpparms_set_access_ctrl(card, 4398 rc = qeth_setadpparms_set_access_ctrl(card,
4406 card->options.isolation, fallback); 4399 card->options.isolation, fallback);
4407 if (rc) { 4400 if (rc) {
4408 QETH_DBF_MESSAGE(3, 4401 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4409 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n", 4402 rc, CARD_DEVID(card));
4410 card->gdev->dev.kobj.name,
4411 rc);
4412 rc = -EOPNOTSUPP; 4403 rc = -EOPNOTSUPP;
4413 } 4404 }
4414 } else if (card->options.isolation != ISOLATION_MODE_NONE) { 4405 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
@@ -4443,7 +4434,8 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4443 rc = BMCR_FULLDPLX; 4434 rc = BMCR_FULLDPLX;
4444 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4435 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4445 (card->info.link_type != QETH_LINK_TYPE_OSN) && 4436 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4446 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH)) 4437 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4438 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4447 rc |= BMCR_SPEED100; 4439 rc |= BMCR_SPEED100;
4448 break; 4440 break;
4449 case MII_BMSR: /* Basic mode status register */ 4441 case MII_BMSR: /* Basic mode status register */
@@ -4526,8 +4518,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
4526{ 4518{
4527 struct qeth_ipa_cmd *cmd; 4519 struct qeth_ipa_cmd *cmd;
4528 struct qeth_arp_query_info *qinfo; 4520 struct qeth_arp_query_info *qinfo;
4529 struct qeth_snmp_cmd *snmp;
4530 unsigned char *data; 4521 unsigned char *data;
4522 void *snmp_data;
4531 __u16 data_len; 4523 __u16 data_len;
4532 4524
4533 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4525 QETH_CARD_TEXT(card, 3, "snpcmdcb");
@@ -4535,7 +4527,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
4535 cmd = (struct qeth_ipa_cmd *) sdata; 4527 cmd = (struct qeth_ipa_cmd *) sdata;
4536 data = (unsigned char *)((char *)cmd - reply->offset); 4528 data = (unsigned char *)((char *)cmd - reply->offset);
4537 qinfo = (struct qeth_arp_query_info *) reply->param; 4529 qinfo = (struct qeth_arp_query_info *) reply->param;
4538 snmp = &cmd->data.setadapterparms.data.snmp;
4539 4530
4540 if (cmd->hdr.return_code) { 4531 if (cmd->hdr.return_code) {
4541 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4532 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
@@ -4548,10 +4539,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
4548 return 0; 4539 return 0;
4549 } 4540 }
4550 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); 4541 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
4551 if (cmd->data.setadapterparms.hdr.seq_no == 1) 4542 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4552 data_len -= (__u16)((char *)&snmp->data - (char *)cmd); 4543 snmp_data = &cmd->data.setadapterparms.data.snmp;
4553 else 4544 data_len -= offsetof(struct qeth_ipa_cmd,
4554 data_len -= (__u16)((char *)&snmp->request - (char *)cmd); 4545 data.setadapterparms.data.snmp);
4546 } else {
4547 snmp_data = &cmd->data.setadapterparms.data.snmp.request;
4548 data_len -= offsetof(struct qeth_ipa_cmd,
4549 data.setadapterparms.data.snmp.request);
4550 }
4555 4551
4556 /* check if there is enough room in userspace */ 4552 /* check if there is enough room in userspace */
4557 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4553 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
@@ -4564,16 +4560,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
4564 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4560 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4565 cmd->data.setadapterparms.hdr.seq_no); 4561 cmd->data.setadapterparms.hdr.seq_no);
4566 /*copy entries to user buffer*/ 4562 /*copy entries to user buffer*/
4567 if (cmd->data.setadapterparms.hdr.seq_no == 1) { 4563 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4568 memcpy(qinfo->udata + qinfo->udata_offset,
4569 (char *)snmp,
4570 data_len + offsetof(struct qeth_snmp_cmd, data));
4571 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4572 } else {
4573 memcpy(qinfo->udata + qinfo->udata_offset,
4574 (char *)&snmp->request, data_len);
4575 }
4576 qinfo->udata_offset += data_len; 4564 qinfo->udata_offset += data_len;
4565
4577 /* check if all replies received ... */ 4566 /* check if all replies received ... */
4578 QETH_CARD_TEXT_(card, 4, "srtot%i", 4567 QETH_CARD_TEXT_(card, 4, "srtot%i",
4579 cmd->data.setadapterparms.hdr.used_total); 4568 cmd->data.setadapterparms.hdr.used_total);
@@ -4634,8 +4623,8 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4634 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, 4623 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4635 qeth_snmp_command_cb, (void *)&qinfo); 4624 qeth_snmp_command_cb, (void *)&qinfo);
4636 if (rc) 4625 if (rc)
4637 QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n", 4626 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4638 QETH_CARD_IFNAME(card), rc); 4627 CARD_DEVID(card), rc);
4639 else { 4628 else {
4640 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4629 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4641 rc = -EFAULT; 4630 rc = -EFAULT;
@@ -4869,8 +4858,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
4869 4858
4870 rc = qeth_read_conf_data(card, (void **) &prcd, &length); 4859 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4871 if (rc) { 4860 if (rc) {
4872 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", 4861 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4873 dev_name(&card->gdev->dev), rc); 4862 CARD_DEVID(card), rc);
4874 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 4863 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4875 goto out_offline; 4864 goto out_offline;
4876 } 4865 }
@@ -5086,7 +5075,7 @@ static struct ccw_driver qeth_ccw_driver = {
5086 .remove = ccwgroup_remove_ccwdev, 5075 .remove = ccwgroup_remove_ccwdev,
5087}; 5076};
5088 5077
5089int qeth_core_hardsetup_card(struct qeth_card *card) 5078int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5090{ 5079{
5091 int retries = 3; 5080 int retries = 3;
5092 int rc; 5081 int rc;
@@ -5096,8 +5085,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
5096 qeth_update_from_chp_desc(card); 5085 qeth_update_from_chp_desc(card);
5097retry: 5086retry:
5098 if (retries < 3) 5087 if (retries < 3)
5099 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 5088 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5100 dev_name(&card->gdev->dev)); 5089 CARD_DEVID(card));
5101 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 5090 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
5102 ccw_device_set_offline(CARD_DDEV(card)); 5091 ccw_device_set_offline(CARD_DDEV(card));
5103 ccw_device_set_offline(CARD_WDEV(card)); 5092 ccw_device_set_offline(CARD_WDEV(card));
@@ -5161,13 +5150,20 @@ retriable:
5161 if (rc == IPA_RC_LAN_OFFLINE) { 5150 if (rc == IPA_RC_LAN_OFFLINE) {
5162 dev_warn(&card->gdev->dev, 5151 dev_warn(&card->gdev->dev,
5163 "The LAN is offline\n"); 5152 "The LAN is offline\n");
5164 netif_carrier_off(card->dev); 5153 *carrier_ok = false;
5165 } else { 5154 } else {
5166 rc = -ENODEV; 5155 rc = -ENODEV;
5167 goto out; 5156 goto out;
5168 } 5157 }
5169 } else { 5158 } else {
5170 netif_carrier_on(card->dev); 5159 *carrier_ok = true;
5160 }
5161
5162 if (qeth_netdev_is_registered(card->dev)) {
5163 if (*carrier_ok)
5164 netif_carrier_on(card->dev);
5165 else
5166 netif_carrier_off(card->dev);
5171 } 5167 }
5172 5168
5173 card->options.ipa4.supported_funcs = 0; 5169 card->options.ipa4.supported_funcs = 0;
@@ -5201,8 +5197,8 @@ retriable:
5201out: 5197out:
5202 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5198 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5203 "an error on the device\n"); 5199 "an error on the device\n");
5204 QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n", 5200 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5205 dev_name(&card->gdev->dev), rc); 5201 CARD_DEVID(card), rc);
5206 return rc; 5202 return rc;
5207} 5203}
5208EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); 5204EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
@@ -5481,11 +5477,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5481} 5477}
5482EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 5478EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5483 5479
5484int qeth_send_setassparms(struct qeth_card *card, 5480static int qeth_send_setassparms(struct qeth_card *card,
5485 struct qeth_cmd_buffer *iob, __u16 len, long data, 5481 struct qeth_cmd_buffer *iob, u16 len,
5486 int (*reply_cb)(struct qeth_card *, 5482 long data, int (*reply_cb)(struct qeth_card *,
5487 struct qeth_reply *, unsigned long), 5483 struct qeth_reply *,
5488 void *reply_param) 5484 unsigned long),
5485 void *reply_param)
5489{ 5486{
5490 int rc; 5487 int rc;
5491 struct qeth_ipa_cmd *cmd; 5488 struct qeth_ipa_cmd *cmd;
@@ -5501,7 +5498,6 @@ int qeth_send_setassparms(struct qeth_card *card,
5501 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param); 5498 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
5502 return rc; 5499 return rc;
5503} 5500}
5504EXPORT_SYMBOL_GPL(qeth_send_setassparms);
5505 5501
5506int qeth_send_simple_setassparms_prot(struct qeth_card *card, 5502int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5507 enum qeth_ipa_funcs ipa_func, 5503 enum qeth_ipa_funcs ipa_func,
@@ -6170,8 +6166,14 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
6170 WARN_ON_ONCE(1); 6166 WARN_ON_ONCE(1);
6171 } 6167 }
6172 6168
6173 /* fallthrough from high to low, to select all legal speeds: */ 6169 /* partially does fall through, to also select lower speeds */
6174 switch (maxspeed) { 6170 switch (maxspeed) {
6171 case SPEED_25000:
6172 ethtool_link_ksettings_add_link_mode(cmd, supported,
6173 25000baseSR_Full);
6174 ethtool_link_ksettings_add_link_mode(cmd, advertising,
6175 25000baseSR_Full);
6176 break;
6175 case SPEED_10000: 6177 case SPEED_10000:
6176 ethtool_link_ksettings_add_link_mode(cmd, supported, 6178 ethtool_link_ksettings_add_link_mode(cmd, supported,
6177 10000baseT_Full); 6179 10000baseT_Full);
@@ -6254,6 +6256,10 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
6254 cmd->base.speed = SPEED_10000; 6256 cmd->base.speed = SPEED_10000;
6255 cmd->base.port = PORT_FIBRE; 6257 cmd->base.port = PORT_FIBRE;
6256 break; 6258 break;
6259 case QETH_LINK_TYPE_25GBIT_ETH:
6260 cmd->base.speed = SPEED_25000;
6261 cmd->base.port = PORT_FIBRE;
6262 break;
6257 default: 6263 default:
6258 cmd->base.speed = SPEED_10; 6264 cmd->base.speed = SPEED_10;
6259 cmd->base.port = PORT_TP; 6265 cmd->base.port = PORT_TP;
@@ -6320,6 +6326,9 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
6320 case CARD_INFO_PORTS_10G: 6326 case CARD_INFO_PORTS_10G:
6321 cmd->base.speed = SPEED_10000; 6327 cmd->base.speed = SPEED_10000;
6322 break; 6328 break;
6329 case CARD_INFO_PORTS_25G:
6330 cmd->base.speed = SPEED_25000;
6331 break;
6323 } 6332 }
6324 6333
6325 return 0; 6334 return 0;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e85090467afe..3e54be201b27 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -90,6 +90,7 @@ enum qeth_link_types {
90 QETH_LINK_TYPE_GBIT_ETH = 0x03, 90 QETH_LINK_TYPE_GBIT_ETH = 0x03,
91 QETH_LINK_TYPE_OSN = 0x04, 91 QETH_LINK_TYPE_OSN = 0x04,
92 QETH_LINK_TYPE_10GBIT_ETH = 0x10, 92 QETH_LINK_TYPE_10GBIT_ETH = 0x10,
93 QETH_LINK_TYPE_25GBIT_ETH = 0x12,
93 QETH_LINK_TYPE_LANE_ETH100 = 0x81, 94 QETH_LINK_TYPE_LANE_ETH100 = 0x81,
94 QETH_LINK_TYPE_LANE_TR = 0x82, 95 QETH_LINK_TYPE_LANE_TR = 0x82,
95 QETH_LINK_TYPE_LANE_ETH1000 = 0x83, 96 QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
@@ -347,6 +348,7 @@ enum qeth_card_info_port_speed {
347 CARD_INFO_PORTS_100M = 0x00000006, 348 CARD_INFO_PORTS_100M = 0x00000006,
348 CARD_INFO_PORTS_1G = 0x00000007, 349 CARD_INFO_PORTS_1G = 0x00000007,
349 CARD_INFO_PORTS_10G = 0x00000008, 350 CARD_INFO_PORTS_10G = 0x00000008,
351 CARD_INFO_PORTS_25G = 0x0000000A,
350}; 352};
351 353
352/* (SET)DELIP(M) IPA stuff ***************************************************/ 354/* (SET)DELIP(M) IPA stuff ***************************************************/
@@ -436,7 +438,7 @@ struct qeth_ipacmd_setassparms {
436 __u32 flags_32bit; 438 __u32 flags_32bit;
437 struct qeth_ipa_caps caps; 439 struct qeth_ipa_caps caps;
438 struct qeth_checksum_cmd chksum; 440 struct qeth_checksum_cmd chksum;
439 struct qeth_arp_cache_entry add_arp_entry; 441 struct qeth_arp_cache_entry arp_entry;
440 struct qeth_arp_query_data query_arp; 442 struct qeth_arp_query_data query_arp;
441 struct qeth_tso_start_data tso; 443 struct qeth_tso_start_data tso;
442 __u8 ip[16]; 444 __u8 ip[16];
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 23aaf373f631..2914a1a69f83 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -146,11 +146,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
146 QETH_CARD_TEXT(card, 2, "L2Wmac"); 146 QETH_CARD_TEXT(card, 2, "L2Wmac");
147 rc = qeth_l2_send_setdelmac(card, mac, cmd); 147 rc = qeth_l2_send_setdelmac(card, mac, cmd);
148 if (rc == -EEXIST) 148 if (rc == -EEXIST)
149 QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n", 149 QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
150 mac, QETH_CARD_IFNAME(card)); 150 CARD_DEVID(card));
151 else if (rc) 151 else if (rc)
152 QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n", 152 QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n",
153 mac, QETH_CARD_IFNAME(card), rc); 153 CARD_DEVID(card), rc);
154 return rc; 154 return rc;
155} 155}
156 156
@@ -163,8 +163,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
163 QETH_CARD_TEXT(card, 2, "L2Rmac"); 163 QETH_CARD_TEXT(card, 2, "L2Rmac");
164 rc = qeth_l2_send_setdelmac(card, mac, cmd); 164 rc = qeth_l2_send_setdelmac(card, mac, cmd);
165 if (rc) 165 if (rc)
166 QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n", 166 QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n",
167 mac, QETH_CARD_IFNAME(card), rc); 167 CARD_DEVID(card), rc);
168 return rc; 168 return rc;
169} 169}
170 170
@@ -260,9 +260,9 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
260 260
261 QETH_CARD_TEXT(card, 2, "L2sdvcb"); 261 QETH_CARD_TEXT(card, 2, "L2sdvcb");
262 if (cmd->hdr.return_code) { 262 if (cmd->hdr.return_code) {
263 QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n", 263 QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
264 cmd->data.setdelvlan.vlan_id, 264 cmd->data.setdelvlan.vlan_id,
265 QETH_CARD_IFNAME(card), cmd->hdr.return_code); 265 CARD_DEVID(card), cmd->hdr.return_code);
266 QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); 266 QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
267 QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); 267 QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
268 } 268 }
@@ -455,8 +455,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
455 rc = qeth_vm_request_mac(card); 455 rc = qeth_vm_request_mac(card);
456 if (!rc) 456 if (!rc)
457 goto out; 457 goto out;
458 QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n", 458 QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
459 CARD_BUS_ID(card), rc); 459 CARD_DEVID(card), rc);
460 QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc); 460 QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
461 /* fall back to alternative mechanism: */ 461 /* fall back to alternative mechanism: */
462 } 462 }
@@ -468,8 +468,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
468 rc = qeth_setadpparms_change_macaddr(card); 468 rc = qeth_setadpparms_change_macaddr(card);
469 if (!rc) 469 if (!rc)
470 goto out; 470 goto out;
471 QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n", 471 QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
472 CARD_BUS_ID(card), rc); 472 CARD_DEVID(card), rc);
473 QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); 473 QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
474 /* fall back once more: */ 474 /* fall back once more: */
475 } 475 }
@@ -826,7 +826,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
826 826
827 if (cgdev->state == CCWGROUP_ONLINE) 827 if (cgdev->state == CCWGROUP_ONLINE)
828 qeth_l2_set_offline(cgdev); 828 qeth_l2_set_offline(cgdev);
829 unregister_netdev(card->dev); 829 if (qeth_netdev_is_registered(card->dev))
830 unregister_netdev(card->dev);
830} 831}
831 832
832static const struct ethtool_ops qeth_l2_ethtool_ops = { 833static const struct ethtool_ops qeth_l2_ethtool_ops = {
@@ -862,11 +863,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
862 .ndo_set_features = qeth_set_features 863 .ndo_set_features = qeth_set_features
863}; 864};
864 865
865static int qeth_l2_setup_netdev(struct qeth_card *card) 866static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
866{ 867{
867 int rc; 868 int rc;
868 869
869 if (card->dev->netdev_ops) 870 if (qeth_netdev_is_registered(card->dev))
870 return 0; 871 return 0;
871 872
872 card->dev->priv_flags |= IFF_UNICAST_FLT; 873 card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -919,6 +920,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
919 qeth_l2_request_initial_mac(card); 920 qeth_l2_request_initial_mac(card);
920 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); 921 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
921 rc = register_netdev(card->dev); 922 rc = register_netdev(card->dev);
923 if (!rc && carrier_ok)
924 netif_carrier_on(card->dev);
925
922 if (rc) 926 if (rc)
923 card->dev->netdev_ops = NULL; 927 card->dev->netdev_ops = NULL;
924 return rc; 928 return rc;
@@ -949,6 +953,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
949 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 953 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
950 int rc = 0; 954 int rc = 0;
951 enum qeth_card_states recover_flag; 955 enum qeth_card_states recover_flag;
956 bool carrier_ok;
952 957
953 mutex_lock(&card->discipline_mutex); 958 mutex_lock(&card->discipline_mutex);
954 mutex_lock(&card->conf_mutex); 959 mutex_lock(&card->conf_mutex);
@@ -956,7 +961,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
956 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 961 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
957 962
958 recover_flag = card->state; 963 recover_flag = card->state;
959 rc = qeth_core_hardsetup_card(card); 964 rc = qeth_core_hardsetup_card(card, &carrier_ok);
960 if (rc) { 965 if (rc) {
961 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); 966 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
962 rc = -ENODEV; 967 rc = -ENODEV;
@@ -967,7 +972,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
967 dev_info(&card->gdev->dev, 972 dev_info(&card->gdev->dev,
968 "The device represents a Bridge Capable Port\n"); 973 "The device represents a Bridge Capable Port\n");
969 974
970 rc = qeth_l2_setup_netdev(card); 975 rc = qeth_l2_setup_netdev(card, carrier_ok);
971 if (rc) 976 if (rc)
972 goto out_remove; 977 goto out_remove;
973 978
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0b161cc1fd2e..f08b745c2007 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -278,9 +278,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
278 278
279 QETH_CARD_TEXT(card, 4, "clearip"); 279 QETH_CARD_TEXT(card, 4, "clearip");
280 280
281 if (recover && card->options.sniffer)
282 return;
283
284 spin_lock_bh(&card->ip_lock); 281 spin_lock_bh(&card->ip_lock);
285 282
286 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { 283 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
@@ -494,9 +491,8 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
494 QETH_PROT_IPV4); 491 QETH_PROT_IPV4);
495 if (rc) { 492 if (rc) {
496 card->options.route4.type = NO_ROUTER; 493 card->options.route4.type = NO_ROUTER;
497 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" 494 QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
498 " on %s. Type set to 'no router'.\n", rc, 495 rc, CARD_DEVID(card));
499 QETH_CARD_IFNAME(card));
500 } 496 }
501 return rc; 497 return rc;
502} 498}
@@ -518,9 +514,8 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
518 QETH_PROT_IPV6); 514 QETH_PROT_IPV6);
519 if (rc) { 515 if (rc) {
520 card->options.route6.type = NO_ROUTER; 516 card->options.route6.type = NO_ROUTER;
521 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" 517 QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
522 " on %s. Type set to 'no router'.\n", rc, 518 rc, CARD_DEVID(card));
523 QETH_CARD_IFNAME(card));
524 } 519 }
525 return rc; 520 return rc;
526} 521}
@@ -663,6 +658,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
663 int rc = 0; 658 int rc = 0;
664 int cnt = 3; 659 int cnt = 3;
665 660
661 if (card->options.sniffer)
662 return 0;
666 663
667 if (addr->proto == QETH_PROT_IPV4) { 664 if (addr->proto == QETH_PROT_IPV4) {
668 QETH_CARD_TEXT(card, 2, "setaddr4"); 665 QETH_CARD_TEXT(card, 2, "setaddr4");
@@ -697,6 +694,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
697{ 694{
698 int rc = 0; 695 int rc = 0;
699 696
697 if (card->options.sniffer)
698 return 0;
699
700 if (addr->proto == QETH_PROT_IPV4) { 700 if (addr->proto == QETH_PROT_IPV4) {
701 QETH_CARD_TEXT(card, 2, "deladdr4"); 701 QETH_CARD_TEXT(card, 2, "deladdr4");
702 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 702 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
@@ -1070,8 +1070,8 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
1070 } 1070 }
1071 break; 1071 break;
1072 default: 1072 default:
1073 QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n", 1073 QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
1074 cmd->data.diagass.action, QETH_CARD_IFNAME(card)); 1074 cmd->data.diagass.action, CARD_DEVID(card));
1075 } 1075 }
1076 1076
1077 return 0; 1077 return 0;
@@ -1517,32 +1517,25 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
1517 qeth_l3_handle_promisc_mode(card); 1517 qeth_l3_handle_promisc_mode(card);
1518} 1518}
1519 1519
1520static const char *qeth_l3_arp_get_error_cause(int *rc) 1520static int qeth_l3_arp_makerc(int rc)
1521{ 1521{
1522 switch (*rc) { 1522 switch (rc) {
1523 case QETH_IPA_ARP_RC_FAILED: 1523 case IPA_RC_SUCCESS:
1524 *rc = -EIO; 1524 return 0;
1525 return "operation failed";
1526 case QETH_IPA_ARP_RC_NOTSUPP: 1525 case QETH_IPA_ARP_RC_NOTSUPP:
1527 *rc = -EOPNOTSUPP;
1528 return "operation not supported";
1529 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
1530 *rc = -EINVAL;
1531 return "argument out of range";
1532 case QETH_IPA_ARP_RC_Q_NOTSUPP: 1526 case QETH_IPA_ARP_RC_Q_NOTSUPP:
1533 *rc = -EOPNOTSUPP; 1527 return -EOPNOTSUPP;
1534 return "query operation not supported"; 1528 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
1529 return -EINVAL;
1535 case QETH_IPA_ARP_RC_Q_NO_DATA: 1530 case QETH_IPA_ARP_RC_Q_NO_DATA:
1536 *rc = -ENOENT; 1531 return -ENOENT;
1537 return "no query data available";
1538 default: 1532 default:
1539 return "unknown error"; 1533 return -EIO;
1540 } 1534 }
1541} 1535}
1542 1536
1543static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) 1537static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
1544{ 1538{
1545 int tmp;
1546 int rc; 1539 int rc;
1547 1540
1548 QETH_CARD_TEXT(card, 3, "arpstnoe"); 1541 QETH_CARD_TEXT(card, 3, "arpstnoe");
@@ -1560,13 +1553,10 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
1560 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, 1553 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1561 IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 1554 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
1562 no_entries); 1555 no_entries);
1563 if (rc) { 1556 if (rc)
1564 tmp = rc; 1557 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
1565 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on " 1558 CARD_DEVID(card), rc);
1566 "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card), 1559 return qeth_l3_arp_makerc(rc);
1567 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1568 }
1569 return rc;
1570} 1560}
1571 1561
1572static __u32 get_arp_entry_size(struct qeth_card *card, 1562static __u32 get_arp_entry_size(struct qeth_card *card,
@@ -1716,7 +1706,6 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
1716{ 1706{
1717 struct qeth_cmd_buffer *iob; 1707 struct qeth_cmd_buffer *iob;
1718 struct qeth_ipa_cmd *cmd; 1708 struct qeth_ipa_cmd *cmd;
1719 int tmp;
1720 int rc; 1709 int rc;
1721 1710
1722 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); 1711 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
@@ -1735,15 +1724,10 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
1735 rc = qeth_l3_send_ipa_arp_cmd(card, iob, 1724 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
1736 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN, 1725 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
1737 qeth_l3_arp_query_cb, (void *)qinfo); 1726 qeth_l3_arp_query_cb, (void *)qinfo);
1738 if (rc) { 1727 if (rc)
1739 tmp = rc; 1728 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
1740 QETH_DBF_MESSAGE(2, 1729 CARD_DEVID(card), rc);
1741 "Error while querying ARP cache on %s: %s " 1730 return qeth_l3_arp_makerc(rc);
1742 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
1743 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1744 }
1745
1746 return rc;
1747} 1731}
1748 1732
1749static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) 1733static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
@@ -1793,15 +1777,18 @@ out:
1793 return rc; 1777 return rc;
1794} 1778}
1795 1779
1796static int qeth_l3_arp_add_entry(struct qeth_card *card, 1780static int qeth_l3_arp_modify_entry(struct qeth_card *card,
1797 struct qeth_arp_cache_entry *entry) 1781 struct qeth_arp_cache_entry *entry,
1782 enum qeth_arp_process_subcmds arp_cmd)
1798{ 1783{
1784 struct qeth_arp_cache_entry *cmd_entry;
1799 struct qeth_cmd_buffer *iob; 1785 struct qeth_cmd_buffer *iob;
1800 char buf[16];
1801 int tmp;
1802 int rc; 1786 int rc;
1803 1787
1804 QETH_CARD_TEXT(card, 3, "arpadent"); 1788 if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
1789 QETH_CARD_TEXT(card, 3, "arpadd");
1790 else
1791 QETH_CARD_TEXT(card, 3, "arpdel");
1805 1792
1806 /* 1793 /*
1807 * currently GuestLAN only supports the ARP assist function 1794 * currently GuestLAN only supports the ARP assist function
@@ -1814,71 +1801,25 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
1814 return -EOPNOTSUPP; 1801 return -EOPNOTSUPP;
1815 } 1802 }
1816 1803
1817 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1804 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
1818 IPA_CMD_ASS_ARP_ADD_ENTRY, 1805 sizeof(*cmd_entry), QETH_PROT_IPV4);
1819 sizeof(struct qeth_arp_cache_entry),
1820 QETH_PROT_IPV4);
1821 if (!iob) 1806 if (!iob)
1822 return -ENOMEM; 1807 return -ENOMEM;
1823 rc = qeth_send_setassparms(card, iob,
1824 sizeof(struct qeth_arp_cache_entry),
1825 (unsigned long) entry,
1826 qeth_setassparms_cb, NULL);
1827 if (rc) {
1828 tmp = rc;
1829 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
1830 QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
1831 "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
1832 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1833 }
1834 return rc;
1835}
1836
1837static int qeth_l3_arp_remove_entry(struct qeth_card *card,
1838 struct qeth_arp_cache_entry *entry)
1839{
1840 struct qeth_cmd_buffer *iob;
1841 char buf[16] = {0, };
1842 int tmp;
1843 int rc;
1844 1808
1845 QETH_CARD_TEXT(card, 3, "arprment"); 1809 cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
1810 ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
1811 memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
1812 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
1813 if (rc)
1814 QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
1815 arp_cmd, CARD_DEVID(card), rc);
1846 1816
1847 /* 1817 return qeth_l3_arp_makerc(rc);
1848 * currently GuestLAN only supports the ARP assist function
1849 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
1850 * thus we say EOPNOTSUPP for this ARP function
1851 */
1852 if (card->info.guestlan)
1853 return -EOPNOTSUPP;
1854 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1855 return -EOPNOTSUPP;
1856 }
1857 memcpy(buf, entry, 12);
1858 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
1859 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
1860 12,
1861 QETH_PROT_IPV4);
1862 if (!iob)
1863 return -ENOMEM;
1864 rc = qeth_send_setassparms(card, iob,
1865 12, (unsigned long)buf,
1866 qeth_setassparms_cb, NULL);
1867 if (rc) {
1868 tmp = rc;
1869 memset(buf, 0, 16);
1870 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
1871 QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
1872 " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
1873 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1874 }
1875 return rc;
1876} 1818}
1877 1819
1878static int qeth_l3_arp_flush_cache(struct qeth_card *card) 1820static int qeth_l3_arp_flush_cache(struct qeth_card *card)
1879{ 1821{
1880 int rc; 1822 int rc;
1881 int tmp;
1882 1823
1883 QETH_CARD_TEXT(card, 3, "arpflush"); 1824 QETH_CARD_TEXT(card, 3, "arpflush");
1884 1825
@@ -1894,19 +1835,17 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
1894 } 1835 }
1895 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, 1836 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1896 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); 1837 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
1897 if (rc) { 1838 if (rc)
1898 tmp = rc; 1839 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
1899 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s " 1840 CARD_DEVID(card), rc);
1900 "(0x%x/%d)\n", QETH_CARD_IFNAME(card), 1841 return qeth_l3_arp_makerc(rc);
1901 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
1902 }
1903 return rc;
1904} 1842}
1905 1843
1906static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1844static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1907{ 1845{
1908 struct qeth_card *card = dev->ml_priv; 1846 struct qeth_card *card = dev->ml_priv;
1909 struct qeth_arp_cache_entry arp_entry; 1847 struct qeth_arp_cache_entry arp_entry;
1848 enum qeth_arp_process_subcmds arp_cmd;
1910 int rc = 0; 1849 int rc = 0;
1911 1850
1912 switch (cmd) { 1851 switch (cmd) {
@@ -1925,27 +1864,16 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1925 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); 1864 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
1926 break; 1865 break;
1927 case SIOC_QETH_ARP_ADD_ENTRY: 1866 case SIOC_QETH_ARP_ADD_ENTRY:
1928 if (!capable(CAP_NET_ADMIN)) {
1929 rc = -EPERM;
1930 break;
1931 }
1932 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
1933 sizeof(struct qeth_arp_cache_entry)))
1934 rc = -EFAULT;
1935 else
1936 rc = qeth_l3_arp_add_entry(card, &arp_entry);
1937 break;
1938 case SIOC_QETH_ARP_REMOVE_ENTRY: 1867 case SIOC_QETH_ARP_REMOVE_ENTRY:
1939 if (!capable(CAP_NET_ADMIN)) { 1868 if (!capable(CAP_NET_ADMIN))
1940 rc = -EPERM; 1869 return -EPERM;
1941 break; 1870 if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
1942 } 1871 return -EFAULT;
1943 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, 1872
1944 sizeof(struct qeth_arp_cache_entry))) 1873 arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
1945 rc = -EFAULT; 1874 IPA_CMD_ASS_ARP_ADD_ENTRY :
1946 else 1875 IPA_CMD_ASS_ARP_REMOVE_ENTRY;
1947 rc = qeth_l3_arp_remove_entry(card, &arp_entry); 1876 return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
1948 break;
1949 case SIOC_QETH_ARP_FLUSH_CACHE: 1877 case SIOC_QETH_ARP_FLUSH_CACHE:
1950 if (!capable(CAP_NET_ADMIN)) { 1878 if (!capable(CAP_NET_ADMIN)) {
1951 rc = -EPERM; 1879 rc = -EPERM;
@@ -2383,12 +2311,12 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
2383 .ndo_neigh_setup = qeth_l3_neigh_setup, 2311 .ndo_neigh_setup = qeth_l3_neigh_setup,
2384}; 2312};
2385 2313
2386static int qeth_l3_setup_netdev(struct qeth_card *card) 2314static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
2387{ 2315{
2388 unsigned int headroom; 2316 unsigned int headroom;
2389 int rc; 2317 int rc;
2390 2318
2391 if (card->dev->netdev_ops) 2319 if (qeth_netdev_is_registered(card->dev))
2392 return 0; 2320 return 0;
2393 2321
2394 if (card->info.type == QETH_CARD_TYPE_OSD || 2322 if (card->info.type == QETH_CARD_TYPE_OSD ||
@@ -2457,6 +2385,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2457 2385
2458 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); 2386 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
2459 rc = register_netdev(card->dev); 2387 rc = register_netdev(card->dev);
2388 if (!rc && carrier_ok)
2389 netif_carrier_on(card->dev);
2390
2460out: 2391out:
2461 if (rc) 2392 if (rc)
2462 card->dev->netdev_ops = NULL; 2393 card->dev->netdev_ops = NULL;
@@ -2497,7 +2428,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2497 if (cgdev->state == CCWGROUP_ONLINE) 2428 if (cgdev->state == CCWGROUP_ONLINE)
2498 qeth_l3_set_offline(cgdev); 2429 qeth_l3_set_offline(cgdev);
2499 2430
2500 unregister_netdev(card->dev); 2431 if (qeth_netdev_is_registered(card->dev))
2432 unregister_netdev(card->dev);
2501 qeth_l3_clear_ip_htable(card, 0); 2433 qeth_l3_clear_ip_htable(card, 0);
2502 qeth_l3_clear_ipato_list(card); 2434 qeth_l3_clear_ipato_list(card);
2503} 2435}
@@ -2507,6 +2439,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
2507 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2439 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2508 int rc = 0; 2440 int rc = 0;
2509 enum qeth_card_states recover_flag; 2441 enum qeth_card_states recover_flag;
2442 bool carrier_ok;
2510 2443
2511 mutex_lock(&card->discipline_mutex); 2444 mutex_lock(&card->discipline_mutex);
2512 mutex_lock(&card->conf_mutex); 2445 mutex_lock(&card->conf_mutex);
@@ -2514,14 +2447,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
2514 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 2447 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
2515 2448
2516 recover_flag = card->state; 2449 recover_flag = card->state;
2517 rc = qeth_core_hardsetup_card(card); 2450 rc = qeth_core_hardsetup_card(card, &carrier_ok);
2518 if (rc) { 2451 if (rc) {
2519 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); 2452 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
2520 rc = -ENODEV; 2453 rc = -ENODEV;
2521 goto out_remove; 2454 goto out_remove;
2522 } 2455 }
2523 2456
2524 rc = qeth_l3_setup_netdev(card); 2457 rc = qeth_l3_setup_netdev(card, carrier_ok);
2525 if (rc) 2458 if (rc)
2526 goto out_remove; 2459 goto out_remove;
2527 2460
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 5c8ed7350a04..a36e4cf1841d 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -220,6 +220,7 @@ static int d7s_probe(struct platform_device *op)
220 dev_set_drvdata(&op->dev, p); 220 dev_set_drvdata(&op->dev, p);
221 d7s_device = p; 221 d7s_device = p;
222 err = 0; 222 err = 0;
223 of_node_put(opts);
223 224
224out: 225out:
225 return err; 226 return err;
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 56e962a01493..b8481927bfe4 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
910 for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) { 910 for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
911 pchild->mon_type[len] = ENVCTRL_NOMON; 911 pchild->mon_type[len] = ENVCTRL_NOMON;
912 } 912 }
913 of_node_put(root_node);
913 return; 914 return;
914 } 915 }
916 of_node_put(root_node);
915 } 917 }
916 918
917 /* Get the monitor channels. */ 919 /* Get the monitor channels. */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c7fccbb8f554..fa6e0c3b3aa6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -697,6 +697,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
697 */ 697 */
698 scsi_mq_uninit_cmd(cmd); 698 scsi_mq_uninit_cmd(cmd);
699 699
700 /*
701 * queue is still alive, so grab the ref for preventing it
702 * from being cleaned up during running queue.
703 */
704 percpu_ref_get(&q->q_usage_counter);
705
700 __blk_mq_end_request(req, error); 706 __blk_mq_end_request(req, error);
701 707
702 if (scsi_target(sdev)->single_lun || 708 if (scsi_target(sdev)->single_lun ||
@@ -704,6 +710,8 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
704 kblockd_schedule_work(&sdev->requeue_work); 710 kblockd_schedule_work(&sdev->requeue_work);
705 else 711 else
706 blk_mq_run_hw_queues(q, true); 712 blk_mq_run_hw_queues(q, true);
713
714 percpu_ref_put(&q->q_usage_counter);
707 } else { 715 } else {
708 unsigned long flags; 716 unsigned long flags;
709 717
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 7218fb963d0a..1382a8df6c75 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -777,9 +777,6 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
777 u8 la = txn->la; 777 u8 la = txn->la;
778 bool usr_msg = false; 778 bool usr_msg = false;
779 779
780 if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
781 return -EPROTONOSUPPORT;
782
783 if (txn->mt == SLIM_MSG_MT_CORE && 780 if (txn->mt == SLIM_MSG_MT_CORE &&
784 (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION && 781 (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
785 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW)) 782 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index 4399d1873e2d..9be41089edde 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -61,12 +61,6 @@
61#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58 61#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58
62#define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F 62#define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F
63 63
64/*
65 * Clock pause flag to indicate that the reconfig message
66 * corresponds to clock pause sequence
67 */
68#define SLIM_MSG_CLK_PAUSE_SEQ_FLG (1U << 8)
69
70/* Clock pause values per SLIMbus spec */ 64/* Clock pause values per SLIMbus spec */
71#define SLIM_CLK_FAST 0 65#define SLIM_CLK_FAST 0
72#define SLIM_CLK_CONST_PHASE 1 66#define SLIM_CLK_CONST_PHASE 1
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 3dc31627c655..0c2867deb36f 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -522,11 +522,11 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
522 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); 522 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
523 mtk_spi_setup_packet(master); 523 mtk_spi_setup_packet(master);
524 524
525 cnt = len / 4; 525 cnt = mdata->xfer_len / 4;
526 iowrite32_rep(mdata->base + SPI_TX_DATA_REG, 526 iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
527 trans->tx_buf + mdata->num_xfered, cnt); 527 trans->tx_buf + mdata->num_xfered, cnt);
528 528
529 remainder = len % 4; 529 remainder = mdata->xfer_len % 4;
530 if (remainder > 0) { 530 if (remainder > 0) {
531 reg_val = 0; 531 reg_val = 0;
532 memcpy(&reg_val, 532 memcpy(&reg_val,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index f024c3fc3679..2fd8881fcd65 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1540,13 +1540,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
1540/* work with hotplug and coldplug */ 1540/* work with hotplug and coldplug */
1541MODULE_ALIAS("platform:omap2_mcspi"); 1541MODULE_ALIAS("platform:omap2_mcspi");
1542 1542
1543#ifdef CONFIG_SUSPEND 1543static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
1544static int omap2_mcspi_suspend_noirq(struct device *dev)
1545{ 1544{
1546 return pinctrl_pm_select_sleep_state(dev); 1545 struct spi_master *master = dev_get_drvdata(dev);
1546 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1547 int error;
1548
1549 error = pinctrl_pm_select_sleep_state(dev);
1550 if (error)
1551 dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1552 __func__, error);
1553
1554 error = spi_master_suspend(master);
1555 if (error)
1556 dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
1557 __func__, error);
1558
1559 return pm_runtime_force_suspend(dev);
1547} 1560}
1548 1561
1549static int omap2_mcspi_resume_noirq(struct device *dev) 1562static int __maybe_unused omap2_mcspi_resume(struct device *dev)
1550{ 1563{
1551 struct spi_master *master = dev_get_drvdata(dev); 1564 struct spi_master *master = dev_get_drvdata(dev);
1552 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1565 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
@@ -1557,17 +1570,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev)
1557 dev_warn(mcspi->dev, "%s: failed to set pins: %i\n", 1570 dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1558 __func__, error); 1571 __func__, error);
1559 1572
1560 return 0; 1573 error = spi_master_resume(master);
1561} 1574 if (error)
1575 dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
1576 __func__, error);
1562 1577
1563#else 1578 return pm_runtime_force_resume(dev);
1564#define omap2_mcspi_suspend_noirq NULL 1579}
1565#define omap2_mcspi_resume_noirq NULL
1566#endif
1567 1580
1568static const struct dev_pm_ops omap2_mcspi_pm_ops = { 1581static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1569 .suspend_noirq = omap2_mcspi_suspend_noirq, 1582 SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
1570 .resume_noirq = omap2_mcspi_resume_noirq, 1583 omap2_mcspi_resume)
1571 .runtime_resume = omap_mcspi_runtime_resume, 1584 .runtime_resume = omap_mcspi_runtime_resume,
1572}; 1585};
1573 1586
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index e90b17775284..09a940066c0e 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -1005,35 +1005,38 @@ enum i8254_mode {
1005 * and INSN_DEVICE_CONFIG_GET_ROUTES. 1005 * and INSN_DEVICE_CONFIG_GET_ROUTES.
1006 */ 1006 */
1007#define NI_NAMES_BASE 0x8000u 1007#define NI_NAMES_BASE 0x8000u
1008
1009#define _TERM_N(base, n, x) ((base) + ((x) & ((n) - 1)))
1010
1008/* 1011/*
1009 * not necessarily all allowed 64 PFIs are valid--certainly not for all devices 1012 * not necessarily all allowed 64 PFIs are valid--certainly not for all devices
1010 */ 1013 */
1011#define NI_PFI(x) (NI_NAMES_BASE + ((x) & 0x3f)) 1014#define NI_PFI(x) _TERM_N(NI_NAMES_BASE, 64, x)
1012/* 8 trigger lines by standard, Some devices cannot talk to all eight. */ 1015/* 8 trigger lines by standard, Some devices cannot talk to all eight. */
1013#define TRIGGER_LINE(x) (NI_PFI(-1) + 1 + ((x) & 0x7)) 1016#define TRIGGER_LINE(x) _TERM_N(NI_PFI(-1) + 1, 8, x)
1014/* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */ 1017/* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */
1015#define NI_RTSI_BRD(x) (TRIGGER_LINE(-1) + 1 + ((x) & 0x3)) 1018#define NI_RTSI_BRD(x) _TERM_N(TRIGGER_LINE(-1) + 1, 4, x)
1016 1019
1017/* *** Counter/timer names : 8 counters max *** */ 1020/* *** Counter/timer names : 8 counters max *** */
1018#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1) 1021#define NI_MAX_COUNTERS 8
1019#define NI_MAX_COUNTERS 7 1022#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1)
1020#define NI_CtrSource(x) (NI_COUNTER_NAMES_BASE + ((x) & NI_MAX_COUNTERS)) 1023#define NI_CtrSource(x) _TERM_N(NI_COUNTER_NAMES_BASE, NI_MAX_COUNTERS, x)
1021/* Gate, Aux, A,B,Z are all treated, at times as gates */ 1024/* Gate, Aux, A,B,Z are all treated, at times as gates */
1022#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1) 1025#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1)
1023#define NI_CtrGate(x) (NI_GATES_NAMES_BASE + ((x) & NI_MAX_COUNTERS)) 1026#define NI_CtrGate(x) _TERM_N(NI_GATES_NAMES_BASE, NI_MAX_COUNTERS, x)
1024#define NI_CtrAux(x) (NI_CtrGate(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1027#define NI_CtrAux(x) _TERM_N(NI_CtrGate(-1) + 1, NI_MAX_COUNTERS, x)
1025#define NI_CtrA(x) (NI_CtrAux(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1028#define NI_CtrA(x) _TERM_N(NI_CtrAux(-1) + 1, NI_MAX_COUNTERS, x)
1026#define NI_CtrB(x) (NI_CtrA(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1029#define NI_CtrB(x) _TERM_N(NI_CtrA(-1) + 1, NI_MAX_COUNTERS, x)
1027#define NI_CtrZ(x) (NI_CtrB(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1030#define NI_CtrZ(x) _TERM_N(NI_CtrB(-1) + 1, NI_MAX_COUNTERS, x)
1028#define NI_GATES_NAMES_MAX NI_CtrZ(-1) 1031#define NI_GATES_NAMES_MAX NI_CtrZ(-1)
1029#define NI_CtrArmStartTrigger(x) (NI_CtrZ(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1032#define NI_CtrArmStartTrigger(x) _TERM_N(NI_CtrZ(-1) + 1, NI_MAX_COUNTERS, x)
1030#define NI_CtrInternalOutput(x) \ 1033#define NI_CtrInternalOutput(x) \
1031 (NI_CtrArmStartTrigger(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1034 _TERM_N(NI_CtrArmStartTrigger(-1) + 1, NI_MAX_COUNTERS, x)
1032/** external pin(s) labeled conveniently as Ctr<i>Out. */ 1035/** external pin(s) labeled conveniently as Ctr<i>Out. */
1033#define NI_CtrOut(x) (NI_CtrInternalOutput(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1036#define NI_CtrOut(x) _TERM_N(NI_CtrInternalOutput(-1) + 1, NI_MAX_COUNTERS, x)
1034/** For Buffered sampling of ctr -- x series capability. */ 1037/** For Buffered sampling of ctr -- x series capability. */
1035#define NI_CtrSampleClock(x) (NI_CtrOut(-1) + 1 + ((x) & NI_MAX_COUNTERS)) 1038#define NI_CtrSampleClock(x) _TERM_N(NI_CtrOut(-1) + 1, NI_MAX_COUNTERS, x)
1036#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1) 1039#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1)
1037 1040
1038enum ni_common_signal_names { 1041enum ni_common_signal_names {
1039 /* PXI_Star: this is a non-NI-specific signal */ 1042 /* PXI_Star: this is a non-NI-specific signal */
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 2d1e0325d04d..5edf59ac6706 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2843,7 +2843,8 @@ static int ni_ao_insn_config(struct comedi_device *dev,
2843 return ni_ao_arm(dev, s); 2843 return ni_ao_arm(dev, s);
2844 case INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS: 2844 case INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS:
2845 /* we don't care about actual channels */ 2845 /* we don't care about actual channels */
2846 data[1] = board->ao_speed; 2846 /* data[3] : chanlist_len */
2847 data[1] = board->ao_speed * data[3];
2847 data[2] = 0; 2848 data[2] = 0;
2848 return 0; 2849 return 0;
2849 default: 2850 default:
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index a53231b08d30..e3425bf082ae 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -310,6 +310,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
310 ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2); 310 ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
311 break; 311 break;
312 } 312 }
313 /* fall through */
313 314
314 case IPIPEIF_SDRAM_YUV: 315 case IPIPEIF_SDRAM_YUV:
315 /* Set clock divider */ 316 /* Set clock divider */
diff --git a/drivers/staging/media/sunxi/cedrus/TODO b/drivers/staging/media/sunxi/cedrus/TODO
index ec277ece47af..a951b3fd1ea1 100644
--- a/drivers/staging/media/sunxi/cedrus/TODO
+++ b/drivers/staging/media/sunxi/cedrus/TODO
@@ -5,3 +5,8 @@ Before this stateless decoder driver can leave the staging area:
5* Userspace support for the Request API needs to be reviewed; 5* Userspace support for the Request API needs to be reviewed;
6* Another stateless decoder driver should be submitted; 6* Another stateless decoder driver should be submitted;
7* At least one stateless encoder driver should be submitted. 7* At least one stateless encoder driver should be submitted.
8* When queueing a request containing references to I frames, the
9 refcount of the memory for those I frames needs to be incremented
10 and decremented when the request is completed. This will likely
11 require some help from vb2. The driver should fail the request
12 if the memory/buffer is gone.
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 82558455384a..c912c70b3ef7 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -108,17 +108,6 @@ static int cedrus_request_validate(struct media_request *req)
108 unsigned int count; 108 unsigned int count;
109 unsigned int i; 109 unsigned int i;
110 110
111 count = vb2_request_buffer_cnt(req);
112 if (!count) {
113 v4l2_info(&ctx->dev->v4l2_dev,
114 "No buffer was provided with the request\n");
115 return -ENOENT;
116 } else if (count > 1) {
117 v4l2_info(&ctx->dev->v4l2_dev,
118 "More than one buffer was provided with the request\n");
119 return -EINVAL;
120 }
121
122 list_for_each_entry(obj, &req->objects, list) { 111 list_for_each_entry(obj, &req->objects, list) {
123 struct vb2_buffer *vb; 112 struct vb2_buffer *vb;
124 113
@@ -133,6 +122,17 @@ static int cedrus_request_validate(struct media_request *req)
133 if (!ctx) 122 if (!ctx)
134 return -ENOENT; 123 return -ENOENT;
135 124
125 count = vb2_request_buffer_cnt(req);
126 if (!count) {
127 v4l2_info(&ctx->dev->v4l2_dev,
128 "No buffer was provided with the request\n");
129 return -ENOENT;
130 } else if (count > 1) {
131 v4l2_info(&ctx->dev->v4l2_dev,
132 "More than one buffer was provided with the request\n");
133 return -EINVAL;
134 }
135
136 parent_hdl = &ctx->hdl; 136 parent_hdl = &ctx->hdl;
137 137
138 hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl); 138 hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
@@ -253,7 +253,7 @@ static const struct v4l2_m2m_ops cedrus_m2m_ops = {
253 253
254static const struct media_device_ops cedrus_m2m_media_ops = { 254static const struct media_device_ops cedrus_m2m_media_ops = {
255 .req_validate = cedrus_request_validate, 255 .req_validate = cedrus_request_validate,
256 .req_queue = vb2_m2m_request_queue, 256 .req_queue = v4l2_m2m_request_queue,
257}; 257};
258 258
259static int cedrus_probe(struct platform_device *pdev) 259static int cedrus_probe(struct platform_device *pdev)
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 6a18cf73c85e..18936cdb1083 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -351,7 +351,7 @@ static ssize_t set_datatype_show(struct device *dev,
351 351
352 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) { 352 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
353 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type) 353 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
354 return snprintf(buf, PAGE_SIZE, ch_data_type[i].name); 354 return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name);
355 } 355 }
356 return snprintf(buf, PAGE_SIZE, "unconfigured\n"); 356 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
357} 357}
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index df6ebf41bdea..5831f816c17b 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -335,6 +335,8 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
335 /* tx desc */ 335 /* tx desc */
336 src = sg->src_addr; 336 src = sg->src_addr;
337 for (i = 0; i < chan->desc->num_sgs; i++) { 337 for (i = 0; i < chan->desc->num_sgs; i++) {
338 tx_desc = &chan->tx_ring[chan->tx_idx];
339
338 if (len > HSDMA_MAX_PLEN) 340 if (len > HSDMA_MAX_PLEN)
339 tlen = HSDMA_MAX_PLEN; 341 tlen = HSDMA_MAX_PLEN;
340 else 342 else
@@ -344,7 +346,6 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
344 tx_desc->addr1 = src; 346 tx_desc->addr1 = src;
345 tx_desc->flags |= HSDMA_DESC_PLEN1(tlen); 347 tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
346 } else { 348 } else {
347 tx_desc = &chan->tx_ring[chan->tx_idx];
348 tx_desc->addr0 = src; 349 tx_desc->addr0 = src;
349 tx_desc->flags = HSDMA_DESC_PLEN0(tlen); 350 tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
350 351
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index b8566ed898f1..aa98fbb17013 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -82,7 +82,7 @@ static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev,
82 struct property *prop; 82 struct property *prop;
83 const char *function_name, *group_name; 83 const char *function_name, *group_name;
84 int ret; 84 int ret;
85 int ngroups; 85 int ngroups = 0;
86 unsigned int reserved_maps = 0; 86 unsigned int reserved_maps = 0;
87 87
88 for_each_node_with_property(np_config, "group") 88 for_each_node_with_property(np_config, "group")
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 85077947b9b8..85aba8a503cd 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -109,12 +109,12 @@ static void update_recvframe_phyinfo(union recv_frame *precvframe,
109 rx_bssid = get_hdr_bssid(wlanhdr); 109 rx_bssid = get_hdr_bssid(wlanhdr);
110 pkt_info.bssid_match = ((!IsFrameTypeCtrl(wlanhdr)) && 110 pkt_info.bssid_match = ((!IsFrameTypeCtrl(wlanhdr)) &&
111 !pattrib->icv_err && !pattrib->crc_err && 111 !pattrib->icv_err && !pattrib->crc_err &&
112 !ether_addr_equal(rx_bssid, my_bssid)); 112 ether_addr_equal(rx_bssid, my_bssid));
113 113
114 rx_ra = get_ra(wlanhdr); 114 rx_ra = get_ra(wlanhdr);
115 my_hwaddr = myid(&padapter->eeprompriv); 115 my_hwaddr = myid(&padapter->eeprompriv);
116 pkt_info.to_self = pkt_info.bssid_match && 116 pkt_info.to_self = pkt_info.bssid_match &&
117 !ether_addr_equal(rx_ra, my_hwaddr); 117 ether_addr_equal(rx_ra, my_hwaddr);
118 118
119 119
120 pkt_info.is_beacon = pkt_info.bssid_match && 120 pkt_info.is_beacon = pkt_info.bssid_match &&
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index af2234798fa8..db553f2e4c0b 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1277,7 +1277,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
1277 1277
1278 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); 1278 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
1279 sinfo->tx_packets = psta->sta_stats.tx_pkts; 1279 sinfo->tx_packets = psta->sta_stats.tx_pkts;
1280 1280 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
1281 } 1281 }
1282 1282
1283 /* for Ad-Hoc/AP mode */ 1283 /* for Ad-Hoc/AP mode */
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 28bfdbdc6e76..b8631baf128d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev,
2289exit: 2289exit:
2290 kfree(ptmp); 2290 kfree(ptmp);
2291 2291
2292 return 0; 2292 return ret;
2293} 2293}
2294 2294
2295static int rtw_wx_write32(struct net_device *dev, 2295static int rtw_wx_write32(struct net_device *dev,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index ea789376de0f..45de21c210c1 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1795,6 +1795,7 @@ vchiq_compat_ioctl_await_completion(struct file *file,
1795 struct vchiq_await_completion32 args32; 1795 struct vchiq_await_completion32 args32;
1796 struct vchiq_completion_data32 completion32; 1796 struct vchiq_completion_data32 completion32;
1797 unsigned int *msgbufcount32; 1797 unsigned int *msgbufcount32;
1798 unsigned int msgbufcount_native;
1798 compat_uptr_t msgbuf32; 1799 compat_uptr_t msgbuf32;
1799 void *msgbuf; 1800 void *msgbuf;
1800 void **msgbufptr; 1801 void **msgbufptr;
@@ -1906,7 +1907,11 @@ vchiq_compat_ioctl_await_completion(struct file *file,
1906 sizeof(completion32))) 1907 sizeof(completion32)))
1907 return -EFAULT; 1908 return -EFAULT;
1908 1909
1909 args32.msgbufcount--; 1910 if (get_user(msgbufcount_native, &args->msgbufcount))
1911 return -EFAULT;
1912
1913 if (!msgbufcount_native)
1914 args32.msgbufcount--;
1910 1915
1911 msgbufcount32 = 1916 msgbufcount32 =
1912 &((struct vchiq_await_completion32 __user *)arg)->msgbufcount; 1917 &((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 52ff854f0d6c..cd96994dc094 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -863,6 +863,30 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
863} 863}
864static DEVICE_ATTR(key, 0600, key_show, key_store); 864static DEVICE_ATTR(key, 0600, key_show, key_store);
865 865
866static void nvm_authenticate_start(struct tb_switch *sw)
867{
868 struct pci_dev *root_port;
869
870 /*
871 * During host router NVM upgrade we should not allow root port to
872 * go into D3cold because some root ports cannot trigger PME
873 * itself. To be on the safe side keep the root port in D0 during
874 * the whole upgrade process.
875 */
876 root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
877 if (root_port)
878 pm_runtime_get_noresume(&root_port->dev);
879}
880
881static void nvm_authenticate_complete(struct tb_switch *sw)
882{
883 struct pci_dev *root_port;
884
885 root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
886 if (root_port)
887 pm_runtime_put(&root_port->dev);
888}
889
866static ssize_t nvm_authenticate_show(struct device *dev, 890static ssize_t nvm_authenticate_show(struct device *dev,
867 struct device_attribute *attr, char *buf) 891 struct device_attribute *attr, char *buf)
868{ 892{
@@ -912,10 +936,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
912 936
913 sw->nvm->authenticating = true; 937 sw->nvm->authenticating = true;
914 938
915 if (!tb_route(sw)) 939 if (!tb_route(sw)) {
940 /*
941 * Keep root port from suspending as long as the
942 * NVM upgrade process is running.
943 */
944 nvm_authenticate_start(sw);
916 ret = nvm_authenticate_host(sw); 945 ret = nvm_authenticate_host(sw);
917 else 946 if (ret)
947 nvm_authenticate_complete(sw);
948 } else {
918 ret = nvm_authenticate_device(sw); 949 ret = nvm_authenticate_device(sw);
950 }
919 pm_runtime_mark_last_busy(&sw->dev); 951 pm_runtime_mark_last_busy(&sw->dev);
920 pm_runtime_put_autosuspend(&sw->dev); 952 pm_runtime_put_autosuspend(&sw->dev);
921 } 953 }
@@ -1334,6 +1366,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
1334 if (ret <= 0) 1366 if (ret <= 0)
1335 return ret; 1367 return ret;
1336 1368
1369 /* Now we can allow root port to suspend again */
1370 if (!tb_route(sw))
1371 nvm_authenticate_complete(sw);
1372
1337 if (status) { 1373 if (status) {
1338 tb_sw_info(sw, "switch flash authentication failed\n"); 1374 tb_sw_info(sw, "switch flash authentication failed\n");
1339 tb_switch_set_uuid(sw); 1375 tb_switch_set_uuid(sw);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ff6ba6d86cd8..cc56cb3b3eca 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1614,10 +1614,10 @@ static void sci_request_dma(struct uart_port *port)
1614 hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1614 hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1615 s->rx_timer.function = rx_timer_fn; 1615 s->rx_timer.function = rx_timer_fn;
1616 1616
1617 s->chan_rx_saved = s->chan_rx = chan;
1618
1617 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) 1619 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1618 sci_submit_rx(s); 1620 sci_submit_rx(s);
1619
1620 s->chan_rx_saved = s->chan_rx = chan;
1621 } 1621 }
1622} 1622}
1623 1623
@@ -3102,6 +3102,7 @@ static struct uart_driver sci_uart_driver = {
3102static int sci_remove(struct platform_device *dev) 3102static int sci_remove(struct platform_device *dev)
3103{ 3103{
3104 struct sci_port *port = platform_get_drvdata(dev); 3104 struct sci_port *port = platform_get_drvdata(dev);
3105 unsigned int type = port->port.type; /* uart_remove_... clears it */
3105 3106
3106 sci_ports_in_use &= ~BIT(port->port.line); 3107 sci_ports_in_use &= ~BIT(port->port.line);
3107 uart_remove_one_port(&sci_uart_driver, &port->port); 3108 uart_remove_one_port(&sci_uart_driver, &port->port);
@@ -3112,8 +3113,7 @@ static int sci_remove(struct platform_device *dev)
3112 sysfs_remove_file(&dev->dev.kobj, 3113 sysfs_remove_file(&dev->dev.kobj,
3113 &dev_attr_rx_fifo_trigger.attr); 3114 &dev_attr_rx_fifo_trigger.attr);
3114 } 3115 }
3115 if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB || 3116 if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
3116 port->port.type == PORT_HSCIF) {
3117 sysfs_remove_file(&dev->dev.kobj, 3117 sysfs_remove_file(&dev->dev.kobj,
3118 &dev_attr_rx_fifo_timeout.attr); 3118 &dev_attr_rx_fifo_timeout.attr);
3119 } 3119 }
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
index 70a4ea4eaa6e..990376576970 100644
--- a/drivers/tty/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
@@ -112,6 +112,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
112 mode = of_get_property(dp, mode_prop, NULL); 112 mode = of_get_property(dp, mode_prop, NULL);
113 if (!mode) 113 if (!mode)
114 mode = "9600,8,n,1,-"; 114 mode = "9600,8,n,1,-";
115 of_node_put(dp);
115 } 116 }
116 117
117 cflag = CREAD | HUPCL | CLOCAL; 118 cflag = CREAD | HUPCL | CLOCAL;
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 7576ceace571..f438eaa68246 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
77 else 77 else
78 cbaud += 15; 78 cbaud += 15;
79 } 79 }
80 return baud_table[cbaud]; 80 return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
81} 81}
82EXPORT_SYMBOL(tty_termios_baud_rate); 82EXPORT_SYMBOL(tty_termios_baud_rate);
83 83
@@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
113 else 113 else
114 cbaud += 15; 114 cbaud += 15;
115 } 115 }
116 return baud_table[cbaud]; 116 return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
117#else /* IBSHIFT */ 117#else /* IBSHIFT */
118 return tty_termios_baud_rate(termios); 118 return tty_termios_baud_rate(termios);
119#endif /* IBSHIFT */ 119#endif /* IBSHIFT */
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 55370e651db3..41ec8e5010f3 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1548,7 +1548,7 @@ static void csi_K(struct vc_data *vc, int vpar)
1548 scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count); 1548 scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
1549 vc->vc_need_wrap = 0; 1549 vc->vc_need_wrap = 0;
1550 if (con_should_update(vc)) 1550 if (con_should_update(vc))
1551 do_update_region(vc, (unsigned long) start, count); 1551 do_update_region(vc, (unsigned long)(start + offset), count);
1552} 1552}
1553 1553
1554static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */ 1554static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 85644669fbe7..0a357db4b31b 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -961,6 +961,8 @@ int __uio_register_device(struct module *owner,
961 if (ret) 961 if (ret)
962 goto err_uio_dev_add_attributes; 962 goto err_uio_dev_add_attributes;
963 963
964 info->uio_dev = idev;
965
964 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { 966 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
965 /* 967 /*
966 * Note that we deliberately don't use devm_request_irq 968 * Note that we deliberately don't use devm_request_irq
@@ -972,11 +974,12 @@ int __uio_register_device(struct module *owner,
972 */ 974 */
973 ret = request_irq(info->irq, uio_interrupt, 975 ret = request_irq(info->irq, uio_interrupt,
974 info->irq_flags, info->name, idev); 976 info->irq_flags, info->name, idev);
975 if (ret) 977 if (ret) {
978 info->uio_dev = NULL;
976 goto err_request_irq; 979 goto err_request_irq;
980 }
977 } 981 }
978 982
979 info->uio_dev = idev;
980 return 0; 983 return 0;
981 984
982err_request_irq: 985err_request_irq:
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 47d75c20c211..1b68fed464cb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1696,6 +1696,9 @@ static const struct usb_device_id acm_ids[] = {
1696 { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ 1696 { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
1697 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1697 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1698 }, 1698 },
1699 { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
1700 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1701 },
1699 { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */ 1702 { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
1700 .driver_info = QUIRK_CONTROL_LINE_STATE, }, 1703 .driver_info = QUIRK_CONTROL_LINE_STATE, },
1701 { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ 1704 { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c6077d582d29..0f9381b69a3b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2794,6 +2794,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2794 int i, status; 2794 int i, status;
2795 u16 portchange, portstatus; 2795 u16 portchange, portstatus;
2796 struct usb_port *port_dev = hub->ports[port1 - 1]; 2796 struct usb_port *port_dev = hub->ports[port1 - 1];
2797 int reset_recovery_time;
2797 2798
2798 if (!hub_is_superspeed(hub->hdev)) { 2799 if (!hub_is_superspeed(hub->hdev)) {
2799 if (warm) { 2800 if (warm) {
@@ -2849,7 +2850,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2849 USB_PORT_FEAT_C_BH_PORT_RESET); 2850 USB_PORT_FEAT_C_BH_PORT_RESET);
2850 usb_clear_port_feature(hub->hdev, port1, 2851 usb_clear_port_feature(hub->hdev, port1,
2851 USB_PORT_FEAT_C_PORT_LINK_STATE); 2852 USB_PORT_FEAT_C_PORT_LINK_STATE);
2852 usb_clear_port_feature(hub->hdev, port1, 2853
2854 if (udev)
2855 usb_clear_port_feature(hub->hdev, port1,
2853 USB_PORT_FEAT_C_CONNECTION); 2856 USB_PORT_FEAT_C_CONNECTION);
2854 2857
2855 /* 2858 /*
@@ -2885,11 +2888,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2885 2888
2886done: 2889done:
2887 if (status == 0) { 2890 if (status == 0) {
2888 /* TRSTRCY = 10 ms; plus some extra */
2889 if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM) 2891 if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM)
2890 usleep_range(10000, 12000); 2892 usleep_range(10000, 12000);
2891 else 2893 else {
2892 msleep(10 + 40); 2894 /* TRSTRCY = 10 ms; plus some extra */
2895 reset_recovery_time = 10 + 40;
2896
2897 /* Hub needs extra delay after resetting its port. */
2898 if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET)
2899 reset_recovery_time += 100;
2900
2901 msleep(reset_recovery_time);
2902 }
2893 2903
2894 if (udev) { 2904 if (udev) {
2895 struct usb_hcd *hcd = bus_to_hcd(udev->bus); 2905 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 178d6c6063c0..0690fcff0ea2 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -128,6 +128,9 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
128 case 'n': 128 case 'n':
129 flags |= USB_QUIRK_DELAY_CTRL_MSG; 129 flags |= USB_QUIRK_DELAY_CTRL_MSG;
130 break; 130 break;
131 case 'o':
132 flags |= USB_QUIRK_HUB_SLOW_RESET;
133 break;
131 /* Ignore unrecognized flag characters */ 134 /* Ignore unrecognized flag characters */
132 } 135 }
133 } 136 }
@@ -206,6 +209,9 @@ static const struct usb_device_id usb_quirk_list[] = {
206 /* Microsoft LifeCam-VX700 v2.0 */ 209 /* Microsoft LifeCam-VX700 v2.0 */
207 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 210 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
208 211
212 /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
213 { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
214
209 /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ 215 /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
210 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, 216 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
211 { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, 217 { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -380,6 +386,9 @@ static const struct usb_device_id usb_quirk_list[] = {
380 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 386 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
381 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 387 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
382 388
389 /* Terminus Technology Inc. Hub */
390 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
391
383 /* Corsair K70 RGB */ 392 /* Corsair K70 RGB */
384 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 393 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
385 394
@@ -391,6 +400,9 @@ static const struct usb_device_id usb_quirk_list[] = {
391 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | 400 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
392 USB_QUIRK_DELAY_CTRL_MSG }, 401 USB_QUIRK_DELAY_CTRL_MSG },
393 402
403 /* Corsair K70 LUX RGB */
404 { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
405
394 /* Corsair K70 LUX */ 406 /* Corsair K70 LUX */
395 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, 407 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
396 408
@@ -411,6 +423,11 @@ static const struct usb_device_id usb_quirk_list[] = {
411 { USB_DEVICE(0x2040, 0x7200), .driver_info = 423 { USB_DEVICE(0x2040, 0x7200), .driver_info =
412 USB_QUIRK_CONFIG_INTF_STRINGS }, 424 USB_QUIRK_CONFIG_INTF_STRINGS },
413 425
426 /* Raydium Touchscreen */
427 { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
428
429 { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
430
414 /* DJI CineSSD */ 431 /* DJI CineSSD */
415 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, 432 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
416 433
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index d257c541e51b..7afc10872f1f 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -120,6 +120,7 @@ static int dwc2_pci_probe(struct pci_dev *pci,
120 dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO); 120 dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO);
121 if (!dwc2) { 121 if (!dwc2) {
122 dev_err(dev, "couldn't allocate dwc2 device\n"); 122 dev_err(dev, "couldn't allocate dwc2 device\n");
123 ret = -ENOMEM;
123 goto err; 124 goto err;
124 } 125 }
125 126
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index becfbb87f791..2f2048aa5fde 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1499,6 +1499,7 @@ static int dwc3_probe(struct platform_device *pdev)
1499 1499
1500err5: 1500err5:
1501 dwc3_event_buffers_cleanup(dwc); 1501 dwc3_event_buffers_cleanup(dwc);
1502 dwc3_ulpi_exit(dwc);
1502 1503
1503err4: 1504err4:
1504 dwc3_free_scratch_buffers(dwc); 1505 dwc3_free_scratch_buffers(dwc);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 1286076a8890..842795856bf4 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -283,8 +283,10 @@ err:
283static void dwc3_pci_remove(struct pci_dev *pci) 283static void dwc3_pci_remove(struct pci_dev *pci)
284{ 284{
285 struct dwc3_pci *dwc = pci_get_drvdata(pci); 285 struct dwc3_pci *dwc = pci_get_drvdata(pci);
286 struct pci_dev *pdev = dwc->pci;
286 287
287 gpiod_remove_lookup_table(&platform_bytcr_gpios); 288 if (pdev->device == PCI_DEVICE_ID_INTEL_BYT)
289 gpiod_remove_lookup_table(&platform_bytcr_gpios);
288#ifdef CONFIG_PM 290#ifdef CONFIG_PM
289 cancel_work_sync(&dwc->wakeup_work); 291 cancel_work_sync(&dwc->wakeup_work);
290#endif 292#endif
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 679c12e14522..9f92ee03dde7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1081,7 +1081,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
1081 /* Now prepare one extra TRB to align transfer size */ 1081 /* Now prepare one extra TRB to align transfer size */
1082 trb = &dep->trb_pool[dep->trb_enqueue]; 1082 trb = &dep->trb_pool[dep->trb_enqueue];
1083 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 1083 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
1084 maxp - rem, false, 0, 1084 maxp - rem, false, 1,
1085 req->request.stream_id, 1085 req->request.stream_id,
1086 req->request.short_not_ok, 1086 req->request.short_not_ok,
1087 req->request.no_interrupt); 1087 req->request.no_interrupt);
@@ -1125,7 +1125,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1125 /* Now prepare one extra TRB to align transfer size */ 1125 /* Now prepare one extra TRB to align transfer size */
1126 trb = &dep->trb_pool[dep->trb_enqueue]; 1126 trb = &dep->trb_pool[dep->trb_enqueue];
1127 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, 1127 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
1128 false, 0, req->request.stream_id, 1128 false, 1, req->request.stream_id,
1129 req->request.short_not_ok, 1129 req->request.short_not_ok,
1130 req->request.no_interrupt); 1130 req->request.no_interrupt);
1131 } else if (req->request.zero && req->request.length && 1131 } else if (req->request.zero && req->request.length &&
@@ -1141,7 +1141,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1141 /* Now prepare one extra TRB to handle ZLP */ 1141 /* Now prepare one extra TRB to handle ZLP */
1142 trb = &dep->trb_pool[dep->trb_enqueue]; 1142 trb = &dep->trb_pool[dep->trb_enqueue];
1143 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, 1143 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
1144 false, 0, req->request.stream_id, 1144 false, 1, req->request.stream_id,
1145 req->request.short_not_ok, 1145 req->request.short_not_ok,
1146 req->request.no_interrupt); 1146 req->request.no_interrupt);
1147 } else { 1147 } else {
@@ -1470,9 +1470,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1470 unsigned transfer_in_flight; 1470 unsigned transfer_in_flight;
1471 unsigned started; 1471 unsigned started;
1472 1472
1473 if (dep->flags & DWC3_EP_STALL)
1474 return 0;
1475
1476 if (dep->number > 1) 1473 if (dep->number > 1)
1477 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1474 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1478 else 1475 else
@@ -1494,8 +1491,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1494 else 1491 else
1495 dep->flags |= DWC3_EP_STALL; 1492 dep->flags |= DWC3_EP_STALL;
1496 } else { 1493 } else {
1497 if (!(dep->flags & DWC3_EP_STALL))
1498 return 0;
1499 1494
1500 ret = dwc3_send_clear_stall_ep_cmd(dep); 1495 ret = dwc3_send_clear_stall_ep_cmd(dep);
1501 if (ret) 1496 if (ret)
@@ -2259,7 +2254,7 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
2259 * with one TRB pending in the ring. We need to manually clear HWO bit 2254 * with one TRB pending in the ring. We need to manually clear HWO bit
2260 * from that TRB. 2255 * from that TRB.
2261 */ 2256 */
2262 if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { 2257 if ((req->zero || req->unaligned) && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
2263 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2258 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
2264 return 1; 2259 return 1;
2265 } 2260 }
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 3ada83d81bda..31e8bf3578c8 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -215,7 +215,6 @@ struct ffs_io_data {
215 215
216 struct mm_struct *mm; 216 struct mm_struct *mm;
217 struct work_struct work; 217 struct work_struct work;
218 struct work_struct cancellation_work;
219 218
220 struct usb_ep *ep; 219 struct usb_ep *ep;
221 struct usb_request *req; 220 struct usb_request *req;
@@ -1073,31 +1072,22 @@ ffs_epfile_open(struct inode *inode, struct file *file)
1073 return 0; 1072 return 0;
1074} 1073}
1075 1074
1076static void ffs_aio_cancel_worker(struct work_struct *work)
1077{
1078 struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
1079 cancellation_work);
1080
1081 ENTER();
1082
1083 usb_ep_dequeue(io_data->ep, io_data->req);
1084}
1085
1086static int ffs_aio_cancel(struct kiocb *kiocb) 1075static int ffs_aio_cancel(struct kiocb *kiocb)
1087{ 1076{
1088 struct ffs_io_data *io_data = kiocb->private; 1077 struct ffs_io_data *io_data = kiocb->private;
1089 struct ffs_data *ffs = io_data->ffs; 1078 struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
1090 int value; 1079 int value;
1091 1080
1092 ENTER(); 1081 ENTER();
1093 1082
1094 if (likely(io_data && io_data->ep && io_data->req)) { 1083 spin_lock_irq(&epfile->ffs->eps_lock);
1095 INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker); 1084
1096 queue_work(ffs->io_completion_wq, &io_data->cancellation_work); 1085 if (likely(io_data && io_data->ep && io_data->req))
1097 value = -EINPROGRESS; 1086 value = usb_ep_dequeue(io_data->ep, io_data->req);
1098 } else { 1087 else
1099 value = -EINVAL; 1088 value = -EINVAL;
1100 } 1089
1090 spin_unlock_irq(&epfile->ffs->eps_lock);
1101 1091
1102 return value; 1092 return value;
1103} 1093}
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 1000d864929c..0f026d445e31 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -401,12 +401,12 @@ done:
401static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) 401static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
402{ 402{
403 struct usb_request *req; 403 struct usb_request *req;
404 struct usb_request *tmp;
405 unsigned long flags; 404 unsigned long flags;
406 405
407 /* fill unused rxq slots with some skb */ 406 /* fill unused rxq slots with some skb */
408 spin_lock_irqsave(&dev->req_lock, flags); 407 spin_lock_irqsave(&dev->req_lock, flags);
409 list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) { 408 while (!list_empty(&dev->rx_reqs)) {
409 req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
410 list_del_init(&req->list); 410 list_del_init(&req->list);
411 spin_unlock_irqrestore(&dev->req_lock, flags); 411 spin_unlock_irqrestore(&dev->req_lock, flags);
412 412
@@ -1125,7 +1125,6 @@ void gether_disconnect(struct gether *link)
1125{ 1125{
1126 struct eth_dev *dev = link->ioport; 1126 struct eth_dev *dev = link->ioport;
1127 struct usb_request *req; 1127 struct usb_request *req;
1128 struct usb_request *tmp;
1129 1128
1130 WARN_ON(!dev); 1129 WARN_ON(!dev);
1131 if (!dev) 1130 if (!dev)
@@ -1142,7 +1141,8 @@ void gether_disconnect(struct gether *link)
1142 */ 1141 */
1143 usb_ep_disable(link->in_ep); 1142 usb_ep_disable(link->in_ep);
1144 spin_lock(&dev->req_lock); 1143 spin_lock(&dev->req_lock);
1145 list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) { 1144 while (!list_empty(&dev->tx_reqs)) {
1145 req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
1146 list_del(&req->list); 1146 list_del(&req->list);
1147 1147
1148 spin_unlock(&dev->req_lock); 1148 spin_unlock(&dev->req_lock);
@@ -1154,7 +1154,8 @@ void gether_disconnect(struct gether *link)
1154 1154
1155 usb_ep_disable(link->out_ep); 1155 usb_ep_disable(link->out_ep);
1156 spin_lock(&dev->req_lock); 1156 spin_lock(&dev->req_lock);
1157 list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) { 1157 while (!list_empty(&dev->rx_reqs)) {
1158 req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
1158 list_del(&req->list); 1159 list_del(&req->list);
1159 1160
1160 spin_unlock(&dev->req_lock); 1161 spin_unlock(&dev->req_lock);
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 3a16431da321..fcf13ef33b31 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void)
2033{ 2033{
2034 return machine_is_omap_innovator() 2034 return machine_is_omap_innovator()
2035 || machine_is_omap_osk() 2035 || machine_is_omap_osk()
2036 || machine_is_omap_palmte()
2036 || machine_is_sx1() 2037 || machine_is_sx1()
2037 /* No known omap7xx boards with vbus sense */ 2038 /* No known omap7xx boards with vbus sense */
2038 || cpu_is_omap7xx(); 2039 || cpu_is_omap7xx();
@@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void)
2041static int omap_udc_start(struct usb_gadget *g, 2042static int omap_udc_start(struct usb_gadget *g,
2042 struct usb_gadget_driver *driver) 2043 struct usb_gadget_driver *driver)
2043{ 2044{
2044 int status = -ENODEV; 2045 int status;
2045 struct omap_ep *ep; 2046 struct omap_ep *ep;
2046 unsigned long flags; 2047 unsigned long flags;
2047 2048
@@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g,
2079 goto done; 2080 goto done;
2080 } 2081 }
2081 } else { 2082 } else {
2083 status = 0;
2082 if (can_pullup(udc)) 2084 if (can_pullup(udc))
2083 pullup_enable(udc); 2085 pullup_enable(udc);
2084 else 2086 else
@@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
2593 2595
2594static void omap_udc_release(struct device *dev) 2596static void omap_udc_release(struct device *dev)
2595{ 2597{
2596 complete(udc->done); 2598 pullup_disable(udc);
2599 if (!IS_ERR_OR_NULL(udc->transceiver)) {
2600 usb_put_phy(udc->transceiver);
2601 udc->transceiver = NULL;
2602 }
2603 omap_writew(0, UDC_SYSCON1);
2604 remove_proc_file();
2605 if (udc->dc_clk) {
2606 if (udc->clk_requested)
2607 omap_udc_enable_clock(0);
2608 clk_put(udc->hhc_clk);
2609 clk_put(udc->dc_clk);
2610 }
2611 if (udc->done)
2612 complete(udc->done);
2597 kfree(udc); 2613 kfree(udc);
2598 udc = NULL;
2599} 2614}
2600 2615
2601static int 2616static int
@@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
2627 udc->gadget.speed = USB_SPEED_UNKNOWN; 2642 udc->gadget.speed = USB_SPEED_UNKNOWN;
2628 udc->gadget.max_speed = USB_SPEED_FULL; 2643 udc->gadget.max_speed = USB_SPEED_FULL;
2629 udc->gadget.name = driver_name; 2644 udc->gadget.name = driver_name;
2645 udc->gadget.quirk_ep_out_aligned_size = 1;
2630 udc->transceiver = xceiv; 2646 udc->transceiver = xceiv;
2631 2647
2632 /* ep0 is special; put it right after the SETUP buffer */ 2648 /* ep0 is special; put it right after the SETUP buffer */
@@ -2867,8 +2883,8 @@ bad_on_1710:
2867 udc->clr_halt = UDC_RESET_EP; 2883 udc->clr_halt = UDC_RESET_EP;
2868 2884
2869 /* USB general purpose IRQ: ep0, state changes, dma, etc */ 2885 /* USB general purpose IRQ: ep0, state changes, dma, etc */
2870 status = request_irq(pdev->resource[1].start, omap_udc_irq, 2886 status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
2871 0, driver_name, udc); 2887 omap_udc_irq, 0, driver_name, udc);
2872 if (status != 0) { 2888 if (status != 0) {
2873 ERR("can't get irq %d, err %d\n", 2889 ERR("can't get irq %d, err %d\n",
2874 (int) pdev->resource[1].start, status); 2890 (int) pdev->resource[1].start, status);
@@ -2876,20 +2892,20 @@ bad_on_1710:
2876 } 2892 }
2877 2893
2878 /* USB "non-iso" IRQ (PIO for all but ep0) */ 2894 /* USB "non-iso" IRQ (PIO for all but ep0) */
2879 status = request_irq(pdev->resource[2].start, omap_udc_pio_irq, 2895 status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
2880 0, "omap_udc pio", udc); 2896 omap_udc_pio_irq, 0, "omap_udc pio", udc);
2881 if (status != 0) { 2897 if (status != 0) {
2882 ERR("can't get irq %d, err %d\n", 2898 ERR("can't get irq %d, err %d\n",
2883 (int) pdev->resource[2].start, status); 2899 (int) pdev->resource[2].start, status);
2884 goto cleanup2; 2900 goto cleanup1;
2885 } 2901 }
2886#ifdef USE_ISO 2902#ifdef USE_ISO
2887 status = request_irq(pdev->resource[3].start, omap_udc_iso_irq, 2903 status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
2888 0, "omap_udc iso", udc); 2904 omap_udc_iso_irq, 0, "omap_udc iso", udc);
2889 if (status != 0) { 2905 if (status != 0) {
2890 ERR("can't get irq %d, err %d\n", 2906 ERR("can't get irq %d, err %d\n",
2891 (int) pdev->resource[3].start, status); 2907 (int) pdev->resource[3].start, status);
2892 goto cleanup3; 2908 goto cleanup1;
2893 } 2909 }
2894#endif 2910#endif
2895 if (cpu_is_omap16xx() || cpu_is_omap7xx()) { 2911 if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
@@ -2900,23 +2916,8 @@ bad_on_1710:
2900 } 2916 }
2901 2917
2902 create_proc_file(); 2918 create_proc_file();
2903 status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget, 2919 return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
2904 omap_udc_release); 2920 omap_udc_release);
2905 if (status)
2906 goto cleanup4;
2907
2908 return 0;
2909
2910cleanup4:
2911 remove_proc_file();
2912
2913#ifdef USE_ISO
2914cleanup3:
2915 free_irq(pdev->resource[2].start, udc);
2916#endif
2917
2918cleanup2:
2919 free_irq(pdev->resource[1].start, udc);
2920 2921
2921cleanup1: 2922cleanup1:
2922 kfree(udc); 2923 kfree(udc);
@@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev)
2943{ 2944{
2944 DECLARE_COMPLETION_ONSTACK(done); 2945 DECLARE_COMPLETION_ONSTACK(done);
2945 2946
2946 if (!udc)
2947 return -ENODEV;
2948
2949 usb_del_gadget_udc(&udc->gadget);
2950 if (udc->driver)
2951 return -EBUSY;
2952
2953 udc->done = &done; 2947 udc->done = &done;
2954 2948
2955 pullup_disable(udc); 2949 usb_del_gadget_udc(&udc->gadget);
2956 if (!IS_ERR_OR_NULL(udc->transceiver)) {
2957 usb_put_phy(udc->transceiver);
2958 udc->transceiver = NULL;
2959 }
2960 omap_writew(0, UDC_SYSCON1);
2961
2962 remove_proc_file();
2963
2964#ifdef USE_ISO
2965 free_irq(pdev->resource[3].start, udc);
2966#endif
2967 free_irq(pdev->resource[2].start, udc);
2968 free_irq(pdev->resource[1].start, udc);
2969 2950
2970 if (udc->dc_clk) { 2951 wait_for_completion(&done);
2971 if (udc->clk_requested)
2972 omap_udc_enable_clock(0);
2973 clk_put(udc->hhc_clk);
2974 clk_put(udc->dc_clk);
2975 }
2976 2952
2977 release_mem_region(pdev->resource[0].start, 2953 release_mem_region(pdev->resource[0].start,
2978 pdev->resource[0].end - pdev->resource[0].start + 1); 2954 pdev->resource[0].end - pdev->resource[0].start + 1);
2979 2955
2980 wait_for_completion(&done);
2981
2982 return 0; 2956 return 0;
2983} 2957}
2984 2958
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 27f00160332e..3c4abb5a1c3f 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -325,14 +325,16 @@ static int xhci_histb_remove(struct platform_device *dev)
325 struct xhci_hcd_histb *histb = platform_get_drvdata(dev); 325 struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
326 struct usb_hcd *hcd = histb->hcd; 326 struct usb_hcd *hcd = histb->hcd;
327 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 327 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
328 struct usb_hcd *shared_hcd = xhci->shared_hcd;
328 329
329 xhci->xhc_state |= XHCI_STATE_REMOVING; 330 xhci->xhc_state |= XHCI_STATE_REMOVING;
330 331
331 usb_remove_hcd(xhci->shared_hcd); 332 usb_remove_hcd(shared_hcd);
333 xhci->shared_hcd = NULL;
332 device_wakeup_disable(&dev->dev); 334 device_wakeup_disable(&dev->dev);
333 335
334 usb_remove_hcd(hcd); 336 usb_remove_hcd(hcd);
335 usb_put_hcd(xhci->shared_hcd); 337 usb_put_hcd(shared_hcd);
336 338
337 xhci_histb_host_disable(histb); 339 xhci_histb_host_disable(histb);
338 usb_put_hcd(hcd); 340 usb_put_hcd(hcd);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 12eea73d9f20..94aca1b5ac8a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -876,7 +876,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
876 status |= USB_PORT_STAT_SUSPEND; 876 status |= USB_PORT_STAT_SUSPEND;
877 } 877 }
878 if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && 878 if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
879 !DEV_SUPERSPEED_ANY(raw_port_status)) { 879 !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
880 if ((raw_port_status & PORT_RESET) || 880 if ((raw_port_status & PORT_RESET) ||
881 !(raw_port_status & PORT_PE)) 881 !(raw_port_status & PORT_PE))
882 return 0xffffffff; 882 return 0xffffffff;
@@ -921,7 +921,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
921 time_left = wait_for_completion_timeout( 921 time_left = wait_for_completion_timeout(
922 &bus_state->rexit_done[wIndex], 922 &bus_state->rexit_done[wIndex],
923 msecs_to_jiffies( 923 msecs_to_jiffies(
924 XHCI_MAX_REXIT_TIMEOUT)); 924 XHCI_MAX_REXIT_TIMEOUT_MS));
925 spin_lock_irqsave(&xhci->lock, flags); 925 spin_lock_irqsave(&xhci->lock, flags);
926 926
927 if (time_left) { 927 if (time_left) {
@@ -935,7 +935,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
935 } else { 935 } else {
936 int port_status = readl(port->addr); 936 int port_status = readl(port->addr);
937 xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", 937 xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
938 XHCI_MAX_REXIT_TIMEOUT, 938 XHCI_MAX_REXIT_TIMEOUT_MS,
939 port_status); 939 port_status);
940 status |= USB_PORT_STAT_SUSPEND; 940 status |= USB_PORT_STAT_SUSPEND;
941 clear_bit(wIndex, &bus_state->rexit_ports); 941 clear_bit(wIndex, &bus_state->rexit_ports);
@@ -1474,15 +1474,18 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1474 unsigned long flags; 1474 unsigned long flags;
1475 struct xhci_hub *rhub; 1475 struct xhci_hub *rhub;
1476 struct xhci_port **ports; 1476 struct xhci_port **ports;
1477 u32 portsc_buf[USB_MAXCHILDREN];
1478 bool wake_enabled;
1477 1479
1478 rhub = xhci_get_rhub(hcd); 1480 rhub = xhci_get_rhub(hcd);
1479 ports = rhub->ports; 1481 ports = rhub->ports;
1480 max_ports = rhub->num_ports; 1482 max_ports = rhub->num_ports;
1481 bus_state = &xhci->bus_state[hcd_index(hcd)]; 1483 bus_state = &xhci->bus_state[hcd_index(hcd)];
1484 wake_enabled = hcd->self.root_hub->do_remote_wakeup;
1482 1485
1483 spin_lock_irqsave(&xhci->lock, flags); 1486 spin_lock_irqsave(&xhci->lock, flags);
1484 1487
1485 if (hcd->self.root_hub->do_remote_wakeup) { 1488 if (wake_enabled) {
1486 if (bus_state->resuming_ports || /* USB2 */ 1489 if (bus_state->resuming_ports || /* USB2 */
1487 bus_state->port_remote_wakeup) { /* USB3 */ 1490 bus_state->port_remote_wakeup) { /* USB3 */
1488 spin_unlock_irqrestore(&xhci->lock, flags); 1491 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1490,26 +1493,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1490 return -EBUSY; 1493 return -EBUSY;
1491 } 1494 }
1492 } 1495 }
1493 1496 /*
1494 port_index = max_ports; 1497 * Prepare ports for suspend, but don't write anything before all ports
1498 * are checked and we know bus suspend can proceed
1499 */
1495 bus_state->bus_suspended = 0; 1500 bus_state->bus_suspended = 0;
1501 port_index = max_ports;
1496 while (port_index--) { 1502 while (port_index--) {
1497 /* suspend the port if the port is not suspended */
1498 u32 t1, t2; 1503 u32 t1, t2;
1499 int slot_id;
1500 1504
1501 t1 = readl(ports[port_index]->addr); 1505 t1 = readl(ports[port_index]->addr);
1502 t2 = xhci_port_state_to_neutral(t1); 1506 t2 = xhci_port_state_to_neutral(t1);
1507 portsc_buf[port_index] = 0;
1503 1508
1504 if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) { 1509 /* Bail out if a USB3 port has a new device in link training */
1505 xhci_dbg(xhci, "port %d not suspended\n", port_index); 1510 if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1506 slot_id = xhci_find_slot_id_by_port(hcd, xhci, 1511 bus_state->bus_suspended = 0;
1507 port_index + 1); 1512 spin_unlock_irqrestore(&xhci->lock, flags);
1508 if (slot_id) { 1513 xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
1514 return -EBUSY;
1515 }
1516
1517 /* suspend ports in U0, or bail out for new connect changes */
1518 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
1519 if ((t1 & PORT_CSC) && wake_enabled) {
1520 bus_state->bus_suspended = 0;
1509 spin_unlock_irqrestore(&xhci->lock, flags); 1521 spin_unlock_irqrestore(&xhci->lock, flags);
1510 xhci_stop_device(xhci, slot_id, 1); 1522 xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
1511 spin_lock_irqsave(&xhci->lock, flags); 1523 return -EBUSY;
1512 } 1524 }
1525 xhci_dbg(xhci, "port %d not suspended\n", port_index);
1513 t2 &= ~PORT_PLS_MASK; 1526 t2 &= ~PORT_PLS_MASK;
1514 t2 |= PORT_LINK_STROBE | XDEV_U3; 1527 t2 |= PORT_LINK_STROBE | XDEV_U3;
1515 set_bit(port_index, &bus_state->bus_suspended); 1528 set_bit(port_index, &bus_state->bus_suspended);
@@ -1518,7 +1531,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1518 * including the USB 3.0 roothub, but only if CONFIG_PM 1531 * including the USB 3.0 roothub, but only if CONFIG_PM
1519 * is enabled, so also enable remote wake here. 1532 * is enabled, so also enable remote wake here.
1520 */ 1533 */
1521 if (hcd->self.root_hub->do_remote_wakeup) { 1534 if (wake_enabled) {
1522 if (t1 & PORT_CONNECT) { 1535 if (t1 & PORT_CONNECT) {
1523 t2 |= PORT_WKOC_E | PORT_WKDISC_E; 1536 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
1524 t2 &= ~PORT_WKCONN_E; 1537 t2 &= ~PORT_WKCONN_E;
@@ -1538,7 +1551,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1538 1551
1539 t1 = xhci_port_state_to_neutral(t1); 1552 t1 = xhci_port_state_to_neutral(t1);
1540 if (t1 != t2) 1553 if (t1 != t2)
1541 writel(t2, ports[port_index]->addr); 1554 portsc_buf[port_index] = t2;
1555 }
1556
1557 /* write port settings, stopping and suspending ports if needed */
1558 port_index = max_ports;
1559 while (port_index--) {
1560 if (!portsc_buf[port_index])
1561 continue;
1562 if (test_bit(port_index, &bus_state->bus_suspended)) {
1563 int slot_id;
1564
1565 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1566 port_index + 1);
1567 if (slot_id) {
1568 spin_unlock_irqrestore(&xhci->lock, flags);
1569 xhci_stop_device(xhci, slot_id, 1);
1570 spin_lock_irqsave(&xhci->lock, flags);
1571 }
1572 }
1573 writel(portsc_buf[port_index], ports[port_index]->addr);
1542 } 1574 }
1543 hcd->state = HC_STATE_SUSPENDED; 1575 hcd->state = HC_STATE_SUSPENDED;
1544 bus_state->next_statechange = jiffies + msecs_to_jiffies(10); 1576 bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 71d0d33c3286..60987c787e44 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -590,12 +590,14 @@ static int xhci_mtk_remove(struct platform_device *dev)
590 struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev); 590 struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev);
591 struct usb_hcd *hcd = mtk->hcd; 591 struct usb_hcd *hcd = mtk->hcd;
592 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 592 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
593 struct usb_hcd *shared_hcd = xhci->shared_hcd;
593 594
594 usb_remove_hcd(xhci->shared_hcd); 595 usb_remove_hcd(shared_hcd);
596 xhci->shared_hcd = NULL;
595 device_init_wakeup(&dev->dev, false); 597 device_init_wakeup(&dev->dev, false);
596 598
597 usb_remove_hcd(hcd); 599 usb_remove_hcd(hcd);
598 usb_put_hcd(xhci->shared_hcd); 600 usb_put_hcd(shared_hcd);
599 usb_put_hcd(hcd); 601 usb_put_hcd(hcd);
600 xhci_mtk_sch_exit(mtk); 602 xhci_mtk_sch_exit(mtk);
601 xhci_mtk_clks_disable(mtk); 603 xhci_mtk_clks_disable(mtk);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 01c57055c0c5..a9515265db4d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -248,6 +248,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
248 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) 248 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
249 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; 249 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
250 250
251 if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
252 pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
253 pdev->device == 0x9026)
254 xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
255
251 if (xhci->quirks & XHCI_RESET_ON_RESUME) 256 if (xhci->quirks & XHCI_RESET_ON_RESUME)
252 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 257 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
253 "QUIRK: Resetting on resume"); 258 "QUIRK: Resetting on resume");
@@ -380,6 +385,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
380 if (xhci->shared_hcd) { 385 if (xhci->shared_hcd) {
381 usb_remove_hcd(xhci->shared_hcd); 386 usb_remove_hcd(xhci->shared_hcd);
382 usb_put_hcd(xhci->shared_hcd); 387 usb_put_hcd(xhci->shared_hcd);
388 xhci->shared_hcd = NULL;
383 } 389 }
384 390
385 /* Workaround for spurious wakeups at shutdown with HSW */ 391 /* Workaround for spurious wakeups at shutdown with HSW */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 32b5574ad5c5..ef09cb06212f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -362,14 +362,16 @@ static int xhci_plat_remove(struct platform_device *dev)
362 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 362 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
363 struct clk *clk = xhci->clk; 363 struct clk *clk = xhci->clk;
364 struct clk *reg_clk = xhci->reg_clk; 364 struct clk *reg_clk = xhci->reg_clk;
365 struct usb_hcd *shared_hcd = xhci->shared_hcd;
365 366
366 xhci->xhc_state |= XHCI_STATE_REMOVING; 367 xhci->xhc_state |= XHCI_STATE_REMOVING;
367 368
368 usb_remove_hcd(xhci->shared_hcd); 369 usb_remove_hcd(shared_hcd);
370 xhci->shared_hcd = NULL;
369 usb_phy_shutdown(hcd->usb_phy); 371 usb_phy_shutdown(hcd->usb_phy);
370 372
371 usb_remove_hcd(hcd); 373 usb_remove_hcd(hcd);
372 usb_put_hcd(xhci->shared_hcd); 374 usb_put_hcd(shared_hcd);
373 375
374 clk_disable_unprepare(clk); 376 clk_disable_unprepare(clk);
375 clk_disable_unprepare(reg_clk); 377 clk_disable_unprepare(reg_clk);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a8d92c90fb58..65750582133f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1521,6 +1521,35 @@ static void handle_device_notification(struct xhci_hcd *xhci,
1521 usb_wakeup_notification(udev->parent, udev->portnum); 1521 usb_wakeup_notification(udev->parent, udev->portnum);
1522} 1522}
1523 1523
1524/*
1525 * Quirk handler for errata seen on Cavium ThunderX2 processor XHCI
1526 * Controller.
1527 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
1528 * If a connection to a USB 1 device is followed by another connection
1529 * to a USB 2 device.
1530 *
1531 * Reset the PHY after the USB device is disconnected if device speed
1532 * is less than HCD_USB3.
1533 * Retry the reset sequence max of 4 times checking the PLL lock status.
1534 *
1535 */
1536static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
1537{
1538 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1539 u32 pll_lock_check;
1540 u32 retry_count = 4;
1541
1542 do {
1543 /* Assert PHY reset */
1544 writel(0x6F, hcd->regs + 0x1048);
1545 udelay(10);
1546 /* De-assert the PHY reset */
1547 writel(0x7F, hcd->regs + 0x1048);
1548 udelay(200);
1549 pll_lock_check = readl(hcd->regs + 0x1070);
1550 } while (!(pll_lock_check & 0x1) && --retry_count);
1551}
1552
1524static void handle_port_status(struct xhci_hcd *xhci, 1553static void handle_port_status(struct xhci_hcd *xhci,
1525 union xhci_trb *event) 1554 union xhci_trb *event)
1526{ 1555{
@@ -1556,6 +1585,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
1556 goto cleanup; 1585 goto cleanup;
1557 } 1586 }
1558 1587
1588 /* We might get interrupts after shared_hcd is removed */
1589 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
1590 xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
1591 bogus_port_status = true;
1592 goto cleanup;
1593 }
1594
1559 hcd = port->rhub->hcd; 1595 hcd = port->rhub->hcd;
1560 bus_state = &xhci->bus_state[hcd_index(hcd)]; 1596 bus_state = &xhci->bus_state[hcd_index(hcd)];
1561 hcd_portnum = port->hcd_portnum; 1597 hcd_portnum = port->hcd_portnum;
@@ -1639,7 +1675,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
1639 * RExit to a disconnect state). If so, let the the driver know it's 1675 * RExit to a disconnect state). If so, let the the driver know it's
1640 * out of the RExit state. 1676 * out of the RExit state.
1641 */ 1677 */
1642 if (!DEV_SUPERSPEED_ANY(portsc) && 1678 if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
1643 test_and_clear_bit(hcd_portnum, 1679 test_and_clear_bit(hcd_portnum,
1644 &bus_state->rexit_ports)) { 1680 &bus_state->rexit_ports)) {
1645 complete(&bus_state->rexit_done[hcd_portnum]); 1681 complete(&bus_state->rexit_done[hcd_portnum]);
@@ -1647,8 +1683,12 @@ static void handle_port_status(struct xhci_hcd *xhci,
1647 goto cleanup; 1683 goto cleanup;
1648 } 1684 }
1649 1685
1650 if (hcd->speed < HCD_USB3) 1686 if (hcd->speed < HCD_USB3) {
1651 xhci_test_and_clear_bit(xhci, port, PORT_PLC); 1687 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1688 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
1689 (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
1690 xhci_cavium_reset_phy_quirk(xhci);
1691 }
1652 1692
1653cleanup: 1693cleanup:
1654 /* Update event ring dequeue pointer before dropping the lock */ 1694 /* Update event ring dequeue pointer before dropping the lock */
@@ -2266,6 +2306,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2266 goto cleanup; 2306 goto cleanup;
2267 case COMP_RING_UNDERRUN: 2307 case COMP_RING_UNDERRUN:
2268 case COMP_RING_OVERRUN: 2308 case COMP_RING_OVERRUN:
2309 case COMP_STOPPED_LENGTH_INVALID:
2269 goto cleanup; 2310 goto cleanup;
2270 default: 2311 default:
2271 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", 2312 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 6b5db344de30..938ff06c0349 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1303,6 +1303,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
1303 1303
1304 usb_remove_hcd(xhci->shared_hcd); 1304 usb_remove_hcd(xhci->shared_hcd);
1305 usb_put_hcd(xhci->shared_hcd); 1305 usb_put_hcd(xhci->shared_hcd);
1306 xhci->shared_hcd = NULL;
1306 usb_remove_hcd(tegra->hcd); 1307 usb_remove_hcd(tegra->hcd);
1307 usb_put_hcd(tegra->hcd); 1308 usb_put_hcd(tegra->hcd);
1308 1309
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 0420eefa647a..c928dbbff881 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -719,8 +719,6 @@ static void xhci_stop(struct usb_hcd *hcd)
719 719
720 /* Only halt host and free memory after both hcds are removed */ 720 /* Only halt host and free memory after both hcds are removed */
721 if (!usb_hcd_is_primary_hcd(hcd)) { 721 if (!usb_hcd_is_primary_hcd(hcd)) {
722 /* usb core will free this hcd shortly, unset pointer */
723 xhci->shared_hcd = NULL;
724 mutex_unlock(&xhci->mutex); 722 mutex_unlock(&xhci->mutex);
725 return; 723 return;
726 } 724 }
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index bf0b3692dc9a..260b259b72bc 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1680,7 +1680,7 @@ struct xhci_bus_state {
1680 * It can take up to 20 ms to transition from RExit to U0 on the 1680 * It can take up to 20 ms to transition from RExit to U0 on the
1681 * Intel Lynx Point LP xHCI host. 1681 * Intel Lynx Point LP xHCI host.
1682 */ 1682 */
1683#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000) 1683#define XHCI_MAX_REXIT_TIMEOUT_MS 20
1684 1684
1685static inline unsigned int hcd_index(struct usb_hcd *hcd) 1685static inline unsigned int hcd_index(struct usb_hcd *hcd)
1686{ 1686{
@@ -1849,6 +1849,7 @@ struct xhci_hcd {
1849#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) 1849#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
1850#define XHCI_ZERO_64B_REGS BIT_ULL(32) 1850#define XHCI_ZERO_64B_REGS BIT_ULL(32)
1851#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) 1851#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
1852#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
1852 1853
1853 unsigned int num_active_eps; 1854 unsigned int num_active_eps;
1854 unsigned int limit_active_eps; 1855 unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index bd539f3058bc..85b48c6ddc7e 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -50,6 +50,7 @@ static const struct usb_device_id appledisplay_table[] = {
50 { APPLEDISPLAY_DEVICE(0x9219) }, 50 { APPLEDISPLAY_DEVICE(0x9219) },
51 { APPLEDISPLAY_DEVICE(0x921c) }, 51 { APPLEDISPLAY_DEVICE(0x921c) },
52 { APPLEDISPLAY_DEVICE(0x921d) }, 52 { APPLEDISPLAY_DEVICE(0x921d) },
53 { APPLEDISPLAY_DEVICE(0x9222) },
53 { APPLEDISPLAY_DEVICE(0x9236) }, 54 { APPLEDISPLAY_DEVICE(0x9236) },
54 55
55 /* Terminating entry */ 56 /* Terminating entry */
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index d17cd95b55bb..6b2140f966ef 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -27,4 +27,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
27 "USB Card Reader", 27 "USB Card Reader",
28 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), 28 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
29 29
30UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
31 "Realtek",
32 "USB Card Reader",
33 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
34
35UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
36 "Realtek",
37 "USB Card Reader",
38 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
39
30#endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */ 40#endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index e36d6c73c4a4..78118883f96c 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -23,6 +23,16 @@ config TYPEC_UCSI
23 23
24if TYPEC_UCSI 24if TYPEC_UCSI
25 25
26config UCSI_CCG
27 tristate "UCSI Interface Driver for Cypress CCGx"
28 depends on I2C
29 help
30 This driver enables UCSI support on platforms that expose a
31 Cypress CCGx Type-C controller over I2C interface.
32
33 To compile the driver as a module, choose M here: the module will be
34 called ucsi_ccg.
35
26config UCSI_ACPI 36config UCSI_ACPI
27 tristate "UCSI ACPI Interface Driver" 37 tristate "UCSI ACPI Interface Driver"
28 depends on ACPI 38 depends on ACPI
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index 7afbea512207..2f4900b26210 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -8,3 +8,5 @@ typec_ucsi-y := ucsi.o
8typec_ucsi-$(CONFIG_TRACING) += trace.o 8typec_ucsi-$(CONFIG_TRACING) += trace.o
9 9
10obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o 10obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
11
12obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
new file mode 100644
index 000000000000..de8a43bdff68
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -0,0 +1,307 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * UCSI driver for Cypress CCGx Type-C controller
4 *
5 * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
6 * Author: Ajay Gupta <ajayg@nvidia.com>
7 *
8 * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
9 */
10#include <linux/acpi.h>
11#include <linux/delay.h>
12#include <linux/i2c.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/platform_device.h>
16
17#include <asm/unaligned.h>
18#include "ucsi.h"
19
20struct ucsi_ccg {
21 struct device *dev;
22 struct ucsi *ucsi;
23 struct ucsi_ppm ppm;
24 struct i2c_client *client;
25};
26
27#define CCGX_RAB_INTR_REG 0x06
28#define CCGX_RAB_UCSI_CONTROL 0x39
29#define CCGX_RAB_UCSI_CONTROL_START BIT(0)
30#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
31#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
32
33static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
34{
35 struct i2c_client *client = uc->client;
36 const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
37 unsigned char buf[2];
38 struct i2c_msg msgs[] = {
39 {
40 .addr = client->addr,
41 .flags = 0x0,
42 .len = sizeof(buf),
43 .buf = buf,
44 },
45 {
46 .addr = client->addr,
47 .flags = I2C_M_RD,
48 .buf = data,
49 },
50 };
51 u32 rlen, rem_len = len, max_read_len = len;
52 int status;
53
54 /* check any max_read_len limitation on i2c adapter */
55 if (quirks && quirks->max_read_len)
56 max_read_len = quirks->max_read_len;
57
58 while (rem_len > 0) {
59 msgs[1].buf = &data[len - rem_len];
60 rlen = min_t(u16, rem_len, max_read_len);
61 msgs[1].len = rlen;
62 put_unaligned_le16(rab, buf);
63 status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
64 if (status < 0) {
65 dev_err(uc->dev, "i2c_transfer failed %d\n", status);
66 return status;
67 }
68 rab += rlen;
69 rem_len -= rlen;
70 }
71
72 return 0;
73}
74
75static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
76{
77 struct i2c_client *client = uc->client;
78 unsigned char *buf;
79 struct i2c_msg msgs[] = {
80 {
81 .addr = client->addr,
82 .flags = 0x0,
83 }
84 };
85 int status;
86
87 buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
88 if (!buf)
89 return -ENOMEM;
90
91 put_unaligned_le16(rab, buf);
92 memcpy(buf + sizeof(rab), data, len);
93
94 msgs[0].len = len + sizeof(rab);
95 msgs[0].buf = buf;
96
97 status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
98 if (status < 0) {
99 dev_err(uc->dev, "i2c_transfer failed %d\n", status);
100 kfree(buf);
101 return status;
102 }
103
104 kfree(buf);
105 return 0;
106}
107
108static int ucsi_ccg_init(struct ucsi_ccg *uc)
109{
110 unsigned int count = 10;
111 u8 data;
112 int status;
113
114 data = CCGX_RAB_UCSI_CONTROL_STOP;
115 status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
116 if (status < 0)
117 return status;
118
119 data = CCGX_RAB_UCSI_CONTROL_START;
120 status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
121 if (status < 0)
122 return status;
123
124 /*
125 * Flush CCGx RESPONSE queue by acking interrupts. Above ucsi control
126 * register write will push response which must be cleared.
127 */
128 do {
129 status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
130 if (status < 0)
131 return status;
132
133 if (!data)
134 return 0;
135
136 status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
137 if (status < 0)
138 return status;
139
140 usleep_range(10000, 11000);
141 } while (--count);
142
143 return -ETIMEDOUT;
144}
145
146static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
147{
148 u8 *ppm = (u8 *)uc->ppm.data;
149 int status;
150 u16 rab;
151
152 rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
153 status = ccg_write(uc, rab, ppm +
154 offsetof(struct ucsi_data, message_out),
155 sizeof(uc->ppm.data->message_out));
156 if (status < 0)
157 return status;
158
159 rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
160 return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
161 sizeof(uc->ppm.data->ctrl));
162}
163
164static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
165{
166 u8 *ppm = (u8 *)uc->ppm.data;
167 int status;
168 u16 rab;
169
170 rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
171 status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
172 sizeof(uc->ppm.data->cci));
173 if (status < 0)
174 return status;
175
176 rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
177 return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
178 sizeof(uc->ppm.data->message_in));
179}
180
181static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
182{
183 int status;
184 unsigned char data;
185
186 status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
187 if (status < 0)
188 return status;
189
190 return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
191}
192
193static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
194{
195 struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
196 int status;
197
198 status = ucsi_ccg_recv_data(uc);
199 if (status < 0)
200 return status;
201
202 /* ack interrupt to allow next command to run */
203 return ucsi_ccg_ack_interrupt(uc);
204}
205
206static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
207{
208 struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
209
210 ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
211 return ucsi_ccg_send_data(uc);
212}
213
214static irqreturn_t ccg_irq_handler(int irq, void *data)
215{
216 struct ucsi_ccg *uc = data;
217
218 ucsi_notify(uc->ucsi);
219
220 return IRQ_HANDLED;
221}
222
223static int ucsi_ccg_probe(struct i2c_client *client,
224 const struct i2c_device_id *id)
225{
226 struct device *dev = &client->dev;
227 struct ucsi_ccg *uc;
228 int status;
229 u16 rab;
230
231 uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
232 if (!uc)
233 return -ENOMEM;
234
235 uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
236 if (!uc->ppm.data)
237 return -ENOMEM;
238
239 uc->ppm.cmd = ucsi_ccg_cmd;
240 uc->ppm.sync = ucsi_ccg_sync;
241 uc->dev = dev;
242 uc->client = client;
243
244 /* reset ccg device and initialize ucsi */
245 status = ucsi_ccg_init(uc);
246 if (status < 0) {
247 dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
248 return status;
249 }
250
251 status = devm_request_threaded_irq(dev, client->irq, NULL,
252 ccg_irq_handler,
253 IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
254 dev_name(dev), uc);
255 if (status < 0) {
256 dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
257 return status;
258 }
259
260 uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
261 if (IS_ERR(uc->ucsi)) {
262 dev_err(uc->dev, "ucsi_register_ppm failed\n");
263 return PTR_ERR(uc->ucsi);
264 }
265
266 rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
267 status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
268 offsetof(struct ucsi_data, version),
269 sizeof(uc->ppm.data->version));
270 if (status < 0) {
271 ucsi_unregister_ppm(uc->ucsi);
272 return status;
273 }
274
275 i2c_set_clientdata(client, uc);
276 return 0;
277}
278
279static int ucsi_ccg_remove(struct i2c_client *client)
280{
281 struct ucsi_ccg *uc = i2c_get_clientdata(client);
282
283 ucsi_unregister_ppm(uc->ucsi);
284
285 return 0;
286}
287
288static const struct i2c_device_id ucsi_ccg_device_id[] = {
289 {"ccgx-ucsi", 0},
290 {}
291};
292MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
293
294static struct i2c_driver ucsi_ccg_driver = {
295 .driver = {
296 .name = "ucsi_ccg",
297 },
298 .probe = ucsi_ccg_probe,
299 .remove = ucsi_ccg_remove,
300 .id_table = ucsi_ccg_device_id,
301};
302
303module_i2c_driver(ucsi_ccg_driver);
304
305MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
306MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
307MODULE_LICENSE("GPL v2");
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fdfc64f5acea..221b7333d067 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
251 kfree(resource); 251 kfree(resource);
252} 252}
253 253
254/*
255 * Host memory not allocated to dom0. We can use this range for hotplug-based
256 * ballooning.
257 *
258 * It's a type-less resource. Setting IORESOURCE_MEM will make resource
259 * management algorithms (arch_remove_reservations()) look into guest e820,
260 * which we don't want.
261 */
262static struct resource hostmem_resource = {
263 .name = "Host RAM",
264};
265
266void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
267{}
268
269static struct resource *additional_memory_resource(phys_addr_t size) 254static struct resource *additional_memory_resource(phys_addr_t size)
270{ 255{
271 struct resource *res, *res_hostmem; 256 struct resource *res;
272 int ret = -ENOMEM; 257 int ret;
273 258
274 res = kzalloc(sizeof(*res), GFP_KERNEL); 259 res = kzalloc(sizeof(*res), GFP_KERNEL);
275 if (!res) 260 if (!res)
@@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
278 res->name = "System RAM"; 263 res->name = "System RAM";
279 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 264 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
280 265
281 res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL); 266 ret = allocate_resource(&iomem_resource, res,
282 if (res_hostmem) { 267 size, 0, -1,
283 /* Try to grab a range from hostmem */ 268 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
284 res_hostmem->name = "Host memory"; 269 if (ret < 0) {
285 ret = allocate_resource(&hostmem_resource, res_hostmem, 270 pr_err("Cannot allocate new System RAM resource\n");
286 size, 0, -1, 271 kfree(res);
287 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); 272 return NULL;
288 }
289
290 if (!ret) {
291 /*
292 * Insert this resource into iomem. Because hostmem_resource
293 * tracks portion of guest e820 marked as UNUSABLE noone else
294 * should try to use it.
295 */
296 res->start = res_hostmem->start;
297 res->end = res_hostmem->end;
298 ret = insert_resource(&iomem_resource, res);
299 if (ret < 0) {
300 pr_err("Can't insert iomem_resource [%llx - %llx]\n",
301 res->start, res->end);
302 release_memory_resource(res_hostmem);
303 res_hostmem = NULL;
304 res->start = res->end = 0;
305 }
306 }
307
308 if (ret) {
309 ret = allocate_resource(&iomem_resource, res,
310 size, 0, -1,
311 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
312 if (ret < 0) {
313 pr_err("Cannot allocate new System RAM resource\n");
314 kfree(res);
315 return NULL;
316 }
317 } 273 }
318 274
319#ifdef CONFIG_SPARSEMEM 275#ifdef CONFIG_SPARSEMEM
@@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
325 pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", 281 pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
326 pfn, limit); 282 pfn, limit);
327 release_memory_resource(res); 283 release_memory_resource(res);
328 release_memory_resource(res_hostmem);
329 return NULL; 284 return NULL;
330 } 285 }
331 } 286 }
@@ -750,8 +705,6 @@ static int __init balloon_init(void)
750 set_online_page_callback(&xen_online_page); 705 set_online_page_callback(&xen_online_page);
751 register_memory_notifier(&xen_memory_nb); 706 register_memory_notifier(&xen_memory_nb);
752 register_sysctl_table(xen_root); 707 register_sysctl_table(xen_root);
753
754 arch_xen_balloon_init(&hostmem_resource);
755#endif 708#endif
756 709
757#ifdef CONFIG_XEN_PV 710#ifdef CONFIG_XEN_PV
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f15f89df1f36..7ea6fb6a2e5d 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
914 914
915 ret = xenmem_reservation_increase(args->nr_pages, args->frames); 915 ret = xenmem_reservation_increase(args->nr_pages, args->frames);
916 if (ret != args->nr_pages) { 916 if (ret != args->nr_pages) {
917 pr_debug("Failed to decrease reservation for DMA buffer\n"); 917 pr_debug("Failed to increase reservation for DMA buffer\n");
918 ret = -EFAULT; 918 ret = -EFAULT;
919 } else { 919 } else {
920 ret = 0; 920 ret = 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index df1ed37c3269..de01a6d0059d 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -21,15 +21,9 @@
21 21
22MODULE_LICENSE("GPL"); 22MODULE_LICENSE("GPL");
23 23
24static unsigned int limit = 64;
25module_param(limit, uint, 0644);
26MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
27 "the privcmd-buf device per open file");
28
29struct privcmd_buf_private { 24struct privcmd_buf_private {
30 struct mutex lock; 25 struct mutex lock;
31 struct list_head list; 26 struct list_head list;
32 unsigned int allocated;
33}; 27};
34 28
35struct privcmd_buf_vma_private { 29struct privcmd_buf_vma_private {
@@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
60{ 54{
61 unsigned int i; 55 unsigned int i;
62 56
63 vma_priv->file_priv->allocated -= vma_priv->n_pages;
64
65 list_del(&vma_priv->list); 57 list_del(&vma_priv->list);
66 58
67 for (i = 0; i < vma_priv->n_pages; i++) 59 for (i = 0; i < vma_priv->n_pages; i++)
68 if (vma_priv->pages[i]) 60 __free_page(vma_priv->pages[i]);
69 __free_page(vma_priv->pages[i]);
70 61
71 kfree(vma_priv); 62 kfree(vma_priv);
72} 63}
@@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
146 unsigned int i; 137 unsigned int i;
147 int ret = 0; 138 int ret = 0;
148 139
149 if (!(vma->vm_flags & VM_SHARED) || count > limit || 140 if (!(vma->vm_flags & VM_SHARED))
150 file_priv->allocated + count > limit)
151 return -EINVAL; 141 return -EINVAL;
152 142
153 vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *), 143 vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
@@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
155 if (!vma_priv) 145 if (!vma_priv)
156 return -ENOMEM; 146 return -ENOMEM;
157 147
158 vma_priv->n_pages = count; 148 for (i = 0; i < count; i++) {
159 count = 0;
160 for (i = 0; i < vma_priv->n_pages; i++) {
161 vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); 149 vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
162 if (!vma_priv->pages[i]) 150 if (!vma_priv->pages[i])
163 break; 151 break;
164 count++; 152 vma_priv->n_pages++;
165 } 153 }
166 154
167 mutex_lock(&file_priv->lock); 155 mutex_lock(&file_priv->lock);
168 156
169 file_priv->allocated += count;
170
171 vma_priv->file_priv = file_priv; 157 vma_priv->file_priv = file_priv;
172 vma_priv->users = 1; 158 vma_priv->users = 1;
173 159
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 2f11ca72a281..77224d8f3e6f 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
385out_error: 385out_error:
386 if (*evtchn >= 0) 386 if (*evtchn >= 0)
387 xenbus_free_evtchn(pvcalls_front_dev, *evtchn); 387 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
388 kfree(map->active.data.in); 388 free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
389 kfree(map->active.ring); 389 free_page((unsigned long)map->active.ring);
390 return ret; 390 return ret;
391} 391}
392 392
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 23f1387b3ef7..e7df65d32c91 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -36,6 +36,7 @@
36#include <asm/xen/hypervisor.h> 36#include <asm/xen/hypervisor.h>
37 37
38#include <xen/xen.h> 38#include <xen/xen.h>
39#include <xen/xen-ops.h>
39#include <xen/page.h> 40#include <xen/page.h>
40#include <xen/interface/xen.h> 41#include <xen/interface/xen.h>
41#include <xen/interface/memory.h> 42#include <xen/interface/memory.h>
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 43dea3b00c29..8a2562e3a316 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1075,8 +1075,6 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
1075 if (fc->ac.error < 0) 1075 if (fc->ac.error < 0)
1076 return; 1076 return;
1077 1077
1078 d_drop(new_dentry);
1079
1080 inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key, 1078 inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
1081 newfid, newstatus, newcb, fc->cbi); 1079 newfid, newstatus, newcb, fc->cbi);
1082 if (IS_ERR(inode)) { 1080 if (IS_ERR(inode)) {
@@ -1090,7 +1088,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
1090 vnode = AFS_FS_I(inode); 1088 vnode = AFS_FS_I(inode);
1091 set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); 1089 set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
1092 afs_vnode_commit_status(fc, vnode, 0); 1090 afs_vnode_commit_status(fc, vnode, 0);
1093 d_add(new_dentry, inode); 1091 d_instantiate(new_dentry, inode);
1094} 1092}
1095 1093
1096/* 1094/*
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index d049cb459742..fde6b4d4121e 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -61,8 +61,11 @@ void afs_fileserver_probe_result(struct afs_call *call)
61 afs_io_error(call, afs_io_error_fs_probe_fail); 61 afs_io_error(call, afs_io_error_fs_probe_fail);
62 goto out; 62 goto out;
63 case -ECONNRESET: /* Responded, but call expired. */ 63 case -ECONNRESET: /* Responded, but call expired. */
64 case -ERFKILL:
65 case -EADDRNOTAVAIL:
64 case -ENETUNREACH: 66 case -ENETUNREACH:
65 case -EHOSTUNREACH: 67 case -EHOSTUNREACH:
68 case -EHOSTDOWN:
66 case -ECONNREFUSED: 69 case -ECONNREFUSED:
67 case -ETIMEDOUT: 70 case -ETIMEDOUT:
68 case -ETIME: 71 case -ETIME:
@@ -132,12 +135,14 @@ out:
132static int afs_do_probe_fileserver(struct afs_net *net, 135static int afs_do_probe_fileserver(struct afs_net *net,
133 struct afs_server *server, 136 struct afs_server *server,
134 struct key *key, 137 struct key *key,
135 unsigned int server_index) 138 unsigned int server_index,
139 struct afs_error *_e)
136{ 140{
137 struct afs_addr_cursor ac = { 141 struct afs_addr_cursor ac = {
138 .index = 0, 142 .index = 0,
139 }; 143 };
140 int ret; 144 bool in_progress = false;
145 int err;
141 146
142 _enter("%pU", &server->uuid); 147 _enter("%pU", &server->uuid);
143 148
@@ -151,15 +156,17 @@ static int afs_do_probe_fileserver(struct afs_net *net,
151 server->probe.rtt = UINT_MAX; 156 server->probe.rtt = UINT_MAX;
152 157
153 for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) { 158 for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
154 ret = afs_fs_get_capabilities(net, server, &ac, key, server_index, 159 err = afs_fs_get_capabilities(net, server, &ac, key, server_index,
155 true); 160 true);
156 if (ret != -EINPROGRESS) { 161 if (err == -EINPROGRESS)
157 afs_fs_probe_done(server); 162 in_progress = true;
158 return ret; 163 else
159 } 164 afs_prioritise_error(_e, err, ac.abort_code);
160 } 165 }
161 166
162 return 0; 167 if (!in_progress)
168 afs_fs_probe_done(server);
169 return in_progress;
163} 170}
164 171
165/* 172/*
@@ -169,21 +176,23 @@ int afs_probe_fileservers(struct afs_net *net, struct key *key,
169 struct afs_server_list *list) 176 struct afs_server_list *list)
170{ 177{
171 struct afs_server *server; 178 struct afs_server *server;
172 int i, ret; 179 struct afs_error e;
180 bool in_progress = false;
181 int i;
173 182
183 e.error = 0;
184 e.responded = false;
174 for (i = 0; i < list->nr_servers; i++) { 185 for (i = 0; i < list->nr_servers; i++) {
175 server = list->servers[i].server; 186 server = list->servers[i].server;
176 if (test_bit(AFS_SERVER_FL_PROBED, &server->flags)) 187 if (test_bit(AFS_SERVER_FL_PROBED, &server->flags))
177 continue; 188 continue;
178 189
179 if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags)) { 190 if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags) &&
180 ret = afs_do_probe_fileserver(net, server, key, i); 191 afs_do_probe_fileserver(net, server, key, i, &e))
181 if (ret) 192 in_progress = true;
182 return ret;
183 }
184 } 193 }
185 194
186 return 0; 195 return in_progress ? 0 : e.error;
187} 196}
188 197
189/* 198/*
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 4c6d8e1112c2..6b17d3620414 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -382,7 +382,7 @@ void afs_zap_data(struct afs_vnode *vnode)
382int afs_validate(struct afs_vnode *vnode, struct key *key) 382int afs_validate(struct afs_vnode *vnode, struct key *key)
383{ 383{
384 time64_t now = ktime_get_real_seconds(); 384 time64_t now = ktime_get_real_seconds();
385 bool valid = false; 385 bool valid;
386 int ret; 386 int ret;
387 387
388 _enter("{v={%llx:%llu} fl=%lx},%x", 388 _enter("{v={%llx:%llu} fl=%lx},%x",
@@ -402,15 +402,21 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
402 vnode->cb_v_break = vnode->volume->cb_v_break; 402 vnode->cb_v_break = vnode->volume->cb_v_break;
403 valid = false; 403 valid = false;
404 } else if (vnode->status.type == AFS_FTYPE_DIR && 404 } else if (vnode->status.type == AFS_FTYPE_DIR &&
405 test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) && 405 (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
406 vnode->cb_expires_at - 10 > now) { 406 vnode->cb_expires_at - 10 <= now)) {
407 valid = true; 407 valid = false;
408 } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) && 408 } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
409 vnode->cb_expires_at - 10 > now) { 409 vnode->cb_expires_at - 10 <= now) {
410 valid = false;
411 } else {
410 valid = true; 412 valid = true;
411 } 413 }
412 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { 414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
413 valid = true; 415 valid = true;
416 } else {
417 vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
418 vnode->cb_v_break = vnode->volume->cb_v_break;
419 valid = false;
414 } 420 }
415 421
416 read_sequnlock_excl(&vnode->cb_lock); 422 read_sequnlock_excl(&vnode->cb_lock);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5da3b09b7518..8871b9e8645f 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -696,6 +696,14 @@ struct afs_interface {
696}; 696};
697 697
698/* 698/*
699 * Error prioritisation and accumulation.
700 */
701struct afs_error {
702 short error; /* Accumulated error */
703 bool responded; /* T if server responded */
704};
705
706/*
699 * Cursor for iterating over a server's address list. 707 * Cursor for iterating over a server's address list.
700 */ 708 */
701struct afs_addr_cursor { 709struct afs_addr_cursor {
@@ -1015,6 +1023,7 @@ static inline void __afs_stat(atomic_t *s)
1015 * misc.c 1023 * misc.c
1016 */ 1024 */
1017extern int afs_abort_to_error(u32); 1025extern int afs_abort_to_error(u32);
1026extern void afs_prioritise_error(struct afs_error *, int, u32);
1018 1027
1019/* 1028/*
1020 * mntpt.c 1029 * mntpt.c
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 700a5fa7f4ec..bbb1fd51b019 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -118,3 +118,55 @@ int afs_abort_to_error(u32 abort_code)
118 default: return -EREMOTEIO; 118 default: return -EREMOTEIO;
119 } 119 }
120} 120}
121
122/*
123 * Select the error to report from a set of errors.
124 */
125void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
126{
127 switch (error) {
128 case 0:
129 return;
130 default:
131 if (e->error == -ETIMEDOUT ||
132 e->error == -ETIME)
133 return;
134 case -ETIMEDOUT:
135 case -ETIME:
136 if (e->error == -ENOMEM ||
137 e->error == -ENONET)
138 return;
139 case -ENOMEM:
140 case -ENONET:
141 if (e->error == -ERFKILL)
142 return;
143 case -ERFKILL:
144 if (e->error == -EADDRNOTAVAIL)
145 return;
146 case -EADDRNOTAVAIL:
147 if (e->error == -ENETUNREACH)
148 return;
149 case -ENETUNREACH:
150 if (e->error == -EHOSTUNREACH)
151 return;
152 case -EHOSTUNREACH:
153 if (e->error == -EHOSTDOWN)
154 return;
155 case -EHOSTDOWN:
156 if (e->error == -ECONNREFUSED)
157 return;
158 case -ECONNREFUSED:
159 if (e->error == -ECONNRESET)
160 return;
161 case -ECONNRESET: /* Responded, but call expired. */
162 if (e->responded)
163 return;
164 e->error = error;
165 return;
166
167 case -ECONNABORTED:
168 e->responded = true;
169 e->error = afs_abort_to_error(abort_code);
170 return;
171 }
172}
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 00504254c1c2..c3ae324781f8 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -136,7 +136,8 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
136 struct afs_addr_list *alist; 136 struct afs_addr_list *alist;
137 struct afs_server *server; 137 struct afs_server *server;
138 struct afs_vnode *vnode = fc->vnode; 138 struct afs_vnode *vnode = fc->vnode;
139 u32 rtt, abort_code; 139 struct afs_error e;
140 u32 rtt;
140 int error = fc->ac.error, i; 141 int error = fc->ac.error, i;
141 142
142 _enter("%lx[%d],%lx[%d],%d,%d", 143 _enter("%lx[%d],%lx[%d],%d,%d",
@@ -306,8 +307,11 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
306 if (fc->error != -EDESTADDRREQ) 307 if (fc->error != -EDESTADDRREQ)
307 goto iterate_address; 308 goto iterate_address;
308 /* Fall through */ 309 /* Fall through */
310 case -ERFKILL:
311 case -EADDRNOTAVAIL:
309 case -ENETUNREACH: 312 case -ENETUNREACH:
310 case -EHOSTUNREACH: 313 case -EHOSTUNREACH:
314 case -EHOSTDOWN:
311 case -ECONNREFUSED: 315 case -ECONNREFUSED:
312 _debug("no conn"); 316 _debug("no conn");
313 fc->error = error; 317 fc->error = error;
@@ -446,50 +450,15 @@ no_more_servers:
446 if (fc->flags & AFS_FS_CURSOR_VBUSY) 450 if (fc->flags & AFS_FS_CURSOR_VBUSY)
447 goto restart_from_beginning; 451 goto restart_from_beginning;
448 452
449 abort_code = 0; 453 e.error = -EDESTADDRREQ;
450 error = -EDESTADDRREQ; 454 e.responded = false;
451 for (i = 0; i < fc->server_list->nr_servers; i++) { 455 for (i = 0; i < fc->server_list->nr_servers; i++) {
452 struct afs_server *s = fc->server_list->servers[i].server; 456 struct afs_server *s = fc->server_list->servers[i].server;
453 int probe_error = READ_ONCE(s->probe.error);
454 457
455 switch (probe_error) { 458 afs_prioritise_error(&e, READ_ONCE(s->probe.error),
456 case 0: 459 s->probe.abort_code);
457 continue;
458 default:
459 if (error == -ETIMEDOUT ||
460 error == -ETIME)
461 continue;
462 case -ETIMEDOUT:
463 case -ETIME:
464 if (error == -ENOMEM ||
465 error == -ENONET)
466 continue;
467 case -ENOMEM:
468 case -ENONET:
469 if (error == -ENETUNREACH)
470 continue;
471 case -ENETUNREACH:
472 if (error == -EHOSTUNREACH)
473 continue;
474 case -EHOSTUNREACH:
475 if (error == -ECONNREFUSED)
476 continue;
477 case -ECONNREFUSED:
478 if (error == -ECONNRESET)
479 continue;
480 case -ECONNRESET: /* Responded, but call expired. */
481 if (error == -ECONNABORTED)
482 continue;
483 case -ECONNABORTED:
484 abort_code = s->probe.abort_code;
485 error = probe_error;
486 continue;
487 }
488 } 460 }
489 461
490 if (error == -ECONNABORTED)
491 error = afs_abort_to_error(abort_code);
492
493failed_set_error: 462failed_set_error:
494 fc->error = error; 463 fc->error = error;
495failed: 464failed:
@@ -553,8 +522,11 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
553 _leave(" = f [abort]"); 522 _leave(" = f [abort]");
554 return false; 523 return false;
555 524
525 case -ERFKILL:
526 case -EADDRNOTAVAIL:
556 case -ENETUNREACH: 527 case -ENETUNREACH:
557 case -EHOSTUNREACH: 528 case -EHOSTUNREACH:
529 case -EHOSTDOWN:
558 case -ECONNREFUSED: 530 case -ECONNREFUSED:
559 case -ETIMEDOUT: 531 case -ETIMEDOUT:
560 case -ETIME: 532 case -ETIME:
@@ -633,6 +605,7 @@ int afs_end_vnode_operation(struct afs_fs_cursor *fc)
633 struct afs_net *net = afs_v2net(fc->vnode); 605 struct afs_net *net = afs_v2net(fc->vnode);
634 606
635 if (fc->error == -EDESTADDRREQ || 607 if (fc->error == -EDESTADDRREQ ||
608 fc->error == -EADDRNOTAVAIL ||
636 fc->error == -ENETUNREACH || 609 fc->error == -ENETUNREACH ||
637 fc->error == -EHOSTUNREACH) 610 fc->error == -EHOSTUNREACH)
638 afs_dump_edestaddrreq(fc); 611 afs_dump_edestaddrreq(fc);
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 59970886690f..a7b44863d502 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -576,6 +576,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
576{ 576{
577 signed long rtt2, timeout; 577 signed long rtt2, timeout;
578 long ret; 578 long ret;
579 bool stalled = false;
579 u64 rtt; 580 u64 rtt;
580 u32 life, last_life; 581 u32 life, last_life;
581 582
@@ -609,12 +610,20 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
609 610
610 life = rxrpc_kernel_check_life(call->net->socket, call->rxcall); 611 life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
611 if (timeout == 0 && 612 if (timeout == 0 &&
612 life == last_life && signal_pending(current)) 613 life == last_life && signal_pending(current)) {
614 if (stalled)
613 break; 615 break;
616 __set_current_state(TASK_RUNNING);
617 rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
618 timeout = rtt2;
619 stalled = true;
620 continue;
621 }
614 622
615 if (life != last_life) { 623 if (life != last_life) {
616 timeout = rtt2; 624 timeout = rtt2;
617 last_life = life; 625 last_life = life;
626 stalled = false;
618 } 627 }
619 628
620 timeout = schedule_timeout(timeout); 629 timeout = schedule_timeout(timeout);
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
index c0f616bd70cb..f0b032976487 100644
--- a/fs/afs/vl_probe.c
+++ b/fs/afs/vl_probe.c
@@ -61,8 +61,11 @@ void afs_vlserver_probe_result(struct afs_call *call)
61 afs_io_error(call, afs_io_error_vl_probe_fail); 61 afs_io_error(call, afs_io_error_vl_probe_fail);
62 goto out; 62 goto out;
63 case -ECONNRESET: /* Responded, but call expired. */ 63 case -ECONNRESET: /* Responded, but call expired. */
64 case -ERFKILL:
65 case -EADDRNOTAVAIL:
64 case -ENETUNREACH: 66 case -ENETUNREACH:
65 case -EHOSTUNREACH: 67 case -EHOSTUNREACH:
68 case -EHOSTDOWN:
66 case -ECONNREFUSED: 69 case -ECONNREFUSED:
67 case -ETIMEDOUT: 70 case -ETIMEDOUT:
68 case -ETIME: 71 case -ETIME:
@@ -129,15 +132,17 @@ out:
129 * Probe all of a vlserver's addresses to find out the best route and to 132 * Probe all of a vlserver's addresses to find out the best route and to
130 * query its capabilities. 133 * query its capabilities.
131 */ 134 */
132static int afs_do_probe_vlserver(struct afs_net *net, 135static bool afs_do_probe_vlserver(struct afs_net *net,
133 struct afs_vlserver *server, 136 struct afs_vlserver *server,
134 struct key *key, 137 struct key *key,
135 unsigned int server_index) 138 unsigned int server_index,
139 struct afs_error *_e)
136{ 140{
137 struct afs_addr_cursor ac = { 141 struct afs_addr_cursor ac = {
138 .index = 0, 142 .index = 0,
139 }; 143 };
140 int ret; 144 bool in_progress = false;
145 int err;
141 146
142 _enter("%s", server->name); 147 _enter("%s", server->name);
143 148
@@ -151,15 +156,17 @@ static int afs_do_probe_vlserver(struct afs_net *net,
151 server->probe.rtt = UINT_MAX; 156 server->probe.rtt = UINT_MAX;
152 157
153 for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) { 158 for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
154 ret = afs_vl_get_capabilities(net, &ac, key, server, 159 err = afs_vl_get_capabilities(net, &ac, key, server,
155 server_index, true); 160 server_index, true);
156 if (ret != -EINPROGRESS) { 161 if (err == -EINPROGRESS)
157 afs_vl_probe_done(server); 162 in_progress = true;
158 return ret; 163 else
159 } 164 afs_prioritise_error(_e, err, ac.abort_code);
160 } 165 }
161 166
162 return 0; 167 if (!in_progress)
168 afs_vl_probe_done(server);
169 return in_progress;
163} 170}
164 171
165/* 172/*
@@ -169,21 +176,23 @@ int afs_send_vl_probes(struct afs_net *net, struct key *key,
169 struct afs_vlserver_list *vllist) 176 struct afs_vlserver_list *vllist)
170{ 177{
171 struct afs_vlserver *server; 178 struct afs_vlserver *server;
172 int i, ret; 179 struct afs_error e;
180 bool in_progress = false;
181 int i;
173 182
183 e.error = 0;
184 e.responded = false;
174 for (i = 0; i < vllist->nr_servers; i++) { 185 for (i = 0; i < vllist->nr_servers; i++) {
175 server = vllist->servers[i].server; 186 server = vllist->servers[i].server;
176 if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags)) 187 if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
177 continue; 188 continue;
178 189
179 if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags)) { 190 if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags) &&
180 ret = afs_do_probe_vlserver(net, server, key, i); 191 afs_do_probe_vlserver(net, server, key, i, &e))
181 if (ret) 192 in_progress = true;
182 return ret;
183 }
184 } 193 }
185 194
186 return 0; 195 return in_progress ? 0 : e.error;
187} 196}
188 197
189/* 198/*
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index b64a284b99d2..7adde83a0648 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -71,8 +71,9 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
71{ 71{
72 struct afs_addr_list *alist; 72 struct afs_addr_list *alist;
73 struct afs_vlserver *vlserver; 73 struct afs_vlserver *vlserver;
74 struct afs_error e;
74 u32 rtt; 75 u32 rtt;
75 int error = vc->ac.error, abort_code, i; 76 int error = vc->ac.error, i;
76 77
77 _enter("%lx[%d],%lx[%d],%d,%d", 78 _enter("%lx[%d],%lx[%d],%d,%d",
78 vc->untried, vc->index, 79 vc->untried, vc->index,
@@ -119,8 +120,11 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
119 goto failed; 120 goto failed;
120 } 121 }
121 122
123 case -ERFKILL:
124 case -EADDRNOTAVAIL:
122 case -ENETUNREACH: 125 case -ENETUNREACH:
123 case -EHOSTUNREACH: 126 case -EHOSTUNREACH:
127 case -EHOSTDOWN:
124 case -ECONNREFUSED: 128 case -ECONNREFUSED:
125 case -ETIMEDOUT: 129 case -ETIMEDOUT:
126 case -ETIME: 130 case -ETIME:
@@ -235,50 +239,15 @@ no_more_servers:
235 if (vc->flags & AFS_VL_CURSOR_RETRY) 239 if (vc->flags & AFS_VL_CURSOR_RETRY)
236 goto restart_from_beginning; 240 goto restart_from_beginning;
237 241
238 abort_code = 0; 242 e.error = -EDESTADDRREQ;
239 error = -EDESTADDRREQ; 243 e.responded = false;
240 for (i = 0; i < vc->server_list->nr_servers; i++) { 244 for (i = 0; i < vc->server_list->nr_servers; i++) {
241 struct afs_vlserver *s = vc->server_list->servers[i].server; 245 struct afs_vlserver *s = vc->server_list->servers[i].server;
242 int probe_error = READ_ONCE(s->probe.error);
243 246
244 switch (probe_error) { 247 afs_prioritise_error(&e, READ_ONCE(s->probe.error),
245 case 0: 248 s->probe.abort_code);
246 continue;
247 default:
248 if (error == -ETIMEDOUT ||
249 error == -ETIME)
250 continue;
251 case -ETIMEDOUT:
252 case -ETIME:
253 if (error == -ENOMEM ||
254 error == -ENONET)
255 continue;
256 case -ENOMEM:
257 case -ENONET:
258 if (error == -ENETUNREACH)
259 continue;
260 case -ENETUNREACH:
261 if (error == -EHOSTUNREACH)
262 continue;
263 case -EHOSTUNREACH:
264 if (error == -ECONNREFUSED)
265 continue;
266 case -ECONNREFUSED:
267 if (error == -ECONNRESET)
268 continue;
269 case -ECONNRESET: /* Responded, but call expired. */
270 if (error == -ECONNABORTED)
271 continue;
272 case -ECONNABORTED:
273 abort_code = s->probe.abort_code;
274 error = probe_error;
275 continue;
276 }
277 } 249 }
278 250
279 if (error == -ECONNABORTED)
280 error = afs_abort_to_error(abort_code);
281
282failed_set_error: 251failed_set_error:
283 vc->error = error; 252 vc->error = error;
284failed: 253failed:
@@ -341,6 +310,7 @@ int afs_end_vlserver_operation(struct afs_vl_cursor *vc)
341 struct afs_net *net = vc->cell->net; 310 struct afs_net *net = vc->cell->net;
342 311
343 if (vc->error == -EDESTADDRREQ || 312 if (vc->error == -EDESTADDRREQ ||
313 vc->error == -EADDRNOTAVAIL ||
344 vc->error == -ENETUNREACH || 314 vc->error == -ENETUNREACH ||
345 vc->error == -EHOSTUNREACH) 315 vc->error == -EHOSTUNREACH)
346 afs_vl_dump_edestaddrreq(vc); 316 afs_vl_dump_edestaddrreq(vc);
diff --git a/fs/aio.c b/fs/aio.c
index 301e6314183b..97f983592925 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
1436 ret = ioprio_check_cap(iocb->aio_reqprio); 1436 ret = ioprio_check_cap(iocb->aio_reqprio);
1437 if (ret) { 1437 if (ret) {
1438 pr_debug("aio ioprio check cap error: %d\n", ret); 1438 pr_debug("aio ioprio check cap error: %d\n", ret);
1439 fput(req->ki_filp);
1439 return ret; 1440 return ret;
1440 } 1441 }
1441 1442
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80953528572d..68f322f600a0 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3163,6 +3163,9 @@ void btrfs_destroy_inode(struct inode *inode);
3163int btrfs_drop_inode(struct inode *inode); 3163int btrfs_drop_inode(struct inode *inode);
3164int __init btrfs_init_cachep(void); 3164int __init btrfs_init_cachep(void);
3165void __cold btrfs_destroy_cachep(void); 3165void __cold btrfs_destroy_cachep(void);
3166struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
3167 struct btrfs_root *root, int *new,
3168 struct btrfs_path *path);
3166struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 3169struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3167 struct btrfs_root *root, int *was_new); 3170 struct btrfs_root *root, int *was_new);
3168struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 3171struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b0ab41da91d1..6d776717d8b3 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -477,9 +477,9 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
477 int mirror_num = 0; 477 int mirror_num = 0;
478 int failed_mirror = 0; 478 int failed_mirror = 0;
479 479
480 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
481 io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree; 480 io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
482 while (1) { 481 while (1) {
482 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
483 ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE, 483 ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
484 mirror_num); 484 mirror_num);
485 if (!ret) { 485 if (!ret) {
@@ -493,15 +493,6 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
493 break; 493 break;
494 } 494 }
495 495
496 /*
497 * This buffer's crc is fine, but its contents are corrupted, so
498 * there is no reason to read the other copies, they won't be
499 * any less wrong.
500 */
501 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) ||
502 ret == -EUCLEAN)
503 break;
504
505 num_copies = btrfs_num_copies(fs_info, 496 num_copies = btrfs_num_copies(fs_info,
506 eb->start, eb->len); 497 eb->start, eb->len);
507 if (num_copies == 1) 498 if (num_copies == 1)
@@ -1664,9 +1655,8 @@ static int cleaner_kthread(void *arg)
1664 struct btrfs_root *root = arg; 1655 struct btrfs_root *root = arg;
1665 struct btrfs_fs_info *fs_info = root->fs_info; 1656 struct btrfs_fs_info *fs_info = root->fs_info;
1666 int again; 1657 int again;
1667 struct btrfs_trans_handle *trans;
1668 1658
1669 do { 1659 while (1) {
1670 again = 0; 1660 again = 0;
1671 1661
1672 /* Make the cleaner go to sleep early. */ 1662 /* Make the cleaner go to sleep early. */
@@ -1715,42 +1705,16 @@ static int cleaner_kthread(void *arg)
1715 */ 1705 */
1716 btrfs_delete_unused_bgs(fs_info); 1706 btrfs_delete_unused_bgs(fs_info);
1717sleep: 1707sleep:
1708 if (kthread_should_park())
1709 kthread_parkme();
1710 if (kthread_should_stop())
1711 return 0;
1718 if (!again) { 1712 if (!again) {
1719 set_current_state(TASK_INTERRUPTIBLE); 1713 set_current_state(TASK_INTERRUPTIBLE);
1720 if (!kthread_should_stop()) 1714 schedule();
1721 schedule();
1722 __set_current_state(TASK_RUNNING); 1715 __set_current_state(TASK_RUNNING);
1723 } 1716 }
1724 } while (!kthread_should_stop());
1725
1726 /*
1727 * Transaction kthread is stopped before us and wakes us up.
1728 * However we might have started a new transaction and COWed some
1729 * tree blocks when deleting unused block groups for example. So
1730 * make sure we commit the transaction we started to have a clean
1731 * shutdown when evicting the btree inode - if it has dirty pages
1732 * when we do the final iput() on it, eviction will trigger a
1733 * writeback for it which will fail with null pointer dereferences
1734 * since work queues and other resources were already released and
1735 * destroyed by the time the iput/eviction/writeback is made.
1736 */
1737 trans = btrfs_attach_transaction(root);
1738 if (IS_ERR(trans)) {
1739 if (PTR_ERR(trans) != -ENOENT)
1740 btrfs_err(fs_info,
1741 "cleaner transaction attach returned %ld",
1742 PTR_ERR(trans));
1743 } else {
1744 int ret;
1745
1746 ret = btrfs_commit_transaction(trans);
1747 if (ret)
1748 btrfs_err(fs_info,
1749 "cleaner open transaction commit returned %d",
1750 ret);
1751 } 1717 }
1752
1753 return 0;
1754} 1718}
1755 1719
1756static int transaction_kthread(void *arg) 1720static int transaction_kthread(void *arg)
@@ -3931,6 +3895,13 @@ void close_ctree(struct btrfs_fs_info *fs_info)
3931 int ret; 3895 int ret;
3932 3896
3933 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); 3897 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3898 /*
3899 * We don't want the cleaner to start new transactions, add more delayed
3900 * iputs, etc. while we're closing. We can't use kthread_stop() yet
3901 * because that frees the task_struct, and the transaction kthread might
3902 * still try to wake up the cleaner.
3903 */
3904 kthread_park(fs_info->cleaner_kthread);
3934 3905
3935 /* wait for the qgroup rescan worker to stop */ 3906 /* wait for the qgroup rescan worker to stop */
3936 btrfs_qgroup_wait_for_completion(fs_info, false); 3907 btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3958,9 +3929,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
3958 3929
3959 if (!sb_rdonly(fs_info->sb)) { 3930 if (!sb_rdonly(fs_info->sb)) {
3960 /* 3931 /*
3961 * If the cleaner thread is stopped and there are 3932 * The cleaner kthread is stopped, so do one final pass over
3962 * block groups queued for removal, the deletion will be 3933 * unused block groups.
3963 * skipped when we quit the cleaner thread.
3964 */ 3934 */
3965 btrfs_delete_unused_bgs(fs_info); 3935 btrfs_delete_unused_bgs(fs_info);
3966 3936
@@ -4359,13 +4329,23 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4359 unpin = pinned_extents; 4329 unpin = pinned_extents;
4360again: 4330again:
4361 while (1) { 4331 while (1) {
4332 /*
4333 * The btrfs_finish_extent_commit() may get the same range as
4334 * ours between find_first_extent_bit and clear_extent_dirty.
4335 * Hence, hold the unused_bg_unpin_mutex to avoid double unpin
4336 * the same extent range.
4337 */
4338 mutex_lock(&fs_info->unused_bg_unpin_mutex);
4362 ret = find_first_extent_bit(unpin, 0, &start, &end, 4339 ret = find_first_extent_bit(unpin, 0, &start, &end,
4363 EXTENT_DIRTY, NULL); 4340 EXTENT_DIRTY, NULL);
4364 if (ret) 4341 if (ret) {
4342 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4365 break; 4343 break;
4344 }
4366 4345
4367 clear_extent_dirty(unpin, start, end); 4346 clear_extent_dirty(unpin, start, end);
4368 btrfs_error_unpin_extent_range(fs_info, start, end); 4347 btrfs_error_unpin_extent_range(fs_info, start, end);
4348 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4369 cond_resched(); 4349 cond_resched();
4370 } 4350 }
4371 4351
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a3c22e16509b..58e93bce3036 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2089,6 +2089,30 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2089 atomic_inc(&root->log_batch); 2089 atomic_inc(&root->log_batch);
2090 2090
2091 /* 2091 /*
2092 * Before we acquired the inode's lock, someone may have dirtied more
2093 * pages in the target range. We need to make sure that writeback for
2094 * any such pages does not start while we are logging the inode, because
2095 * if it does, any of the following might happen when we are not doing a
2096 * full inode sync:
2097 *
2098 * 1) We log an extent after its writeback finishes but before its
2099 * checksums are added to the csum tree, leading to -EIO errors
2100 * when attempting to read the extent after a log replay.
2101 *
2102 * 2) We can end up logging an extent before its writeback finishes.
2103 * Therefore after the log replay we will have a file extent item
2104 * pointing to an unwritten extent (and no data checksums as well).
2105 *
2106 * So trigger writeback for any eventual new dirty pages and then we
2107 * wait for all ordered extents to complete below.
2108 */
2109 ret = start_ordered_ops(inode, start, end);
2110 if (ret) {
2111 inode_unlock(inode);
2112 goto out;
2113 }
2114
2115 /*
2092 * We have to do this here to avoid the priority inversion of waiting on 2116 * We have to do this here to avoid the priority inversion of waiting on
2093 * IO of a lower priority task while holding a transaciton open. 2117 * IO of a lower priority task while holding a transaciton open.
2094 */ 2118 */
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4ba0aedc878b..74aa552f4793 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -75,7 +75,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
75 * sure NOFS is set to keep us from deadlocking. 75 * sure NOFS is set to keep us from deadlocking.
76 */ 76 */
77 nofs_flag = memalloc_nofs_save(); 77 nofs_flag = memalloc_nofs_save();
78 inode = btrfs_iget(fs_info->sb, &location, root, NULL); 78 inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
79 btrfs_release_path(path);
79 memalloc_nofs_restore(nofs_flag); 80 memalloc_nofs_restore(nofs_flag);
80 if (IS_ERR(inode)) 81 if (IS_ERR(inode))
81 return inode; 82 return inode;
@@ -838,6 +839,25 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
838 path->search_commit_root = 1; 839 path->search_commit_root = 1;
839 path->skip_locking = 1; 840 path->skip_locking = 1;
840 841
842 /*
843 * We must pass a path with search_commit_root set to btrfs_iget in
844 * order to avoid a deadlock when allocating extents for the tree root.
845 *
846 * When we are COWing an extent buffer from the tree root, when looking
847 * for a free extent, at extent-tree.c:find_free_extent(), we can find
848 * block group without its free space cache loaded. When we find one
849 * we must load its space cache which requires reading its free space
850 * cache's inode item from the root tree. If this inode item is located
851 * in the same leaf that we started COWing before, then we end up in
852 * deadlock on the extent buffer (trying to read lock it when we
853 * previously write locked it).
854 *
855 * It's safe to read the inode item using the commit root because
856 * block groups, once loaded, stay in memory forever (until they are
857 * removed) as well as their space caches once loaded. New block groups
858 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
859 * we will never try to read their inode item while the fs is mounted.
860 */
841 inode = lookup_free_space_inode(fs_info, block_group, path); 861 inode = lookup_free_space_inode(fs_info, block_group, path);
842 if (IS_ERR(inode)) { 862 if (IS_ERR(inode)) {
843 btrfs_free_path(path); 863 btrfs_free_path(path);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d3df5b52278c..9ea4c6f0352f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1531,12 +1531,11 @@ out_check:
1531 } 1531 }
1532 btrfs_release_path(path); 1532 btrfs_release_path(path);
1533 1533
1534 if (cur_offset <= end && cow_start == (u64)-1) { 1534 if (cur_offset <= end && cow_start == (u64)-1)
1535 cow_start = cur_offset; 1535 cow_start = cur_offset;
1536 cur_offset = end;
1537 }
1538 1536
1539 if (cow_start != (u64)-1) { 1537 if (cow_start != (u64)-1) {
1538 cur_offset = end;
1540 ret = cow_file_range(inode, locked_page, cow_start, end, end, 1539 ret = cow_file_range(inode, locked_page, cow_start, end, end,
1541 page_started, nr_written, 1, NULL); 1540 page_started, nr_written, 1, NULL);
1542 if (ret) 1541 if (ret)
@@ -3570,10 +3569,11 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3570/* 3569/*
3571 * read an inode from the btree into the in-memory inode 3570 * read an inode from the btree into the in-memory inode
3572 */ 3571 */
3573static int btrfs_read_locked_inode(struct inode *inode) 3572static int btrfs_read_locked_inode(struct inode *inode,
3573 struct btrfs_path *in_path)
3574{ 3574{
3575 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3575 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3576 struct btrfs_path *path; 3576 struct btrfs_path *path = in_path;
3577 struct extent_buffer *leaf; 3577 struct extent_buffer *leaf;
3578 struct btrfs_inode_item *inode_item; 3578 struct btrfs_inode_item *inode_item;
3579 struct btrfs_root *root = BTRFS_I(inode)->root; 3579 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3589,15 +3589,18 @@ static int btrfs_read_locked_inode(struct inode *inode)
3589 if (!ret) 3589 if (!ret)
3590 filled = true; 3590 filled = true;
3591 3591
3592 path = btrfs_alloc_path(); 3592 if (!path) {
3593 if (!path) 3593 path = btrfs_alloc_path();
3594 return -ENOMEM; 3594 if (!path)
3595 return -ENOMEM;
3596 }
3595 3597
3596 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3598 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3597 3599
3598 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3600 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3599 if (ret) { 3601 if (ret) {
3600 btrfs_free_path(path); 3602 if (path != in_path)
3603 btrfs_free_path(path);
3601 return ret; 3604 return ret;
3602 } 3605 }
3603 3606
@@ -3722,7 +3725,8 @@ cache_acl:
3722 btrfs_ino(BTRFS_I(inode)), 3725 btrfs_ino(BTRFS_I(inode)),
3723 root->root_key.objectid, ret); 3726 root->root_key.objectid, ret);
3724 } 3727 }
3725 btrfs_free_path(path); 3728 if (path != in_path)
3729 btrfs_free_path(path);
3726 3730
3727 if (!maybe_acls) 3731 if (!maybe_acls)
3728 cache_no_acl(inode); 3732 cache_no_acl(inode);
@@ -5644,8 +5648,9 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
5644/* Get an inode object given its location and corresponding root. 5648/* Get an inode object given its location and corresponding root.
5645 * Returns in *is_new if the inode was read from disk 5649 * Returns in *is_new if the inode was read from disk
5646 */ 5650 */
5647struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 5651struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
5648 struct btrfs_root *root, int *new) 5652 struct btrfs_root *root, int *new,
5653 struct btrfs_path *path)
5649{ 5654{
5650 struct inode *inode; 5655 struct inode *inode;
5651 5656
@@ -5656,7 +5661,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5656 if (inode->i_state & I_NEW) { 5661 if (inode->i_state & I_NEW) {
5657 int ret; 5662 int ret;
5658 5663
5659 ret = btrfs_read_locked_inode(inode); 5664 ret = btrfs_read_locked_inode(inode, path);
5660 if (!ret) { 5665 if (!ret) {
5661 inode_tree_add(inode); 5666 inode_tree_add(inode);
5662 unlock_new_inode(inode); 5667 unlock_new_inode(inode);
@@ -5678,6 +5683,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5678 return inode; 5683 return inode;
5679} 5684}
5680 5685
5686struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5687 struct btrfs_root *root, int *new)
5688{
5689 return btrfs_iget_path(s, location, root, new, NULL);
5690}
5691
5681static struct inode *new_simple_dir(struct super_block *s, 5692static struct inode *new_simple_dir(struct super_block *s,
5682 struct btrfs_key *key, 5693 struct btrfs_key *key,
5683 struct btrfs_root *root) 5694 struct btrfs_root *root)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3ca6943827ef..802a628e9f7d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3488,6 +3488,8 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3488 const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize; 3488 const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
3489 3489
3490 len = round_down(i_size_read(src), sz) - loff; 3490 len = round_down(i_size_read(src), sz) - loff;
3491 if (len == 0)
3492 return 0;
3491 olen = len; 3493 olen = len;
3492 } 3494 }
3493 } 3495 }
@@ -4257,9 +4259,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
4257 goto out_unlock; 4259 goto out_unlock;
4258 if (len == 0) 4260 if (len == 0)
4259 olen = len = src->i_size - off; 4261 olen = len = src->i_size - off;
4260 /* if we extend to eof, continue to block boundary */ 4262 /*
4261 if (off + len == src->i_size) 4263 * If we extend to eof, continue to block boundary if and only if the
4264 * destination end offset matches the destination file's size, otherwise
4265 * we would be corrupting data by placing the eof block into the middle
4266 * of a file.
4267 */
4268 if (off + len == src->i_size) {
4269 if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
4270 goto out_unlock;
4262 len = ALIGN(src->i_size, bs) - off; 4271 len = ALIGN(src->i_size, bs) - off;
4272 }
4263 4273
4264 if (len == 0) { 4274 if (len == 0) {
4265 ret = 0; 4275 ret = 0;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 45868fd76209..f70825af6438 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2659,7 +2659,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2659 int i; 2659 int i;
2660 u64 *i_qgroups; 2660 u64 *i_qgroups;
2661 struct btrfs_fs_info *fs_info = trans->fs_info; 2661 struct btrfs_fs_info *fs_info = trans->fs_info;
2662 struct btrfs_root *quota_root = fs_info->quota_root; 2662 struct btrfs_root *quota_root;
2663 struct btrfs_qgroup *srcgroup; 2663 struct btrfs_qgroup *srcgroup;
2664 struct btrfs_qgroup *dstgroup; 2664 struct btrfs_qgroup *dstgroup;
2665 u32 level_size = 0; 2665 u32 level_size = 0;
@@ -2669,6 +2669,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2669 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2669 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2670 goto out; 2670 goto out;
2671 2671
2672 quota_root = fs_info->quota_root;
2672 if (!quota_root) { 2673 if (!quota_root) {
2673 ret = -EINVAL; 2674 ret = -EINVAL;
2674 goto out; 2675 goto out;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 924116f654a1..a3f75b8926d4 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3959,6 +3959,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3959restart: 3959restart:
3960 if (update_backref_cache(trans, &rc->backref_cache)) { 3960 if (update_backref_cache(trans, &rc->backref_cache)) {
3961 btrfs_end_transaction(trans); 3961 btrfs_end_transaction(trans);
3962 trans = NULL;
3962 continue; 3963 continue;
3963 } 3964 }
3964 3965
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 094cc1444a90..5be83b5a1b43 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3340,7 +3340,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3340 kfree(m); 3340 kfree(m);
3341} 3341}
3342 3342
3343static void tail_append_pending_moves(struct pending_dir_move *moves, 3343static void tail_append_pending_moves(struct send_ctx *sctx,
3344 struct pending_dir_move *moves,
3344 struct list_head *stack) 3345 struct list_head *stack)
3345{ 3346{
3346 if (list_empty(&moves->list)) { 3347 if (list_empty(&moves->list)) {
@@ -3351,6 +3352,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
3351 list_add_tail(&moves->list, stack); 3352 list_add_tail(&moves->list, stack);
3352 list_splice_tail(&list, stack); 3353 list_splice_tail(&list, stack);
3353 } 3354 }
3355 if (!RB_EMPTY_NODE(&moves->node)) {
3356 rb_erase(&moves->node, &sctx->pending_dir_moves);
3357 RB_CLEAR_NODE(&moves->node);
3358 }
3354} 3359}
3355 3360
3356static int apply_children_dir_moves(struct send_ctx *sctx) 3361static int apply_children_dir_moves(struct send_ctx *sctx)
@@ -3365,7 +3370,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
3365 return 0; 3370 return 0;
3366 3371
3367 INIT_LIST_HEAD(&stack); 3372 INIT_LIST_HEAD(&stack);
3368 tail_append_pending_moves(pm, &stack); 3373 tail_append_pending_moves(sctx, pm, &stack);
3369 3374
3370 while (!list_empty(&stack)) { 3375 while (!list_empty(&stack)) {
3371 pm = list_first_entry(&stack, struct pending_dir_move, list); 3376 pm = list_first_entry(&stack, struct pending_dir_move, list);
@@ -3376,7 +3381,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
3376 goto out; 3381 goto out;
3377 pm = get_pending_dir_moves(sctx, parent_ino); 3382 pm = get_pending_dir_moves(sctx, parent_ino);
3378 if (pm) 3383 if (pm)
3379 tail_append_pending_moves(pm, &stack); 3384 tail_append_pending_moves(sctx, pm, &stack);
3380 } 3385 }
3381 return 0; 3386 return 0;
3382 3387
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index b362b45dd757..645fc81e2a94 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1916,7 +1916,7 @@ restore:
1916} 1916}
1917 1917
1918/* Used to sort the devices by max_avail(descending sort) */ 1918/* Used to sort the devices by max_avail(descending sort) */
1919static int btrfs_cmp_device_free_bytes(const void *dev_info1, 1919static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
1920 const void *dev_info2) 1920 const void *dev_info2)
1921{ 1921{
1922 if (((struct btrfs_device_info *)dev_info1)->max_avail > 1922 if (((struct btrfs_device_info *)dev_info1)->max_avail >
@@ -1945,8 +1945,8 @@ static inline void btrfs_descending_sort_devices(
1945 * The helper to calc the free space on the devices that can be used to store 1945 * The helper to calc the free space on the devices that can be used to store
1946 * file data. 1946 * file data.
1947 */ 1947 */
1948static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, 1948static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
1949 u64 *free_bytes) 1949 u64 *free_bytes)
1950{ 1950{
1951 struct btrfs_device_info *devices_info; 1951 struct btrfs_device_info *devices_info;
1952 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 1952 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@@ -2237,6 +2237,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2237 vol = memdup_user((void __user *)arg, sizeof(*vol)); 2237 vol = memdup_user((void __user *)arg, sizeof(*vol));
2238 if (IS_ERR(vol)) 2238 if (IS_ERR(vol))
2239 return PTR_ERR(vol); 2239 return PTR_ERR(vol);
2240 vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2240 2241
2241 switch (cmd) { 2242 switch (cmd) {
2242 case BTRFS_IOC_SCAN_DEV: 2243 case BTRFS_IOC_SCAN_DEV:
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index cab0b1f1f741..1a4e2b101ef2 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -389,13 +389,11 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
389 389
390 /* 390 /*
391 * Here we don't really care about alignment since extent allocator can 391 * Here we don't really care about alignment since extent allocator can
392 * handle it. We care more about the size, as if one block group is 392 * handle it. We care more about the size.
393 * larger than maximum size, it's must be some obvious corruption.
394 */ 393 */
395 if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) { 394 if (key->offset == 0) {
396 block_group_err(fs_info, leaf, slot, 395 block_group_err(fs_info, leaf, slot,
397 "invalid block group size, have %llu expect (0, %llu]", 396 "invalid block group size 0");
398 key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
399 return -EUCLEAN; 397 return -EUCLEAN;
400 } 398 }
401 399
@@ -440,7 +438,7 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
440 type != (BTRFS_BLOCK_GROUP_METADATA | 438 type != (BTRFS_BLOCK_GROUP_METADATA |
441 BTRFS_BLOCK_GROUP_DATA)) { 439 BTRFS_BLOCK_GROUP_DATA)) {
442 block_group_err(fs_info, leaf, slot, 440 block_group_err(fs_info, leaf, slot,
443"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx", 441"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
444 type, hweight64(type), 442 type, hweight64(type),
445 BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA, 443 BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
446 BTRFS_BLOCK_GROUP_SYSTEM, 444 BTRFS_BLOCK_GROUP_SYSTEM,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e07f3376b7df..a5ce99a6c936 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4396,6 +4396,23 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4396 logged_end = end; 4396 logged_end = end;
4397 4397
4398 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4398 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4399 /*
4400 * Skip extents outside our logging range. It's important to do
4401 * it for correctness because if we don't ignore them, we may
4402 * log them before their ordered extent completes, and therefore
4403 * we could log them without logging their respective checksums
4404 * (the checksum items are added to the csum tree at the very
4405 * end of btrfs_finish_ordered_io()). Also leave such extents
4406 * outside of our range in the list, since we may have another
4407 * ranged fsync in the near future that needs them. If an extent
4408 * outside our range corresponds to a hole, log it to avoid
4409 * leaving gaps between extents (fsck will complain when we are
4410 * not using the NO_HOLES feature).
4411 */
4412 if ((em->start > end || em->start + em->len <= start) &&
4413 em->block_start != EXTENT_MAP_HOLE)
4414 continue;
4415
4399 list_del_init(&em->list); 4416 list_del_init(&em->list);
4400 /* 4417 /*
4401 * Just an arbitrary number, this can be really CPU intensive 4418 * Just an arbitrary number, this can be really CPU intensive
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 95983c744164..1645fcfd9691 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -244,11 +244,13 @@ wait_for_old_object:
244 244
245 ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)); 245 ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
246 246
247 cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_retry); 247 cache->cache.ops->put_object(&xobject->fscache,
248 (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
248 goto try_again; 249 goto try_again;
249 250
250requeue: 251requeue:
251 cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo); 252 cache->cache.ops->put_object(&xobject->fscache,
253 (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
252 _leave(" = -ETIMEDOUT"); 254 _leave(" = -ETIMEDOUT");
253 return -ETIMEDOUT; 255 return -ETIMEDOUT;
254} 256}
@@ -336,7 +338,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
336try_again: 338try_again:
337 /* first step is to make up a grave dentry in the graveyard */ 339 /* first step is to make up a grave dentry in the graveyard */
338 sprintf(nbuffer, "%08x%08x", 340 sprintf(nbuffer, "%08x%08x",
339 (uint32_t) get_seconds(), 341 (uint32_t) ktime_get_real_seconds(),
340 (uint32_t) atomic_inc_return(&cache->gravecounter)); 342 (uint32_t) atomic_inc_return(&cache->gravecounter));
341 343
342 /* do the multiway lock magic */ 344 /* do the multiway lock magic */
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 40f7595aad10..8a577409d030 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -535,7 +535,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
535 netpage->index, cachefiles_gfp); 535 netpage->index, cachefiles_gfp);
536 if (ret < 0) { 536 if (ret < 0) {
537 if (ret == -EEXIST) { 537 if (ret == -EEXIST) {
538 put_page(backpage);
539 backpage = NULL;
538 put_page(netpage); 540 put_page(netpage);
541 netpage = NULL;
539 fscache_retrieval_complete(op, 1); 542 fscache_retrieval_complete(op, 1);
540 continue; 543 continue;
541 } 544 }
@@ -608,7 +611,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
608 netpage->index, cachefiles_gfp); 611 netpage->index, cachefiles_gfp);
609 if (ret < 0) { 612 if (ret < 0) {
610 if (ret == -EEXIST) { 613 if (ret == -EEXIST) {
614 put_page(backpage);
615 backpage = NULL;
611 put_page(netpage); 616 put_page(netpage);
617 netpage = NULL;
612 fscache_retrieval_complete(op, 1); 618 fscache_retrieval_complete(op, 1);
613 continue; 619 continue;
614 } 620 }
@@ -962,11 +968,8 @@ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
962 __releases(&object->fscache.cookie->lock) 968 __releases(&object->fscache.cookie->lock)
963{ 969{
964 struct cachefiles_object *object; 970 struct cachefiles_object *object;
965 struct cachefiles_cache *cache;
966 971
967 object = container_of(_object, struct cachefiles_object, fscache); 972 object = container_of(_object, struct cachefiles_object, fscache);
968 cache = container_of(object->fscache.cache,
969 struct cachefiles_cache, cache);
970 973
971 _enter("%p,{%lu}", object, page->index); 974 _enter("%p,{%lu}", object, page->index);
972 975
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 0a29a00aed2e..511e6c68156a 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -135,7 +135,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
135 struct dentry *dentry = object->dentry; 135 struct dentry *dentry = object->dentry;
136 int ret; 136 int ret;
137 137
138 ASSERT(dentry); 138 if (!dentry)
139 return -ESTALE;
139 140
140 _enter("%p,#%d", object, auxdata->len); 141 _enter("%p,#%d", object, auxdata->len);
141 142
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 27cad84dab23..189df668b6a0 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1931,10 +1931,17 @@ static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
1931 if (!prealloc_cf) 1931 if (!prealloc_cf)
1932 return -ENOMEM; 1932 return -ENOMEM;
1933 1933
1934 /* Start by sync'ing the source file */ 1934 /* Start by sync'ing the source and destination files */
1935 ret = file_write_and_wait_range(src_file, src_off, (src_off + len)); 1935 ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
1936 if (ret < 0) 1936 if (ret < 0) {
1937 dout("failed to write src file (%zd)\n", ret);
1938 goto out;
1939 }
1940 ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
1941 if (ret < 0) {
1942 dout("failed to write dst file (%zd)\n", ret);
1937 goto out; 1943 goto out;
1944 }
1938 1945
1939 /* 1946 /*
1940 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other 1947 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 67a9aeb2f4ec..bd13a3267ae0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -80,12 +80,8 @@ static int parse_reply_info_in(void **p, void *end,
80 info->symlink = *p; 80 info->symlink = *p;
81 *p += info->symlink_len; 81 *p += info->symlink_len;
82 82
83 if (features & CEPH_FEATURE_DIRLAYOUTHASH) 83 ceph_decode_copy_safe(p, end, &info->dir_layout,
84 ceph_decode_copy_safe(p, end, &info->dir_layout, 84 sizeof(info->dir_layout), bad);
85 sizeof(info->dir_layout), bad);
86 else
87 memset(&info->dir_layout, 0, sizeof(info->dir_layout));
88
89 ceph_decode_32_safe(p, end, info->xattr_len, bad); 85 ceph_decode_32_safe(p, end, info->xattr_len, bad);
90 ceph_decode_need(p, end, info->xattr_len, bad); 86 ceph_decode_need(p, end, info->xattr_len, bad);
91 info->xattr_data = *p; 87 info->xattr_data = *p;
@@ -3182,10 +3178,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
3182 recon_state.pagelist = pagelist; 3178 recon_state.pagelist = pagelist;
3183 if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) 3179 if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
3184 recon_state.msg_version = 3; 3180 recon_state.msg_version = 3;
3185 else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
3186 recon_state.msg_version = 2;
3187 else 3181 else
3188 recon_state.msg_version = 1; 3182 recon_state.msg_version = 2;
3189 err = iterate_session_caps(session, encode_caps_cb, &recon_state); 3183 err = iterate_session_caps(session, encode_caps_cb, &recon_state);
3190 if (err < 0) 3184 if (err < 0)
3191 goto fail; 3185 goto fail;
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 32d4f13784ba..03f4d24db8fe 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -237,7 +237,8 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
237 ceph_put_snap_realm(mdsc, realm); 237 ceph_put_snap_realm(mdsc, realm);
238 realm = next; 238 realm = next;
239 } 239 }
240 ceph_put_snap_realm(mdsc, realm); 240 if (realm)
241 ceph_put_snap_realm(mdsc, realm);
241 up_read(&mdsc->snap_rwsem); 242 up_read(&mdsc->snap_rwsem);
242 243
243 return exceeded; 244 return exceeded;
diff --git a/fs/dax.c b/fs/dax.c
index 616e36ea6aaa..9bcce89ea18e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,12 +98,6 @@ static void *dax_make_entry(pfn_t pfn, unsigned long flags)
98 return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); 98 return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
99} 99}
100 100
101static void *dax_make_page_entry(struct page *page)
102{
103 pfn_t pfn = page_to_pfn_t(page);
104 return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
105}
106
107static bool dax_is_locked(void *entry) 101static bool dax_is_locked(void *entry)
108{ 102{
109 return xa_to_value(entry) & DAX_LOCKED; 103 return xa_to_value(entry) & DAX_LOCKED;
@@ -116,12 +110,12 @@ static unsigned int dax_entry_order(void *entry)
116 return 0; 110 return 0;
117} 111}
118 112
119static int dax_is_pmd_entry(void *entry) 113static unsigned long dax_is_pmd_entry(void *entry)
120{ 114{
121 return xa_to_value(entry) & DAX_PMD; 115 return xa_to_value(entry) & DAX_PMD;
122} 116}
123 117
124static int dax_is_pte_entry(void *entry) 118static bool dax_is_pte_entry(void *entry)
125{ 119{
126 return !(xa_to_value(entry) & DAX_PMD); 120 return !(xa_to_value(entry) & DAX_PMD);
127} 121}
@@ -222,9 +216,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
222 ewait.wait.func = wake_exceptional_entry_func; 216 ewait.wait.func = wake_exceptional_entry_func;
223 217
224 for (;;) { 218 for (;;) {
225 entry = xas_load(xas); 219 entry = xas_find_conflict(xas);
226 if (!entry || xa_is_internal(entry) || 220 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
227 WARN_ON_ONCE(!xa_is_value(entry)) ||
228 !dax_is_locked(entry)) 221 !dax_is_locked(entry))
229 return entry; 222 return entry;
230 223
@@ -255,6 +248,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
255{ 248{
256 void *old; 249 void *old;
257 250
251 BUG_ON(dax_is_locked(entry));
258 xas_reset(xas); 252 xas_reset(xas);
259 xas_lock_irq(xas); 253 xas_lock_irq(xas);
260 old = xas_store(xas, entry); 254 old = xas_store(xas, entry);
@@ -352,16 +346,27 @@ static struct page *dax_busy_page(void *entry)
352 return NULL; 346 return NULL;
353} 347}
354 348
349/*
350 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
351 * @page: The page whose entry we want to lock
352 *
353 * Context: Process context.
354 * Return: %true if the entry was locked or does not need to be locked.
355 */
355bool dax_lock_mapping_entry(struct page *page) 356bool dax_lock_mapping_entry(struct page *page)
356{ 357{
357 XA_STATE(xas, NULL, 0); 358 XA_STATE(xas, NULL, 0);
358 void *entry; 359 void *entry;
360 bool locked;
359 361
362 /* Ensure page->mapping isn't freed while we look at it */
363 rcu_read_lock();
360 for (;;) { 364 for (;;) {
361 struct address_space *mapping = READ_ONCE(page->mapping); 365 struct address_space *mapping = READ_ONCE(page->mapping);
362 366
367 locked = false;
363 if (!dax_mapping(mapping)) 368 if (!dax_mapping(mapping))
364 return false; 369 break;
365 370
366 /* 371 /*
367 * In the device-dax case there's no need to lock, a 372 * In the device-dax case there's no need to lock, a
@@ -370,8 +375,9 @@ bool dax_lock_mapping_entry(struct page *page)
370 * otherwise we would not have a valid pfn_to_page() 375 * otherwise we would not have a valid pfn_to_page()
371 * translation. 376 * translation.
372 */ 377 */
378 locked = true;
373 if (S_ISCHR(mapping->host->i_mode)) 379 if (S_ISCHR(mapping->host->i_mode))
374 return true; 380 break;
375 381
376 xas.xa = &mapping->i_pages; 382 xas.xa = &mapping->i_pages;
377 xas_lock_irq(&xas); 383 xas_lock_irq(&xas);
@@ -382,28 +388,35 @@ bool dax_lock_mapping_entry(struct page *page)
382 xas_set(&xas, page->index); 388 xas_set(&xas, page->index);
383 entry = xas_load(&xas); 389 entry = xas_load(&xas);
384 if (dax_is_locked(entry)) { 390 if (dax_is_locked(entry)) {
391 rcu_read_unlock();
385 entry = get_unlocked_entry(&xas); 392 entry = get_unlocked_entry(&xas);
386 /* Did the page move while we slept? */ 393 xas_unlock_irq(&xas);
387 if (dax_to_pfn(entry) != page_to_pfn(page)) { 394 put_unlocked_entry(&xas, entry);
388 xas_unlock_irq(&xas); 395 rcu_read_lock();
389 continue; 396 continue;
390 }
391 } 397 }
392 dax_lock_entry(&xas, entry); 398 dax_lock_entry(&xas, entry);
393 xas_unlock_irq(&xas); 399 xas_unlock_irq(&xas);
394 return true; 400 break;
395 } 401 }
402 rcu_read_unlock();
403 return locked;
396} 404}
397 405
398void dax_unlock_mapping_entry(struct page *page) 406void dax_unlock_mapping_entry(struct page *page)
399{ 407{
400 struct address_space *mapping = page->mapping; 408 struct address_space *mapping = page->mapping;
401 XA_STATE(xas, &mapping->i_pages, page->index); 409 XA_STATE(xas, &mapping->i_pages, page->index);
410 void *entry;
402 411
403 if (S_ISCHR(mapping->host->i_mode)) 412 if (S_ISCHR(mapping->host->i_mode))
404 return; 413 return;
405 414
406 dax_unlock_entry(&xas, dax_make_page_entry(page)); 415 rcu_read_lock();
416 entry = xas_load(&xas);
417 rcu_read_unlock();
418 entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
419 dax_unlock_entry(&xas, entry);
407} 420}
408 421
409/* 422/*
@@ -445,11 +458,9 @@ static void *grab_mapping_entry(struct xa_state *xas,
445retry: 458retry:
446 xas_lock_irq(xas); 459 xas_lock_irq(xas);
447 entry = get_unlocked_entry(xas); 460 entry = get_unlocked_entry(xas);
448 if (xa_is_internal(entry))
449 goto fallback;
450 461
451 if (entry) { 462 if (entry) {
452 if (WARN_ON_ONCE(!xa_is_value(entry))) { 463 if (!xa_is_value(entry)) {
453 xas_set_err(xas, EIO); 464 xas_set_err(xas, EIO);
454 goto out_unlock; 465 goto out_unlock;
455 } 466 }
@@ -1628,8 +1639,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1628 /* Did we race with someone splitting entry or so? */ 1639 /* Did we race with someone splitting entry or so? */
1629 if (!entry || 1640 if (!entry ||
1630 (order == 0 && !dax_is_pte_entry(entry)) || 1641 (order == 0 && !dax_is_pte_entry(entry)) ||
1631 (order == PMD_ORDER && (xa_is_internal(entry) || 1642 (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
1632 !dax_is_pmd_entry(entry)))) {
1633 put_unlocked_entry(&xas, entry); 1643 put_unlocked_entry(&xas, entry);
1634 xas_unlock_irq(&xas); 1644 xas_unlock_irq(&xas);
1635 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 1645 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 722d17c88edb..41a0e97252ae 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -325,8 +325,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
325 */ 325 */
326 dio->iocb->ki_pos += transferred; 326 dio->iocb->ki_pos += transferred;
327 327
328 if (dio->op == REQ_OP_WRITE) 328 if (ret > 0 && dio->op == REQ_OP_WRITE)
329 ret = generic_write_sync(dio->iocb, transferred); 329 ret = generic_write_sync(dio->iocb, ret);
330 dio->iocb->ki_complete(dio->iocb, ret, 0); 330 dio->iocb->ki_complete(dio->iocb, ret, 0);
331 } 331 }
332 332
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 645158dc33f1..c69927bed4ef 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
77 struct dentry *parent = dget_parent(dentry); 77 struct dentry *parent = dget_parent(dentry);
78 78
79 dput(dentry); 79 dput(dentry);
80 if (IS_ROOT(dentry)) { 80 if (dentry == parent) {
81 dput(parent); 81 dput(parent);
82 return false; 82 return false;
83 } 83 }
@@ -147,6 +147,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
147 tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf)); 147 tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
148 if (IS_ERR(tmp)) { 148 if (IS_ERR(tmp)) {
149 dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp)); 149 dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
150 err = PTR_ERR(tmp);
150 goto out_err; 151 goto out_err;
151 } 152 }
152 if (tmp != dentry) { 153 if (tmp != dentry) {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index cb91baa4275d..eb11502e3fcd 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -892,6 +892,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
892 if (sb->s_magic != EXT2_SUPER_MAGIC) 892 if (sb->s_magic != EXT2_SUPER_MAGIC)
893 goto cantfind_ext2; 893 goto cantfind_ext2;
894 894
895 opts.s_mount_opt = 0;
895 /* Set defaults before we parse the mount options */ 896 /* Set defaults before we parse the mount options */
896 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 897 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
897 if (def_mount_opts & EXT2_DEFM_DEBUG) 898 if (def_mount_opts & EXT2_DEFM_DEBUG)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 62d9a659a8ff..dd8f10db82e9 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -612,9 +612,9 @@ skip_replace:
612 } 612 }
613 613
614cleanup: 614cleanup:
615 brelse(bh);
616 if (!(bh && header == HDR(bh))) 615 if (!(bh && header == HDR(bh)))
617 kfree(header); 616 kfree(header);
617 brelse(bh);
618 up_write(&EXT2_I(inode)->xattr_sem); 618 up_write(&EXT2_I(inode)->xattr_sem);
619 619
620 return error; 620 return error;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 05f01fbd9c7f..22a9d8159720 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5835,9 +5835,10 @@ int ext4_mark_iloc_dirty(handle_t *handle,
5835{ 5835{
5836 int err = 0; 5836 int err = 0;
5837 5837
5838 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 5838 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
5839 put_bh(iloc->bh);
5839 return -EIO; 5840 return -EIO;
5840 5841 }
5841 if (IS_I_VERSION(inode)) 5842 if (IS_I_VERSION(inode))
5842 inode_inc_iversion(inode); 5843 inode_inc_iversion(inode);
5843 5844
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 17adcb16a9c8..437f71fe83ae 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -126,6 +126,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
126 if (!is_dx_block && type == INDEX) { 126 if (!is_dx_block && type == INDEX) {
127 ext4_error_inode(inode, func, line, block, 127 ext4_error_inode(inode, func, line, block,
128 "directory leaf block found instead of index block"); 128 "directory leaf block found instead of index block");
129 brelse(bh);
129 return ERR_PTR(-EFSCORRUPTED); 130 return ERR_PTR(-EFSCORRUPTED);
130 } 131 }
131 if (!ext4_has_metadata_csum(inode->i_sb) || 132 if (!ext4_has_metadata_csum(inode->i_sb) ||
@@ -2811,7 +2812,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
2811 list_del_init(&EXT4_I(inode)->i_orphan); 2812 list_del_init(&EXT4_I(inode)->i_orphan);
2812 mutex_unlock(&sbi->s_orphan_lock); 2813 mutex_unlock(&sbi->s_orphan_lock);
2813 } 2814 }
2814 } 2815 } else
2816 brelse(iloc.bh);
2817
2815 jbd_debug(4, "superblock will point to %lu\n", inode->i_ino); 2818 jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
2816 jbd_debug(4, "orphan inode %lu will point to %d\n", 2819 jbd_debug(4, "orphan inode %lu will point to %d\n",
2817 inode->i_ino, NEXT_ORPHAN(inode)); 2820 inode->i_ino, NEXT_ORPHAN(inode));
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ebbc663d0798..a5efee34415f 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -459,16 +459,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
459 459
460 BUFFER_TRACE(bh, "get_write_access"); 460 BUFFER_TRACE(bh, "get_write_access");
461 err = ext4_journal_get_write_access(handle, bh); 461 err = ext4_journal_get_write_access(handle, bh);
462 if (err) 462 if (err) {
463 brelse(bh);
463 return err; 464 return err;
465 }
464 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", 466 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
465 first_cluster, first_cluster - start, count2); 467 first_cluster, first_cluster - start, count2);
466 ext4_set_bits(bh->b_data, first_cluster - start, count2); 468 ext4_set_bits(bh->b_data, first_cluster - start, count2);
467 469
468 err = ext4_handle_dirty_metadata(handle, NULL, bh); 470 err = ext4_handle_dirty_metadata(handle, NULL, bh);
471 brelse(bh);
469 if (unlikely(err)) 472 if (unlikely(err))
470 return err; 473 return err;
471 brelse(bh);
472 } 474 }
473 475
474 return 0; 476 return 0;
@@ -605,7 +607,6 @@ handle_bb:
605 bh = bclean(handle, sb, block); 607 bh = bclean(handle, sb, block);
606 if (IS_ERR(bh)) { 608 if (IS_ERR(bh)) {
607 err = PTR_ERR(bh); 609 err = PTR_ERR(bh);
608 bh = NULL;
609 goto out; 610 goto out;
610 } 611 }
611 overhead = ext4_group_overhead_blocks(sb, group); 612 overhead = ext4_group_overhead_blocks(sb, group);
@@ -618,9 +619,9 @@ handle_bb:
618 ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count), 619 ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
619 sb->s_blocksize * 8, bh->b_data); 620 sb->s_blocksize * 8, bh->b_data);
620 err = ext4_handle_dirty_metadata(handle, NULL, bh); 621 err = ext4_handle_dirty_metadata(handle, NULL, bh);
622 brelse(bh);
621 if (err) 623 if (err)
622 goto out; 624 goto out;
623 brelse(bh);
624 625
625handle_ib: 626handle_ib:
626 if (bg_flags[i] & EXT4_BG_INODE_UNINIT) 627 if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
@@ -635,18 +636,16 @@ handle_ib:
635 bh = bclean(handle, sb, block); 636 bh = bclean(handle, sb, block);
636 if (IS_ERR(bh)) { 637 if (IS_ERR(bh)) {
637 err = PTR_ERR(bh); 638 err = PTR_ERR(bh);
638 bh = NULL;
639 goto out; 639 goto out;
640 } 640 }
641 641
642 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), 642 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
643 sb->s_blocksize * 8, bh->b_data); 643 sb->s_blocksize * 8, bh->b_data);
644 err = ext4_handle_dirty_metadata(handle, NULL, bh); 644 err = ext4_handle_dirty_metadata(handle, NULL, bh);
645 brelse(bh);
645 if (err) 646 if (err)
646 goto out; 647 goto out;
647 brelse(bh);
648 } 648 }
649 bh = NULL;
650 649
651 /* Mark group tables in block bitmap */ 650 /* Mark group tables in block bitmap */
652 for (j = 0; j < GROUP_TABLE_COUNT; j++) { 651 for (j = 0; j < GROUP_TABLE_COUNT; j++) {
@@ -685,7 +684,6 @@ handle_ib:
685 } 684 }
686 685
687out: 686out:
688 brelse(bh);
689 err2 = ext4_journal_stop(handle); 687 err2 = ext4_journal_stop(handle);
690 if (err2 && !err) 688 if (err2 && !err)
691 err = err2; 689 err = err2;
@@ -873,6 +871,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
873 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 871 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
874 if (unlikely(err)) { 872 if (unlikely(err)) {
875 ext4_std_error(sb, err); 873 ext4_std_error(sb, err);
874 iloc.bh = NULL;
876 goto exit_inode; 875 goto exit_inode;
877 } 876 }
878 brelse(dind); 877 brelse(dind);
@@ -924,6 +923,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
924 sizeof(struct buffer_head *), 923 sizeof(struct buffer_head *),
925 GFP_NOFS); 924 GFP_NOFS);
926 if (!n_group_desc) { 925 if (!n_group_desc) {
926 brelse(gdb_bh);
927 err = -ENOMEM; 927 err = -ENOMEM;
928 ext4_warning(sb, "not enough memory for %lu groups", 928 ext4_warning(sb, "not enough memory for %lu groups",
929 gdb_num + 1); 929 gdb_num + 1);
@@ -939,8 +939,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
939 kvfree(o_group_desc); 939 kvfree(o_group_desc);
940 BUFFER_TRACE(gdb_bh, "get_write_access"); 940 BUFFER_TRACE(gdb_bh, "get_write_access");
941 err = ext4_journal_get_write_access(handle, gdb_bh); 941 err = ext4_journal_get_write_access(handle, gdb_bh);
942 if (unlikely(err))
943 brelse(gdb_bh);
944 return err; 942 return err;
945} 943}
946 944
@@ -1124,8 +1122,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1124 backup_block, backup_block - 1122 backup_block, backup_block -
1125 ext4_group_first_block_no(sb, group)); 1123 ext4_group_first_block_no(sb, group));
1126 BUFFER_TRACE(bh, "get_write_access"); 1124 BUFFER_TRACE(bh, "get_write_access");
1127 if ((err = ext4_journal_get_write_access(handle, bh))) 1125 if ((err = ext4_journal_get_write_access(handle, bh))) {
1126 brelse(bh);
1128 break; 1127 break;
1128 }
1129 lock_buffer(bh); 1129 lock_buffer(bh);
1130 memcpy(bh->b_data, data, size); 1130 memcpy(bh->b_data, data, size);
1131 if (rest) 1131 if (rest)
@@ -2023,7 +2023,7 @@ retry:
2023 2023
2024 err = ext4_alloc_flex_bg_array(sb, n_group + 1); 2024 err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2025 if (err) 2025 if (err)
2026 return err; 2026 goto out;
2027 2027
2028 err = ext4_mb_alloc_groupinfo(sb, n_group + 1); 2028 err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2029 if (err) 2029 if (err)
@@ -2059,6 +2059,10 @@ retry:
2059 n_blocks_count_retry = 0; 2059 n_blocks_count_retry = 0;
2060 free_flex_gd(flex_gd); 2060 free_flex_gd(flex_gd);
2061 flex_gd = NULL; 2061 flex_gd = NULL;
2062 if (resize_inode) {
2063 iput(resize_inode);
2064 resize_inode = NULL;
2065 }
2062 goto retry; 2066 goto retry;
2063 } 2067 }
2064 2068
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a221f1cdf704..53ff6c2a26ed 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4075,6 +4075,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4075 sbi->s_groups_count = blocks_count; 4075 sbi->s_groups_count = blocks_count;
4076 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 4076 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
4077 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 4077 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
4078 if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
4079 le32_to_cpu(es->s_inodes_count)) {
4080 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
4081 le32_to_cpu(es->s_inodes_count),
4082 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
4083 ret = -EINVAL;
4084 goto failed_mount;
4085 }
4078 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / 4086 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
4079 EXT4_DESC_PER_BLOCK(sb); 4087 EXT4_DESC_PER_BLOCK(sb);
4080 if (ext4_has_feature_meta_bg(sb)) { 4088 if (ext4_has_feature_meta_bg(sb)) {
@@ -4094,14 +4102,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4094 ret = -ENOMEM; 4102 ret = -ENOMEM;
4095 goto failed_mount; 4103 goto failed_mount;
4096 } 4104 }
4097 if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
4098 le32_to_cpu(es->s_inodes_count)) {
4099 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
4100 le32_to_cpu(es->s_inodes_count),
4101 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
4102 ret = -EINVAL;
4103 goto failed_mount;
4104 }
4105 4105
4106 bgl_lock_init(sbi->s_blockgroup_lock); 4106 bgl_lock_init(sbi->s_blockgroup_lock);
4107 4107
@@ -4510,6 +4510,7 @@ failed_mount6:
4510 percpu_counter_destroy(&sbi->s_freeinodes_counter); 4510 percpu_counter_destroy(&sbi->s_freeinodes_counter);
4511 percpu_counter_destroy(&sbi->s_dirs_counter); 4511 percpu_counter_destroy(&sbi->s_dirs_counter);
4512 percpu_counter_destroy(&sbi->s_dirtyclusters_counter); 4512 percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
4513 percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
4513failed_mount5: 4514failed_mount5:
4514 ext4_ext_release(sb); 4515 ext4_ext_release(sb);
4515 ext4_release_system_zone(sb); 4516 ext4_release_system_zone(sb);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index f36fc5d5b257..7643d52c776c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1031,10 +1031,8 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
1031 inode_lock(ea_inode); 1031 inode_lock(ea_inode);
1032 1032
1033 ret = ext4_reserve_inode_write(handle, ea_inode, &iloc); 1033 ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
1034 if (ret) { 1034 if (ret)
1035 iloc.bh = NULL;
1036 goto out; 1035 goto out;
1037 }
1038 1036
1039 ref_count = ext4_xattr_inode_get_ref(ea_inode); 1037 ref_count = ext4_xattr_inode_get_ref(ea_inode);
1040 ref_count += ref_change; 1038 ref_count += ref_change;
@@ -1080,12 +1078,10 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
1080 } 1078 }
1081 1079
1082 ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc); 1080 ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
1083 iloc.bh = NULL;
1084 if (ret) 1081 if (ret)
1085 ext4_warning_inode(ea_inode, 1082 ext4_warning_inode(ea_inode,
1086 "ext4_mark_iloc_dirty() failed ret=%d", ret); 1083 "ext4_mark_iloc_dirty() failed ret=%d", ret);
1087out: 1084out:
1088 brelse(iloc.bh);
1089 inode_unlock(ea_inode); 1085 inode_unlock(ea_inode);
1090 return ret; 1086 return ret;
1091} 1087}
@@ -1388,6 +1384,12 @@ retry:
1388 bh = ext4_getblk(handle, ea_inode, block, 0); 1384 bh = ext4_getblk(handle, ea_inode, block, 0);
1389 if (IS_ERR(bh)) 1385 if (IS_ERR(bh))
1390 return PTR_ERR(bh); 1386 return PTR_ERR(bh);
1387 if (!bh) {
1388 WARN_ON_ONCE(1);
1389 EXT4_ERROR_INODE(ea_inode,
1390 "ext4_getblk() return bh = NULL");
1391 return -EFSCORRUPTED;
1392 }
1391 ret = ext4_journal_get_write_access(handle, bh); 1393 ret = ext4_journal_get_write_access(handle, bh);
1392 if (ret) 1394 if (ret)
1393 goto out; 1395 goto out;
@@ -2276,8 +2278,10 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
2276 if (!bh) 2278 if (!bh)
2277 return ERR_PTR(-EIO); 2279 return ERR_PTR(-EIO);
2278 error = ext4_xattr_check_block(inode, bh); 2280 error = ext4_xattr_check_block(inode, bh);
2279 if (error) 2281 if (error) {
2282 brelse(bh);
2280 return ERR_PTR(error); 2283 return ERR_PTR(error);
2284 }
2281 return bh; 2285 return bh;
2282} 2286}
2283 2287
@@ -2397,6 +2401,8 @@ retry_inode:
2397 error = ext4_xattr_block_set(handle, inode, &i, &bs); 2401 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2398 } else if (error == -ENOSPC) { 2402 } else if (error == -ENOSPC) {
2399 if (EXT4_I(inode)->i_file_acl && !bs.s.base) { 2403 if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
2404 brelse(bs.bh);
2405 bs.bh = NULL;
2400 error = ext4_xattr_block_find(inode, &i, &bs); 2406 error = ext4_xattr_block_find(inode, &i, &bs);
2401 if (error) 2407 if (error)
2402 goto cleanup; 2408 goto cleanup;
@@ -2617,6 +2623,8 @@ out:
2617 kfree(buffer); 2623 kfree(buffer);
2618 if (is) 2624 if (is)
2619 brelse(is->iloc.bh); 2625 brelse(is->iloc.bh);
2626 if (bs)
2627 brelse(bs->bh);
2620 kfree(is); 2628 kfree(is);
2621 kfree(bs); 2629 kfree(bs);
2622 2630
@@ -2696,7 +2704,6 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
2696 struct ext4_inode *raw_inode, handle_t *handle) 2704 struct ext4_inode *raw_inode, handle_t *handle)
2697{ 2705{
2698 struct ext4_xattr_ibody_header *header; 2706 struct ext4_xattr_ibody_header *header;
2699 struct buffer_head *bh;
2700 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2707 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2701 static unsigned int mnt_count; 2708 static unsigned int mnt_count;
2702 size_t min_offs; 2709 size_t min_offs;
@@ -2737,13 +2744,17 @@ retry:
2737 * EA block can hold new_extra_isize bytes. 2744 * EA block can hold new_extra_isize bytes.
2738 */ 2745 */
2739 if (EXT4_I(inode)->i_file_acl) { 2746 if (EXT4_I(inode)->i_file_acl) {
2747 struct buffer_head *bh;
2748
2740 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); 2749 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
2741 error = -EIO; 2750 error = -EIO;
2742 if (!bh) 2751 if (!bh)
2743 goto cleanup; 2752 goto cleanup;
2744 error = ext4_xattr_check_block(inode, bh); 2753 error = ext4_xattr_check_block(inode, bh);
2745 if (error) 2754 if (error) {
2755 brelse(bh);
2746 goto cleanup; 2756 goto cleanup;
2757 }
2747 base = BHDR(bh); 2758 base = BHDR(bh);
2748 end = bh->b_data + bh->b_size; 2759 end = bh->b_data + bh->b_size;
2749 min_offs = end - base; 2760 min_offs = end - base;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9edc920f651f..6d9cb1719de5 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -730,6 +730,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
730 730
731 if (awaken) 731 if (awaken)
732 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); 732 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
733 if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
734 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
735
733 736
734 /* Prevent a race with our last child, which has to signal EV_CLEARED 737 /* Prevent a race with our last child, which has to signal EV_CLEARED
735 * before dropping our spinlock. 738 * before dropping our spinlock.
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ae813e609932..a5e516a40e7a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -165,9 +165,13 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
165 165
166static void fuse_drop_waiting(struct fuse_conn *fc) 166static void fuse_drop_waiting(struct fuse_conn *fc)
167{ 167{
168 if (fc->connected) { 168 /*
169 atomic_dec(&fc->num_waiting); 169 * lockess check of fc->connected is okay, because atomic_dec_and_test()
170 } else if (atomic_dec_and_test(&fc->num_waiting)) { 170 * provides a memory barrier mached with the one in fuse_wait_aborted()
171 * to ensure no wake-up is missed.
172 */
173 if (atomic_dec_and_test(&fc->num_waiting) &&
174 !READ_ONCE(fc->connected)) {
171 /* wake up aborters */ 175 /* wake up aborters */
172 wake_up_all(&fc->blocked_waitq); 176 wake_up_all(&fc->blocked_waitq);
173 } 177 }
@@ -1768,8 +1772,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1768 req->in.args[1].size = total_len; 1772 req->in.args[1].size = total_len;
1769 1773
1770 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique); 1774 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1771 if (err) 1775 if (err) {
1772 fuse_retrieve_end(fc, req); 1776 fuse_retrieve_end(fc, req);
1777 fuse_put_request(fc, req);
1778 }
1773 1779
1774 return err; 1780 return err;
1775} 1781}
@@ -2219,6 +2225,8 @@ EXPORT_SYMBOL_GPL(fuse_abort_conn);
2219 2225
2220void fuse_wait_aborted(struct fuse_conn *fc) 2226void fuse_wait_aborted(struct fuse_conn *fc)
2221{ 2227{
2228 /* matches implicit memory barrier in fuse_drop_waiting() */
2229 smp_mb();
2222 wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); 2230 wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
2223} 2231}
2224 2232
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index cc2121b37bf5..b52f9baaa3e7 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2924,10 +2924,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2924 } 2924 }
2925 2925
2926 if (io->async) { 2926 if (io->async) {
2927 bool blocking = io->blocking;
2928
2927 fuse_aio_complete(io, ret < 0 ? ret : 0, -1); 2929 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2928 2930
2929 /* we have a non-extending, async request, so return */ 2931 /* we have a non-extending, async request, so return */
2930 if (!io->blocking) 2932 if (!blocking)
2931 return -EIOCBQUEUED; 2933 return -EIOCBQUEUED;
2932 2934
2933 wait_for_completion(&wait); 2935 wait_for_completion(&wait);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index a683d9b27d76..9a4a15d646eb 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -826,7 +826,7 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
826 ret = gfs2_meta_inode_buffer(ip, &dibh); 826 ret = gfs2_meta_inode_buffer(ip, &dibh);
827 if (ret) 827 if (ret)
828 goto unlock; 828 goto unlock;
829 iomap->private = dibh; 829 mp->mp_bh[0] = dibh;
830 830
831 if (gfs2_is_stuffed(ip)) { 831 if (gfs2_is_stuffed(ip)) {
832 if (flags & IOMAP_WRITE) { 832 if (flags & IOMAP_WRITE) {
@@ -863,9 +863,6 @@ unstuff:
863 len = lblock_stop - lblock + 1; 863 len = lblock_stop - lblock + 1;
864 iomap->length = len << inode->i_blkbits; 864 iomap->length = len << inode->i_blkbits;
865 865
866 get_bh(dibh);
867 mp->mp_bh[0] = dibh;
868
869 height = ip->i_height; 866 height = ip->i_height;
870 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height]) 867 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
871 height++; 868 height++;
@@ -898,8 +895,6 @@ out:
898 iomap->bdev = inode->i_sb->s_bdev; 895 iomap->bdev = inode->i_sb->s_bdev;
899unlock: 896unlock:
900 up_read(&ip->i_rw_mutex); 897 up_read(&ip->i_rw_mutex);
901 if (ret && dibh)
902 brelse(dibh);
903 return ret; 898 return ret;
904 899
905do_alloc: 900do_alloc:
@@ -980,9 +975,9 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
980 975
981static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, 976static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
982 loff_t length, unsigned flags, 977 loff_t length, unsigned flags,
983 struct iomap *iomap) 978 struct iomap *iomap,
979 struct metapath *mp)
984{ 980{
985 struct metapath mp = { .mp_aheight = 1, };
986 struct gfs2_inode *ip = GFS2_I(inode); 981 struct gfs2_inode *ip = GFS2_I(inode);
987 struct gfs2_sbd *sdp = GFS2_SB(inode); 982 struct gfs2_sbd *sdp = GFS2_SB(inode);
988 unsigned int data_blocks = 0, ind_blocks = 0, rblocks; 983 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
@@ -996,9 +991,9 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
996 unstuff = gfs2_is_stuffed(ip) && 991 unstuff = gfs2_is_stuffed(ip) &&
997 pos + length > gfs2_max_stuffed_size(ip); 992 pos + length > gfs2_max_stuffed_size(ip);
998 993
999 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp); 994 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);
1000 if (ret) 995 if (ret)
1001 goto out_release; 996 goto out_unlock;
1002 997
1003 alloc_required = unstuff || iomap->type == IOMAP_HOLE; 998 alloc_required = unstuff || iomap->type == IOMAP_HOLE;
1004 999
@@ -1013,7 +1008,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1013 1008
1014 ret = gfs2_quota_lock_check(ip, &ap); 1009 ret = gfs2_quota_lock_check(ip, &ap);
1015 if (ret) 1010 if (ret)
1016 goto out_release; 1011 goto out_unlock;
1017 1012
1018 ret = gfs2_inplace_reserve(ip, &ap); 1013 ret = gfs2_inplace_reserve(ip, &ap);
1019 if (ret) 1014 if (ret)
@@ -1038,17 +1033,15 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1038 ret = gfs2_unstuff_dinode(ip, NULL); 1033 ret = gfs2_unstuff_dinode(ip, NULL);
1039 if (ret) 1034 if (ret)
1040 goto out_trans_end; 1035 goto out_trans_end;
1041 release_metapath(&mp); 1036 release_metapath(mp);
1042 brelse(iomap->private);
1043 iomap->private = NULL;
1044 ret = gfs2_iomap_get(inode, iomap->offset, iomap->length, 1037 ret = gfs2_iomap_get(inode, iomap->offset, iomap->length,
1045 flags, iomap, &mp); 1038 flags, iomap, mp);
1046 if (ret) 1039 if (ret)
1047 goto out_trans_end; 1040 goto out_trans_end;
1048 } 1041 }
1049 1042
1050 if (iomap->type == IOMAP_HOLE) { 1043 if (iomap->type == IOMAP_HOLE) {
1051 ret = gfs2_iomap_alloc(inode, iomap, flags, &mp); 1044 ret = gfs2_iomap_alloc(inode, iomap, flags, mp);
1052 if (ret) { 1045 if (ret) {
1053 gfs2_trans_end(sdp); 1046 gfs2_trans_end(sdp);
1054 gfs2_inplace_release(ip); 1047 gfs2_inplace_release(ip);
@@ -1056,7 +1049,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1056 goto out_qunlock; 1049 goto out_qunlock;
1057 } 1050 }
1058 } 1051 }
1059 release_metapath(&mp);
1060 if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip)) 1052 if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip))
1061 iomap->page_done = gfs2_iomap_journaled_page_done; 1053 iomap->page_done = gfs2_iomap_journaled_page_done;
1062 return 0; 1054 return 0;
@@ -1069,10 +1061,7 @@ out_trans_fail:
1069out_qunlock: 1061out_qunlock:
1070 if (alloc_required) 1062 if (alloc_required)
1071 gfs2_quota_unlock(ip); 1063 gfs2_quota_unlock(ip);
1072out_release: 1064out_unlock:
1073 if (iomap->private)
1074 brelse(iomap->private);
1075 release_metapath(&mp);
1076 gfs2_write_unlock(inode); 1065 gfs2_write_unlock(inode);
1077 return ret; 1066 return ret;
1078} 1067}
@@ -1088,10 +1077,10 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1088 1077
1089 trace_gfs2_iomap_start(ip, pos, length, flags); 1078 trace_gfs2_iomap_start(ip, pos, length, flags);
1090 if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) { 1079 if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
1091 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap); 1080 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1092 } else { 1081 } else {
1093 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp); 1082 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1094 release_metapath(&mp); 1083
1095 /* 1084 /*
1096 * Silently fall back to buffered I/O for stuffed files or if 1085 * Silently fall back to buffered I/O for stuffed files or if
1097 * we've hot a hole (see gfs2_file_direct_write). 1086 * we've hot a hole (see gfs2_file_direct_write).
@@ -1100,6 +1089,11 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1100 iomap->type != IOMAP_MAPPED) 1089 iomap->type != IOMAP_MAPPED)
1101 ret = -ENOTBLK; 1090 ret = -ENOTBLK;
1102 } 1091 }
1092 if (!ret) {
1093 get_bh(mp.mp_bh[0]);
1094 iomap->private = mp.mp_bh[0];
1095 }
1096 release_metapath(&mp);
1103 trace_gfs2_iomap_end(ip, iomap, ret); 1097 trace_gfs2_iomap_end(ip, iomap, ret);
1104 return ret; 1098 return ret;
1105} 1099}
@@ -1908,10 +1902,16 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1908 if (ret < 0) 1902 if (ret < 0)
1909 goto out; 1903 goto out;
1910 1904
1911 /* issue read-ahead on metadata */ 1905 /* On the first pass, issue read-ahead on metadata. */
1912 if (mp.mp_aheight > 1) { 1906 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1913 for (; ret > 1; ret--) { 1907 unsigned int height = mp.mp_aheight - 1;
1914 metapointer_range(&mp, mp.mp_aheight - ret, 1908
1909 /* No read-ahead for data blocks. */
1910 if (mp.mp_aheight - 1 == strip_h)
1911 height--;
1912
1913 for (; height >= mp.mp_aheight - ret; height--) {
1914 metapointer_range(&mp, height,
1915 start_list, start_aligned, 1915 start_list, start_aligned,
1916 end_list, end_aligned, 1916 end_list, end_aligned,
1917 &start, &end); 1917 &start, &end);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index ffe3032b1043..b08a530433ad 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -733,6 +733,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
733 733
734 if (gl) { 734 if (gl) {
735 glock_clear_object(gl, rgd); 735 glock_clear_object(gl, rgd);
736 gfs2_rgrp_brelse(rgd);
736 gfs2_glock_put(gl); 737 gfs2_glock_put(gl);
737 } 738 }
738 739
@@ -1174,7 +1175,7 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1174 * @rgd: the struct gfs2_rgrpd describing the RG to read in 1175 * @rgd: the struct gfs2_rgrpd describing the RG to read in
1175 * 1176 *
1176 * Read in all of a Resource Group's header and bitmap blocks. 1177 * Read in all of a Resource Group's header and bitmap blocks.
1177 * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps. 1178 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1178 * 1179 *
1179 * Returns: errno 1180 * Returns: errno
1180 */ 1181 */
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 98b96ffb95ed..19017d296173 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -338,13 +338,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
338 338
339 nidx -= len * 8; 339 nidx -= len * 8;
340 i = node->next; 340 i = node->next;
341 hfs_bnode_put(node);
342 if (!i) { 341 if (!i) {
343 /* panic */; 342 /* panic */;
344 pr_crit("unable to free bnode %u. bmap not found!\n", 343 pr_crit("unable to free bnode %u. bmap not found!\n",
345 node->this); 344 node->this);
345 hfs_bnode_put(node);
346 return; 346 return;
347 } 347 }
348 hfs_bnode_put(node);
348 node = hfs_bnode_find(tree, i); 349 node = hfs_bnode_find(tree, i);
349 if (IS_ERR(node)) 350 if (IS_ERR(node))
350 return; 351 return;
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 236efe51eca6..66774f4cb4fd 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -466,14 +466,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
466 466
467 nidx -= len * 8; 467 nidx -= len * 8;
468 i = node->next; 468 i = node->next;
469 hfs_bnode_put(node);
470 if (!i) { 469 if (!i) {
471 /* panic */; 470 /* panic */;
472 pr_crit("unable to free bnode %u. " 471 pr_crit("unable to free bnode %u. "
473 "bmap not found!\n", 472 "bmap not found!\n",
474 node->this); 473 node->this);
474 hfs_bnode_put(node);
475 return; 475 return;
476 } 476 }
477 hfs_bnode_put(node);
477 node = hfs_bnode_find(tree, i); 478 node = hfs_bnode_find(tree, i);
478 if (IS_ERR(node)) 479 if (IS_ERR(node))
479 return; 480 return;
diff --git a/fs/inode.c b/fs/inode.c
index 9e198f00b64c..35d2108d567c 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -730,8 +730,11 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
730 return LRU_REMOVED; 730 return LRU_REMOVED;
731 } 731 }
732 732
733 /* recently referenced inodes get one more pass */ 733 /*
734 if (inode->i_state & I_REFERENCED) { 734 * Recently referenced inodes and inodes with many attached pages
735 * get one more pass.
736 */
737 if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
735 inode->i_state &= ~I_REFERENCED; 738 inode->i_state &= ~I_REFERENCED;
736 spin_unlock(&inode->i_lock); 739 spin_unlock(&inode->i_lock);
737 return LRU_ROTATE; 740 return LRU_ROTATE;
diff --git a/fs/iomap.c b/fs/iomap.c
index 64ce240217a1..3ffb776fbebe 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -142,13 +142,14 @@ static void
142iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, 142iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
143 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) 143 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
144{ 144{
145 loff_t orig_pos = *pos;
146 loff_t isize = i_size_read(inode);
145 unsigned block_bits = inode->i_blkbits; 147 unsigned block_bits = inode->i_blkbits;
146 unsigned block_size = (1 << block_bits); 148 unsigned block_size = (1 << block_bits);
147 unsigned poff = offset_in_page(*pos); 149 unsigned poff = offset_in_page(*pos);
148 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); 150 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
149 unsigned first = poff >> block_bits; 151 unsigned first = poff >> block_bits;
150 unsigned last = (poff + plen - 1) >> block_bits; 152 unsigned last = (poff + plen - 1) >> block_bits;
151 unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;
152 153
153 /* 154 /*
154 * If the block size is smaller than the page size we need to check the 155 * If the block size is smaller than the page size we need to check the
@@ -183,8 +184,12 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
183 * handle both halves separately so that we properly zero data in the 184 * handle both halves separately so that we properly zero data in the
184 * page cache for blocks that are entirely outside of i_size. 185 * page cache for blocks that are entirely outside of i_size.
185 */ 186 */
186 if (first <= end && last > end) 187 if (orig_pos <= isize && orig_pos + length > isize) {
187 plen -= (last - end) * block_size; 188 unsigned end = offset_in_page(isize - 1) >> block_bits;
189
190 if (first <= end && last > end)
191 plen -= (last - end) * block_size;
192 }
188 193
189 *offp = poff; 194 *offp = poff;
190 *lenp = plen; 195 *lenp = plen;
@@ -1580,7 +1585,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1580 struct bio *bio; 1585 struct bio *bio;
1581 bool need_zeroout = false; 1586 bool need_zeroout = false;
1582 bool use_fua = false; 1587 bool use_fua = false;
1583 int nr_pages, ret; 1588 int nr_pages, ret = 0;
1584 size_t copied = 0; 1589 size_t copied = 0;
1585 1590
1586 if ((pos | length | align) & ((1 << blkbits) - 1)) 1591 if ((pos | length | align) & ((1 << blkbits) - 1))
@@ -1596,12 +1601,13 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1596 1601
1597 if (iomap->flags & IOMAP_F_NEW) { 1602 if (iomap->flags & IOMAP_F_NEW) {
1598 need_zeroout = true; 1603 need_zeroout = true;
1599 } else { 1604 } else if (iomap->type == IOMAP_MAPPED) {
1600 /* 1605 /*
1601 * Use a FUA write if we need datasync semantics, this 1606 * Use a FUA write if we need datasync semantics, this is a pure
1602 * is a pure data IO that doesn't require any metadata 1607 * data IO that doesn't require any metadata updates (including
1603 * updates and the underlying device supports FUA. This 1608 * after IO completion such as unwritten extent conversion) and
1604 * allows us to avoid cache flushes on IO completion. 1609 * the underlying device supports FUA. This allows us to avoid
1610 * cache flushes on IO completion.
1605 */ 1611 */
1606 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) && 1612 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1607 (dio->flags & IOMAP_DIO_WRITE_FUA) && 1613 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
@@ -1644,8 +1650,14 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1644 1650
1645 ret = bio_iov_iter_get_pages(bio, &iter); 1651 ret = bio_iov_iter_get_pages(bio, &iter);
1646 if (unlikely(ret)) { 1652 if (unlikely(ret)) {
1653 /*
1654 * We have to stop part way through an IO. We must fall
1655 * through to the sub-block tail zeroing here, otherwise
1656 * this short IO may expose stale data in the tail of
1657 * the block we haven't written data to.
1658 */
1647 bio_put(bio); 1659 bio_put(bio);
1648 return copied ? copied : ret; 1660 goto zero_tail;
1649 } 1661 }
1650 1662
1651 n = bio->bi_iter.bi_size; 1663 n = bio->bi_iter.bi_size;
@@ -1676,13 +1688,21 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1676 dio->submit.cookie = submit_bio(bio); 1688 dio->submit.cookie = submit_bio(bio);
1677 } while (nr_pages); 1689 } while (nr_pages);
1678 1690
1679 if (need_zeroout) { 1691 /*
1692 * We need to zeroout the tail of a sub-block write if the extent type
1693 * requires zeroing or the write extends beyond EOF. If we don't zero
1694 * the block tail in the latter case, we can expose stale data via mmap
1695 * reads of the EOF block.
1696 */
1697zero_tail:
1698 if (need_zeroout ||
1699 ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
1680 /* zero out from the end of the write to the end of the block */ 1700 /* zero out from the end of the write to the end of the block */
1681 pad = pos & (fs_block_size - 1); 1701 pad = pos & (fs_block_size - 1);
1682 if (pad) 1702 if (pad)
1683 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); 1703 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1684 } 1704 }
1685 return copied; 1705 return copied ? copied : ret;
1686} 1706}
1687 1707
1688static loff_t 1708static loff_t
@@ -1857,6 +1877,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1857 dio->wait_for_completion = true; 1877 dio->wait_for_completion = true;
1858 ret = 0; 1878 ret = 0;
1859 } 1879 }
1880
1881 /*
1882 * Splicing to pipes can fail on a full pipe. We have to
1883 * swallow this to make it look like a short IO
1884 * otherwise the higher splice layers will completely
1885 * mishandle the error and stop moving data.
1886 */
1887 if (ret == -EFAULT)
1888 ret = 0;
1860 break; 1889 break;
1861 } 1890 }
1862 pos += ret; 1891 pos += ret;
diff --git a/fs/namespace.c b/fs/namespace.c
index 98d27da43304..a7f91265ea67 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -695,9 +695,6 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
695 695
696 hlist_for_each_entry(mp, chain, m_hash) { 696 hlist_for_each_entry(mp, chain, m_hash) {
697 if (mp->m_dentry == dentry) { 697 if (mp->m_dentry == dentry) {
698 /* might be worth a WARN_ON() */
699 if (d_unlinked(dentry))
700 return ERR_PTR(-ENOENT);
701 mp->m_count++; 698 mp->m_count++;
702 return mp; 699 return mp;
703 } 700 }
@@ -711,6 +708,9 @@ static struct mountpoint *get_mountpoint(struct dentry *dentry)
711 int ret; 708 int ret;
712 709
713 if (d_mountpoint(dentry)) { 710 if (d_mountpoint(dentry)) {
711 /* might be worth a WARN_ON() */
712 if (d_unlinked(dentry))
713 return ERR_PTR(-ENOENT);
714mountpoint: 714mountpoint:
715 read_seqlock_excl(&mount_lock); 715 read_seqlock_excl(&mount_lock);
716 mp = lookup_mountpoint(dentry); 716 mp = lookup_mountpoint(dentry);
@@ -1540,8 +1540,13 @@ static int do_umount(struct mount *mnt, int flags)
1540 1540
1541 namespace_lock(); 1541 namespace_lock();
1542 lock_mount_hash(); 1542 lock_mount_hash();
1543 event++;
1544 1543
1544 /* Recheck MNT_LOCKED with the locks held */
1545 retval = -EINVAL;
1546 if (mnt->mnt.mnt_flags & MNT_LOCKED)
1547 goto out;
1548
1549 event++;
1545 if (flags & MNT_DETACH) { 1550 if (flags & MNT_DETACH) {
1546 if (!list_empty(&mnt->mnt_list)) 1551 if (!list_empty(&mnt->mnt_list))
1547 umount_tree(mnt, UMOUNT_PROPAGATE); 1552 umount_tree(mnt, UMOUNT_PROPAGATE);
@@ -1555,6 +1560,7 @@ static int do_umount(struct mount *mnt, int flags)
1555 retval = 0; 1560 retval = 0;
1556 } 1561 }
1557 } 1562 }
1563out:
1558 unlock_mount_hash(); 1564 unlock_mount_hash();
1559 namespace_unlock(); 1565 namespace_unlock();
1560 return retval; 1566 return retval;
@@ -1645,7 +1651,7 @@ int ksys_umount(char __user *name, int flags)
1645 goto dput_and_out; 1651 goto dput_and_out;
1646 if (!check_mnt(mnt)) 1652 if (!check_mnt(mnt))
1647 goto dput_and_out; 1653 goto dput_and_out;
1648 if (mnt->mnt.mnt_flags & MNT_LOCKED) 1654 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
1649 goto dput_and_out; 1655 goto dput_and_out;
1650 retval = -EPERM; 1656 retval = -EPERM;
1651 if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) 1657 if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
@@ -1728,8 +1734,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1728 for (s = r; s; s = next_mnt(s, r)) { 1734 for (s = r; s; s = next_mnt(s, r)) {
1729 if (!(flag & CL_COPY_UNBINDABLE) && 1735 if (!(flag & CL_COPY_UNBINDABLE) &&
1730 IS_MNT_UNBINDABLE(s)) { 1736 IS_MNT_UNBINDABLE(s)) {
1731 s = skip_mnt_tree(s); 1737 if (s->mnt.mnt_flags & MNT_LOCKED) {
1732 continue; 1738 /* Both unbindable and locked. */
1739 q = ERR_PTR(-EPERM);
1740 goto out;
1741 } else {
1742 s = skip_mnt_tree(s);
1743 continue;
1744 }
1733 } 1745 }
1734 if (!(flag & CL_COPY_MNT_NS_FILE) && 1746 if (!(flag & CL_COPY_MNT_NS_FILE) &&
1735 is_mnt_ns_file(s->mnt.mnt_root)) { 1747 is_mnt_ns_file(s->mnt.mnt_root)) {
@@ -1782,7 +1794,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
1782{ 1794{
1783 namespace_lock(); 1795 namespace_lock();
1784 lock_mount_hash(); 1796 lock_mount_hash();
1785 umount_tree(real_mount(mnt), UMOUNT_SYNC); 1797 umount_tree(real_mount(mnt), 0);
1786 unlock_mount_hash(); 1798 unlock_mount_hash();
1787 namespace_unlock(); 1799 namespace_unlock();
1788} 1800}
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index fa515d5ea5ba..315967354954 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -66,7 +66,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
66out_iput: 66out_iput:
67 rcu_read_unlock(); 67 rcu_read_unlock();
68 trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status)); 68 trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
69 iput(inode); 69 nfs_iput_and_deactive(inode);
70out: 70out:
71 dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status)); 71 dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
72 return res->status; 72 return res->status;
@@ -108,7 +108,7 @@ __be32 nfs4_callback_recall(void *argp, void *resp,
108 } 108 }
109 trace_nfs4_cb_recall(cps->clp, &args->fh, inode, 109 trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
110 &args->stateid, -ntohl(res)); 110 &args->stateid, -ntohl(res));
111 iput(inode); 111 nfs_iput_and_deactive(inode);
112out: 112out:
113 dprintk("%s: exit with status = %d\n", __func__, ntohl(res)); 113 dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
114 return res; 114 return res;
@@ -686,20 +686,24 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
686{ 686{
687 struct cb_offloadargs *args = data; 687 struct cb_offloadargs *args = data;
688 struct nfs_server *server; 688 struct nfs_server *server;
689 struct nfs4_copy_state *copy; 689 struct nfs4_copy_state *copy, *tmp_copy;
690 bool found = false; 690 bool found = false;
691 691
692 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
693 if (!copy)
694 return htonl(NFS4ERR_SERVERFAULT);
695
692 spin_lock(&cps->clp->cl_lock); 696 spin_lock(&cps->clp->cl_lock);
693 rcu_read_lock(); 697 rcu_read_lock();
694 list_for_each_entry_rcu(server, &cps->clp->cl_superblocks, 698 list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
695 client_link) { 699 client_link) {
696 list_for_each_entry(copy, &server->ss_copies, copies) { 700 list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
697 if (memcmp(args->coa_stateid.other, 701 if (memcmp(args->coa_stateid.other,
698 copy->stateid.other, 702 tmp_copy->stateid.other,
699 sizeof(args->coa_stateid.other))) 703 sizeof(args->coa_stateid.other)))
700 continue; 704 continue;
701 nfs4_copy_cb_args(copy, args); 705 nfs4_copy_cb_args(tmp_copy, args);
702 complete(&copy->completion); 706 complete(&tmp_copy->completion);
703 found = true; 707 found = true;
704 goto out; 708 goto out;
705 } 709 }
@@ -707,15 +711,11 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
707out: 711out:
708 rcu_read_unlock(); 712 rcu_read_unlock();
709 if (!found) { 713 if (!found) {
710 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
711 if (!copy) {
712 spin_unlock(&cps->clp->cl_lock);
713 return htonl(NFS4ERR_SERVERFAULT);
714 }
715 memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE); 714 memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
716 nfs4_copy_cb_args(copy, args); 715 nfs4_copy_cb_args(copy, args);
717 list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids); 716 list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
718 } 717 } else
718 kfree(copy);
719 spin_unlock(&cps->clp->cl_lock); 719 spin_unlock(&cps->clp->cl_lock);
720 720
721 return 0; 721 return 0;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 07b839560576..6ec2f78c1e19 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -850,16 +850,23 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
850 const struct nfs_fh *fhandle) 850 const struct nfs_fh *fhandle)
851{ 851{
852 struct nfs_delegation *delegation; 852 struct nfs_delegation *delegation;
853 struct inode *res = NULL; 853 struct inode *freeme, *res = NULL;
854 854
855 list_for_each_entry_rcu(delegation, &server->delegations, super_list) { 855 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
856 spin_lock(&delegation->lock); 856 spin_lock(&delegation->lock);
857 if (delegation->inode != NULL && 857 if (delegation->inode != NULL &&
858 nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { 858 nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
859 res = igrab(delegation->inode); 859 freeme = igrab(delegation->inode);
860 if (freeme && nfs_sb_active(freeme->i_sb))
861 res = freeme;
860 spin_unlock(&delegation->lock); 862 spin_unlock(&delegation->lock);
861 if (res != NULL) 863 if (res != NULL)
862 return res; 864 return res;
865 if (freeme) {
866 rcu_read_unlock();
867 iput(freeme);
868 rcu_read_lock();
869 }
863 return ERR_PTR(-EAGAIN); 870 return ERR_PTR(-EAGAIN);
864 } 871 }
865 spin_unlock(&delegation->lock); 872 spin_unlock(&delegation->lock);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 86bcba40ca61..74b36ed883ca 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1361,12 +1361,7 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1361 task)) 1361 task))
1362 return; 1362 return;
1363 1363
1364 if (ff_layout_read_prepare_common(task, hdr)) 1364 ff_layout_read_prepare_common(task, hdr);
1365 return;
1366
1367 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1368 hdr->args.lock_context, FMODE_READ) == -EIO)
1369 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1370} 1365}
1371 1366
1372static void ff_layout_read_call_done(struct rpc_task *task, void *data) 1367static void ff_layout_read_call_done(struct rpc_task *task, void *data)
@@ -1542,12 +1537,7 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1542 task)) 1537 task))
1543 return; 1538 return;
1544 1539
1545 if (ff_layout_write_prepare_common(task, hdr)) 1540 ff_layout_write_prepare_common(task, hdr);
1546 return;
1547
1548 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1549 hdr->args.lock_context, FMODE_WRITE) == -EIO)
1550 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1551} 1541}
1552 1542
1553static void ff_layout_write_call_done(struct rpc_task *task, void *data) 1543static void ff_layout_write_call_done(struct rpc_task *task, void *data)
@@ -1742,6 +1732,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1742 fh = nfs4_ff_layout_select_ds_fh(lseg, idx); 1732 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1743 if (fh) 1733 if (fh)
1744 hdr->args.fh = fh; 1734 hdr->args.fh = fh;
1735
1736 if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
1737 goto out_failed;
1738
1745 /* 1739 /*
1746 * Note that if we ever decide to split across DSes, 1740 * Note that if we ever decide to split across DSes,
1747 * then we may need to handle dense-like offsets. 1741 * then we may need to handle dense-like offsets.
@@ -1804,6 +1798,9 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1804 if (fh) 1798 if (fh)
1805 hdr->args.fh = fh; 1799 hdr->args.fh = fh;
1806 1800
1801 if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
1802 goto out_failed;
1803
1807 /* 1804 /*
1808 * Note that if we ever decide to split across DSes, 1805 * Note that if we ever decide to split across DSes,
1809 * then we may need to handle dense-like offsets. 1806 * then we may need to handle dense-like offsets.
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index 411798346e48..de50a342d5a5 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -215,6 +215,10 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
215 unsigned int maxnum); 215 unsigned int maxnum);
216struct nfs_fh * 216struct nfs_fh *
217nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx); 217nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
218int
219nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
220 u32 mirror_idx,
221 nfs4_stateid *stateid);
218 222
219struct nfs4_pnfs_ds * 223struct nfs4_pnfs_ds *
220nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, 224nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 74d8d5352438..d23347389626 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -370,6 +370,25 @@ out:
370 return fh; 370 return fh;
371} 371}
372 372
373int
374nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
375 u32 mirror_idx,
376 nfs4_stateid *stateid)
377{
378 struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
379
380 if (!ff_layout_mirror_valid(lseg, mirror, false)) {
381 pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
382 __func__, mirror_idx);
383 goto out;
384 }
385
386 nfs4_stateid_copy(stateid, &mirror->stateid);
387 return 1;
388out:
389 return 0;
390}
391
373/** 392/**
374 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call 393 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
375 * @lseg: the layout segment we're operating on 394 * @lseg: the layout segment we're operating on
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index ac5b784a1de0..fed06fd9998d 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -137,31 +137,32 @@ static int handle_async_copy(struct nfs42_copy_res *res,
137 struct file *dst, 137 struct file *dst,
138 nfs4_stateid *src_stateid) 138 nfs4_stateid *src_stateid)
139{ 139{
140 struct nfs4_copy_state *copy; 140 struct nfs4_copy_state *copy, *tmp_copy;
141 int status = NFS4_OK; 141 int status = NFS4_OK;
142 bool found_pending = false; 142 bool found_pending = false;
143 struct nfs_open_context *ctx = nfs_file_open_context(dst); 143 struct nfs_open_context *ctx = nfs_file_open_context(dst);
144 144
145 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
146 if (!copy)
147 return -ENOMEM;
148
145 spin_lock(&server->nfs_client->cl_lock); 149 spin_lock(&server->nfs_client->cl_lock);
146 list_for_each_entry(copy, &server->nfs_client->pending_cb_stateids, 150 list_for_each_entry(tmp_copy, &server->nfs_client->pending_cb_stateids,
147 copies) { 151 copies) {
148 if (memcmp(&res->write_res.stateid, &copy->stateid, 152 if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
149 NFS4_STATEID_SIZE)) 153 NFS4_STATEID_SIZE))
150 continue; 154 continue;
151 found_pending = true; 155 found_pending = true;
152 list_del(&copy->copies); 156 list_del(&tmp_copy->copies);
153 break; 157 break;
154 } 158 }
155 if (found_pending) { 159 if (found_pending) {
156 spin_unlock(&server->nfs_client->cl_lock); 160 spin_unlock(&server->nfs_client->cl_lock);
161 kfree(copy);
162 copy = tmp_copy;
157 goto out; 163 goto out;
158 } 164 }
159 165
160 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
161 if (!copy) {
162 spin_unlock(&server->nfs_client->cl_lock);
163 return -ENOMEM;
164 }
165 memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE); 166 memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
166 init_completion(&copy->completion); 167 init_completion(&copy->completion);
167 copy->parent_state = ctx->state; 168 copy->parent_state = ctx->state;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8d59c9655ec4..1b994b527518 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -41,6 +41,8 @@ enum nfs4_client_state {
41 NFS4CLNT_MOVED, 41 NFS4CLNT_MOVED,
42 NFS4CLNT_LEASE_MOVED, 42 NFS4CLNT_LEASE_MOVED,
43 NFS4CLNT_DELEGATION_EXPIRED, 43 NFS4CLNT_DELEGATION_EXPIRED,
44 NFS4CLNT_RUN_MANAGER,
45 NFS4CLNT_DELEGRETURN_RUNNING,
44}; 46};
45 47
46#define NFS4_RENEW_TIMEOUT 0x01 48#define NFS4_RENEW_TIMEOUT 0x01
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 62ae0fd345ad..d8decf2ec48f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1210,6 +1210,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
1210 struct task_struct *task; 1210 struct task_struct *task;
1211 char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; 1211 char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
1212 1212
1213 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
1213 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) 1214 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
1214 return; 1215 return;
1215 __module_get(THIS_MODULE); 1216 __module_get(THIS_MODULE);
@@ -2503,6 +2504,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
2503 2504
2504 /* Ensure exclusive access to NFSv4 state */ 2505 /* Ensure exclusive access to NFSv4 state */
2505 do { 2506 do {
2507 clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
2506 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 2508 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
2507 section = "purge state"; 2509 section = "purge state";
2508 status = nfs4_purge_lease(clp); 2510 status = nfs4_purge_lease(clp);
@@ -2593,19 +2595,24 @@ static void nfs4_state_manager(struct nfs_client *clp)
2593 } 2595 }
2594 2596
2595 nfs4_end_drain_session(clp); 2597 nfs4_end_drain_session(clp);
2596 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) { 2598 nfs4_clear_state_manager_bit(clp);
2597 nfs_client_return_marked_delegations(clp); 2599
2598 continue; 2600 if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
2601 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
2602 nfs_client_return_marked_delegations(clp);
2603 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
2604 }
2605 clear_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state);
2599 } 2606 }
2600 2607
2601 nfs4_clear_state_manager_bit(clp);
2602 /* Did we race with an attempt to give us more work? */ 2608 /* Did we race with an attempt to give us more work? */
2603 if (clp->cl_state == 0) 2609 if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
2604 break; 2610 return;
2605 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) 2611 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
2606 break; 2612 return;
2607 } while (refcount_read(&clp->cl_count) > 1); 2613 } while (refcount_read(&clp->cl_count) > 1 && !signalled());
2608 return; 2614 goto out_drain;
2615
2609out_error: 2616out_error:
2610 if (strlen(section)) 2617 if (strlen(section))
2611 section_sep = ": "; 2618 section_sep = ": ";
@@ -2613,6 +2620,7 @@ out_error:
2613 " with error %d\n", section_sep, section, 2620 " with error %d\n", section_sep, section,
2614 clp->cl_hostname, -status); 2621 clp->cl_hostname, -status);
2615 ssleep(1); 2622 ssleep(1);
2623out_drain:
2616 nfs4_end_drain_session(clp); 2624 nfs4_end_drain_session(clp);
2617 nfs4_clear_state_manager_bit(clp); 2625 nfs4_clear_state_manager_bit(clp);
2618} 2626}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index edff074d38c7..d505990dac7c 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1038,6 +1038,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1038{ 1038{
1039 __be32 status; 1039 __be32 status;
1040 1040
1041 if (!cstate->save_fh.fh_dentry)
1042 return nfserr_nofilehandle;
1043
1041 status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh, 1044 status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
1042 src_stateid, RD_STATE, src, NULL); 1045 src_stateid, RD_STATE, src, NULL);
1043 if (status) { 1046 if (status) {
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index de99db518571..f2129a5d9f23 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -266,9 +266,7 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
266 return; 266 return;
267 267
268 if (nbh == NULL) { /* blocksize == pagesize */ 268 if (nbh == NULL) { /* blocksize == pagesize */
269 xa_lock_irq(&btnc->i_pages); 269 xa_erase_irq(&btnc->i_pages, newkey);
270 __xa_erase(&btnc->i_pages, newkey);
271 xa_unlock_irq(&btnc->i_pages);
272 unlock_page(ctxt->bh->b_page); 270 unlock_page(ctxt->bh->b_page);
273 } else 271 } else
274 brelse(nbh); 272 brelse(nbh);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 5769cf3ff035..e08a6647267b 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -115,12 +115,12 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
115 continue; 115 continue;
116 mark = iter_info->marks[type]; 116 mark = iter_info->marks[type];
117 /* 117 /*
118 * if the event is for a child and this inode doesn't care about 118 * If the event is for a child and this mark doesn't care about
119 * events on the child, don't send it! 119 * events on a child, don't send it!
120 */ 120 */
121 if (type == FSNOTIFY_OBJ_TYPE_INODE && 121 if (event_mask & FS_EVENT_ON_CHILD &&
122 (event_mask & FS_EVENT_ON_CHILD) && 122 (type != FSNOTIFY_OBJ_TYPE_INODE ||
123 !(mark->mask & FS_EVENT_ON_CHILD)) 123 !(mark->mask & FS_EVENT_ON_CHILD)))
124 continue; 124 continue;
125 125
126 marks_mask |= mark->mask; 126 marks_mask |= mark->mask;
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 2172ba516c61..d2c34900ae05 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -167,9 +167,9 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
167 parent = dget_parent(dentry); 167 parent = dget_parent(dentry);
168 p_inode = parent->d_inode; 168 p_inode = parent->d_inode;
169 169
170 if (unlikely(!fsnotify_inode_watches_children(p_inode))) 170 if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
171 __fsnotify_update_child_dentry_flags(p_inode); 171 __fsnotify_update_child_dentry_flags(p_inode);
172 else if (p_inode->i_fsnotify_mask & mask) { 172 } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) {
173 struct name_snapshot name; 173 struct name_snapshot name;
174 174
175 /* we are notifying a parent so come up with the new mask which 175 /* we are notifying a parent so come up with the new mask which
@@ -339,6 +339,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
339 sb = mnt->mnt.mnt_sb; 339 sb = mnt->mnt.mnt_sb;
340 mnt_or_sb_mask = mnt->mnt_fsnotify_mask | sb->s_fsnotify_mask; 340 mnt_or_sb_mask = mnt->mnt_fsnotify_mask | sb->s_fsnotify_mask;
341 } 341 }
342 /* An event "on child" is not intended for a mount/sb mark */
343 if (mask & FS_EVENT_ON_CHILD)
344 mnt_or_sb_mask = 0;
342 345
343 /* 346 /*
344 * Optimization: srcu_read_lock() has a memory barrier which can 347 * Optimization: srcu_read_lock() has a memory barrier which can
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index da578ad4c08f..eb1ce30412dc 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2411,8 +2411,16 @@ static int ocfs2_dio_end_io(struct kiocb *iocb,
2411 /* this io's submitter should not have unlocked this before we could */ 2411 /* this io's submitter should not have unlocked this before we could */
2412 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); 2412 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
2413 2413
2414 if (bytes > 0 && private) 2414 if (bytes <= 0)
2415 ret = ocfs2_dio_end_io_write(inode, private, offset, bytes); 2415 mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
2416 (long long)bytes);
2417 if (private) {
2418 if (bytes > 0)
2419 ret = ocfs2_dio_end_io_write(inode, private, offset,
2420 bytes);
2421 else
2422 ocfs2_dio_free_write_ctx(inode, private);
2423 }
2416 2424
2417 ocfs2_iocb_clear_rw_locked(iocb); 2425 ocfs2_iocb_clear_rw_locked(iocb);
2418 2426
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 308ea0eb35fd..a396096a5099 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -178,6 +178,15 @@ do { \
178 ##__VA_ARGS__); \ 178 ##__VA_ARGS__); \
179} while (0) 179} while (0)
180 180
181#define mlog_ratelimited(mask, fmt, ...) \
182do { \
183 static DEFINE_RATELIMIT_STATE(_rs, \
184 DEFAULT_RATELIMIT_INTERVAL, \
185 DEFAULT_RATELIMIT_BURST); \
186 if (__ratelimit(&_rs)) \
187 mlog(mask, fmt, ##__VA_ARGS__); \
188} while (0)
189
181#define mlog_errno(st) ({ \ 190#define mlog_errno(st) ({ \
182 int _st = (st); \ 191 int _st = (st); \
183 if (_st != -ERESTARTSYS && _st != -EINTR && \ 192 if (_st != -ERESTARTSYS && _st != -EINTR && \
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 9f88188060db..4bf8d5854b27 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -125,10 +125,10 @@ check_err:
125 125
126check_gen: 126check_gen:
127 if (handle->ih_generation != inode->i_generation) { 127 if (handle->ih_generation != inode->i_generation) {
128 iput(inode);
129 trace_ocfs2_get_dentry_generation((unsigned long long)blkno, 128 trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
130 handle->ih_generation, 129 handle->ih_generation,
131 inode->i_generation); 130 inode->i_generation);
131 iput(inode);
132 result = ERR_PTR(-ESTALE); 132 result = ERR_PTR(-ESTALE);
133 goto bail; 133 goto bail;
134 } 134 }
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 3f1685d7d43b..1565dd8e8856 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -157,18 +157,14 @@ out:
157} 157}
158 158
159/* 159/*
160 * lock allocators, and reserving appropriate number of bits for 160 * lock allocator, and reserve appropriate number of bits for
161 * meta blocks and data clusters. 161 * meta blocks.
162 *
163 * in some cases, we don't need to reserve clusters, just let data_ac
164 * be NULL.
165 */ 162 */
166static int ocfs2_lock_allocators_move_extents(struct inode *inode, 163static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
167 struct ocfs2_extent_tree *et, 164 struct ocfs2_extent_tree *et,
168 u32 clusters_to_move, 165 u32 clusters_to_move,
169 u32 extents_to_split, 166 u32 extents_to_split,
170 struct ocfs2_alloc_context **meta_ac, 167 struct ocfs2_alloc_context **meta_ac,
171 struct ocfs2_alloc_context **data_ac,
172 int extra_blocks, 168 int extra_blocks,
173 int *credits) 169 int *credits)
174{ 170{
@@ -193,13 +189,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
193 goto out; 189 goto out;
194 } 190 }
195 191
196 if (data_ac) {
197 ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
198 if (ret) {
199 mlog_errno(ret);
200 goto out;
201 }
202 }
203 192
204 *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el); 193 *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
205 194
@@ -259,10 +248,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
259 } 248 }
260 } 249 }
261 250
262 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1, 251 ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
263 &context->meta_ac, 252 *len, 1,
264 &context->data_ac, 253 &context->meta_ac,
265 extra_blocks, &credits); 254 extra_blocks, &credits);
266 if (ret) { 255 if (ret) {
267 mlog_errno(ret); 256 mlog_errno(ret);
268 goto out; 257 goto out;
@@ -285,6 +274,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
285 } 274 }
286 } 275 }
287 276
277 /*
278 * Make sure ocfs2_reserve_cluster is called after
279 * __ocfs2_flush_truncate_log, otherwise, dead lock may happen.
280 *
281 * If ocfs2_reserve_cluster is called
282 * before __ocfs2_flush_truncate_log, dead lock on global bitmap
283 * may happen.
284 *
285 */
286 ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
287 if (ret) {
288 mlog_errno(ret);
289 goto out_unlock_mutex;
290 }
291
288 handle = ocfs2_start_trans(osb, credits); 292 handle = ocfs2_start_trans(osb, credits);
289 if (IS_ERR(handle)) { 293 if (IS_ERR(handle)) {
290 ret = PTR_ERR(handle); 294 ret = PTR_ERR(handle);
@@ -617,9 +621,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
617 } 621 }
618 } 622 }
619 623
620 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1, 624 ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
621 &context->meta_ac, 625 len, 1,
622 NULL, extra_blocks, &credits); 626 &context->meta_ac,
627 extra_blocks, &credits);
623 if (ret) { 628 if (ret) {
624 mlog_errno(ret); 629 mlog_errno(ret);
625 goto out; 630 goto out;
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index ffcff6516e89..e02a9039b5ea 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -816,17 +816,14 @@ static int ramoops_probe(struct platform_device *pdev)
816 816
817 cxt->pstore.data = cxt; 817 cxt->pstore.data = cxt;
818 /* 818 /*
819 * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we 819 * Since bufsize is only used for dmesg crash dumps, it
820 * have to handle dumps, we must have at least record_size buffer. And 820 * must match the size of the dprz record (after PRZ header
821 * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be 821 * and ECC bytes have been accounted for).
822 * ZERO_SIZE_PTR).
823 */ 822 */
824 if (cxt->console_size) 823 cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
825 cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */ 824 cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
826 cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
827 cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
828 if (!cxt->pstore.buf) { 825 if (!cxt->pstore.buf) {
829 pr_err("cannot allocate pstore buffer\n"); 826 pr_err("cannot allocate pstore crash dump buffer\n");
830 err = -ENOMEM; 827 err = -ENOMEM;
831 goto fail_clear; 828 goto fail_clear;
832 } 829 }
diff --git a/fs/read_write.c b/fs/read_write.c
index bfcb4ced5664..4dae0399c75a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -2094,17 +2094,18 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
2094 off = same->src_offset; 2094 off = same->src_offset;
2095 len = same->src_length; 2095 len = same->src_length;
2096 2096
2097 ret = -EISDIR;
2098 if (S_ISDIR(src->i_mode)) 2097 if (S_ISDIR(src->i_mode))
2099 goto out; 2098 return -EISDIR;
2100 2099
2101 ret = -EINVAL;
2102 if (!S_ISREG(src->i_mode)) 2100 if (!S_ISREG(src->i_mode))
2103 goto out; 2101 return -EINVAL;
2102
2103 if (!file->f_op->remap_file_range)
2104 return -EOPNOTSUPP;
2104 2105
2105 ret = remap_verify_area(file, off, len, false); 2106 ret = remap_verify_area(file, off, len, false);
2106 if (ret < 0) 2107 if (ret < 0)
2107 goto out; 2108 return ret;
2108 ret = 0; 2109 ret = 0;
2109 2110
2110 if (off + len > i_size_read(src)) 2111 if (off + len > i_size_read(src))
@@ -2147,10 +2148,8 @@ next_fdput:
2147 fdput(dst_fd); 2148 fdput(dst_fd);
2148next_loop: 2149next_loop:
2149 if (fatal_signal_pending(current)) 2150 if (fatal_signal_pending(current))
2150 goto out; 2151 break;
2151 } 2152 }
2152
2153out:
2154 return ret; 2153 return ret;
2155} 2154}
2156EXPORT_SYMBOL(vfs_dedupe_file_range); 2155EXPORT_SYMBOL(vfs_dedupe_file_range);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 499a20a5a010..273736f41be3 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
275 } 275 }
276 } 276 }
277 brelse(bh); 277 brelse(bh);
278 return 0; 278 return err;
279} 279}
280 280
281int sysv_write_inode(struct inode *inode, struct writeback_control *wbc) 281int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 8f2f56d9a1bb..e3d684ea3203 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -827,16 +827,20 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
827 827
828 828
829 ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32); 829 ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
830 if (ret < 0) 830 if (ret < 0) {
831 goto out_bh; 831 strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
832 832 pr_warn("incorrect volume identification, setting to "
833 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret); 833 "'InvalidName'\n");
834 } else {
835 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
836 }
834 udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); 837 udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
835 838
836 ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128); 839 ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
837 if (ret < 0) 840 if (ret < 0) {
841 ret = 0;
838 goto out_bh; 842 goto out_bh;
839 843 }
840 outstr[ret] = 0; 844 outstr[ret] = 0;
841 udf_debug("volSetIdent[] = '%s'\n", outstr); 845 udf_debug("volSetIdent[] = '%s'\n", outstr);
842 846
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 45234791fec2..5fcfa96463eb 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -351,6 +351,11 @@ try_again:
351 return u_len; 351 return u_len;
352} 352}
353 353
354/*
355 * Convert CS0 dstring to output charset. Warning: This function may truncate
356 * input string if it is too long as it is used for informational strings only
357 * and it is better to truncate the string than to refuse mounting a media.
358 */
354int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len, 359int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
355 const uint8_t *ocu_i, int i_len) 360 const uint8_t *ocu_i, int i_len)
356{ 361{
@@ -359,9 +364,12 @@ int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
359 if (i_len > 0) { 364 if (i_len > 0) {
360 s_len = ocu_i[i_len - 1]; 365 s_len = ocu_i[i_len - 1];
361 if (s_len >= i_len) { 366 if (s_len >= i_len) {
362 pr_err("incorrect dstring lengths (%d/%d)\n", 367 pr_warn("incorrect dstring lengths (%d/%d),"
363 s_len, i_len); 368 " truncating\n", s_len, i_len);
364 return -EINVAL; 369 s_len = i_len - 1;
370 /* 2-byte encoding? Need to round properly... */
371 if (ocu_i[0] == 16)
372 s_len -= (s_len - 1) & 2;
365 } 373 }
366 } 374 }
367 375
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 356d2b8568c1..cd58939dc977 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1361,6 +1361,19 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1361 ret = -EINVAL; 1361 ret = -EINVAL;
1362 if (!vma_can_userfault(cur)) 1362 if (!vma_can_userfault(cur))
1363 goto out_unlock; 1363 goto out_unlock;
1364
1365 /*
1366 * UFFDIO_COPY will fill file holes even without
1367 * PROT_WRITE. This check enforces that if this is a
1368 * MAP_SHARED, the process has write permission to the backing
1369 * file. If VM_MAYWRITE is set it also enforces that on a
1370 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
1371 * F_WRITE_SEAL can be taken until the vma is destroyed.
1372 */
1373 ret = -EPERM;
1374 if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
1375 goto out_unlock;
1376
1364 /* 1377 /*
1365 * If this vma contains ending address, and huge pages 1378 * If this vma contains ending address, and huge pages
1366 * check alignment. 1379 * check alignment.
@@ -1406,6 +1419,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1406 BUG_ON(!vma_can_userfault(vma)); 1419 BUG_ON(!vma_can_userfault(vma));
1407 BUG_ON(vma->vm_userfaultfd_ctx.ctx && 1420 BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1408 vma->vm_userfaultfd_ctx.ctx != ctx); 1421 vma->vm_userfaultfd_ctx.ctx != ctx);
1422 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1409 1423
1410 /* 1424 /*
1411 * Nothing to do: this vma is already registered into this 1425 * Nothing to do: this vma is already registered into this
@@ -1552,6 +1566,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1552 cond_resched(); 1566 cond_resched();
1553 1567
1554 BUG_ON(!vma_can_userfault(vma)); 1568 BUG_ON(!vma_can_userfault(vma));
1569 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1555 1570
1556 /* 1571 /*
1557 * Nothing to do: this vma is already registered into this 1572 * Nothing to do: this vma is already registered into this
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 6fc5425b1474..2652d00842d6 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -243,7 +243,7 @@ xfs_attr3_leaf_verify(
243 struct xfs_mount *mp = bp->b_target->bt_mount; 243 struct xfs_mount *mp = bp->b_target->bt_mount;
244 struct xfs_attr_leafblock *leaf = bp->b_addr; 244 struct xfs_attr_leafblock *leaf = bp->b_addr;
245 struct xfs_attr_leaf_entry *entries; 245 struct xfs_attr_leaf_entry *entries;
246 uint16_t end; 246 uint32_t end; /* must be 32bit - see below */
247 int i; 247 int i;
248 248
249 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); 249 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -293,6 +293,11 @@ xfs_attr3_leaf_verify(
293 /* 293 /*
294 * Quickly check the freemap information. Attribute data has to be 294 * Quickly check the freemap information. Attribute data has to be
295 * aligned to 4-byte boundaries, and likewise for the free space. 295 * aligned to 4-byte boundaries, and likewise for the free space.
296 *
297 * Note that for 64k block size filesystems, the freemap entries cannot
298 * overflow as they are only be16 fields. However, when checking end
299 * pointer of the freemap, we have to be careful to detect overflows and
300 * so use uint32_t for those checks.
296 */ 301 */
297 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { 302 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
298 if (ichdr.freemap[i].base > mp->m_attr_geo->blksize) 303 if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
@@ -303,7 +308,9 @@ xfs_attr3_leaf_verify(
303 return __this_address; 308 return __this_address;
304 if (ichdr.freemap[i].size & 0x3) 309 if (ichdr.freemap[i].size & 0x3)
305 return __this_address; 310 return __this_address;
306 end = ichdr.freemap[i].base + ichdr.freemap[i].size; 311
312 /* be care of 16 bit overflows here */
313 end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
307 if (end < ichdr.freemap[i].base) 314 if (end < ichdr.freemap[i].base)
308 return __this_address; 315 return __this_address;
309 if (end > mp->m_attr_geo->blksize) 316 if (end > mp->m_attr_geo->blksize)
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 74d7228e755b..19e921d1586f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1694,10 +1694,13 @@ xfs_bmap_add_extent_delay_real(
1694 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1694 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1695 /* 1695 /*
1696 * Filling in all of a previously delayed allocation extent. 1696 * Filling in all of a previously delayed allocation extent.
1697 * The right neighbor is contiguous, the left is not. 1697 * The right neighbor is contiguous, the left is not. Take care
1698 * with delay -> unwritten extent allocation here because the
1699 * delalloc record we are overwriting is always written.
1698 */ 1700 */
1699 PREV.br_startblock = new->br_startblock; 1701 PREV.br_startblock = new->br_startblock;
1700 PREV.br_blockcount += RIGHT.br_blockcount; 1702 PREV.br_blockcount += RIGHT.br_blockcount;
1703 PREV.br_state = new->br_state;
1701 1704
1702 xfs_iext_next(ifp, &bma->icur); 1705 xfs_iext_next(ifp, &bma->icur);
1703 xfs_iext_remove(bma->ip, &bma->icur, state); 1706 xfs_iext_remove(bma->ip, &bma->icur, state);
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 86c50208a143..7fbf8af0b159 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -538,15 +538,18 @@ xfs_inobt_rec_check_count(
538 538
539static xfs_extlen_t 539static xfs_extlen_t
540xfs_inobt_max_size( 540xfs_inobt_max_size(
541 struct xfs_mount *mp) 541 struct xfs_mount *mp,
542 xfs_agnumber_t agno)
542{ 543{
544 xfs_agblock_t agblocks = xfs_ag_block_count(mp, agno);
545
543 /* Bail out if we're uninitialized, which can happen in mkfs. */ 546 /* Bail out if we're uninitialized, which can happen in mkfs. */
544 if (mp->m_inobt_mxr[0] == 0) 547 if (mp->m_inobt_mxr[0] == 0)
545 return 0; 548 return 0;
546 549
547 return xfs_btree_calc_size(mp->m_inobt_mnr, 550 return xfs_btree_calc_size(mp->m_inobt_mnr,
548 (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock / 551 (uint64_t)agblocks * mp->m_sb.sb_inopblock /
549 XFS_INODES_PER_CHUNK); 552 XFS_INODES_PER_CHUNK);
550} 553}
551 554
552static int 555static int
@@ -594,7 +597,7 @@ xfs_finobt_calc_reserves(
594 if (error) 597 if (error)
595 return error; 598 return error;
596 599
597 *ask += xfs_inobt_max_size(mp); 600 *ask += xfs_inobt_max_size(mp, agno);
598 *used += tree_len; 601 *used += tree_len;
599 return 0; 602 return 0;
600} 603}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5d263dfdb3bc..404e581f1ea1 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1042,7 +1042,7 @@ out_trans_cancel:
1042 goto out_unlock; 1042 goto out_unlock;
1043} 1043}
1044 1044
1045static int 1045int
1046xfs_flush_unmap_range( 1046xfs_flush_unmap_range(
1047 struct xfs_inode *ip, 1047 struct xfs_inode *ip,
1048 xfs_off_t offset, 1048 xfs_off_t offset,
@@ -1195,13 +1195,7 @@ xfs_prepare_shift(
1195 * Writeback and invalidate cache for the remainder of the file as we're 1195 * Writeback and invalidate cache for the remainder of the file as we're
1196 * about to shift down every extent from offset to EOF. 1196 * about to shift down every extent from offset to EOF.
1197 */ 1197 */
1198 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1); 1198 error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1199 if (error)
1200 return error;
1201 error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1202 offset >> PAGE_SHIFT, -1);
1203 if (error)
1204 return error;
1205 1199
1206 /* 1200 /*
1207 * Clean out anything hanging around in the cow fork now that 1201 * Clean out anything hanging around in the cow fork now that
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 87363d136bb6..7a78229cf1a7 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -80,4 +80,7 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
80 int whichfork, xfs_extnum_t *nextents, 80 int whichfork, xfs_extnum_t *nextents,
81 xfs_filblks_t *count); 81 xfs_filblks_t *count);
82 82
83int xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset,
84 xfs_off_t len);
85
83#endif /* __XFS_BMAP_UTIL_H__ */ 86#endif /* __XFS_BMAP_UTIL_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 12d8455bfbb2..010db5f8fb00 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1233,9 +1233,23 @@ xfs_buf_iodone(
1233} 1233}
1234 1234
1235/* 1235/*
1236 * Requeue a failed buffer for writeback 1236 * Requeue a failed buffer for writeback.
1237 * 1237 *
1238 * Return true if the buffer has been re-queued properly, false otherwise 1238 * We clear the log item failed state here as well, but we have to be careful
1239 * about reference counts because the only active reference counts on the buffer
1240 * may be the failed log items. Hence if we clear the log item failed state
1241 * before queuing the buffer for IO we can release all active references to
1242 * the buffer and free it, leading to use after free problems in
1243 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
1244 * order we process them in - the buffer is locked, and we own the buffer list
1245 * so nothing on them is going to change while we are performing this action.
1246 *
1247 * Hence we can safely queue the buffer for IO before we clear the failed log
1248 * item state, therefore always having an active reference to the buffer and
1249 * avoiding the transient zero-reference state that leads to use-after-free.
1250 *
1251 * Return true if the buffer was added to the buffer list, false if it was
1252 * already on the buffer list.
1239 */ 1253 */
1240bool 1254bool
1241xfs_buf_resubmit_failed_buffers( 1255xfs_buf_resubmit_failed_buffers(
@@ -1243,16 +1257,16 @@ xfs_buf_resubmit_failed_buffers(
1243 struct list_head *buffer_list) 1257 struct list_head *buffer_list)
1244{ 1258{
1245 struct xfs_log_item *lip; 1259 struct xfs_log_item *lip;
1260 bool ret;
1261
1262 ret = xfs_buf_delwri_queue(bp, buffer_list);
1246 1263
1247 /* 1264 /*
1248 * Clear XFS_LI_FAILED flag from all items before resubmit 1265 * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
1249 *
1250 * XFS_LI_FAILED set/clear is protected by ail_lock, caller this
1251 * function already have it acquired 1266 * function already have it acquired
1252 */ 1267 */
1253 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) 1268 list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
1254 xfs_clear_li_failed(lip); 1269 xfs_clear_li_failed(lip);
1255 1270
1256 /* Add this buffer back to the delayed write list */ 1271 return ret;
1257 return xfs_buf_delwri_queue(bp, buffer_list);
1258} 1272}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 53c9ab8fb777..e47425071e65 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -920,7 +920,7 @@ out_unlock:
920} 920}
921 921
922 922
923loff_t 923STATIC loff_t
924xfs_file_remap_range( 924xfs_file_remap_range(
925 struct file *file_in, 925 struct file *file_in,
926 loff_t pos_in, 926 loff_t pos_in,
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6e2c08f30f60..6ecdbb3af7de 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1608,7 +1608,7 @@ xfs_ioc_getbmap(
1608 error = 0; 1608 error = 0;
1609out_free_buf: 1609out_free_buf:
1610 kmem_free(buf); 1610 kmem_free(buf);
1611 return 0; 1611 return error;
1612} 1612}
1613 1613
1614struct getfsmap_info { 1614struct getfsmap_info {
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 576c375ce12a..6b736ea58d35 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -107,5 +107,5 @@ assfail(char *expr, char *file, int line)
107void 107void
108xfs_hex_dump(void *p, int length) 108xfs_hex_dump(void *p, int length)
109{ 109{
110 print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1); 110 print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
111} 111}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index ecdb086bc23e..322a852ce284 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -296,6 +296,7 @@ xfs_reflink_reserve_cow(
296 if (error) 296 if (error)
297 return error; 297 return error;
298 298
299 xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
299 trace_xfs_reflink_cow_alloc(ip, &got); 300 trace_xfs_reflink_cow_alloc(ip, &got);
300 return 0; 301 return 0;
301} 302}
@@ -1351,10 +1352,19 @@ xfs_reflink_remap_prep(
1351 if (ret) 1352 if (ret)
1352 goto out_unlock; 1353 goto out_unlock;
1353 1354
1354 /* Zap any page cache for the destination file's range. */ 1355 /*
1355 truncate_inode_pages_range(&inode_out->i_data, 1356 * If pos_out > EOF, we may have dirtied blocks between EOF and
1356 round_down(pos_out, PAGE_SIZE), 1357 * pos_out. In that case, we need to extend the flush and unmap to cover
1357 round_up(pos_out + *len, PAGE_SIZE) - 1); 1358 * from EOF to the end of the copy length.
1359 */
1360 if (pos_out > XFS_ISIZE(dest)) {
1361 loff_t flen = *len + (pos_out - XFS_ISIZE(dest));
1362 ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
1363 } else {
1364 ret = xfs_flush_unmap_range(dest, pos_out, *len);
1365 }
1366 if (ret)
1367 goto out_unlock;
1358 1368
1359 return 1; 1369 return 1;
1360out_unlock: 1370out_unlock:
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 3043e5ed6495..8a6532aae779 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -280,7 +280,10 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
280 ), 280 ),
281 TP_fast_assign( 281 TP_fast_assign(
282 __entry->dev = bp->b_target->bt_dev; 282 __entry->dev = bp->b_target->bt_dev;
283 __entry->bno = bp->b_bn; 283 if (bp->b_bn == XFS_BUF_DADDR_NULL)
284 __entry->bno = bp->b_maps[0].bm_bn;
285 else
286 __entry->bno = bp->b_bn;
284 __entry->nblks = bp->b_length; 287 __entry->nblks = bp->b_length;
285 __entry->hold = atomic_read(&bp->b_hold); 288 __entry->hold = atomic_read(&bp->b_hold);
286 __entry->pincount = atomic_read(&bp->b_pin_count); 289 __entry->pincount = atomic_read(&bp->b_pin_count);
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 89f3b03b1445..e3667c9a33a5 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -3,7 +3,7 @@
3#define _4LEVEL_FIXUP_H 3#define _4LEVEL_FIXUP_H
4 4
5#define __ARCH_HAS_4LEVEL_HACK 5#define __ARCH_HAS_4LEVEL_HACK
6#define __PAGETABLE_PUD_FOLDED 6#define __PAGETABLE_PUD_FOLDED 1
7 7
8#define PUD_SHIFT PGDIR_SHIFT 8#define PUD_SHIFT PGDIR_SHIFT
9#define PUD_SIZE PGDIR_SIZE 9#define PUD_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 9c2e0708eb82..73474bb52344 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -3,7 +3,7 @@
3#define _5LEVEL_FIXUP_H 3#define _5LEVEL_FIXUP_H
4 4
5#define __ARCH_HAS_5LEVEL_HACK 5#define __ARCH_HAS_5LEVEL_HACK
6#define __PAGETABLE_P4D_FOLDED 6#define __PAGETABLE_P4D_FOLDED 1
7 7
8#define P4D_SHIFT PGDIR_SHIFT 8#define P4D_SHIFT PGDIR_SHIFT
9#define P4D_SIZE PGDIR_SIZE 9#define P4D_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 0c34215263b8..1d6dd38c0e5e 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -5,7 +5,7 @@
5#ifndef __ASSEMBLY__ 5#ifndef __ASSEMBLY__
6#include <asm-generic/5level-fixup.h> 6#include <asm-generic/5level-fixup.h>
7 7
8#define __PAGETABLE_PUD_FOLDED 8#define __PAGETABLE_PUD_FOLDED 1
9 9
10/* 10/*
11 * Having the pud type consist of a pgd gets the size right, and allows 11 * Having the pud type consist of a pgd gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 1a29b2a0282b..04cb913797bc 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -4,7 +4,7 @@
4 4
5#ifndef __ASSEMBLY__ 5#ifndef __ASSEMBLY__
6 6
7#define __PAGETABLE_P4D_FOLDED 7#define __PAGETABLE_P4D_FOLDED 1
8 8
9typedef struct { pgd_t pgd; } p4d_t; 9typedef struct { pgd_t pgd; } p4d_t;
10 10
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index f35f6e8149e4..b85b8271a73d 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -8,7 +8,7 @@
8 8
9struct mm_struct; 9struct mm_struct;
10 10
11#define __PAGETABLE_PMD_FOLDED 11#define __PAGETABLE_PMD_FOLDED 1
12 12
13/* 13/*
14 * Having the pmd type consist of a pud gets the size right, and allows 14 * Having the pmd type consist of a pud gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index e950b9c50f34..9bef475db6fe 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -9,7 +9,7 @@
9#else 9#else
10#include <asm-generic/pgtable-nop4d.h> 10#include <asm-generic/pgtable-nop4d.h>
11 11
12#define __PAGETABLE_PUD_FOLDED 12#define __PAGETABLE_PUD_FOLDED 1
13 13
14/* 14/*
15 * Having the pud type consist of a p4d gets the size right, and allows 15 * Having the pud type consist of a p4d gets the size right, and allows
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 5657a20e0c59..359fb935ded6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1127,4 +1127,20 @@ static inline bool arch_has_pfn_modify_check(void)
1127#endif 1127#endif
1128#endif 1128#endif
1129 1129
1130/*
1131 * On some architectures it depends on the mm if the p4d/pud or pmd
1132 * layer of the page table hierarchy is folded or not.
1133 */
1134#ifndef mm_p4d_folded
1135#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
1136#endif
1137
1138#ifndef mm_pud_folded
1139#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
1140#endif
1141
1142#ifndef mm_pmd_folded
1143#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
1144#endif
1145
1130#endif /* _ASM_GENERIC_PGTABLE_H */ 1146#endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index a83e1f632eb7..f01623aef2f7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
169 169
170void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, 170void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
171 unsigned int idx); 171 unsigned int idx);
172struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
172unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); 173unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
173void can_free_echo_skb(struct net_device *dev, unsigned int idx); 174void can_free_echo_skb(struct net_device *dev, unsigned int idx);
174 175
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..8268811a697e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
41int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); 41int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
42int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); 42int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
43int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); 43int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
44int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); 44int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
45 struct sk_buff *skb, u32 timestamp);
46unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
47 unsigned int idx, u32 timestamp);
48int can_rx_offload_queue_tail(struct can_rx_offload *offload,
49 struct sk_buff *skb);
45void can_rx_offload_reset(struct can_rx_offload *offload); 50void can_rx_offload_reset(struct can_rx_offload *offload);
46void can_rx_offload_del(struct can_rx_offload *offload); 51void can_rx_offload_del(struct can_rx_offload *offload);
47void can_rx_offload_enable(struct can_rx_offload *offload); 52void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 6b92b3395fa9..65a38c4a02a1 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
213 CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \ 213 CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
214 CEPH_FEATURE_CEPHX_V2) 214 CEPH_FEATURE_CEPHX_V2)
215 215
216#define CEPH_FEATURES_REQUIRED_DEFAULT \ 216#define CEPH_FEATURES_REQUIRED_DEFAULT 0
217 (CEPH_FEATURE_NOSRCADDR | \
218 CEPH_FEATURE_SUBSCRIBE2 | \
219 CEPH_FEATURE_RECONNECT_SEQ | \
220 CEPH_FEATURE_PGID64 | \
221 CEPH_FEATURE_PGPOOL3 | \
222 CEPH_FEATURE_OSDENC)
223 217
224#endif 218#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index c0f5db3a9621..2010493e1040 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -143,18 +143,6 @@
143#define KASAN_ABI_VERSION 3 143#define KASAN_ABI_VERSION 3
144#endif 144#endif
145 145
146/*
147 * Because __no_sanitize_address conflicts with inlining:
148 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
149 * we do one or the other.
150 */
151#ifdef CONFIG_KASAN
152#define __no_sanitize_address_or_inline \
153 __no_sanitize_address __maybe_unused notrace
154#else
155#define __no_sanitize_address_or_inline inline
156#endif
157
158#if GCC_VERSION >= 50100 146#if GCC_VERSION >= 50100
159#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 147#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
160#endif 148#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 18c80cfa4fc4..06396c1cf127 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -189,7 +189,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
189 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 189 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
190 * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 190 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
191 */ 191 */
192# define __no_kasan_or_inline __no_sanitize_address __maybe_unused 192# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
193#else 193#else
194# define __no_kasan_or_inline __always_inline 194# define __no_kasan_or_inline __always_inline
195#endif 195#endif
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6b28c1b7310c..f8c400ba1929 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -4,22 +4,26 @@
4 4
5/* 5/*
6 * The attributes in this file are unconditionally defined and they directly 6 * The attributes in this file are unconditionally defined and they directly
7 * map to compiler attribute(s) -- except those that are optional. 7 * map to compiler attribute(s), unless one of the compilers does not support
8 * the attribute. In that case, __has_attribute is used to check for support
9 * and the reason is stated in its comment ("Optional: ...").
8 * 10 *
9 * Any other "attributes" (i.e. those that depend on a configuration option, 11 * Any other "attributes" (i.e. those that depend on a configuration option,
10 * on a compiler, on an architecture, on plugins, on other attributes...) 12 * on a compiler, on an architecture, on plugins, on other attributes...)
11 * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h). 13 * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
14 * The intention is to keep this file as simple as possible, as well as
15 * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
12 * 16 *
13 * This file is meant to be sorted (by actual attribute name, 17 * This file is meant to be sorted (by actual attribute name,
14 * not by #define identifier). Use the __attribute__((__name__)) syntax 18 * not by #define identifier). Use the __attribute__((__name__)) syntax
15 * (i.e. with underscores) to avoid future collisions with other macros. 19 * (i.e. with underscores) to avoid future collisions with other macros.
16 * If an attribute is optional, state the reason in the comment. 20 * Provide links to the documentation of each supported compiler, if it exists.
17 */ 21 */
18 22
19/* 23/*
20 * To check for optional attributes, we use __has_attribute, which is supported 24 * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
21 * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support 25 * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
22 * 4.6 <= gcc < 5, we implement __has_attribute by hand. 26 * by hand.
23 * 27 *
24 * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__ 28 * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
25 * depending on the compiler used to build it; however, these attributes have 29 * depending on the compiler used to build it; however, these attributes have
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3439d7d0249a..4a3f9c09c92d 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -130,6 +130,10 @@ struct ftrace_likely_data {
130# define randomized_struct_fields_end 130# define randomized_struct_fields_end
131#endif 131#endif
132 132
133#ifndef asm_volatile_goto
134#define asm_volatile_goto(x...) asm goto(x)
135#endif
136
133/* Are two types/vars the same type (ignoring qualifiers)? */ 137/* Are two types/vars the same type (ignoring qualifiers)? */
134#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) 138#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
135 139
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index bd73e7a91410..9e66bfe369aa 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,7 +5,7 @@
5#include <linux/dma-mapping.h> 5#include <linux/dma-mapping.h>
6#include <linux/mem_encrypt.h> 6#include <linux/mem_encrypt.h>
7 7
8#define DIRECT_MAPPING_ERROR 0 8#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0)
9 9
10#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA 10#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
11#include <asm/dma-direct.h> 11#include <asm/dma-direct.h>
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 845174e113ce..100ce4a4aff6 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1167,6 +1167,8 @@ static inline bool efi_enabled(int feature)
1167extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); 1167extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
1168 1168
1169extern bool efi_is_table_address(unsigned long phys_addr); 1169extern bool efi_is_table_address(unsigned long phys_addr);
1170
1171extern int efi_apply_persistent_mem_reservations(void);
1170#else 1172#else
1171static inline bool efi_enabled(int feature) 1173static inline bool efi_enabled(int feature)
1172{ 1174{
@@ -1185,6 +1187,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
1185{ 1187{
1186 return false; 1188 return false;
1187} 1189}
1190
1191static inline int efi_apply_persistent_mem_reservations(void)
1192{
1193 return 0;
1194}
1188#endif 1195#endif
1189 1196
1190extern int efi_status_to_err(efi_status_t status); 1197extern int efi_status_to_err(efi_status_t status);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index de629b706d1d..448dcc448f1f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -866,6 +866,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
866 866
867void bpf_jit_free(struct bpf_prog *fp); 867void bpf_jit_free(struct bpf_prog *fp);
868 868
869int bpf_jit_get_func_addr(const struct bpf_prog *prog,
870 const struct bpf_insn *insn, bool extra_pass,
871 u64 *func_addr, bool *func_addr_fixed);
872
869struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); 873struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
870void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); 874void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
871 875
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 34cf0fdd7dc7..610815e3f1aa 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
196static inline void fscache_retrieval_complete(struct fscache_retrieval *op, 196static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
197 int n_pages) 197 int n_pages)
198{ 198{
199 atomic_sub(n_pages, &op->n_pages); 199 if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
200 if (atomic_read(&op->n_pages) <= 0)
201 fscache_op_complete(&op->op, false); 200 fscache_op_complete(&op->op, false);
202} 201}
203 202
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a397907e8d72..dd16e8218db3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -777,8 +777,8 @@ struct ftrace_ret_stack {
777extern void return_to_handler(void); 777extern void return_to_handler(void);
778 778
779extern int 779extern int
780ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, 780function_graph_enter(unsigned long ret, unsigned long func,
781 unsigned long frame_pointer, unsigned long *retp); 781 unsigned long frame_pointer, unsigned long *retp);
782 782
783unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, 783unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
784 unsigned long ret, unsigned long *retp); 784 unsigned long ret, unsigned long *retp);
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 331dc377c275..dc12f5c4b076 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
177* @attr_usage_id: Attribute usage id as per spec 177* @attr_usage_id: Attribute usage id as per spec
178* @report_id: Report id to look for 178* @report_id: Report id to look for
179* @flag: Synchronous or asynchronous read 179* @flag: Synchronous or asynchronous read
180* @is_signed: If true then fields < 32 bits will be sign-extended
180* 181*
181* Issues a synchronous or asynchronous read request for an input attribute. 182* Issues a synchronous or asynchronous read request for an input attribute.
182* Returns data upto 32 bits. 183* Returns data upto 32 bits.
@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
190int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, 191int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
191 u32 usage_id, 192 u32 usage_id,
192 u32 attr_usage_id, u32 report_id, 193 u32 attr_usage_id, u32 report_id,
193 enum sensor_hub_read_flags flag 194 enum sensor_hub_read_flags flag,
195 bool is_signed
194); 196);
195 197
196/** 198/**
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 2827b87590d8..a355d61940f2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -722,8 +722,8 @@ struct hid_usage_id {
722 * input will not be passed to raw_event unless hid_device_io_start is 722 * input will not be passed to raw_event unless hid_device_io_start is
723 * called. 723 * called.
724 * 724 *
725 * raw_event and event should return 0 on no action performed, 1 when no 725 * raw_event and event should return negative on error, any other value will
726 * further processing should be done and negative on error 726 * pass the event on to .event() typically return 0 for success.
727 * 727 *
728 * input_mapping shall return a negative value to completely ignore this usage 728 * input_mapping shall return a negative value to completely ignore this usage
729 * (e.g. doubled or invalid usage), zero to continue with parsing of this 729 * (e.g. doubled or invalid usage), zero to continue with parsing of this
@@ -1139,34 +1139,6 @@ static inline u32 hid_report_len(struct hid_report *report)
1139int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, 1139int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1140 int interrupt); 1140 int interrupt);
1141 1141
1142
1143/**
1144 * struct hid_scroll_counter - Utility class for processing high-resolution
1145 * scroll events.
1146 * @dev: the input device for which events should be reported.
1147 * @microns_per_hi_res_unit: the amount moved by the user's finger for each
1148 * high-resolution unit reported by the mouse, in
1149 * microns.
1150 * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
1151 * multiple of its lower resolution. For example, if
1152 * moving the wheel by one "notch" would result in a
1153 * value of 1 in low-resolution mode but 8 in
1154 * high-resolution, the multiplier is 8.
1155 * @remainder: counts the number of high-resolution units moved since the last
1156 * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
1157 * only be used by class methods.
1158 */
1159struct hid_scroll_counter {
1160 struct input_dev *dev;
1161 int microns_per_hi_res_unit;
1162 int resolution_multiplier;
1163
1164 int remainder;
1165};
1166
1167void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
1168 int hi_res_value);
1169
1170/* HID quirks API */ 1142/* HID quirks API */
1171unsigned long hid_lookup_quirk(const struct hid_device *hdev); 1143unsigned long hid_lookup_quirk(const struct hid_device *hdev);
1172int hid_quirks_init(char **quirks_param, __u16 bus, int count); 1144int hid_quirks_init(char **quirks_param, __u16 bus, int count);
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
21#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) 21#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
22 22
23extern raw_spinlock_t i8253_lock; 23extern raw_spinlock_t i8253_lock;
24extern bool i8253_clear_counter_on_shutdown;
24extern struct clock_event_device i8253_clockevent; 25extern struct clock_event_device i8253_clockevent;
25extern void clockevent_i8253_init(bool oneshot); 26extern void clockevent_i8253_init(bool oneshot);
26 27
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index bac395f1d00a..5228c62af416 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
139struct mempolicy *get_task_policy(struct task_struct *p); 139struct mempolicy *get_task_policy(struct task_struct *p);
140struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 140struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
141 unsigned long addr); 141 unsigned long addr);
142struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
143 unsigned long addr);
144bool vma_policy_mof(struct vm_area_struct *vma); 142bool vma_policy_mof(struct vm_area_struct *vma);
145 143
146extern void numa_default_policy(void); 144extern void numa_default_policy(void);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dbff9ff28f2c..34e17e6f8942 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -2473,14 +2473,15 @@ struct mlx5_ifc_xrc_srqc_bits {
2473 2473
2474 u8 wq_signature[0x1]; 2474 u8 wq_signature[0x1];
2475 u8 cont_srq[0x1]; 2475 u8 cont_srq[0x1];
2476 u8 dbr_umem_valid[0x1]; 2476 u8 reserved_at_22[0x1];
2477 u8 rlky[0x1]; 2477 u8 rlky[0x1];
2478 u8 basic_cyclic_rcv_wqe[0x1]; 2478 u8 basic_cyclic_rcv_wqe[0x1];
2479 u8 log_rq_stride[0x3]; 2479 u8 log_rq_stride[0x3];
2480 u8 xrcd[0x18]; 2480 u8 xrcd[0x18];
2481 2481
2482 u8 page_offset[0x6]; 2482 u8 page_offset[0x6];
2483 u8 reserved_at_46[0x2]; 2483 u8 reserved_at_46[0x1];
2484 u8 dbr_umem_valid[0x1];
2484 u8 cqn[0x18]; 2485 u8 cqn[0x18];
2485 2486
2486 u8 reserved_at_60[0x20]; 2487 u8 reserved_at_60[0x20];
@@ -6689,9 +6690,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
6689 6690
6690 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 6691 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
6691 6692
6692 u8 reserved_at_280[0x40]; 6693 u8 reserved_at_280[0x60];
6694
6693 u8 xrc_srq_umem_valid[0x1]; 6695 u8 xrc_srq_umem_valid[0x1];
6694 u8 reserved_at_2c1[0x5bf]; 6696 u8 reserved_at_2e1[0x1f];
6697
6698 u8 reserved_at_300[0x580];
6695 6699
6696 u8 pas[0][0x40]; 6700 u8 pas[0][0x40];
6697}; 6701};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fcf9cc9d535f..5411de93a363 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1744 1744
1745static inline void mm_inc_nr_puds(struct mm_struct *mm) 1745static inline void mm_inc_nr_puds(struct mm_struct *mm)
1746{ 1746{
1747 if (mm_pud_folded(mm))
1748 return;
1747 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); 1749 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1748} 1750}
1749 1751
1750static inline void mm_dec_nr_puds(struct mm_struct *mm) 1752static inline void mm_dec_nr_puds(struct mm_struct *mm)
1751{ 1753{
1754 if (mm_pud_folded(mm))
1755 return;
1752 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); 1756 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1753} 1757}
1754#endif 1758#endif
@@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1768 1772
1769static inline void mm_inc_nr_pmds(struct mm_struct *mm) 1773static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1770{ 1774{
1775 if (mm_pmd_folded(mm))
1776 return;
1771 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); 1777 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1772} 1778}
1773 1779
1774static inline void mm_dec_nr_pmds(struct mm_struct *mm) 1780static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1775{ 1781{
1782 if (mm_pmd_folded(mm))
1783 return;
1776 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); 1784 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1777} 1785}
1778#endif 1786#endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index abe975c87b90..7f53ece2c039 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
324 */ 324 */
325static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) 325static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
326{ 326{
327 return (u64)nand->memorg.luns_per_target * 327 return nand->memorg.ntargets * nand->memorg.luns_per_target *
328 nand->memorg.eraseblocks_per_lun * 328 nand->memorg.eraseblocks_per_lun;
329 nand->memorg.pages_per_eraseblock;
330} 329}
331 330
332/** 331/**
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
569} 568}
570 569
571/** 570/**
572 * nanddev_pos_next_eraseblock() - Move a position to the next page 571 * nanddev_pos_next_page() - Move a position to the next page
573 * @nand: NAND device 572 * @nand: NAND device
574 * @pos: the position to update 573 * @pos: the position to update
575 * 574 *
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index c79e859408e6..fd458389f7d1 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
406 } 406 }
407 /* fall through */ 407 /* fall through */
408 case NET_DIM_START_MEASURE: 408 case NET_DIM_START_MEASURE:
409 net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
410 &dim->start_sample);
409 dim->state = NET_DIM_MEASURE_IN_PROGRESS; 411 dim->state = NET_DIM_MEASURE_IN_PROGRESS;
410 break; 412 break;
411 case NET_DIM_APPLY_NEW_PROFILE: 413 case NET_DIM_APPLY_NEW_PROFILE:
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dc1d9ed33b31..857f8abf7b91 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3190#endif 3190#endif
3191} 3191}
3192 3192
3193/* Variant of netdev_tx_sent_queue() for drivers that are aware
3194 * that they should not test BQL status themselves.
3195 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3196 * skb of a batch.
3197 * Returns true if the doorbell must be used to kick the NIC.
3198 */
3199static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3200 unsigned int bytes,
3201 bool xmit_more)
3202{
3203 if (xmit_more) {
3204#ifdef CONFIG_BQL
3205 dql_queued(&dev_queue->dql, bytes);
3206#endif
3207 return netif_tx_queue_stopped(dev_queue);
3208 }
3209 netdev_tx_sent_queue(dev_queue, bytes);
3210 return true;
3211}
3212
3193/** 3213/**
3194 * netdev_sent_queue - report the number of bytes queued to hardware 3214 * netdev_sent_queue - report the number of bytes queued to hardware
3195 * @dev: network device 3215 * @dev: network device
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34fc80f3eb90..1d100efe74ec 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -314,7 +314,7 @@ enum {
314extern ip_set_id_t ip_set_get_byname(struct net *net, 314extern ip_set_id_t ip_set_get_byname(struct net *net,
315 const char *name, struct ip_set **set); 315 const char *name, struct ip_set **set);
316extern void ip_set_put_byindex(struct net *net, ip_set_id_t index); 316extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
317extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index); 317extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
318extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index); 318extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
319extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index); 319extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
320 320
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8e2bab1e8e90..70877f8de7e9 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
43 rcu_assign_pointer(comment->c, c); 43 rcu_assign_pointer(comment->c, c);
44} 44}
45 45
46/* Used only when dumping a set, protected by rcu_read_lock_bh() */ 46/* Used only when dumping a set, protected by rcu_read_lock() */
47static inline int 47static inline int
48ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) 48ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
49{ 49{
50 struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); 50 struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
51 51
52 if (!c) 52 if (!c)
53 return 0; 53 return 0;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index b8d95564bd53..14edb795ab43 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
21 struct nf_conntrack_tuple tuple; 21 struct nf_conntrack_tuple tuple;
22}; 22};
23 23
24enum grep_conntrack {
25 GRE_CT_UNREPLIED,
26 GRE_CT_REPLIED,
27 GRE_CT_MAX
28};
29
30struct netns_proto_gre {
31 struct nf_proto_net nf;
32 rwlock_t keymap_lock;
33 struct list_head keymap_list;
34 unsigned int gre_timeouts[GRE_CT_MAX];
35};
36
24/* add new tuple->key_reply pair to keymap */ 37/* add new tuple->key_reply pair to keymap */
25int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, 38int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
26 struct nf_conntrack_tuple *t); 39 struct nf_conntrack_tuple *t);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 08f9247e9827..9003e29cde46 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
119void watchdog_nmi_stop(void); 119void watchdog_nmi_stop(void);
120void watchdog_nmi_start(void); 120void watchdog_nmi_start(void);
121int watchdog_nmi_probe(void); 121int watchdog_nmi_probe(void);
122int watchdog_nmi_enable(unsigned int cpu);
123void watchdog_nmi_disable(unsigned int cpu);
122 124
123/** 125/**
124 * touch_nmi_watchdog - restart NMI watchdog timeout. 126 * touch_nmi_watchdog - restart NMI watchdog timeout.
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index f92a47e18034..a93841bfb9f7 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -17,6 +17,8 @@
17#define __DAVINCI_GPIO_PLATFORM_H 17#define __DAVINCI_GPIO_PLATFORM_H
18 18
19struct davinci_gpio_platform_data { 19struct davinci_gpio_platform_data {
20 bool no_auto_base;
21 u32 base;
20 u32 ngpio; 22 u32 ngpio;
21 u32 gpio_unbanked; 23 u32 gpio_unbanked;
22}; 24};
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 8e0725aac0aa..7006008d5b72 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,6 +1,7 @@
1#ifndef _LINUX_PSI_H 1#ifndef _LINUX_PSI_H
2#define _LINUX_PSI_H 2#define _LINUX_PSI_H
3 3
4#include <linux/jump_label.h>
4#include <linux/psi_types.h> 5#include <linux/psi_types.h>
5#include <linux/sched.h> 6#include <linux/sched.h>
6 7
@@ -9,7 +10,7 @@ struct css_set;
9 10
10#ifdef CONFIG_PSI 11#ifdef CONFIG_PSI
11 12
12extern bool psi_disabled; 13extern struct static_key_false psi_disabled;
13 14
14void psi_init(void); 15void psi_init(void);
15 16
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index a15bc4d48752..30fcec375a3a 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -90,7 +90,10 @@ struct pstore_record {
90 * 90 *
91 * @buf_lock: spinlock to serialize access to @buf 91 * @buf_lock: spinlock to serialize access to @buf
92 * @buf: preallocated crash dump buffer 92 * @buf: preallocated crash dump buffer
93 * @bufsize: size of @buf available for crash dump writes 93 * @bufsize: size of @buf available for crash dump bytes (must match
94 * smallest number of bytes available for writing to a
95 * backend entry, since compressed bytes don't take kindly
96 * to being truncated)
94 * 97 *
95 * @read_mutex: serializes @open, @read, @close, and @erase callbacks 98 * @read_mutex: serializes @open, @read, @close, and @erase callbacks
96 * @flags: bitfield of frontends the backend can accept writes for 99 * @flags: bitfield of frontends the backend can accept writes for
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 6c2ffed907f5..de20ede2c5c8 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
64#define PTRACE_MODE_NOAUDIT 0x04 64#define PTRACE_MODE_NOAUDIT 0x04
65#define PTRACE_MODE_FSCREDS 0x08 65#define PTRACE_MODE_FSCREDS 0x08
66#define PTRACE_MODE_REALCREDS 0x10 66#define PTRACE_MODE_REALCREDS 0x10
67#define PTRACE_MODE_SCHED 0x20
68#define PTRACE_MODE_IBPB 0x40
69 67
70/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ 68/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
71#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) 69#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
72#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) 70#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
73#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) 71#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
74#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) 72#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
75#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
76 73
77/** 74/**
78 * ptrace_may_access - check whether the caller is permitted to access 75 * ptrace_may_access - check whether the caller is permitted to access
@@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
90 */ 87 */
91extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); 88extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
92 89
93/**
94 * ptrace_may_access - check whether the caller is permitted to access
95 * a target task.
96 * @task: target task
97 * @mode: selects type of access and caller credentials
98 *
99 * Returns true on success, false on denial.
100 *
101 * Similar to ptrace_may_access(). Only to be called from context switch
102 * code. Does not call into audit and the regular LSM hooks due to locking
103 * constraints.
104 */
105extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
106
107static inline int ptrace_reparented(struct task_struct *child) 90static inline int ptrace_reparented(struct task_struct *child)
108{ 91{
109 return !same_thread_group(child->real_parent, child->parent); 92 return !same_thread_group(child->real_parent, child->parent);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a51c13c2b1a0..291a9bd5b97f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1116,6 +1116,7 @@ struct task_struct {
1116#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1116#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1117 /* Index of current stored address in ret_stack: */ 1117 /* Index of current stored address in ret_stack: */
1118 int curr_ret_stack; 1118 int curr_ret_stack;
1119 int curr_ret_depth;
1119 1120
1120 /* Stack of return addresses for return function tracing: */ 1121 /* Stack of return addresses for return function tracing: */
1121 struct ftrace_ret_stack *ret_stack; 1122 struct ftrace_ret_stack *ret_stack;
@@ -1453,6 +1454,8 @@ static inline bool is_percpu_thread(void)
1453#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1454#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1454#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 1455#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
1455#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ 1456#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
1457#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
1458#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
1456 1459
1457#define TASK_PFA_TEST(name, func) \ 1460#define TASK_PFA_TEST(name, func) \
1458 static inline bool task_##func(struct task_struct *p) \ 1461 static inline bool task_##func(struct task_struct *p) \
@@ -1484,6 +1487,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1484TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1487TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1485TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1488TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1486 1489
1490TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1491TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1492TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1493
1494TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1495TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1496
1487static inline void 1497static inline void
1488current_restore_flags(unsigned long orig_flags, unsigned long flags) 1498current_restore_flags(unsigned long orig_flags, unsigned long flags)
1489{ 1499{
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..59d3736c454c
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_SCHED_SMT_H
3#define _LINUX_SCHED_SMT_H
4
5#include <linux/static_key.h>
6
7#ifdef CONFIG_SCHED_SMT
8extern struct static_key_false sched_smt_present;
9
10static __always_inline bool sched_smt_active(void)
11{
12 return static_branch_likely(&sched_smt_present);
13}
14#else
15static inline bool sched_smt_active(void) { return false; }
16#endif
17
18void arch_smt_update(void);
19
20#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0ba687454267..0d1b2c3f127b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
1326 } 1326 }
1327} 1327}
1328 1328
1329static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1330{
1331 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1332 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1333}
1334
1335static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1336{
1337 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1338}
1339
1340static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1341{
1342 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1343}
1344
1329/* Release a reference on a zerocopy structure */ 1345/* Release a reference on a zerocopy structure */
1330static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) 1346static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1331{ 1347{
@@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1335 if (uarg->callback == sock_zerocopy_callback) { 1351 if (uarg->callback == sock_zerocopy_callback) {
1336 uarg->zerocopy = uarg->zerocopy && zerocopy; 1352 uarg->zerocopy = uarg->zerocopy && zerocopy;
1337 sock_zerocopy_put(uarg); 1353 sock_zerocopy_put(uarg);
1338 } else { 1354 } else if (!skb_zcopy_is_nouarg(skb)) {
1339 uarg->callback(uarg, zerocopy); 1355 uarg->callback(uarg, zerocopy);
1340 } 1356 }
1341 1357
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 8ed77bb4ed86..a9b0280687d5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -196,6 +196,7 @@ struct tcp_sock {
196 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ 196 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
197 u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ 197 u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
198 u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ 198 u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
199 u32 compressed_ack_rcv_nxt;
199 200
200 u32 tsoffset; /* timestamp offset */ 201 u32 tsoffset; /* timestamp offset */
201 202
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 40b0b4c1bf7b..df20f8bdbfa3 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
83 * tracehook_report_syscall_entry - task is about to attempt a system call 83 * tracehook_report_syscall_entry - task is about to attempt a system call
84 * @regs: user register state of current task 84 * @regs: user register state of current task
85 * 85 *
86 * This will be called if %TIF_SYSCALL_TRACE has been set, when the 86 * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set,
87 * current task has just entered the kernel for a system call. 87 * when the current task has just entered the kernel for a system call.
88 * Full user register state is available here. Changing the values 88 * Full user register state is available here. Changing the values
89 * in @regs can affect the system call number and arguments to be tried. 89 * in @regs can affect the system call number and arguments to be tried.
90 * It is safe to block here, preventing the system call from beginning. 90 * It is safe to block here, preventing the system call from beginning.
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 538ba1a58f5b..e9de8ad0bad7 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
166 struct tracepoint_func *it_func_ptr; \ 166 struct tracepoint_func *it_func_ptr; \
167 void *it_func; \ 167 void *it_func; \
168 void *__data; \ 168 void *__data; \
169 int __maybe_unused idx = 0; \ 169 int __maybe_unused __idx = 0; \
170 \ 170 \
171 if (!(cond)) \ 171 if (!(cond)) \
172 return; \ 172 return; \
@@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
182 * doesn't work from the idle path. \ 182 * doesn't work from the idle path. \
183 */ \ 183 */ \
184 if (rcuidle) { \ 184 if (rcuidle) { \
185 idx = srcu_read_lock_notrace(&tracepoint_srcu); \ 185 __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
186 rcu_irq_enter_irqson(); \ 186 rcu_irq_enter_irqson(); \
187 } \ 187 } \
188 \ 188 \
@@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
198 \ 198 \
199 if (rcuidle) { \ 199 if (rcuidle) { \
200 rcu_irq_exit_irqson(); \ 200 rcu_irq_exit_irqson(); \
201 srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ 201 srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
202 } \ 202 } \
203 \ 203 \
204 preempt_enable_notrace(); \ 204 preempt_enable_notrace(); \
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index b7a99ce56bc9..a1be64c9940f 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -66,4 +66,7 @@
66/* Device needs a pause after every control message. */ 66/* Device needs a pause after every control message. */
67#define USB_QUIRK_DELAY_CTRL_MSG BIT(13) 67#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
68 68
69/* Hub needs extra delay after resetting its port. */
70#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
71
69#endif /* __LINUX_USB_QUIRKS_H */ 72#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d9514928ddac..564892e19f8c 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -289,9 +289,7 @@ struct xarray {
289void xa_init_flags(struct xarray *, gfp_t flags); 289void xa_init_flags(struct xarray *, gfp_t flags);
290void *xa_load(struct xarray *, unsigned long index); 290void *xa_load(struct xarray *, unsigned long index);
291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); 291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
292void *xa_cmpxchg(struct xarray *, unsigned long index, 292void *xa_erase(struct xarray *, unsigned long index);
293 void *old, void *entry, gfp_t);
294int xa_reserve(struct xarray *, unsigned long index, gfp_t);
295void *xa_store_range(struct xarray *, unsigned long first, unsigned long last, 293void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
296 void *entry, gfp_t); 294 void *entry, gfp_t);
297bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t); 295bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -344,65 +342,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
344} 342}
345 343
346/** 344/**
347 * xa_erase() - Erase this entry from the XArray.
348 * @xa: XArray.
349 * @index: Index of entry.
350 *
351 * This function is the equivalent of calling xa_store() with %NULL as
352 * the third argument. The XArray does not need to allocate memory, so
353 * the user does not need to provide GFP flags.
354 *
355 * Context: Process context. Takes and releases the xa_lock.
356 * Return: The entry which used to be at this index.
357 */
358static inline void *xa_erase(struct xarray *xa, unsigned long index)
359{
360 return xa_store(xa, index, NULL, 0);
361}
362
363/**
364 * xa_insert() - Store this entry in the XArray unless another entry is
365 * already present.
366 * @xa: XArray.
367 * @index: Index into array.
368 * @entry: New entry.
369 * @gfp: Memory allocation flags.
370 *
371 * If you would rather see the existing entry in the array, use xa_cmpxchg().
372 * This function is for users who don't care what the entry is, only that
373 * one is present.
374 *
375 * Context: Process context. Takes and releases the xa_lock.
376 * May sleep if the @gfp flags permit.
377 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
378 * -ENOMEM if memory could not be allocated.
379 */
380static inline int xa_insert(struct xarray *xa, unsigned long index,
381 void *entry, gfp_t gfp)
382{
383 void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
384 if (!curr)
385 return 0;
386 if (xa_is_err(curr))
387 return xa_err(curr);
388 return -EEXIST;
389}
390
391/**
392 * xa_release() - Release a reserved entry.
393 * @xa: XArray.
394 * @index: Index of entry.
395 *
396 * After calling xa_reserve(), you can call this function to release the
397 * reservation. If the entry at @index has been stored to, this function
398 * will do nothing.
399 */
400static inline void xa_release(struct xarray *xa, unsigned long index)
401{
402 xa_cmpxchg(xa, index, NULL, NULL, 0);
403}
404
405/**
406 * xa_for_each() - Iterate over a portion of an XArray. 345 * xa_for_each() - Iterate over a portion of an XArray.
407 * @xa: XArray. 346 * @xa: XArray.
408 * @entry: Entry retrieved from array. 347 * @entry: Entry retrieved from array.
@@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
455void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, 394void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
456 void *entry, gfp_t); 395 void *entry, gfp_t);
457int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); 396int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
397int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
458void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); 398void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
459void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); 399void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
460 400
@@ -487,6 +427,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
487} 427}
488 428
489/** 429/**
430 * xa_store_bh() - Store this entry in the XArray.
431 * @xa: XArray.
432 * @index: Index into array.
433 * @entry: New entry.
434 * @gfp: Memory allocation flags.
435 *
436 * This function is like calling xa_store() except it disables softirqs
437 * while holding the array lock.
438 *
439 * Context: Any context. Takes and releases the xa_lock while
440 * disabling softirqs.
441 * Return: The entry which used to be at this index.
442 */
443static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
444 void *entry, gfp_t gfp)
445{
446 void *curr;
447
448 xa_lock_bh(xa);
449 curr = __xa_store(xa, index, entry, gfp);
450 xa_unlock_bh(xa);
451
452 return curr;
453}
454
455/**
456 * xa_store_irq() - Erase this entry from the XArray.
457 * @xa: XArray.
458 * @index: Index into array.
459 * @entry: New entry.
460 * @gfp: Memory allocation flags.
461 *
462 * This function is like calling xa_store() except it disables interrupts
463 * while holding the array lock.
464 *
465 * Context: Process context. Takes and releases the xa_lock while
466 * disabling interrupts.
467 * Return: The entry which used to be at this index.
468 */
469static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
470 void *entry, gfp_t gfp)
471{
472 void *curr;
473
474 xa_lock_irq(xa);
475 curr = __xa_store(xa, index, entry, gfp);
476 xa_unlock_irq(xa);
477
478 return curr;
479}
480
481/**
490 * xa_erase_bh() - Erase this entry from the XArray. 482 * xa_erase_bh() - Erase this entry from the XArray.
491 * @xa: XArray. 483 * @xa: XArray.
492 * @index: Index of entry. 484 * @index: Index of entry.
@@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
495 * the third argument. The XArray does not need to allocate memory, so 487 * the third argument. The XArray does not need to allocate memory, so
496 * the user does not need to provide GFP flags. 488 * the user does not need to provide GFP flags.
497 * 489 *
498 * Context: Process context. Takes and releases the xa_lock while 490 * Context: Any context. Takes and releases the xa_lock while
499 * disabling softirqs. 491 * disabling softirqs.
500 * Return: The entry which used to be at this index. 492 * Return: The entry which used to be at this index.
501 */ 493 */
@@ -535,6 +527,61 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
535} 527}
536 528
537/** 529/**
530 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
531 * @xa: XArray.
532 * @index: Index into array.
533 * @old: Old value to test against.
534 * @entry: New value to place in array.
535 * @gfp: Memory allocation flags.
536 *
537 * If the entry at @index is the same as @old, replace it with @entry.
538 * If the return value is equal to @old, then the exchange was successful.
539 *
540 * Context: Any context. Takes and releases the xa_lock. May sleep
541 * if the @gfp flags permit.
542 * Return: The old value at this index or xa_err() if an error happened.
543 */
544static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
545 void *old, void *entry, gfp_t gfp)
546{
547 void *curr;
548
549 xa_lock(xa);
550 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
551 xa_unlock(xa);
552
553 return curr;
554}
555
556/**
557 * xa_insert() - Store this entry in the XArray unless another entry is
558 * already present.
559 * @xa: XArray.
560 * @index: Index into array.
561 * @entry: New entry.
562 * @gfp: Memory allocation flags.
563 *
564 * If you would rather see the existing entry in the array, use xa_cmpxchg().
565 * This function is for users who don't care what the entry is, only that
566 * one is present.
567 *
568 * Context: Process context. Takes and releases the xa_lock.
569 * May sleep if the @gfp flags permit.
570 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
571 * -ENOMEM if memory could not be allocated.
572 */
573static inline int xa_insert(struct xarray *xa, unsigned long index,
574 void *entry, gfp_t gfp)
575{
576 void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
577 if (!curr)
578 return 0;
579 if (xa_is_err(curr))
580 return xa_err(curr);
581 return -EEXIST;
582}
583
584/**
538 * xa_alloc() - Find somewhere to store this entry in the XArray. 585 * xa_alloc() - Find somewhere to store this entry in the XArray.
539 * @xa: XArray. 586 * @xa: XArray.
540 * @id: Pointer to ID. 587 * @id: Pointer to ID.
@@ -575,7 +622,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
575 * Updates the @id pointer with the index, then stores the entry at that 622 * Updates the @id pointer with the index, then stores the entry at that
576 * index. A concurrent lookup will not see an uninitialised @id. 623 * index. A concurrent lookup will not see an uninitialised @id.
577 * 624 *
578 * Context: Process context. Takes and releases the xa_lock while 625 * Context: Any context. Takes and releases the xa_lock while
579 * disabling softirqs. May sleep if the @gfp flags permit. 626 * disabling softirqs. May sleep if the @gfp flags permit.
580 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if 627 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
581 * there is no more space in the XArray. 628 * there is no more space in the XArray.
@@ -621,6 +668,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
621 return err; 668 return err;
622} 669}
623 670
671/**
672 * xa_reserve() - Reserve this index in the XArray.
673 * @xa: XArray.
674 * @index: Index into array.
675 * @gfp: Memory allocation flags.
676 *
677 * Ensures there is somewhere to store an entry at @index in the array.
678 * If there is already something stored at @index, this function does
679 * nothing. If there was nothing there, the entry is marked as reserved.
680 * Loading from a reserved entry returns a %NULL pointer.
681 *
682 * If you do not use the entry that you have reserved, call xa_release()
683 * or xa_erase() to free any unnecessary memory.
684 *
685 * Context: Any context. Takes and releases the xa_lock.
686 * May sleep if the @gfp flags permit.
687 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
688 */
689static inline
690int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
691{
692 int ret;
693
694 xa_lock(xa);
695 ret = __xa_reserve(xa, index, gfp);
696 xa_unlock(xa);
697
698 return ret;
699}
700
701/**
702 * xa_reserve_bh() - Reserve this index in the XArray.
703 * @xa: XArray.
704 * @index: Index into array.
705 * @gfp: Memory allocation flags.
706 *
707 * A softirq-disabling version of xa_reserve().
708 *
709 * Context: Any context. Takes and releases the xa_lock while
710 * disabling softirqs.
711 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
712 */
713static inline
714int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
715{
716 int ret;
717
718 xa_lock_bh(xa);
719 ret = __xa_reserve(xa, index, gfp);
720 xa_unlock_bh(xa);
721
722 return ret;
723}
724
725/**
726 * xa_reserve_irq() - Reserve this index in the XArray.
727 * @xa: XArray.
728 * @index: Index into array.
729 * @gfp: Memory allocation flags.
730 *
731 * An interrupt-disabling version of xa_reserve().
732 *
733 * Context: Process context. Takes and releases the xa_lock while
734 * disabling interrupts.
735 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
736 */
737static inline
738int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
739{
740 int ret;
741
742 xa_lock_irq(xa);
743 ret = __xa_reserve(xa, index, gfp);
744 xa_unlock_irq(xa);
745
746 return ret;
747}
748
749/**
750 * xa_release() - Release a reserved entry.
751 * @xa: XArray.
752 * @index: Index of entry.
753 *
754 * After calling xa_reserve(), you can call this function to release the
755 * reservation. If the entry at @index has been stored to, this function
756 * will do nothing.
757 */
758static inline void xa_release(struct xarray *xa, unsigned long index)
759{
760 xa_cmpxchg(xa, index, NULL, NULL, 0);
761}
762
624/* Everything below here is the Advanced API. Proceed with caution. */ 763/* Everything below here is the Advanced API. Proceed with caution. */
625 764
626/* 765/*
diff --git a/include/media/media-request.h b/include/media/media-request.h
index 0ce75c35131f..bd36d7431698 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -68,7 +68,7 @@ struct media_request {
68 unsigned int access_count; 68 unsigned int access_count;
69 struct list_head objects; 69 struct list_head objects;
70 unsigned int num_incomplete_objects; 70 unsigned int num_incomplete_objects;
71 struct wait_queue_head poll_wait; 71 wait_queue_head_t poll_wait;
72 spinlock_t lock; 72 spinlock_t lock;
73}; 73};
74 74
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 58c1ecf3d648..5467264771ec 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
624 624
625/* v4l2 request helper */ 625/* v4l2 request helper */
626 626
627void vb2_m2m_request_queue(struct media_request *req); 627void v4l2_m2m_request_queue(struct media_request *req);
628 628
629/* v4l2 ioctl helpers */ 629/* v4l2 ioctl helpers */
630 630
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 14b789a123e7..1656c5978498 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
317 const struct in6_addr *addr); 317 const struct in6_addr *addr);
318bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, 318bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
319 const struct in6_addr *addr); 319 const struct in6_addr *addr);
320int ipv6_anycast_init(void);
321void ipv6_anycast_cleanup(void);
320 322
321/* Device notifier */ 323/* Device notifier */
322int register_inet6addr_notifier(struct notifier_block *nb); 324int register_inet6addr_notifier(struct notifier_block *nb);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index de587948042a..1adefe42c0a6 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
77 struct sockaddr_rxrpc *, struct key *); 77 struct sockaddr_rxrpc *, struct key *);
78int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, 78int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
79 enum rxrpc_call_completion *, u32 *); 79 enum rxrpc_call_completion *, u32 *);
80u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *); 80u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
81void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
81u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); 82u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
82bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, 83bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
83 ktime_t *); 84 ktime_t *);
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d7578cf49c3a..c9c78c15bce0 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -146,10 +146,12 @@ struct ifacaddr6 {
146 struct in6_addr aca_addr; 146 struct in6_addr aca_addr;
147 struct fib6_info *aca_rt; 147 struct fib6_info *aca_rt;
148 struct ifacaddr6 *aca_next; 148 struct ifacaddr6 *aca_next;
149 struct hlist_node aca_addr_lst;
149 int aca_users; 150 int aca_users;
150 refcount_t aca_refcnt; 151 refcount_t aca_refcnt;
151 unsigned long aca_cstamp; 152 unsigned long aca_cstamp;
152 unsigned long aca_tstamp; 153 unsigned long aca_tstamp;
154 struct rcu_head rcu;
153}; 155};
154 156
155#define IFA_HOST IPV6_ADDR_LOOPBACK 157#define IFA_HOST IPV6_ADDR_LOOPBACK
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index cd24be4c4a99..13d55206bb9f 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
9 const struct nf_nat_range2 *range, 9 const struct nf_nat_range2 *range,
10 const struct net_device *out); 10 const struct net_device *out);
11 11
12void nf_nat_masquerade_ipv4_register_notifier(void); 12int nf_nat_masquerade_ipv4_register_notifier(void);
13void nf_nat_masquerade_ipv4_unregister_notifier(void); 13void nf_nat_masquerade_ipv4_unregister_notifier(void);
14 14
15#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */ 15#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 0c3b5ebf0bb8..2917bf95c437 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -5,7 +5,7 @@
5unsigned int 5unsigned int
6nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, 6nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
7 const struct net_device *out); 7 const struct net_device *out);
8void nf_nat_masquerade_ipv6_register_notifier(void); 8int nf_nat_masquerade_ipv6_register_notifier(void);
9void nf_nat_masquerade_ipv6_unregister_notifier(void); 9void nf_nat_masquerade_ipv6_unregister_notifier(void);
10 10
11#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */ 11#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index eed04af9b75e..ae7b86f587f2 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
153 const char *fmt, ...) { } 153 const char *fmt, ...) { }
154#endif /* CONFIG_SYSCTL */ 154#endif /* CONFIG_SYSCTL */
155 155
156static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
157{
158 return &net->ct.nf_ct_proto.generic;
159}
160
161static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
162{
163 return &net->ct.nf_ct_proto.tcp;
164}
165
166static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
167{
168 return &net->ct.nf_ct_proto.udp;
169}
170
171static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
172{
173 return &net->ct.nf_ct_proto.icmp;
174}
175
176static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
177{
178 return &net->ct.nf_ct_proto.icmpv6;
179}
180
181#ifdef CONFIG_NF_CT_PROTO_DCCP
182static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
183{
184 return &net->ct.nf_ct_proto.dccp;
185}
186#endif
187
188#ifdef CONFIG_NF_CT_PROTO_SCTP
189static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
190{
191 return &net->ct.nf_ct_proto.sctp;
192}
193#endif
194
156#endif /*_NF_CONNTRACK_PROTOCOL_H*/ 195#endif /*_NF_CONNTRACK_PROTOCOL_H*/
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8c2caa370e0f..ab9242e51d9e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
608 SCTP_DEFAULT_MINSEGMENT)); 608 SCTP_DEFAULT_MINSEGMENT));
609} 609}
610 610
611static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
612{
613 __u32 pmtu = sctp_dst_mtu(t->dst);
614
615 if (t->pathmtu == pmtu)
616 return true;
617
618 t->pathmtu = pmtu;
619
620 return false;
621}
622
611#endif /* __net_sctp_h__ */ 623#endif /* __net_sctp_h__ */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f1dab1f4b194..70c10a8f3e90 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1192,7 +1192,7 @@ struct snd_soc_pcm_runtime {
1192 ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \ 1192 ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
1193 (i)++) 1193 (i)++)
1194#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \ 1194#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
1195 for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);) 1195 for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
1196 1196
1197 1197
1198/* mixer control */ 1198/* mixer control */
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index a9834c37ac40..c0e7d24ca256 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
31 31
32 TP_fast_assign( 32 TP_fast_assign(
33 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); 33 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
34 strlcpy(__entry->domain, domain, DOMAIN_LEN); 34 strlcpy(__entry->domain, domain, sizeof(__entry->domain));
35 strlcpy(__entry->type, type, DOMAIN_LEN); 35 strlcpy(__entry->type, type, sizeof(__entry->type));
36 __entry->percentile = percentile; 36 __entry->percentile = percentile;
37 __entry->numerator = numerator; 37 __entry->numerator = numerator;
38 __entry->denominator = denominator; 38 __entry->denominator = denominator;
@@ -60,7 +60,7 @@ TRACE_EVENT(kyber_adjust,
60 60
61 TP_fast_assign( 61 TP_fast_assign(
62 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); 62 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
63 strlcpy(__entry->domain, domain, DOMAIN_LEN); 63 strlcpy(__entry->domain, domain, sizeof(__entry->domain));
64 __entry->depth = depth; 64 __entry->depth = depth;
65 ), 65 ),
66 66
@@ -82,7 +82,7 @@ TRACE_EVENT(kyber_throttled,
82 82
83 TP_fast_assign( 83 TP_fast_assign(
84 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); 84 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
85 strlcpy(__entry->domain, domain, DOMAIN_LEN); 85 strlcpy(__entry->domain, domain, sizeof(__entry->domain));
86 ), 86 ),
87 87
88 TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), 88 TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 573d5b901fb1..5b50fe4906d2 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -181,6 +181,7 @@ enum rxrpc_timer_trace {
181enum rxrpc_propose_ack_trace { 181enum rxrpc_propose_ack_trace {
182 rxrpc_propose_ack_client_tx_end, 182 rxrpc_propose_ack_client_tx_end,
183 rxrpc_propose_ack_input_data, 183 rxrpc_propose_ack_input_data,
184 rxrpc_propose_ack_ping_for_check_life,
184 rxrpc_propose_ack_ping_for_keepalive, 185 rxrpc_propose_ack_ping_for_keepalive,
185 rxrpc_propose_ack_ping_for_lost_ack, 186 rxrpc_propose_ack_ping_for_lost_ack,
186 rxrpc_propose_ack_ping_for_lost_reply, 187 rxrpc_propose_ack_ping_for_lost_reply,
@@ -380,6 +381,7 @@ enum rxrpc_tx_point {
380#define rxrpc_propose_ack_traces \ 381#define rxrpc_propose_ack_traces \
381 EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ 382 EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
382 EM(rxrpc_propose_ack_input_data, "DataIn ") \ 383 EM(rxrpc_propose_ack_input_data, "DataIn ") \
384 EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
383 EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \ 385 EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
384 EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ 386 EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
385 EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ 387 EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f07b270d4fc4..9a4bdfadab07 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
107#ifdef CREATE_TRACE_POINTS 107#ifdef CREATE_TRACE_POINTS
108static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) 108static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
109{ 109{
110 unsigned int state;
111
110#ifdef CONFIG_SCHED_DEBUG 112#ifdef CONFIG_SCHED_DEBUG
111 BUG_ON(p != current); 113 BUG_ON(p != current);
112#endif /* CONFIG_SCHED_DEBUG */ 114#endif /* CONFIG_SCHED_DEBUG */
@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
118 if (preempt) 120 if (preempt)
119 return TASK_REPORT_MAX; 121 return TASK_REPORT_MAX;
120 122
121 return 1 << task_state_index(p); 123 /*
124 * task_state_index() uses fls() and returns a value from 0-8 range.
125 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
126 * it for left shift operation to get the correct task->state
127 * mapping.
128 */
129 state = task_state_index(p);
130
131 return state ? (1 << (state - 1)) : state;
122} 132}
123#endif /* CREATE_TRACE_POINTS */ 133#endif /* CREATE_TRACE_POINTS */
124 134
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 6d180cc60a5d..3eb5a4c3d60a 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -716,7 +716,6 @@
716 * the situation described above. 716 * the situation described above.
717 */ 717 */
718#define REL_RESERVED 0x0a 718#define REL_RESERVED 0x0a
719#define REL_WHEEL_HI_RES 0x0b
720#define REL_MAX 0x0f 719#define REL_MAX 0x0f
721#define REL_CNT (REL_MAX+1) 720#define REL_CNT (REL_MAX+1)
722 721
@@ -753,15 +752,6 @@
753 752
754#define ABS_MISC 0x28 753#define ABS_MISC 0x28
755 754
756/*
757 * 0x2e is reserved and should not be used in input drivers.
758 * It was used by HID as ABS_MISC+6 and userspace needs to detect if
759 * the next ABS_* event is correct or is just ABS_MISC + n.
760 * We define here ABS_RESERVED so userspace can rely on it and detect
761 * the situation described above.
762 */
763#define ABS_RESERVED 0x2e
764
765#define ABS_MT_SLOT 0x2f /* MT slot being modified */ 755#define ABS_MT_SLOT 0x2f /* MT slot being modified */
766#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ 756#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
767#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ 757#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index f5ff8a76e208..b01eb502d49c 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args {
83}; 83};
84 84
85struct kfd_ioctl_get_queue_wave_state_args { 85struct kfd_ioctl_get_queue_wave_state_args {
86 uint64_t ctl_stack_address; /* to KFD */ 86 __u64 ctl_stack_address; /* to KFD */
87 uint32_t ctl_stack_used_size; /* from KFD */ 87 __u32 ctl_stack_used_size; /* from KFD */
88 uint32_t save_area_used_size; /* from KFD */ 88 __u32 save_area_used_size; /* from KFD */
89 uint32_t queue_id; /* to KFD */ 89 __u32 queue_id; /* to KFD */
90 uint32_t pad; 90 __u32 pad;
91}; 91};
92 92
93/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ 93/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
255 255
256/* hw exception data */ 256/* hw exception data */
257struct kfd_hsa_hw_exception_data { 257struct kfd_hsa_hw_exception_data {
258 uint32_t reset_type; 258 __u32 reset_type;
259 uint32_t reset_cause; 259 __u32 reset_cause;
260 uint32_t memory_lost; 260 __u32 memory_lost;
261 uint32_t gpu_id; 261 __u32 gpu_id;
262}; 262};
263 263
264/* Event data */ 264/* Event data */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 579974b0bf0d..7de4f1bdaf06 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1635,8 +1635,8 @@ enum nft_ng_attributes {
1635 NFTA_NG_MODULUS, 1635 NFTA_NG_MODULUS,
1636 NFTA_NG_TYPE, 1636 NFTA_NG_TYPE,
1637 NFTA_NG_OFFSET, 1637 NFTA_NG_OFFSET,
1638 NFTA_NG_SET_NAME, 1638 NFTA_NG_SET_NAME, /* deprecated */
1639 NFTA_NG_SET_ID, 1639 NFTA_NG_SET_ID, /* deprecated */
1640 __NFTA_NG_MAX 1640 __NFTA_NG_MAX
1641}; 1641};
1642#define NFTA_NG_MAX (__NFTA_NG_MAX - 1) 1642#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 156ccd089df1..1610fdbab98d 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -11,6 +11,10 @@
11#include <linux/if_vlan.h> 11#include <linux/if_vlan.h>
12#include <linux/if_pppox.h> 12#include <linux/if_pppox.h>
13 13
14#ifndef __KERNEL__
15#include <limits.h> /* for INT_MIN, INT_MAX */
16#endif
17
14/* Bridge Hooks */ 18/* Bridge Hooks */
15/* After promisc drops, checksum checks. */ 19/* After promisc drops, checksum checks. */
16#define NF_BR_PRE_ROUTING 0 20#define NF_BR_PRE_ROUTING 0
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b17201edfa09 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
212#define PR_SET_SPECULATION_CTRL 53 212#define PR_SET_SPECULATION_CTRL 53
213/* Speculation control variants */ 213/* Speculation control variants */
214# define PR_SPEC_STORE_BYPASS 0 214# define PR_SPEC_STORE_BYPASS 0
215# define PR_SPEC_INDIRECT_BRANCH 1
215/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ 216/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
216# define PR_SPEC_NOT_AFFECTED 0 217# define PR_SPEC_NOT_AFFECTED 0
217# define PR_SPEC_PRCTL (1UL << 0) 218# define PR_SPEC_PRCTL (1UL << 0)
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 34dd3d497f2c..c81feb373d3e 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
568 568
569#define SCTP_ASSOC_CHANGE_DENIED 0x0004 569#define SCTP_ASSOC_CHANGE_DENIED 0x0004
570#define SCTP_ASSOC_CHANGE_FAILED 0x0008 570#define SCTP_ASSOC_CHANGE_FAILED 0x0008
571#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
572#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
571struct sctp_stream_change_event { 573struct sctp_stream_change_event {
572 __u16 strchange_type; 574 __u16 strchange_type;
573 __u16 strchange_flags; 575 __u16 strchange_flags;
@@ -1151,6 +1153,7 @@ struct sctp_add_streams {
1151/* SCTP Stream schedulers */ 1153/* SCTP Stream schedulers */
1152enum sctp_sched_type { 1154enum sctp_sched_type {
1153 SCTP_SS_FCFS, 1155 SCTP_SS_FCFS,
1156 SCTP_SS_DEFAULT = SCTP_SS_FCFS,
1154 SCTP_SS_PRIO, 1157 SCTP_SS_PRIO,
1155 SCTP_SS_RR, 1158 SCTP_SS_RR,
1156 SCTP_SS_MAX = SCTP_SS_RR 1159 SCTP_SS_MAX = SCTP_SS_RR
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 51b095898f4b..998983a6e6b7 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -50,6 +50,8 @@
50#ifndef __LINUX_V4L2_CONTROLS_H 50#ifndef __LINUX_V4L2_CONTROLS_H
51#define __LINUX_V4L2_CONTROLS_H 51#define __LINUX_V4L2_CONTROLS_H
52 52
53#include <linux/types.h>
54
53/* Control classes */ 55/* Control classes */
54#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */ 56#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
55#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */ 57#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
@@ -1110,6 +1112,7 @@ struct v4l2_mpeg2_sequence {
1110 __u8 profile_and_level_indication; 1112 __u8 profile_and_level_indication;
1111 __u8 progressive_sequence; 1113 __u8 progressive_sequence;
1112 __u8 chroma_format; 1114 __u8 chroma_format;
1115 __u8 pad;
1113}; 1116};
1114 1117
1115struct v4l2_mpeg2_picture { 1118struct v4l2_mpeg2_picture {
@@ -1128,6 +1131,7 @@ struct v4l2_mpeg2_picture {
1128 __u8 alternate_scan; 1131 __u8 alternate_scan;
1129 __u8 repeat_first_field; 1132 __u8 repeat_first_field;
1130 __u8 progressive_frame; 1133 __u8 progressive_frame;
1134 __u8 pad;
1131}; 1135};
1132 1136
1133struct v4l2_ctrl_mpeg2_slice_params { 1137struct v4l2_ctrl_mpeg2_slice_params {
@@ -1142,6 +1146,7 @@ struct v4l2_ctrl_mpeg2_slice_params {
1142 1146
1143 __u8 backward_ref_index; 1147 __u8 backward_ref_index;
1144 __u8 forward_ref_index; 1148 __u8 forward_ref_index;
1149 __u8 pad;
1145}; 1150};
1146 1151
1147struct v4l2_ctrl_mpeg2_quantization { 1152struct v4l2_ctrl_mpeg2_quantization {
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 61f410fd74e4..4914b93a23f2 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
44{ 44{
45} 45}
46#endif 46#endif
47
48#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
49struct resource;
50void arch_xen_balloon_init(struct resource *hostmem_resource);
51#endif
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 18803ff76e27..4969817124a8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
42 42
43extern unsigned long *xen_contiguous_bitmap; 43extern unsigned long *xen_contiguous_bitmap;
44 44
45#ifdef CONFIG_XEN_PV 45#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
46int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, 46int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
47 unsigned int address_bits, 47 unsigned int address_bits,
48 dma_addr_t *dma_handle); 48 dma_addr_t *dma_handle);
49 49
50void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); 50void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
51
52int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
53 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
54 unsigned int domid, bool no_translate, struct page **pages);
55#else 51#else
56static inline int xen_create_contiguous_region(phys_addr_t pstart, 52static inline int xen_create_contiguous_region(phys_addr_t pstart,
57 unsigned int order, 53 unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
63 59
64static inline void xen_destroy_contiguous_region(phys_addr_t pstart, 60static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
65 unsigned int order) { } 61 unsigned int order) { }
62#endif
66 63
64#if defined(CONFIG_XEN_PV)
65int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
66 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
67 unsigned int domid, bool no_translate, struct page **pages);
68#else
67static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, 69static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
68 xen_pfn_t *pfn, int nr, int *err_ptr, 70 xen_pfn_t *pfn, int nr, int *err_ptr,
69 pgprot_t prot, unsigned int domid, 71 pgprot_t prot, unsigned int domid,
diff --git a/init/Kconfig b/init/Kconfig
index a4112e95724a..cf5b5a0dcbc2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -509,6 +509,15 @@ config PSI
509 509
510 Say N if unsure. 510 Say N if unsure.
511 511
512config PSI_DEFAULT_DISABLED
513 bool "Require boot parameter to enable pressure stall information tracking"
514 default n
515 depends on PSI
516 help
517 If set, pressure stall information tracking will be disabled
518 per default but can be enabled through passing psi_enable=1
519 on the kernel commandline during boot.
520
512endmenu # "CPU/Task time and stats accounting" 521endmenu # "CPU/Task time and stats accounting"
513 522
514config CPU_ISOLATION 523config CPU_ISOLATION
diff --git a/init/initramfs.c b/init/initramfs.c
index 640557788026..f6f4a1e4cd54 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -291,16 +291,6 @@ static int __init do_reset(void)
291 return 1; 291 return 1;
292} 292}
293 293
294static int __init maybe_link(void)
295{
296 if (nlink >= 2) {
297 char *old = find_link(major, minor, ino, mode, collected);
298 if (old)
299 return (ksys_link(old, collected) < 0) ? -1 : 1;
300 }
301 return 0;
302}
303
304static void __init clean_path(char *path, umode_t fmode) 294static void __init clean_path(char *path, umode_t fmode)
305{ 295{
306 struct kstat st; 296 struct kstat st;
@@ -313,6 +303,18 @@ static void __init clean_path(char *path, umode_t fmode)
313 } 303 }
314} 304}
315 305
306static int __init maybe_link(void)
307{
308 if (nlink >= 2) {
309 char *old = find_link(major, minor, ino, mode, collected);
310 if (old) {
311 clean_path(collected, 0);
312 return (ksys_link(old, collected) < 0) ? -1 : 1;
313 }
314 }
315 return 0;
316}
317
316static __initdata int wfd; 318static __initdata int wfd;
317 319
318static int __init do_name(void) 320static int __init do_name(void)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6377225b2082..b1a3545d0ec8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr)
553int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 553int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
554 char *sym) 554 char *sym)
555{ 555{
556 unsigned long symbol_start, symbol_end;
557 struct bpf_prog_aux *aux; 556 struct bpf_prog_aux *aux;
558 unsigned int it = 0; 557 unsigned int it = 0;
559 int ret = -ERANGE; 558 int ret = -ERANGE;
@@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
566 if (it++ != symnum) 565 if (it++ != symnum)
567 continue; 566 continue;
568 567
569 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
570 bpf_get_prog_name(aux->prog, sym); 568 bpf_get_prog_name(aux->prog, sym);
571 569
572 *value = symbol_start; 570 *value = (unsigned long)aux->prog->bpf_func;
573 *type = BPF_SYM_ELF_TYPE; 571 *type = BPF_SYM_ELF_TYPE;
574 572
575 ret = 0; 573 ret = 0;
@@ -674,6 +672,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
674 bpf_prog_unlock_free(fp); 672 bpf_prog_unlock_free(fp);
675} 673}
676 674
675int bpf_jit_get_func_addr(const struct bpf_prog *prog,
676 const struct bpf_insn *insn, bool extra_pass,
677 u64 *func_addr, bool *func_addr_fixed)
678{
679 s16 off = insn->off;
680 s32 imm = insn->imm;
681 u8 *addr;
682
683 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
684 if (!*func_addr_fixed) {
685 /* Place-holder address till the last pass has collected
686 * all addresses for JITed subprograms in which case we
687 * can pick them up from prog->aux.
688 */
689 if (!extra_pass)
690 addr = NULL;
691 else if (prog->aux->func &&
692 off >= 0 && off < prog->aux->func_cnt)
693 addr = (u8 *)prog->aux->func[off]->bpf_func;
694 else
695 return -EINVAL;
696 } else {
697 /* Address of a BPF helper call. Since part of the core
698 * kernel, it's always at a fixed location. __bpf_call_base
699 * and the helper with imm relative to it are both in core
700 * kernel.
701 */
702 addr = (u8 *)__bpf_call_base + imm;
703 }
704
705 *func_addr = (unsigned long)addr;
706 return 0;
707}
708
677static int bpf_jit_blind_insn(const struct bpf_insn *from, 709static int bpf_jit_blind_insn(const struct bpf_insn *from,
678 const struct bpf_insn *aux, 710 const struct bpf_insn *aux,
679 struct bpf_insn *to_buff) 711 struct bpf_insn *to_buff)
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index c97a8f968638..bed9d48a7ae9 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -139,7 +139,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
139 return -ENOENT; 139 return -ENOENT;
140 140
141 new = kmalloc_node(sizeof(struct bpf_storage_buffer) + 141 new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
142 map->value_size, __GFP_ZERO | GFP_USER, 142 map->value_size,
143 __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
143 map->numa_node); 144 map->numa_node);
144 if (!new) 145 if (!new)
145 return -ENOMEM; 146 return -ENOMEM;
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 8bbd72d3a121..b384ea9f3254 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -7,6 +7,7 @@
7#include <linux/bpf.h> 7#include <linux/bpf.h>
8#include <linux/list.h> 8#include <linux/list.h>
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/capability.h>
10#include "percpu_freelist.h" 11#include "percpu_freelist.h"
11 12
12#define QUEUE_STACK_CREATE_FLAG_MASK \ 13#define QUEUE_STACK_CREATE_FLAG_MASK \
@@ -45,8 +46,12 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
45/* Called from syscall */ 46/* Called from syscall */
46static int queue_stack_map_alloc_check(union bpf_attr *attr) 47static int queue_stack_map_alloc_check(union bpf_attr *attr)
47{ 48{
49 if (!capable(CAP_SYS_ADMIN))
50 return -EPERM;
51
48 /* check sanity of attributes */ 52 /* check sanity of attributes */
49 if (attr->max_entries == 0 || attr->key_size != 0 || 53 if (attr->max_entries == 0 || attr->key_size != 0 ||
54 attr->value_size == 0 ||
50 attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK) 55 attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
51 return -EINVAL; 56 return -EINVAL;
52 57
@@ -63,15 +68,10 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
63{ 68{
64 int ret, numa_node = bpf_map_attr_numa_node(attr); 69 int ret, numa_node = bpf_map_attr_numa_node(attr);
65 struct bpf_queue_stack *qs; 70 struct bpf_queue_stack *qs;
66 u32 size, value_size; 71 u64 size, queue_size, cost;
67 u64 queue_size, cost;
68
69 size = attr->max_entries + 1;
70 value_size = attr->value_size;
71
72 queue_size = sizeof(*qs) + (u64) value_size * size;
73 72
74 cost = queue_size; 73 size = (u64) attr->max_entries + 1;
74 cost = queue_size = sizeof(*qs) + size * attr->value_size;
75 if (cost >= U32_MAX - PAGE_SIZE) 75 if (cost >= U32_MAX - PAGE_SIZE)
76 return ERR_PTR(-E2BIG); 76 return ERR_PTR(-E2BIG);
77 77
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ccb93277aae2..cf5040fd5434 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2078 info.jited_prog_len = 0; 2078 info.jited_prog_len = 0;
2079 info.xlated_prog_len = 0; 2079 info.xlated_prog_len = 0;
2080 info.nr_jited_ksyms = 0; 2080 info.nr_jited_ksyms = 0;
2081 info.nr_jited_func_lens = 0;
2081 goto done; 2082 goto done;
2082 } 2083 }
2083 2084
@@ -2158,11 +2159,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2158 } 2159 }
2159 2160
2160 ulen = info.nr_jited_ksyms; 2161 ulen = info.nr_jited_ksyms;
2161 info.nr_jited_ksyms = prog->aux->func_cnt; 2162 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
2162 if (info.nr_jited_ksyms && ulen) { 2163 if (info.nr_jited_ksyms && ulen) {
2163 if (bpf_dump_raw_ok()) { 2164 if (bpf_dump_raw_ok()) {
2165 unsigned long ksym_addr;
2164 u64 __user *user_ksyms; 2166 u64 __user *user_ksyms;
2165 ulong ksym_addr;
2166 u32 i; 2167 u32 i;
2167 2168
2168 /* copy the address of the kernel symbol 2169 /* copy the address of the kernel symbol
@@ -2170,10 +2171,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2170 */ 2171 */
2171 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 2172 ulen = min_t(u32, info.nr_jited_ksyms, ulen);
2172 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 2173 user_ksyms = u64_to_user_ptr(info.jited_ksyms);
2173 for (i = 0; i < ulen; i++) { 2174 if (prog->aux->func_cnt) {
2174 ksym_addr = (ulong) prog->aux->func[i]->bpf_func; 2175 for (i = 0; i < ulen; i++) {
2175 ksym_addr &= PAGE_MASK; 2176 ksym_addr = (unsigned long)
2176 if (put_user((u64) ksym_addr, &user_ksyms[i])) 2177 prog->aux->func[i]->bpf_func;
2178 if (put_user((u64) ksym_addr,
2179 &user_ksyms[i]))
2180 return -EFAULT;
2181 }
2182 } else {
2183 ksym_addr = (unsigned long) prog->bpf_func;
2184 if (put_user((u64) ksym_addr, &user_ksyms[0]))
2177 return -EFAULT; 2185 return -EFAULT;
2178 } 2186 }
2179 } else { 2187 } else {
@@ -2182,7 +2190,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2182 } 2190 }
2183 2191
2184 ulen = info.nr_jited_func_lens; 2192 ulen = info.nr_jited_func_lens;
2185 info.nr_jited_func_lens = prog->aux->func_cnt; 2193 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
2186 if (info.nr_jited_func_lens && ulen) { 2194 if (info.nr_jited_func_lens && ulen) {
2187 if (bpf_dump_raw_ok()) { 2195 if (bpf_dump_raw_ok()) {
2188 u32 __user *user_lens; 2196 u32 __user *user_lens;
@@ -2191,9 +2199,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2191 /* copy the JITed image lengths for each function */ 2199 /* copy the JITed image lengths for each function */
2192 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 2200 ulen = min_t(u32, info.nr_jited_func_lens, ulen);
2193 user_lens = u64_to_user_ptr(info.jited_func_lens); 2201 user_lens = u64_to_user_ptr(info.jited_func_lens);
2194 for (i = 0; i < ulen; i++) { 2202 if (prog->aux->func_cnt) {
2195 func_len = prog->aux->func[i]->jited_len; 2203 for (i = 0; i < ulen; i++) {
2196 if (put_user(func_len, &user_lens[i])) 2204 func_len =
2205 prog->aux->func[i]->jited_len;
2206 if (put_user(func_len, &user_lens[i]))
2207 return -EFAULT;
2208 }
2209 } else {
2210 func_len = prog->jited_len;
2211 if (put_user(func_len, &user_lens[0]))
2197 return -EFAULT; 2212 return -EFAULT;
2198 } 2213 }
2199 } else { 2214 } else {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1971ca325fb4..6dd419550aba 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5650,7 +5650,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
5650 return; 5650 return;
5651 /* NOTE: fake 'exit' subprog should be updated as well. */ 5651 /* NOTE: fake 'exit' subprog should be updated as well. */
5652 for (i = 0; i <= env->subprog_cnt; i++) { 5652 for (i = 0; i <= env->subprog_cnt; i++) {
5653 if (env->subprog_info[i].start < off) 5653 if (env->subprog_info[i].start <= off)
5654 continue; 5654 continue;
5655 env->subprog_info[i].start += len - 1; 5655 env->subprog_info[i].start += len - 1;
5656 } 5656 }
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3c7f3b4c453c..91d5c38eb7e5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,6 +10,7 @@
10#include <linux/sched/signal.h> 10#include <linux/sched/signal.h>
11#include <linux/sched/hotplug.h> 11#include <linux/sched/hotplug.h>
12#include <linux/sched/task.h> 12#include <linux/sched/task.h>
13#include <linux/sched/smt.h>
13#include <linux/unistd.h> 14#include <linux/unistd.h>
14#include <linux/cpu.h> 15#include <linux/cpu.h>
15#include <linux/oom.h> 16#include <linux/oom.h>
@@ -367,6 +368,12 @@ static void lockdep_release_cpus_lock(void)
367 368
368#endif /* CONFIG_HOTPLUG_CPU */ 369#endif /* CONFIG_HOTPLUG_CPU */
369 370
371/*
372 * Architectures that need SMT-specific errata handling during SMT hotplug
373 * should override this.
374 */
375void __weak arch_smt_update(void) { }
376
370#ifdef CONFIG_HOTPLUG_SMT 377#ifdef CONFIG_HOTPLUG_SMT
371enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; 378enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
372EXPORT_SYMBOL_GPL(cpu_smt_control); 379EXPORT_SYMBOL_GPL(cpu_smt_control);
@@ -1011,6 +1018,7 @@ out:
1011 * concurrent CPU hotplug via cpu_add_remove_lock. 1018 * concurrent CPU hotplug via cpu_add_remove_lock.
1012 */ 1019 */
1013 lockup_detector_cleanup(); 1020 lockup_detector_cleanup();
1021 arch_smt_update();
1014 return ret; 1022 return ret;
1015} 1023}
1016 1024
@@ -1139,6 +1147,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1139 ret = cpuhp_up_callbacks(cpu, st, target); 1147 ret = cpuhp_up_callbacks(cpu, st, target);
1140out: 1148out:
1141 cpus_write_unlock(); 1149 cpus_write_unlock();
1150 arch_smt_update();
1142 return ret; 1151 return ret;
1143} 1152}
1144 1153
@@ -2055,12 +2064,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
2055 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2064 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2056} 2065}
2057 2066
2058/*
2059 * Architectures that need SMT-specific errata handling during SMT hotplug
2060 * should override this.
2061 */
2062void __weak arch_smt_update(void) { };
2063
2064static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) 2067static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2065{ 2068{
2066 int cpu, ret = 0; 2069 int cpu, ret = 0;
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 6ad4a9fcbd6f..7921ae4fca8d 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -179,14 +179,14 @@ kdb_bt(int argc, const char **argv)
179 kdb_printf("no process for cpu %ld\n", cpu); 179 kdb_printf("no process for cpu %ld\n", cpu);
180 return 0; 180 return 0;
181 } 181 }
182 sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu)); 182 sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
183 kdb_parse(buf); 183 kdb_parse(buf);
184 return 0; 184 return 0;
185 } 185 }
186 kdb_printf("btc: cpu status: "); 186 kdb_printf("btc: cpu status: ");
187 kdb_parse("cpu\n"); 187 kdb_parse("cpu\n");
188 for_each_online_cpu(cpu) { 188 for_each_online_cpu(cpu) {
189 sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu)); 189 sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
190 kdb_parse(buf); 190 kdb_parse(buf);
191 touch_nmi_watchdog(); 191 touch_nmi_watchdog();
192 } 192 }
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index ed5d34925ad0..6a4b41484afe 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
216 int count; 216 int count;
217 int i; 217 int i;
218 int diag, dtab_count; 218 int diag, dtab_count;
219 int key; 219 int key, buf_size, ret;
220 220
221 221
222 diag = kdbgetintenv("DTABCOUNT", &dtab_count); 222 diag = kdbgetintenv("DTABCOUNT", &dtab_count);
@@ -336,9 +336,8 @@ poll_again:
336 else 336 else
337 p_tmp = tmpbuffer; 337 p_tmp = tmpbuffer;
338 len = strlen(p_tmp); 338 len = strlen(p_tmp);
339 count = kallsyms_symbol_complete(p_tmp, 339 buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
340 sizeof(tmpbuffer) - 340 count = kallsyms_symbol_complete(p_tmp, buf_size);
341 (p_tmp - tmpbuffer));
342 if (tab == 2 && count > 0) { 341 if (tab == 2 && count > 0) {
343 kdb_printf("\n%d symbols are found.", count); 342 kdb_printf("\n%d symbols are found.", count);
344 if (count > dtab_count) { 343 if (count > dtab_count) {
@@ -350,9 +349,13 @@ poll_again:
350 } 349 }
351 kdb_printf("\n"); 350 kdb_printf("\n");
352 for (i = 0; i < count; i++) { 351 for (i = 0; i < count; i++) {
353 if (WARN_ON(!kallsyms_symbol_next(p_tmp, i))) 352 ret = kallsyms_symbol_next(p_tmp, i, buf_size);
353 if (WARN_ON(!ret))
354 break; 354 break;
355 kdb_printf("%s ", p_tmp); 355 if (ret != -E2BIG)
356 kdb_printf("%s ", p_tmp);
357 else
358 kdb_printf("%s... ", p_tmp);
356 *(p_tmp + len) = '\0'; 359 *(p_tmp + len) = '\0';
357 } 360 }
358 if (i >= dtab_count) 361 if (i >= dtab_count)
diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c
index 118527aa60ea..750497b0003a 100644
--- a/kernel/debug/kdb/kdb_keyboard.c
+++ b/kernel/debug/kdb/kdb_keyboard.c
@@ -173,11 +173,11 @@ int kdb_get_kbd_char(void)
173 case KT_LATIN: 173 case KT_LATIN:
174 if (isprint(keychar)) 174 if (isprint(keychar))
175 break; /* printable characters */ 175 break; /* printable characters */
176 /* drop through */ 176 /* fall through */
177 case KT_SPEC: 177 case KT_SPEC:
178 if (keychar == K_ENTER) 178 if (keychar == K_ENTER)
179 break; 179 break;
180 /* drop through */ 180 /* fall through */
181 default: 181 default:
182 return -1; /* ignore unprintables */ 182 return -1; /* ignore unprintables */
183 } 183 }
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index bb4fe4e1a601..d72b32c66f7d 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1192,7 +1192,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
1192 if (reason == KDB_REASON_DEBUG) { 1192 if (reason == KDB_REASON_DEBUG) {
1193 /* special case below */ 1193 /* special case below */
1194 } else { 1194 } else {
1195 kdb_printf("\nEntering kdb (current=0x%p, pid %d) ", 1195 kdb_printf("\nEntering kdb (current=0x%px, pid %d) ",
1196 kdb_current, kdb_current ? kdb_current->pid : 0); 1196 kdb_current, kdb_current ? kdb_current->pid : 0);
1197#if defined(CONFIG_SMP) 1197#if defined(CONFIG_SMP)
1198 kdb_printf("on processor %d ", raw_smp_processor_id()); 1198 kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1208,7 +1208,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
1208 */ 1208 */
1209 switch (db_result) { 1209 switch (db_result) {
1210 case KDB_DB_BPT: 1210 case KDB_DB_BPT:
1211 kdb_printf("\nEntering kdb (0x%p, pid %d) ", 1211 kdb_printf("\nEntering kdb (0x%px, pid %d) ",
1212 kdb_current, kdb_current->pid); 1212 kdb_current, kdb_current->pid);
1213#if defined(CONFIG_SMP) 1213#if defined(CONFIG_SMP)
1214 kdb_printf("on processor %d ", raw_smp_processor_id()); 1214 kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1493,6 +1493,7 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
1493 char cbuf[32]; 1493 char cbuf[32];
1494 char *c = cbuf; 1494 char *c = cbuf;
1495 int i; 1495 int i;
1496 int j;
1496 unsigned long word; 1497 unsigned long word;
1497 1498
1498 memset(cbuf, '\0', sizeof(cbuf)); 1499 memset(cbuf, '\0', sizeof(cbuf));
@@ -1538,25 +1539,9 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
1538 wc.word = word; 1539 wc.word = word;
1539#define printable_char(c) \ 1540#define printable_char(c) \
1540 ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; }) 1541 ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
1541 switch (bytesperword) { 1542 for (j = 0; j < bytesperword; j++)
1542 case 8:
1543 *c++ = printable_char(*cp++); 1543 *c++ = printable_char(*cp++);
1544 *c++ = printable_char(*cp++); 1544 addr += bytesperword;
1545 *c++ = printable_char(*cp++);
1546 *c++ = printable_char(*cp++);
1547 addr += 4;
1548 case 4:
1549 *c++ = printable_char(*cp++);
1550 *c++ = printable_char(*cp++);
1551 addr += 2;
1552 case 2:
1553 *c++ = printable_char(*cp++);
1554 addr++;
1555 case 1:
1556 *c++ = printable_char(*cp++);
1557 addr++;
1558 break;
1559 }
1560#undef printable_char 1545#undef printable_char
1561 } 1546 }
1562 } 1547 }
@@ -2048,7 +2033,7 @@ static int kdb_lsmod(int argc, const char **argv)
2048 if (mod->state == MODULE_STATE_UNFORMED) 2033 if (mod->state == MODULE_STATE_UNFORMED)
2049 continue; 2034 continue;
2050 2035
2051 kdb_printf("%-20s%8u 0x%p ", mod->name, 2036 kdb_printf("%-20s%8u 0x%px ", mod->name,
2052 mod->core_layout.size, (void *)mod); 2037 mod->core_layout.size, (void *)mod);
2053#ifdef CONFIG_MODULE_UNLOAD 2038#ifdef CONFIG_MODULE_UNLOAD
2054 kdb_printf("%4d ", module_refcount(mod)); 2039 kdb_printf("%4d ", module_refcount(mod));
@@ -2059,7 +2044,7 @@ static int kdb_lsmod(int argc, const char **argv)
2059 kdb_printf(" (Loading)"); 2044 kdb_printf(" (Loading)");
2060 else 2045 else
2061 kdb_printf(" (Live)"); 2046 kdb_printf(" (Live)");
2062 kdb_printf(" 0x%p", mod->core_layout.base); 2047 kdb_printf(" 0x%px", mod->core_layout.base);
2063 2048
2064#ifdef CONFIG_MODULE_UNLOAD 2049#ifdef CONFIG_MODULE_UNLOAD
2065 { 2050 {
@@ -2341,7 +2326,7 @@ void kdb_ps1(const struct task_struct *p)
2341 return; 2326 return;
2342 2327
2343 cpu = kdb_process_cpu(p); 2328 cpu = kdb_process_cpu(p);
2344 kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n", 2329 kdb_printf("0x%px %8d %8d %d %4d %c 0x%px %c%s\n",
2345 (void *)p, p->pid, p->parent->pid, 2330 (void *)p, p->pid, p->parent->pid,
2346 kdb_task_has_cpu(p), kdb_process_cpu(p), 2331 kdb_task_has_cpu(p), kdb_process_cpu(p),
2347 kdb_task_state_char(p), 2332 kdb_task_state_char(p),
@@ -2354,7 +2339,7 @@ void kdb_ps1(const struct task_struct *p)
2354 } else { 2339 } else {
2355 if (KDB_TSK(cpu) != p) 2340 if (KDB_TSK(cpu) != p)
2356 kdb_printf(" Error: does not match running " 2341 kdb_printf(" Error: does not match running "
2357 "process table (0x%p)\n", KDB_TSK(cpu)); 2342 "process table (0x%px)\n", KDB_TSK(cpu));
2358 } 2343 }
2359 } 2344 }
2360} 2345}
@@ -2687,7 +2672,7 @@ int kdb_register_flags(char *cmd,
2687 for_each_kdbcmd(kp, i) { 2672 for_each_kdbcmd(kp, i) {
2688 if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { 2673 if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
2689 kdb_printf("Duplicate kdb command registered: " 2674 kdb_printf("Duplicate kdb command registered: "
2690 "%s, func %p help %s\n", cmd, func, help); 2675 "%s, func %px help %s\n", cmd, func, help);
2691 return 1; 2676 return 1;
2692 } 2677 }
2693 } 2678 }
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 1e5a502ba4a7..2118d8258b7c 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -83,7 +83,7 @@ typedef struct __ksymtab {
83 unsigned long sym_start; 83 unsigned long sym_start;
84 unsigned long sym_end; 84 unsigned long sym_end;
85 } kdb_symtab_t; 85 } kdb_symtab_t;
86extern int kallsyms_symbol_next(char *prefix_name, int flag); 86extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
87extern int kallsyms_symbol_complete(char *prefix_name, int max_len); 87extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
88 88
89/* Exported Symbols for kernel loadable modules to use. */ 89/* Exported Symbols for kernel loadable modules to use. */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 990b3cc526c8..50bf9b119bad 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -40,7 +40,7 @@
40int kdbgetsymval(const char *symname, kdb_symtab_t *symtab) 40int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
41{ 41{
42 if (KDB_DEBUG(AR)) 42 if (KDB_DEBUG(AR))
43 kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname, 43 kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname,
44 symtab); 44 symtab);
45 memset(symtab, 0, sizeof(*symtab)); 45 memset(symtab, 0, sizeof(*symtab));
46 symtab->sym_start = kallsyms_lookup_name(symname); 46 symtab->sym_start = kallsyms_lookup_name(symname);
@@ -88,7 +88,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
88 char *knt1 = NULL; 88 char *knt1 = NULL;
89 89
90 if (KDB_DEBUG(AR)) 90 if (KDB_DEBUG(AR))
91 kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab); 91 kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab);
92 memset(symtab, 0, sizeof(*symtab)); 92 memset(symtab, 0, sizeof(*symtab));
93 93
94 if (addr < 4096) 94 if (addr < 4096)
@@ -149,7 +149,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
149 symtab->mod_name = "kernel"; 149 symtab->mod_name = "kernel";
150 if (KDB_DEBUG(AR)) 150 if (KDB_DEBUG(AR))
151 kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, " 151 kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
152 "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret, 152 "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret,
153 symtab->sym_start, symtab->mod_name, symtab->sym_name, 153 symtab->sym_start, symtab->mod_name, symtab->sym_name,
154 symtab->sym_name); 154 symtab->sym_name);
155 155
@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
221 * Parameters: 221 * Parameters:
222 * prefix_name prefix of a symbol name to lookup 222 * prefix_name prefix of a symbol name to lookup
223 * flag 0 means search from the head, 1 means continue search. 223 * flag 0 means search from the head, 1 means continue search.
224 * buf_size maximum length that can be written to prefix_name
225 * buffer
224 * Returns: 226 * Returns:
225 * 1 if a symbol matches the given prefix. 227 * 1 if a symbol matches the given prefix.
226 * 0 if no string found 228 * 0 if no string found
227 */ 229 */
228int kallsyms_symbol_next(char *prefix_name, int flag) 230int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
229{ 231{
230 int prefix_len = strlen(prefix_name); 232 int prefix_len = strlen(prefix_name);
231 static loff_t pos; 233 static loff_t pos;
@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
235 pos = 0; 237 pos = 0;
236 238
237 while ((name = kdb_walk_kallsyms(&pos))) { 239 while ((name = kdb_walk_kallsyms(&pos))) {
238 if (strncmp(name, prefix_name, prefix_len) == 0) { 240 if (!strncmp(name, prefix_name, prefix_len))
239 strncpy(prefix_name, name, strlen(name)+1); 241 return strscpy(prefix_name, name, buf_size);
240 return 1;
241 }
242 } 242 }
243 return 0; 243 return 0;
244} 244}
@@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
432 *word = w8; 432 *word = w8;
433 break; 433 break;
434 } 434 }
435 /* drop through */ 435 /* fall through */
436 default: 436 default:
437 diag = KDB_BADWIDTH; 437 diag = KDB_BADWIDTH;
438 kdb_printf("kdb_getphysword: bad width %ld\n", (long) size); 438 kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
@@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
481 *word = w8; 481 *word = w8;
482 break; 482 break;
483 } 483 }
484 /* drop through */ 484 /* fall through */
485 default: 485 default:
486 diag = KDB_BADWIDTH; 486 diag = KDB_BADWIDTH;
487 kdb_printf("kdb_getword: bad width %ld\n", (long) size); 487 kdb_printf("kdb_getword: bad width %ld\n", (long) size);
@@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
525 diag = kdb_putarea(addr, w8); 525 diag = kdb_putarea(addr, w8);
526 break; 526 break;
527 } 527 }
528 /* drop through */ 528 /* fall through */
529 default: 529 default:
530 diag = KDB_BADWIDTH; 530 diag = KDB_BADWIDTH;
531 kdb_printf("kdb_putword: bad width %ld\n", (long) size); 531 kdb_printf("kdb_putword: bad width %ld\n", (long) size);
@@ -887,13 +887,13 @@ void debug_kusage(void)
887 __func__, dah_first); 887 __func__, dah_first);
888 if (dah_first) { 888 if (dah_first) {
889 h_used = (struct debug_alloc_header *)debug_alloc_pool; 889 h_used = (struct debug_alloc_header *)debug_alloc_pool;
890 kdb_printf("%s: h_used %p size %d\n", __func__, h_used, 890 kdb_printf("%s: h_used %px size %d\n", __func__, h_used,
891 h_used->size); 891 h_used->size);
892 } 892 }
893 do { 893 do {
894 h_used = (struct debug_alloc_header *) 894 h_used = (struct debug_alloc_header *)
895 ((char *)h_free + dah_overhead + h_free->size); 895 ((char *)h_free + dah_overhead + h_free->size);
896 kdb_printf("%s: h_used %p size %d caller %p\n", 896 kdb_printf("%s: h_used %px size %d caller %px\n",
897 __func__, h_used, h_used->size, h_used->caller); 897 __func__, h_used, h_used->size, h_used->caller);
898 h_free = (struct debug_alloc_header *) 898 h_free = (struct debug_alloc_header *)
899 (debug_alloc_pool + h_free->next); 899 (debug_alloc_pool + h_free->next);
@@ -902,7 +902,7 @@ void debug_kusage(void)
902 ((char *)h_free + dah_overhead + h_free->size); 902 ((char *)h_free + dah_overhead + h_free->size);
903 if ((char *)h_used - debug_alloc_pool != 903 if ((char *)h_used - debug_alloc_pool !=
904 sizeof(debug_alloc_pool_aligned)) 904 sizeof(debug_alloc_pool_aligned))
905 kdb_printf("%s: h_used %p size %d caller %p\n", 905 kdb_printf("%s: h_used %px size %d caller %px\n",
906 __func__, h_used, h_used->size, h_used->caller); 906 __func__, h_used, h_used->size, h_used->caller);
907out: 907out:
908 spin_unlock(&dap_lock); 908 spin_unlock(&dap_lock);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 5731daa09a32..045930e32c0e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -679,7 +679,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
679 } 679 }
680 680
681 if (!dev_is_dma_coherent(dev) && 681 if (!dev_is_dma_coherent(dev) &&
682 (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 682 (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0 &&
683 dev_addr != DIRECT_MAPPING_ERROR)
683 arch_sync_dma_for_device(dev, phys, size, dir); 684 arch_sync_dma_for_device(dev, phys, size, dir);
684 685
685 return dev_addr; 686 return dev_addr;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 96d4bee83489..322e97bbb437 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -829,7 +829,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
829 BUG_ON((uprobe->offset & ~PAGE_MASK) + 829 BUG_ON((uprobe->offset & ~PAGE_MASK) +
830 UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); 830 UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
831 831
832 smp_wmb(); /* pairs with rmb() in find_active_uprobe() */ 832 smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
833 set_bit(UPROBE_COPY_INSN, &uprobe->flags); 833 set_bit(UPROBE_COPY_INSN, &uprobe->flags);
834 834
835 out: 835 out:
@@ -2178,10 +2178,18 @@ static void handle_swbp(struct pt_regs *regs)
2178 * After we hit the bp, _unregister + _register can install the 2178 * After we hit the bp, _unregister + _register can install the
2179 * new and not-yet-analyzed uprobe at the same address, restart. 2179 * new and not-yet-analyzed uprobe at the same address, restart.
2180 */ 2180 */
2181 smp_rmb(); /* pairs with wmb() in install_breakpoint() */
2182 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) 2181 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
2183 goto out; 2182 goto out;
2184 2183
2184 /*
2185 * Pairs with the smp_wmb() in prepare_uprobe().
2186 *
2187 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2188 * we must also see the stores to &uprobe->arch performed by the
2189 * prepare_uprobe() call.
2190 */
2191 smp_rmb();
2192
2185 /* Tracing handlers use ->utask to communicate with fetch methods */ 2193 /* Tracing handlers use ->utask to communicate with fetch methods */
2186 if (!get_utask()) 2194 if (!get_utask())
2187 goto out; 2195 goto out;
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3ebd09efe72a..97959d7b77e2 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -56,7 +56,7 @@ struct kcov {
56 struct task_struct *t; 56 struct task_struct *t;
57}; 57};
58 58
59static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t) 59static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
60{ 60{
61 unsigned int mode; 61 unsigned int mode;
62 62
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
78 return mode == needed_mode; 78 return mode == needed_mode;
79} 79}
80 80
81static unsigned long canonicalize_ip(unsigned long ip) 81static notrace unsigned long canonicalize_ip(unsigned long ip)
82{ 82{
83#ifdef CONFIG_RANDOMIZE_BASE 83#ifdef CONFIG_RANDOMIZE_BASE
84 ip -= kaslr_offset(); 84 ip -= kaslr_offset();
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 80b34dffdfb9..c2cee9db5204 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -261,9 +261,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
261 261
262static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) 262static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
263{ 263{
264 if (mode & PTRACE_MODE_SCHED)
265 return false;
266
267 if (mode & PTRACE_MODE_NOAUDIT) 264 if (mode & PTRACE_MODE_NOAUDIT)
268 return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE); 265 return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
269 else 266 else
@@ -331,16 +328,9 @@ ok:
331 !ptrace_has_cap(mm->user_ns, mode))) 328 !ptrace_has_cap(mm->user_ns, mode)))
332 return -EPERM; 329 return -EPERM;
333 330
334 if (mode & PTRACE_MODE_SCHED)
335 return 0;
336 return security_ptrace_access_check(task, mode); 331 return security_ptrace_access_check(task, mode);
337} 332}
338 333
339bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
340{
341 return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
342}
343
344bool ptrace_may_access(struct task_struct *task, unsigned int mode) 334bool ptrace_may_access(struct task_struct *task, unsigned int mode)
345{ 335{
346 int err; 336 int err;
diff --git a/kernel/resource.c b/kernel/resource.c
index b3a3a1fc499e..b0fbf685c77a 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -319,16 +319,23 @@ int release_resource(struct resource *old)
319EXPORT_SYMBOL(release_resource); 319EXPORT_SYMBOL(release_resource);
320 320
321/** 321/**
322 * Finds the lowest iomem resource that covers part of [start..end]. The 322 * Finds the lowest iomem resource that covers part of [@start..@end]. The
323 * caller must specify start, end, flags, and desc (which may be 323 * caller must specify @start, @end, @flags, and @desc (which may be
324 * IORES_DESC_NONE). 324 * IORES_DESC_NONE).
325 * 325 *
326 * If a resource is found, returns 0 and *res is overwritten with the part 326 * If a resource is found, returns 0 and @*res is overwritten with the part
327 * of the resource that's within [start..end]; if none is found, returns 327 * of the resource that's within [@start..@end]; if none is found, returns
328 * -1. 328 * -1 or -EINVAL for other invalid parameters.
329 * 329 *
330 * This function walks the whole tree and not just first level children 330 * This function walks the whole tree and not just first level children
331 * unless @first_lvl is true. 331 * unless @first_lvl is true.
332 *
333 * @start: start address of the resource searched for
334 * @end: end address of same resource
335 * @flags: flags which the resource must have
336 * @desc: descriptor the resource must have
337 * @first_lvl: walk only the first level children, if set
338 * @res: return ptr, if resource found
332 */ 339 */
333static int find_next_iomem_res(resource_size_t start, resource_size_t end, 340static int find_next_iomem_res(resource_size_t start, resource_size_t end,
334 unsigned long flags, unsigned long desc, 341 unsigned long flags, unsigned long desc,
@@ -399,6 +406,8 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
399 * @flags: I/O resource flags 406 * @flags: I/O resource flags
400 * @start: start addr 407 * @start: start addr
401 * @end: end addr 408 * @end: end addr
409 * @arg: function argument for the callback @func
410 * @func: callback function that is called for each qualifying resource area
402 * 411 *
403 * NOTE: For a new descriptor search, define a new IORES_DESC in 412 * NOTE: For a new descriptor search, define a new IORES_DESC in
404 * <linux/ioport.h> and set it in 'desc' of a target resource entry. 413 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f12225f26b70..6fedf3a98581 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5738,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
5738 5738
5739#ifdef CONFIG_SCHED_SMT 5739#ifdef CONFIG_SCHED_SMT
5740 /* 5740 /*
5741 * The sched_smt_present static key needs to be evaluated on every 5741 * When going up, increment the number of cores with SMT present.
5742 * hotplug event because at boot time SMT might be disabled when
5743 * the number of booted CPUs is limited.
5744 *
5745 * If then later a sibling gets hotplugged, then the key would stay
5746 * off and SMT scheduling would never be functional.
5747 */ 5742 */
5748 if (cpumask_weight(cpu_smt_mask(cpu)) > 1) 5743 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
5749 static_branch_enable_cpuslocked(&sched_smt_present); 5744 static_branch_inc_cpuslocked(&sched_smt_present);
5750#endif 5745#endif
5751 set_cpu_active(cpu, true); 5746 set_cpu_active(cpu, true);
5752 5747
@@ -5790,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
5790 */ 5785 */
5791 synchronize_rcu_mult(call_rcu, call_rcu_sched); 5786 synchronize_rcu_mult(call_rcu, call_rcu_sched);
5792 5787
5788#ifdef CONFIG_SCHED_SMT
5789 /*
5790 * When going down, decrement the number of cores with SMT present.
5791 */
5792 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
5793 static_branch_dec_cpuslocked(&sched_smt_present);
5794#endif
5795
5793 if (!sched_smp_initialized) 5796 if (!sched_smp_initialized)
5794 return 0; 5797 return 0;
5795 5798
@@ -5851,11 +5854,14 @@ void __init sched_init_smp(void)
5851 /* 5854 /*
5852 * There's no userspace yet to cause hotplug operations; hence all the 5855 * There's no userspace yet to cause hotplug operations; hence all the
5853 * CPU masks are stable and all blatant races in the below code cannot 5856 * CPU masks are stable and all blatant races in the below code cannot
5854 * happen. 5857 * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
5858 * but there won't be any contention on it.
5855 */ 5859 */
5860 cpus_read_lock();
5856 mutex_lock(&sched_domains_mutex); 5861 mutex_lock(&sched_domains_mutex);
5857 sched_init_domains(cpu_active_mask); 5862 sched_init_domains(cpu_active_mask);
5858 mutex_unlock(&sched_domains_mutex); 5863 mutex_unlock(&sched_domains_mutex);
5864 cpus_read_unlock();
5859 5865
5860 /* Move init over to a non-isolated CPU */ 5866 /* Move init over to a non-isolated CPU */
5861 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 5867 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb661cc..ac855b2f4774 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2400 local = 1; 2400 local = 1;
2401 2401
2402 /* 2402 /*
2403 * Retry task to preferred node migration periodically, in case it 2403 * Retry to migrate task to preferred node periodically, in case it
2404 * case it previously failed, or the scheduler moved us. 2404 * previously failed, or the scheduler moved us.
2405 */ 2405 */
2406 if (time_after(jiffies, p->numa_migrate_retry)) { 2406 if (time_after(jiffies, p->numa_migrate_retry)) {
2407 task_numa_placement(p); 2407 task_numa_placement(p);
@@ -5674,11 +5674,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5674 return target; 5674 return target;
5675} 5675}
5676 5676
5677static unsigned long cpu_util_wake(int cpu, struct task_struct *p); 5677static unsigned long cpu_util_without(int cpu, struct task_struct *p);
5678 5678
5679static unsigned long capacity_spare_wake(int cpu, struct task_struct *p) 5679static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
5680{ 5680{
5681 return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0); 5681 return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
5682} 5682}
5683 5683
5684/* 5684/*
@@ -5738,7 +5738,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5738 5738
5739 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); 5739 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
5740 5740
5741 spare_cap = capacity_spare_wake(i, p); 5741 spare_cap = capacity_spare_without(i, p);
5742 5742
5743 if (spare_cap > max_spare_cap) 5743 if (spare_cap > max_spare_cap)
5744 max_spare_cap = spare_cap; 5744 max_spare_cap = spare_cap;
@@ -5889,8 +5889,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
5889 return prev_cpu; 5889 return prev_cpu;
5890 5890
5891 /* 5891 /*
5892 * We need task's util for capacity_spare_wake, sync it up to prev_cpu's 5892 * We need task's util for capacity_spare_without, sync it up to
5893 * last_update_time. 5893 * prev_cpu's last_update_time.
5894 */ 5894 */
5895 if (!(sd_flag & SD_BALANCE_FORK)) 5895 if (!(sd_flag & SD_BALANCE_FORK))
5896 sync_entity_load_avg(&p->se); 5896 sync_entity_load_avg(&p->se);
@@ -6216,10 +6216,19 @@ static inline unsigned long cpu_util(int cpu)
6216} 6216}
6217 6217
6218/* 6218/*
6219 * cpu_util_wake: Compute CPU utilization with any contributions from 6219 * cpu_util_without: compute cpu utilization without any contributions from *p
6220 * the waking task p removed. 6220 * @cpu: the CPU which utilization is requested
6221 * @p: the task which utilization should be discounted
6222 *
6223 * The utilization of a CPU is defined by the utilization of tasks currently
6224 * enqueued on that CPU as well as tasks which are currently sleeping after an
6225 * execution on that CPU.
6226 *
6227 * This method returns the utilization of the specified CPU by discounting the
6228 * utilization of the specified task, whenever the task is currently
6229 * contributing to the CPU utilization.
6221 */ 6230 */
6222static unsigned long cpu_util_wake(int cpu, struct task_struct *p) 6231static unsigned long cpu_util_without(int cpu, struct task_struct *p)
6223{ 6232{
6224 struct cfs_rq *cfs_rq; 6233 struct cfs_rq *cfs_rq;
6225 unsigned int util; 6234 unsigned int util;
@@ -6231,7 +6240,7 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
6231 cfs_rq = &cpu_rq(cpu)->cfs; 6240 cfs_rq = &cpu_rq(cpu)->cfs;
6232 util = READ_ONCE(cfs_rq->avg.util_avg); 6241 util = READ_ONCE(cfs_rq->avg.util_avg);
6233 6242
6234 /* Discount task's blocked util from CPU's util */ 6243 /* Discount task's util from CPU's util */
6235 util -= min_t(unsigned int, util, task_util(p)); 6244 util -= min_t(unsigned int, util, task_util(p));
6236 6245
6237 /* 6246 /*
@@ -6240,14 +6249,14 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
6240 * a) if *p is the only task sleeping on this CPU, then: 6249 * a) if *p is the only task sleeping on this CPU, then:
6241 * cpu_util (== task_util) > util_est (== 0) 6250 * cpu_util (== task_util) > util_est (== 0)
6242 * and thus we return: 6251 * and thus we return:
6243 * cpu_util_wake = (cpu_util - task_util) = 0 6252 * cpu_util_without = (cpu_util - task_util) = 0
6244 * 6253 *
6245 * b) if other tasks are SLEEPING on this CPU, which is now exiting 6254 * b) if other tasks are SLEEPING on this CPU, which is now exiting
6246 * IDLE, then: 6255 * IDLE, then:
6247 * cpu_util >= task_util 6256 * cpu_util >= task_util
6248 * cpu_util > util_est (== 0) 6257 * cpu_util > util_est (== 0)
6249 * and thus we discount *p's blocked utilization to return: 6258 * and thus we discount *p's blocked utilization to return:
6250 * cpu_util_wake = (cpu_util - task_util) >= 0 6259 * cpu_util_without = (cpu_util - task_util) >= 0
6251 * 6260 *
6252 * c) if other tasks are RUNNABLE on that CPU and 6261 * c) if other tasks are RUNNABLE on that CPU and
6253 * util_est > cpu_util 6262 * util_est > cpu_util
@@ -6260,8 +6269,33 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
6260 * covered by the following code when estimated utilization is 6269 * covered by the following code when estimated utilization is
6261 * enabled. 6270 * enabled.
6262 */ 6271 */
6263 if (sched_feat(UTIL_EST)) 6272 if (sched_feat(UTIL_EST)) {
6264 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); 6273 unsigned int estimated =
6274 READ_ONCE(cfs_rq->avg.util_est.enqueued);
6275
6276 /*
6277 * Despite the following checks we still have a small window
6278 * for a possible race, when an execl's select_task_rq_fair()
6279 * races with LB's detach_task():
6280 *
6281 * detach_task()
6282 * p->on_rq = TASK_ON_RQ_MIGRATING;
6283 * ---------------------------------- A
6284 * deactivate_task() \
6285 * dequeue_task() + RaceTime
6286 * util_est_dequeue() /
6287 * ---------------------------------- B
6288 *
6289 * The additional check on "current == p" it's required to
6290 * properly fix the execl regression and it helps in further
6291 * reducing the chances for the above race.
6292 */
6293 if (unlikely(task_on_rq_queued(p) || current == p)) {
6294 estimated -= min_t(unsigned int, estimated,
6295 (_task_util_est(p) | UTIL_AVG_UNCHANGED));
6296 }
6297 util = max(util, estimated);
6298 }
6265 6299
6266 /* 6300 /*
6267 * Utilization (estimated) can exceed the CPU capacity, thus let's 6301 * Utilization (estimated) can exceed the CPU capacity, thus let's
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 7cdecfc010af..fe24de3fbc93 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -136,8 +136,18 @@
136 136
137static int psi_bug __read_mostly; 137static int psi_bug __read_mostly;
138 138
139bool psi_disabled __read_mostly; 139DEFINE_STATIC_KEY_FALSE(psi_disabled);
140core_param(psi_disabled, psi_disabled, bool, 0644); 140
141#ifdef CONFIG_PSI_DEFAULT_DISABLED
142bool psi_enable;
143#else
144bool psi_enable = true;
145#endif
146static int __init setup_psi(char *str)
147{
148 return kstrtobool(str, &psi_enable) == 0;
149}
150__setup("psi=", setup_psi);
141 151
142/* Running averages - we need to be higher-res than loadavg */ 152/* Running averages - we need to be higher-res than loadavg */
143#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */ 153#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
@@ -169,8 +179,10 @@ static void group_init(struct psi_group *group)
169 179
170void __init psi_init(void) 180void __init psi_init(void)
171{ 181{
172 if (psi_disabled) 182 if (!psi_enable) {
183 static_branch_enable(&psi_disabled);
173 return; 184 return;
185 }
174 186
175 psi_period = jiffies_to_nsecs(PSI_FREQ); 187 psi_period = jiffies_to_nsecs(PSI_FREQ);
176 group_init(&psi_system); 188 group_init(&psi_system);
@@ -549,7 +561,7 @@ void psi_memstall_enter(unsigned long *flags)
549 struct rq_flags rf; 561 struct rq_flags rf;
550 struct rq *rq; 562 struct rq *rq;
551 563
552 if (psi_disabled) 564 if (static_branch_likely(&psi_disabled))
553 return; 565 return;
554 566
555 *flags = current->flags & PF_MEMSTALL; 567 *flags = current->flags & PF_MEMSTALL;
@@ -579,7 +591,7 @@ void psi_memstall_leave(unsigned long *flags)
579 struct rq_flags rf; 591 struct rq_flags rf;
580 struct rq *rq; 592 struct rq *rq;
581 593
582 if (psi_disabled) 594 if (static_branch_likely(&psi_disabled))
583 return; 595 return;
584 596
585 if (*flags) 597 if (*flags)
@@ -600,7 +612,7 @@ void psi_memstall_leave(unsigned long *flags)
600#ifdef CONFIG_CGROUPS 612#ifdef CONFIG_CGROUPS
601int psi_cgroup_alloc(struct cgroup *cgroup) 613int psi_cgroup_alloc(struct cgroup *cgroup)
602{ 614{
603 if (psi_disabled) 615 if (static_branch_likely(&psi_disabled))
604 return 0; 616 return 0;
605 617
606 cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu); 618 cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
@@ -612,7 +624,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
612 624
613void psi_cgroup_free(struct cgroup *cgroup) 625void psi_cgroup_free(struct cgroup *cgroup)
614{ 626{
615 if (psi_disabled) 627 if (static_branch_likely(&psi_disabled))
616 return; 628 return;
617 629
618 cancel_delayed_work_sync(&cgroup->psi.clock_work); 630 cancel_delayed_work_sync(&cgroup->psi.clock_work);
@@ -633,38 +645,39 @@ void psi_cgroup_free(struct cgroup *cgroup)
633 */ 645 */
634void cgroup_move_task(struct task_struct *task, struct css_set *to) 646void cgroup_move_task(struct task_struct *task, struct css_set *to)
635{ 647{
636 bool move_psi = !psi_disabled;
637 unsigned int task_flags = 0; 648 unsigned int task_flags = 0;
638 struct rq_flags rf; 649 struct rq_flags rf;
639 struct rq *rq; 650 struct rq *rq;
640 651
641 if (move_psi) { 652 if (static_branch_likely(&psi_disabled)) {
642 rq = task_rq_lock(task, &rf); 653 /*
654 * Lame to do this here, but the scheduler cannot be locked
655 * from the outside, so we move cgroups from inside sched/.
656 */
657 rcu_assign_pointer(task->cgroups, to);
658 return;
659 }
643 660
644 if (task_on_rq_queued(task)) 661 rq = task_rq_lock(task, &rf);
645 task_flags = TSK_RUNNING;
646 else if (task->in_iowait)
647 task_flags = TSK_IOWAIT;
648 662
649 if (task->flags & PF_MEMSTALL) 663 if (task_on_rq_queued(task))
650 task_flags |= TSK_MEMSTALL; 664 task_flags = TSK_RUNNING;
665 else if (task->in_iowait)
666 task_flags = TSK_IOWAIT;
651 667
652 if (task_flags) 668 if (task->flags & PF_MEMSTALL)
653 psi_task_change(task, task_flags, 0); 669 task_flags |= TSK_MEMSTALL;
654 }
655 670
656 /* 671 if (task_flags)
657 * Lame to do this here, but the scheduler cannot be locked 672 psi_task_change(task, task_flags, 0);
658 * from the outside, so we move cgroups from inside sched/. 673
659 */ 674 /* See comment above */
660 rcu_assign_pointer(task->cgroups, to); 675 rcu_assign_pointer(task->cgroups, to);
661 676
662 if (move_psi) { 677 if (task_flags)
663 if (task_flags) 678 psi_task_change(task, 0, task_flags);
664 psi_task_change(task, 0, task_flags);
665 679
666 task_rq_unlock(rq, task, &rf); 680 task_rq_unlock(rq, task, &rf);
667 }
668} 681}
669#endif /* CONFIG_CGROUPS */ 682#endif /* CONFIG_CGROUPS */
670 683
@@ -672,7 +685,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
672{ 685{
673 int full; 686 int full;
674 687
675 if (psi_disabled) 688 if (static_branch_likely(&psi_disabled))
676 return -EOPNOTSUPP; 689 return -EOPNOTSUPP;
677 690
678 update_stats(group); 691 update_stats(group);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 618577fc9aa8..4e524ab589c9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -23,6 +23,7 @@
23#include <linux/sched/prio.h> 23#include <linux/sched/prio.h>
24#include <linux/sched/rt.h> 24#include <linux/sched/rt.h>
25#include <linux/sched/signal.h> 25#include <linux/sched/signal.h>
26#include <linux/sched/smt.h>
26#include <linux/sched/stat.h> 27#include <linux/sched/stat.h>
27#include <linux/sched/sysctl.h> 28#include <linux/sched/sysctl.h>
28#include <linux/sched/task.h> 29#include <linux/sched/task.h>
@@ -936,9 +937,6 @@ static inline int cpu_of(struct rq *rq)
936 937
937 938
938#ifdef CONFIG_SCHED_SMT 939#ifdef CONFIG_SCHED_SMT
939
940extern struct static_key_false sched_smt_present;
941
942extern void __update_idle_core(struct rq *rq); 940extern void __update_idle_core(struct rq *rq);
943 941
944static inline void update_idle_core(struct rq *rq) 942static inline void update_idle_core(struct rq *rq)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 4904c4677000..aa0de240fb41 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
66{ 66{
67 int clear = 0, set = TSK_RUNNING; 67 int clear = 0, set = TSK_RUNNING;
68 68
69 if (psi_disabled) 69 if (static_branch_likely(&psi_disabled))
70 return; 70 return;
71 71
72 if (!wakeup || p->sched_psi_wake_requeue) { 72 if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
86{ 86{
87 int clear = TSK_RUNNING, set = 0; 87 int clear = TSK_RUNNING, set = 0;
88 88
89 if (psi_disabled) 89 if (static_branch_likely(&psi_disabled))
90 return; 90 return;
91 91
92 if (!sleep) { 92 if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
102 102
103static inline void psi_ttwu_dequeue(struct task_struct *p) 103static inline void psi_ttwu_dequeue(struct task_struct *p)
104{ 104{
105 if (psi_disabled) 105 if (static_branch_likely(&psi_disabled))
106 return; 106 return;
107 /* 107 /*
108 * Is the task being migrated during a wakeup? Make sure to 108 * Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
128 128
129static inline void psi_task_tick(struct rq *rq) 129static inline void psi_task_tick(struct rq *rq)
130{ 130{
131 if (psi_disabled) 131 if (static_branch_likely(&psi_disabled))
132 return; 132 return;
133 133
134 if (unlikely(rq->curr->flags & PF_MEMSTALL)) 134 if (unlikely(rq->curr->flags & PF_MEMSTALL))
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index e42892926244..08cb57eed389 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/stackleak.h> 13#include <linux/stackleak.h>
14#include <linux/kprobes.h>
14 15
15#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE 16#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
16#include <linux/jump_label.h> 17#include <linux/jump_label.h>
@@ -47,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write,
47#define skip_erasing() false 48#define skip_erasing() false
48#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */ 49#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
49 50
50asmlinkage void stackleak_erase(void) 51asmlinkage void notrace stackleak_erase(void)
51{ 52{
52 /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */ 53 /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
53 unsigned long kstack_ptr = current->lowest_stack; 54 unsigned long kstack_ptr = current->lowest_stack;
@@ -101,6 +102,7 @@ asmlinkage void stackleak_erase(void)
101 /* Reset the 'lowest_stack' value for the next syscall */ 102 /* Reset the 'lowest_stack' value for the next syscall */
102 current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64; 103 current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
103} 104}
105NOKPROBE_SYMBOL(stackleak_erase);
104 106
105void __used stackleak_track_stack(void) 107void __used stackleak_track_stack(void)
106{ 108{
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ce32cf741b25..8f0644af40be 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -917,9 +917,6 @@ static void check_process_timers(struct task_struct *tsk,
917 struct task_cputime cputime; 917 struct task_cputime cputime;
918 unsigned long soft; 918 unsigned long soft;
919 919
920 if (dl_task(tsk))
921 check_dl_overrun(tsk);
922
923 /* 920 /*
924 * If cputimer is not running, then there are no active 921 * If cputimer is not running, then there are no active
925 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU). 922 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 08fcfe440c63..9864a35c8bb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
196 i++; 196 i++;
197 } else if (fmt[i] == 'p' || fmt[i] == 's') { 197 } else if (fmt[i] == 'p' || fmt[i] == 's') {
198 mod[fmt_cnt]++; 198 mod[fmt_cnt]++;
199 i++; 199 /* disallow any further format extensions */
200 if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0) 200 if (fmt[i + 1] != 0 &&
201 !isspace(fmt[i + 1]) &&
202 !ispunct(fmt[i + 1]))
201 return -EINVAL; 203 return -EINVAL;
202 fmt_cnt++; 204 fmt_cnt++;
203 if (fmt[i - 1] == 's') { 205 if (fmt[i] == 's') {
204 if (str_seen) 206 if (str_seen)
205 /* allow only one '%s' per fmt string */ 207 /* allow only one '%s' per fmt string */
206 return -EINVAL; 208 return -EINVAL;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f536f601bd46..77734451cb05 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -817,7 +817,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
817#ifdef CONFIG_FUNCTION_GRAPH_TRACER 817#ifdef CONFIG_FUNCTION_GRAPH_TRACER
818static int profile_graph_entry(struct ftrace_graph_ent *trace) 818static int profile_graph_entry(struct ftrace_graph_ent *trace)
819{ 819{
820 int index = trace->depth; 820 int index = current->curr_ret_stack;
821 821
822 function_profile_call(trace->func, 0, NULL, NULL); 822 function_profile_call(trace->func, 0, NULL, NULL);
823 823
@@ -852,7 +852,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
852 if (!fgraph_graph_time) { 852 if (!fgraph_graph_time) {
853 int index; 853 int index;
854 854
855 index = trace->depth; 855 index = current->curr_ret_stack;
856 856
857 /* Append this call time to the parent time to subtract */ 857 /* Append this call time to the parent time to subtract */
858 if (index) 858 if (index)
@@ -6814,6 +6814,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
6814 atomic_set(&t->tracing_graph_pause, 0); 6814 atomic_set(&t->tracing_graph_pause, 0);
6815 atomic_set(&t->trace_overrun, 0); 6815 atomic_set(&t->trace_overrun, 0);
6816 t->curr_ret_stack = -1; 6816 t->curr_ret_stack = -1;
6817 t->curr_ret_depth = -1;
6817 /* Make sure the tasks see the -1 first: */ 6818 /* Make sure the tasks see the -1 first: */
6818 smp_wmb(); 6819 smp_wmb();
6819 t->ret_stack = ret_stack_list[start++]; 6820 t->ret_stack = ret_stack_list[start++];
@@ -7038,6 +7039,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
7038void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) 7039void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
7039{ 7040{
7040 t->curr_ret_stack = -1; 7041 t->curr_ret_stack = -1;
7042 t->curr_ret_depth = -1;
7041 /* 7043 /*
7042 * The idle task has no parent, it either has its own 7044 * The idle task has no parent, it either has its own
7043 * stack or no stack at all. 7045 * stack or no stack at all.
@@ -7068,6 +7070,7 @@ void ftrace_graph_init_task(struct task_struct *t)
7068 /* Make sure we do not use the parent ret_stack */ 7070 /* Make sure we do not use the parent ret_stack */
7069 t->ret_stack = NULL; 7071 t->ret_stack = NULL;
7070 t->curr_ret_stack = -1; 7072 t->curr_ret_stack = -1;
7073 t->curr_ret_depth = -1;
7071 7074
7072 if (ftrace_graph_active) { 7075 if (ftrace_graph_active) {
7073 struct ftrace_ret_stack *ret_stack; 7076 struct ftrace_ret_stack *ret_stack;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3b8c0e24ab30..447bd96ee658 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -512,12 +512,44 @@ enum {
512 * can only be modified by current, we can reuse trace_recursion. 512 * can only be modified by current, we can reuse trace_recursion.
513 */ 513 */
514 TRACE_IRQ_BIT, 514 TRACE_IRQ_BIT,
515
516 /* Set if the function is in the set_graph_function file */
517 TRACE_GRAPH_BIT,
518
519 /*
520 * In the very unlikely case that an interrupt came in
521 * at a start of graph tracing, and we want to trace
522 * the function in that interrupt, the depth can be greater
523 * than zero, because of the preempted start of a previous
524 * trace. In an even more unlikely case, depth could be 2
525 * if a softirq interrupted the start of graph tracing,
526 * followed by an interrupt preempting a start of graph
527 * tracing in the softirq, and depth can even be 3
528 * if an NMI came in at the start of an interrupt function
529 * that preempted a softirq start of a function that
530 * preempted normal context!!!! Luckily, it can't be
531 * greater than 3, so the next two bits are a mask
532 * of what the depth is when we set TRACE_GRAPH_BIT
533 */
534
535 TRACE_GRAPH_DEPTH_START_BIT,
536 TRACE_GRAPH_DEPTH_END_BIT,
515}; 537};
516 538
517#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) 539#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
518#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) 540#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
519#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) 541#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
520 542
543#define trace_recursion_depth() \
544 (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
545#define trace_recursion_set_depth(depth) \
546 do { \
547 current->trace_recursion &= \
548 ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
549 current->trace_recursion |= \
550 ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
551 } while (0)
552
521#define TRACE_CONTEXT_BITS 4 553#define TRACE_CONTEXT_BITS 4
522 554
523#define TRACE_FTRACE_START TRACE_FTRACE_BIT 555#define TRACE_FTRACE_START TRACE_FTRACE_BIT
@@ -843,8 +875,9 @@ extern void __trace_graph_return(struct trace_array *tr,
843extern struct ftrace_hash *ftrace_graph_hash; 875extern struct ftrace_hash *ftrace_graph_hash;
844extern struct ftrace_hash *ftrace_graph_notrace_hash; 876extern struct ftrace_hash *ftrace_graph_notrace_hash;
845 877
846static inline int ftrace_graph_addr(unsigned long addr) 878static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
847{ 879{
880 unsigned long addr = trace->func;
848 int ret = 0; 881 int ret = 0;
849 882
850 preempt_disable_notrace(); 883 preempt_disable_notrace();
@@ -855,6 +888,14 @@ static inline int ftrace_graph_addr(unsigned long addr)
855 } 888 }
856 889
857 if (ftrace_lookup_ip(ftrace_graph_hash, addr)) { 890 if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
891
892 /*
893 * This needs to be cleared on the return functions
894 * when the depth is zero.
895 */
896 trace_recursion_set(TRACE_GRAPH_BIT);
897 trace_recursion_set_depth(trace->depth);
898
858 /* 899 /*
859 * If no irqs are to be traced, but a set_graph_function 900 * If no irqs are to be traced, but a set_graph_function
860 * is set, and called by an interrupt handler, we still 901 * is set, and called by an interrupt handler, we still
@@ -872,6 +913,13 @@ out:
872 return ret; 913 return ret;
873} 914}
874 915
916static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
917{
918 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
919 trace->depth == trace_recursion_depth())
920 trace_recursion_clear(TRACE_GRAPH_BIT);
921}
922
875static inline int ftrace_graph_notrace_addr(unsigned long addr) 923static inline int ftrace_graph_notrace_addr(unsigned long addr)
876{ 924{
877 int ret = 0; 925 int ret = 0;
@@ -885,7 +933,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
885 return ret; 933 return ret;
886} 934}
887#else 935#else
888static inline int ftrace_graph_addr(unsigned long addr) 936static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
889{ 937{
890 return 1; 938 return 1;
891} 939}
@@ -894,6 +942,8 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
894{ 942{
895 return 0; 943 return 0;
896} 944}
945static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
946{ }
897#endif /* CONFIG_DYNAMIC_FTRACE */ 947#endif /* CONFIG_DYNAMIC_FTRACE */
898 948
899extern unsigned int fgraph_max_depth; 949extern unsigned int fgraph_max_depth;
@@ -901,7 +951,8 @@ extern unsigned int fgraph_max_depth;
901static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace) 951static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
902{ 952{
903 /* trace it when it is-nested-in or is a function enabled. */ 953 /* trace it when it is-nested-in or is a function enabled. */
904 return !(trace->depth || ftrace_graph_addr(trace->func)) || 954 return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
955 ftrace_graph_addr(trace)) ||
905 (trace->depth < 0) || 956 (trace->depth < 0) ||
906 (fgraph_max_depth && trace->depth >= fgraph_max_depth); 957 (fgraph_max_depth && trace->depth >= fgraph_max_depth);
907} 958}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 169b3c44ee97..086af4f5c3e8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
118 struct trace_seq *s, u32 flags); 118 struct trace_seq *s, u32 flags);
119 119
120/* Add a function return address to the trace stack on thread info.*/ 120/* Add a function return address to the trace stack on thread info.*/
121int 121static int
122ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, 122ftrace_push_return_trace(unsigned long ret, unsigned long func,
123 unsigned long frame_pointer, unsigned long *retp) 123 unsigned long frame_pointer, unsigned long *retp)
124{ 124{
125 unsigned long long calltime; 125 unsigned long long calltime;
@@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
178 current->ret_stack[index].retp = retp; 178 current->ret_stack[index].retp = retp;
179#endif 179#endif
180 *depth = current->curr_ret_stack; 180 return 0;
181}
182
183int function_graph_enter(unsigned long ret, unsigned long func,
184 unsigned long frame_pointer, unsigned long *retp)
185{
186 struct ftrace_graph_ent trace;
187
188 trace.func = func;
189 trace.depth = ++current->curr_ret_depth;
190
191 if (ftrace_push_return_trace(ret, func,
192 frame_pointer, retp))
193 goto out;
194
195 /* Only trace if the calling function expects to */
196 if (!ftrace_graph_entry(&trace))
197 goto out_ret;
181 198
182 return 0; 199 return 0;
200 out_ret:
201 current->curr_ret_stack--;
202 out:
203 current->curr_ret_depth--;
204 return -EBUSY;
183} 205}
184 206
185/* Retrieve a function return address to the trace stack on thread info.*/ 207/* Retrieve a function return address to the trace stack on thread info.*/
@@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
241 trace->func = current->ret_stack[index].func; 263 trace->func = current->ret_stack[index].func;
242 trace->calltime = current->ret_stack[index].calltime; 264 trace->calltime = current->ret_stack[index].calltime;
243 trace->overrun = atomic_read(&current->trace_overrun); 265 trace->overrun = atomic_read(&current->trace_overrun);
244 trace->depth = index; 266 trace->depth = current->curr_ret_depth--;
267 /*
268 * We still want to trace interrupts coming in if
269 * max_depth is set to 1. Make sure the decrement is
270 * seen before ftrace_graph_return.
271 */
272 barrier();
245} 273}
246 274
247/* 275/*
@@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
255 283
256 ftrace_pop_return_trace(&trace, &ret, frame_pointer); 284 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
257 trace.rettime = trace_clock_local(); 285 trace.rettime = trace_clock_local();
286 ftrace_graph_return(&trace);
287 /*
288 * The ftrace_graph_return() may still access the current
289 * ret_stack structure, we need to make sure the update of
290 * curr_ret_stack is after that.
291 */
258 barrier(); 292 barrier();
259 current->curr_ret_stack--; 293 current->curr_ret_stack--;
260 /* 294 /*
@@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
267 return ret; 301 return ret;
268 } 302 }
269 303
270 /*
271 * The trace should run after decrementing the ret counter
272 * in case an interrupt were to come in. We don't want to
273 * lose the interrupt if max_depth is set.
274 */
275 ftrace_graph_return(&trace);
276
277 if (unlikely(!ret)) { 304 if (unlikely(!ret)) {
278 ftrace_graph_stop(); 305 ftrace_graph_stop();
279 WARN_ON(1); 306 WARN_ON(1);
@@ -482,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
482 int cpu; 509 int cpu;
483 int pc; 510 int pc;
484 511
512 ftrace_graph_addr_finish(trace);
513
485 local_irq_save(flags); 514 local_irq_save(flags);
486 cpu = raw_smp_processor_id(); 515 cpu = raw_smp_processor_id();
487 data = per_cpu_ptr(tr->trace_buffer.data, cpu); 516 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -505,6 +534,8 @@ void set_graph_array(struct trace_array *tr)
505 534
506static void trace_graph_thresh_return(struct ftrace_graph_ret *trace) 535static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
507{ 536{
537 ftrace_graph_addr_finish(trace);
538
508 if (tracing_thresh && 539 if (tracing_thresh &&
509 (trace->rettime - trace->calltime < tracing_thresh)) 540 (trace->rettime - trace->calltime < tracing_thresh))
510 return; 541 return;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b7357f9f82a3..98ea6d28df15 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -208,6 +208,8 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
208 unsigned long flags; 208 unsigned long flags;
209 int pc; 209 int pc;
210 210
211 ftrace_graph_addr_finish(trace);
212
211 if (!func_prolog_dec(tr, &data, &flags)) 213 if (!func_prolog_dec(tr, &data, &flags))
212 return; 214 return;
213 215
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 3ef15a6683c0..bd30e9398d2a 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -535,7 +535,7 @@ int traceprobe_update_arg(struct probe_arg *arg)
535 if (code[1].op != FETCH_OP_IMM) 535 if (code[1].op != FETCH_OP_IMM)
536 return -EINVAL; 536 return -EINVAL;
537 537
538 tmp = strpbrk("+-", code->data); 538 tmp = strpbrk(code->data, "+-");
539 if (tmp) 539 if (tmp)
540 c = *tmp; 540 c = *tmp;
541 ret = traceprobe_split_symbol_offset(code->data, 541 ret = traceprobe_split_symbol_offset(code->data,
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index a86b303e6c67..7d04b9890755 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -270,6 +270,8 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
270 unsigned long flags; 270 unsigned long flags;
271 int pc; 271 int pc;
272 272
273 ftrace_graph_addr_finish(trace);
274
273 if (!func_prolog_preempt_disable(tr, &data, &pc)) 275 if (!func_prolog_preempt_disable(tr, &data, &pc))
274 return; 276 return;
275 277
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index e5222b5fb4fe..923414a246e9 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
974 if (!new_idmap_permitted(file, ns, cap_setid, &new_map)) 974 if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
975 goto out; 975 goto out;
976 976
977 ret = sort_idmaps(&new_map);
978 if (ret < 0)
979 goto out;
980
981 ret = -EPERM; 977 ret = -EPERM;
982 /* Map the lower ids from the parent user namespace to the 978 /* Map the lower ids from the parent user namespace to the
983 * kernel global id space. 979 * kernel global id space.
@@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf,
1004 e->lower_first = lower_first; 1000 e->lower_first = lower_first;
1005 } 1001 }
1006 1002
1003 /*
1004 * If we want to use binary search for lookup, this clones the extent
1005 * array and sorts both copies.
1006 */
1007 ret = sort_idmaps(&new_map);
1008 if (ret < 0)
1009 goto out;
1010
1007 /* Install the map */ 1011 /* Install the map */
1008 if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) { 1012 if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
1009 memcpy(map->extent, new_map.extent, 1013 memcpy(map->extent, new_map.extent,
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 70935ed91125..14afeeb7d6ef 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -135,7 +135,6 @@ static void fill_pool(void)
135 if (!new) 135 if (!new)
136 return; 136 return;
137 137
138 kmemleak_ignore(new);
139 raw_spin_lock_irqsave(&pool_lock, flags); 138 raw_spin_lock_irqsave(&pool_lock, flags);
140 hlist_add_head(&new->node, &obj_pool); 139 hlist_add_head(&new->node, &obj_pool);
141 debug_objects_allocated++; 140 debug_objects_allocated++;
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
1128 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL); 1127 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1129 if (!obj) 1128 if (!obj)
1130 goto free; 1129 goto free;
1131 kmemleak_ignore(obj);
1132 hlist_add_head(&obj->node, &objects); 1130 hlist_add_head(&obj->node, &objects);
1133 } 1131 }
1134 1132
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
1184 1182
1185 obj_cache = kmem_cache_create("debug_objects_cache", 1183 obj_cache = kmem_cache_create("debug_objects_cache",
1186 sizeof (struct debug_obj), 0, 1184 sizeof (struct debug_obj), 0,
1187 SLAB_DEBUG_OBJECTS, NULL); 1185 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1186 NULL);
1188 1187
1189 if (!obj_cache || debug_objects_replace_static_objects()) { 1188 if (!obj_cache || debug_objects_replace_static_objects()) {
1190 debug_objects_enabled = 0; 1189 debug_objects_enabled = 0;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7ebccb5c1637..54c248526b55 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -560,6 +560,38 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
560 return bytes; 560 return bytes;
561} 561}
562 562
563static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
564 __wsum *csum, struct iov_iter *i)
565{
566 struct pipe_inode_info *pipe = i->pipe;
567 size_t n, r;
568 size_t off = 0;
569 __wsum sum = *csum, next;
570 int idx;
571
572 if (!sanity(i))
573 return 0;
574
575 bytes = n = push_pipe(i, bytes, &idx, &r);
576 if (unlikely(!n))
577 return 0;
578 for ( ; n; idx = next_idx(idx, pipe), r = 0) {
579 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
580 char *p = kmap_atomic(pipe->bufs[idx].page);
581 next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
582 sum = csum_block_add(sum, next, off);
583 kunmap_atomic(p);
584 i->idx = idx;
585 i->iov_offset = r + chunk;
586 n -= chunk;
587 off += chunk;
588 addr += chunk;
589 }
590 i->count -= bytes;
591 *csum = sum;
592 return bytes;
593}
594
563size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 595size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
564{ 596{
565 const char *from = addr; 597 const char *from = addr;
@@ -1438,8 +1470,12 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
1438 const char *from = addr; 1470 const char *from = addr;
1439 __wsum sum, next; 1471 __wsum sum, next;
1440 size_t off = 0; 1472 size_t off = 0;
1473
1474 if (unlikely(iov_iter_is_pipe(i)))
1475 return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
1476
1441 sum = *csum; 1477 sum = *csum;
1442 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 1478 if (unlikely(iov_iter_is_discard(i))) {
1443 WARN_ON(1); /* for now */ 1479 WARN_ON(1); /* for now */
1444 return 0; 1480 return 0;
1445 } 1481 }
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 5d73f5cb4d8a..79777645cac9 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
27 CFLAGS += -I../../../arch/arm/include -mfpu=neon 27 CFLAGS += -I../../../arch/arm/include -mfpu=neon
28 HAS_NEON = yes 28 HAS_NEON = yes
29endif 29endif
30ifeq ($(ARCH),arm64) 30ifeq ($(ARCH),aarch64)
31 CFLAGS += -I../../../arch/arm64/include 31 CFLAGS += -I../../../arch/arm64/include
32 HAS_NEON = yes 32 HAS_NEON = yes
33endif 33endif
@@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes)
41 gcc -c -x assembler - >&/dev/null && \ 41 gcc -c -x assembler - >&/dev/null && \
42 rm ./-.o && echo -DCONFIG_AS_AVX512=1) 42 rm ./-.o && echo -DCONFIG_AS_AVX512=1)
43else ifeq ($(HAS_NEON),yes) 43else ifeq ($(HAS_NEON),yes)
44 OBJS += neon.o neon1.o neon2.o neon4.o neon8.o 44 OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
45 CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 45 CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
46else 46else
47 HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\ 47 HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index b984806d7d7b..7cab9a9869ac 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev,
837 if (req->fw->size > PAGE_SIZE) { 837 if (req->fw->size > PAGE_SIZE) {
838 pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); 838 pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
839 rc = -EINVAL; 839 rc = -EINVAL;
840 goto out;
840 } 841 }
841 memcpy(buf, req->fw->data, req->fw->size); 842 memcpy(buf, req->fw->data, req->fw->size);
842 843
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 626f580b4ff7..5144899d3c6b 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -99,7 +99,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
99 const char *q = *result++; 99 const char *q = *result++;
100 size_t amount = strlen(q); 100 size_t amount = strlen(q);
101 101
102 strncpy(p, q, amount); 102 memcpy(p, q, amount);
103 p += amount; 103 p += amount;
104 104
105 *p++ = ' '; 105 *p++ = ' ';
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e3ddd836491f..d82d022111e0 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1214,7 +1214,6 @@ void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
1214 1214
1215 dev_info(test_dev->dev, "removing interface\n"); 1215 dev_info(test_dev->dev, "removing interface\n");
1216 misc_deregister(&test_dev->misc_dev); 1216 misc_deregister(&test_dev->misc_dev);
1217 kfree(&test_dev->misc_dev.name);
1218 1217
1219 mutex_unlock(&test_dev->config_mutex); 1218 mutex_unlock(&test_dev->config_mutex);
1220 mutex_unlock(&test_dev->trigger_mutex); 1219 mutex_unlock(&test_dev->trigger_mutex);
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index aa47754150ce..0598e86af8fc 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -208,15 +208,19 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
208 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2)); 208 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
209 209
210 /* We should see two elements in the array */ 210 /* We should see two elements in the array */
211 rcu_read_lock();
211 xas_for_each(&xas, entry, ULONG_MAX) 212 xas_for_each(&xas, entry, ULONG_MAX)
212 seen++; 213 seen++;
214 rcu_read_unlock();
213 XA_BUG_ON(xa, seen != 2); 215 XA_BUG_ON(xa, seen != 2);
214 216
215 /* One of which is marked */ 217 /* One of which is marked */
216 xas_set(&xas, 0); 218 xas_set(&xas, 0);
217 seen = 0; 219 seen = 0;
220 rcu_read_lock();
218 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) 221 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
219 seen++; 222 seen++;
223 rcu_read_unlock();
220 XA_BUG_ON(xa, seen != 1); 224 XA_BUG_ON(xa, seen != 1);
221 } 225 }
222 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); 226 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
@@ -373,6 +377,12 @@ static noinline void check_reserve(struct xarray *xa)
373 xa_erase_index(xa, 12345678); 377 xa_erase_index(xa, 12345678);
374 XA_BUG_ON(xa, !xa_empty(xa)); 378 XA_BUG_ON(xa, !xa_empty(xa));
375 379
380 /* And so does xa_insert */
381 xa_reserve(xa, 12345678, GFP_KERNEL);
382 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
383 xa_erase_index(xa, 12345678);
384 XA_BUG_ON(xa, !xa_empty(xa));
385
376 /* Can iterate through a reserved entry */ 386 /* Can iterate through a reserved entry */
377 xa_store_index(xa, 5, GFP_KERNEL); 387 xa_store_index(xa, 5, GFP_KERNEL);
378 xa_reserve(xa, 6, GFP_KERNEL); 388 xa_reserve(xa, 6, GFP_KERNEL);
@@ -436,7 +446,9 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
436 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 446 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
437 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 447 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
438 448
449 xas_lock(&xas);
439 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index)); 450 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
451 xas_unlock(&xas);
440 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min)); 452 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
441 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min)); 453 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
442 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 454 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
@@ -452,9 +464,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
452 XA_STATE(xas, xa, index); 464 XA_STATE(xas, xa, index);
453 xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); 465 xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
454 466
467 xas_lock(&xas);
455 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0)); 468 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
456 XA_BUG_ON(xa, xas.xa_index != index); 469 XA_BUG_ON(xa, xas.xa_index != index);
457 XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); 470 XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
471 xas_unlock(&xas);
458 XA_BUG_ON(xa, !xa_empty(xa)); 472 XA_BUG_ON(xa, !xa_empty(xa));
459} 473}
460#endif 474#endif
@@ -498,7 +512,7 @@ static noinline void check_multi_store(struct xarray *xa)
498 rcu_read_unlock(); 512 rcu_read_unlock();
499 513
500 /* We can erase multiple values with a single store */ 514 /* We can erase multiple values with a single store */
501 xa_store_order(xa, 0, 63, NULL, GFP_KERNEL); 515 xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
502 XA_BUG_ON(xa, !xa_empty(xa)); 516 XA_BUG_ON(xa, !xa_empty(xa));
503 517
504 /* Even when the first slot is empty but the others aren't */ 518 /* Even when the first slot is empty but the others aren't */
@@ -702,7 +716,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
702 } 716 }
703} 717}
704 718
705static noinline void check_find(struct xarray *xa) 719static noinline void check_find_1(struct xarray *xa)
706{ 720{
707 unsigned long i, j, k; 721 unsigned long i, j, k;
708 722
@@ -748,6 +762,34 @@ static noinline void check_find(struct xarray *xa)
748 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); 762 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
749 } 763 }
750 XA_BUG_ON(xa, !xa_empty(xa)); 764 XA_BUG_ON(xa, !xa_empty(xa));
765}
766
767static noinline void check_find_2(struct xarray *xa)
768{
769 void *entry;
770 unsigned long i, j, index = 0;
771
772 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
773 XA_BUG_ON(xa, true);
774 }
775
776 for (i = 0; i < 1024; i++) {
777 xa_store_index(xa, index, GFP_KERNEL);
778 j = 0;
779 index = 0;
780 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
781 XA_BUG_ON(xa, xa_mk_value(index) != entry);
782 XA_BUG_ON(xa, index != j++);
783 }
784 }
785
786 xa_destroy(xa);
787}
788
789static noinline void check_find(struct xarray *xa)
790{
791 check_find_1(xa);
792 check_find_2(xa);
751 check_multi_find(xa); 793 check_multi_find(xa);
752 check_multi_find_2(xa); 794 check_multi_find_2(xa);
753} 795}
@@ -1067,7 +1109,7 @@ static noinline void check_store_range(struct xarray *xa)
1067 __check_store_range(xa, 4095 + i, 4095 + j); 1109 __check_store_range(xa, 4095 + i, 4095 + j);
1068 __check_store_range(xa, 4096 + i, 4096 + j); 1110 __check_store_range(xa, 4096 + i, 4096 + j);
1069 __check_store_range(xa, 123456 + i, 123456 + j); 1111 __check_store_range(xa, 123456 + i, 123456 + j);
1070 __check_store_range(xa, UINT_MAX + i, UINT_MAX + j); 1112 __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
1071 } 1113 }
1072 } 1114 }
1073} 1115}
@@ -1146,10 +1188,12 @@ static noinline void check_account(struct xarray *xa)
1146 XA_STATE(xas, xa, 1 << order); 1188 XA_STATE(xas, xa, 1 << order);
1147 1189
1148 xa_store_order(xa, 0, order, xa, GFP_KERNEL); 1190 xa_store_order(xa, 0, order, xa, GFP_KERNEL);
1191 rcu_read_lock();
1149 xas_load(&xas); 1192 xas_load(&xas);
1150 XA_BUG_ON(xa, xas.xa_node->count == 0); 1193 XA_BUG_ON(xa, xas.xa_node->count == 0);
1151 XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); 1194 XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
1152 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1195 XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
1196 rcu_read_unlock();
1153 1197
1154 xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order), 1198 xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
1155 GFP_KERNEL); 1199 GFP_KERNEL);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 59fee96c29a0..e4162f59a81c 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
427EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); 427EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
428 428
429 429
430void __noreturn 430void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
431__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
432{ 431{
433 unsigned long flags; 432 unsigned long flags;
434 433
diff --git a/lib/xarray.c b/lib/xarray.c
index 8b176f009c08..bbacca576593 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -610,8 +610,8 @@ static int xas_expand(struct xa_state *xas, void *head)
610 * (see the xa_cmpxchg() implementation for an example). 610 * (see the xa_cmpxchg() implementation for an example).
611 * 611 *
612 * Return: If the slot already existed, returns the contents of this slot. 612 * Return: If the slot already existed, returns the contents of this slot.
613 * If the slot was newly created, returns NULL. If it failed to create the 613 * If the slot was newly created, returns %NULL. If it failed to create the
614 * slot, returns NULL and indicates the error in @xas. 614 * slot, returns %NULL and indicates the error in @xas.
615 */ 615 */
616static void *xas_create(struct xa_state *xas) 616static void *xas_create(struct xa_state *xas)
617{ 617{
@@ -1334,44 +1334,31 @@ void *__xa_erase(struct xarray *xa, unsigned long index)
1334 XA_STATE(xas, xa, index); 1334 XA_STATE(xas, xa, index);
1335 return xas_result(&xas, xas_store(&xas, NULL)); 1335 return xas_result(&xas, xas_store(&xas, NULL));
1336} 1336}
1337EXPORT_SYMBOL_GPL(__xa_erase); 1337EXPORT_SYMBOL(__xa_erase);
1338 1338
1339/** 1339/**
1340 * xa_store() - Store this entry in the XArray. 1340 * xa_erase() - Erase this entry from the XArray.
1341 * @xa: XArray. 1341 * @xa: XArray.
1342 * @index: Index into array. 1342 * @index: Index of entry.
1343 * @entry: New entry.
1344 * @gfp: Memory allocation flags.
1345 * 1343 *
1346 * After this function returns, loads from this index will return @entry. 1344 * This function is the equivalent of calling xa_store() with %NULL as
1347 * Storing into an existing multislot entry updates the entry of every index. 1345 * the third argument. The XArray does not need to allocate memory, so
1348 * The marks associated with @index are unaffected unless @entry is %NULL. 1346 * the user does not need to provide GFP flags.
1349 * 1347 *
1350 * Context: Process context. Takes and releases the xa_lock. May sleep 1348 * Context: Any context. Takes and releases the xa_lock.
1351 * if the @gfp flags permit. 1349 * Return: The entry which used to be at this index.
1352 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1353 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1354 * failed.
1355 */ 1350 */
1356void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) 1351void *xa_erase(struct xarray *xa, unsigned long index)
1357{ 1352{
1358 XA_STATE(xas, xa, index); 1353 void *entry;
1359 void *curr;
1360
1361 if (WARN_ON_ONCE(xa_is_internal(entry)))
1362 return XA_ERROR(-EINVAL);
1363 1354
1364 do { 1355 xa_lock(xa);
1365 xas_lock(&xas); 1356 entry = __xa_erase(xa, index);
1366 curr = xas_store(&xas, entry); 1357 xa_unlock(xa);
1367 if (xa_track_free(xa) && entry)
1368 xas_clear_mark(&xas, XA_FREE_MARK);
1369 xas_unlock(&xas);
1370 } while (xas_nomem(&xas, gfp));
1371 1358
1372 return xas_result(&xas, curr); 1359 return entry;
1373} 1360}
1374EXPORT_SYMBOL(xa_store); 1361EXPORT_SYMBOL(xa_erase);
1375 1362
1376/** 1363/**
1377 * __xa_store() - Store this entry in the XArray. 1364 * __xa_store() - Store this entry in the XArray.
@@ -1395,10 +1382,12 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1395 1382
1396 if (WARN_ON_ONCE(xa_is_internal(entry))) 1383 if (WARN_ON_ONCE(xa_is_internal(entry)))
1397 return XA_ERROR(-EINVAL); 1384 return XA_ERROR(-EINVAL);
1385 if (xa_track_free(xa) && !entry)
1386 entry = XA_ZERO_ENTRY;
1398 1387
1399 do { 1388 do {
1400 curr = xas_store(&xas, entry); 1389 curr = xas_store(&xas, entry);
1401 if (xa_track_free(xa) && entry) 1390 if (xa_track_free(xa))
1402 xas_clear_mark(&xas, XA_FREE_MARK); 1391 xas_clear_mark(&xas, XA_FREE_MARK);
1403 } while (__xas_nomem(&xas, gfp)); 1392 } while (__xas_nomem(&xas, gfp));
1404 1393
@@ -1407,45 +1396,33 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1407EXPORT_SYMBOL(__xa_store); 1396EXPORT_SYMBOL(__xa_store);
1408 1397
1409/** 1398/**
1410 * xa_cmpxchg() - Conditionally replace an entry in the XArray. 1399 * xa_store() - Store this entry in the XArray.
1411 * @xa: XArray. 1400 * @xa: XArray.
1412 * @index: Index into array. 1401 * @index: Index into array.
1413 * @old: Old value to test against. 1402 * @entry: New entry.
1414 * @entry: New value to place in array.
1415 * @gfp: Memory allocation flags. 1403 * @gfp: Memory allocation flags.
1416 * 1404 *
1417 * If the entry at @index is the same as @old, replace it with @entry. 1405 * After this function returns, loads from this index will return @entry.
1418 * If the return value is equal to @old, then the exchange was successful. 1406 * Storing into an existing multislot entry updates the entry of every index.
1407 * The marks associated with @index are unaffected unless @entry is %NULL.
1419 * 1408 *
1420 * Context: Process context. Takes and releases the xa_lock. May sleep 1409 * Context: Any context. Takes and releases the xa_lock.
1421 * if the @gfp flags permit. 1410 * May sleep if the @gfp flags permit.
1422 * Return: The old value at this index or xa_err() if an error happened. 1411 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1412 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1413 * failed.
1423 */ 1414 */
1424void *xa_cmpxchg(struct xarray *xa, unsigned long index, 1415void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1425 void *old, void *entry, gfp_t gfp)
1426{ 1416{
1427 XA_STATE(xas, xa, index);
1428 void *curr; 1417 void *curr;
1429 1418
1430 if (WARN_ON_ONCE(xa_is_internal(entry))) 1419 xa_lock(xa);
1431 return XA_ERROR(-EINVAL); 1420 curr = __xa_store(xa, index, entry, gfp);
1432 1421 xa_unlock(xa);
1433 do {
1434 xas_lock(&xas);
1435 curr = xas_load(&xas);
1436 if (curr == XA_ZERO_ENTRY)
1437 curr = NULL;
1438 if (curr == old) {
1439 xas_store(&xas, entry);
1440 if (xa_track_free(xa) && entry)
1441 xas_clear_mark(&xas, XA_FREE_MARK);
1442 }
1443 xas_unlock(&xas);
1444 } while (xas_nomem(&xas, gfp));
1445 1422
1446 return xas_result(&xas, curr); 1423 return curr;
1447} 1424}
1448EXPORT_SYMBOL(xa_cmpxchg); 1425EXPORT_SYMBOL(xa_store);
1449 1426
1450/** 1427/**
1451 * __xa_cmpxchg() - Store this entry in the XArray. 1428 * __xa_cmpxchg() - Store this entry in the XArray.
@@ -1471,6 +1448,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1471 1448
1472 if (WARN_ON_ONCE(xa_is_internal(entry))) 1449 if (WARN_ON_ONCE(xa_is_internal(entry)))
1473 return XA_ERROR(-EINVAL); 1450 return XA_ERROR(-EINVAL);
1451 if (xa_track_free(xa) && !entry)
1452 entry = XA_ZERO_ENTRY;
1474 1453
1475 do { 1454 do {
1476 curr = xas_load(&xas); 1455 curr = xas_load(&xas);
@@ -1478,7 +1457,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1478 curr = NULL; 1457 curr = NULL;
1479 if (curr == old) { 1458 if (curr == old) {
1480 xas_store(&xas, entry); 1459 xas_store(&xas, entry);
1481 if (xa_track_free(xa) && entry) 1460 if (xa_track_free(xa))
1482 xas_clear_mark(&xas, XA_FREE_MARK); 1461 xas_clear_mark(&xas, XA_FREE_MARK);
1483 } 1462 }
1484 } while (__xas_nomem(&xas, gfp)); 1463 } while (__xas_nomem(&xas, gfp));
@@ -1488,7 +1467,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1488EXPORT_SYMBOL(__xa_cmpxchg); 1467EXPORT_SYMBOL(__xa_cmpxchg);
1489 1468
1490/** 1469/**
1491 * xa_reserve() - Reserve this index in the XArray. 1470 * __xa_reserve() - Reserve this index in the XArray.
1492 * @xa: XArray. 1471 * @xa: XArray.
1493 * @index: Index into array. 1472 * @index: Index into array.
1494 * @gfp: Memory allocation flags. 1473 * @gfp: Memory allocation flags.
@@ -1496,33 +1475,32 @@ EXPORT_SYMBOL(__xa_cmpxchg);
1496 * Ensures there is somewhere to store an entry at @index in the array. 1475 * Ensures there is somewhere to store an entry at @index in the array.
1497 * If there is already something stored at @index, this function does 1476 * If there is already something stored at @index, this function does
1498 * nothing. If there was nothing there, the entry is marked as reserved. 1477 * nothing. If there was nothing there, the entry is marked as reserved.
1499 * Loads from @index will continue to see a %NULL pointer until a 1478 * Loading from a reserved entry returns a %NULL pointer.
1500 * subsequent store to @index.
1501 * 1479 *
1502 * If you do not use the entry that you have reserved, call xa_release() 1480 * If you do not use the entry that you have reserved, call xa_release()
1503 * or xa_erase() to free any unnecessary memory. 1481 * or xa_erase() to free any unnecessary memory.
1504 * 1482 *
1505 * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe 1483 * Context: Any context. Expects the xa_lock to be held on entry. May
1506 * if specified in XArray flags. May sleep if the @gfp flags permit. 1484 * release the lock, sleep and reacquire the lock if the @gfp flags permit.
1507 * Return: 0 if the reservation succeeded or -ENOMEM if it failed. 1485 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1508 */ 1486 */
1509int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) 1487int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
1510{ 1488{
1511 XA_STATE(xas, xa, index); 1489 XA_STATE(xas, xa, index);
1512 unsigned int lock_type = xa_lock_type(xa);
1513 void *curr; 1490 void *curr;
1514 1491
1515 do { 1492 do {
1516 xas_lock_type(&xas, lock_type);
1517 curr = xas_load(&xas); 1493 curr = xas_load(&xas);
1518 if (!curr) 1494 if (!curr) {
1519 xas_store(&xas, XA_ZERO_ENTRY); 1495 xas_store(&xas, XA_ZERO_ENTRY);
1520 xas_unlock_type(&xas, lock_type); 1496 if (xa_track_free(xa))
1521 } while (xas_nomem(&xas, gfp)); 1497 xas_clear_mark(&xas, XA_FREE_MARK);
1498 }
1499 } while (__xas_nomem(&xas, gfp));
1522 1500
1523 return xas_error(&xas); 1501 return xas_error(&xas);
1524} 1502}
1525EXPORT_SYMBOL(xa_reserve); 1503EXPORT_SYMBOL(__xa_reserve);
1526 1504
1527#ifdef CONFIG_XARRAY_MULTI 1505#ifdef CONFIG_XARRAY_MULTI
1528static void xas_set_range(struct xa_state *xas, unsigned long first, 1506static void xas_set_range(struct xa_state *xas, unsigned long first,
@@ -1587,8 +1565,9 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
1587 do { 1565 do {
1588 xas_lock(&xas); 1566 xas_lock(&xas);
1589 if (entry) { 1567 if (entry) {
1590 unsigned int order = (last == ~0UL) ? 64 : 1568 unsigned int order = BITS_PER_LONG;
1591 ilog2(last + 1); 1569 if (last + 1)
1570 order = __ffs(last + 1);
1592 xas_set_order(&xas, last, order); 1571 xas_set_order(&xas, last, order);
1593 xas_create(&xas); 1572 xas_create(&xas);
1594 if (xas_error(&xas)) 1573 if (xas_error(&xas))
@@ -1662,7 +1641,7 @@ EXPORT_SYMBOL(__xa_alloc);
1662 * @index: Index of entry. 1641 * @index: Index of entry.
1663 * @mark: Mark number. 1642 * @mark: Mark number.
1664 * 1643 *
1665 * Attempting to set a mark on a NULL entry does not succeed. 1644 * Attempting to set a mark on a %NULL entry does not succeed.
1666 * 1645 *
1667 * Context: Any context. Expects xa_lock to be held on entry. 1646 * Context: Any context. Expects xa_lock to be held on entry.
1668 */ 1647 */
@@ -1674,7 +1653,7 @@ void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1674 if (entry) 1653 if (entry)
1675 xas_set_mark(&xas, mark); 1654 xas_set_mark(&xas, mark);
1676} 1655}
1677EXPORT_SYMBOL_GPL(__xa_set_mark); 1656EXPORT_SYMBOL(__xa_set_mark);
1678 1657
1679/** 1658/**
1680 * __xa_clear_mark() - Clear this mark on this entry while locked. 1659 * __xa_clear_mark() - Clear this mark on this entry while locked.
@@ -1692,7 +1671,7 @@ void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1692 if (entry) 1671 if (entry)
1693 xas_clear_mark(&xas, mark); 1672 xas_clear_mark(&xas, mark);
1694} 1673}
1695EXPORT_SYMBOL_GPL(__xa_clear_mark); 1674EXPORT_SYMBOL(__xa_clear_mark);
1696 1675
1697/** 1676/**
1698 * xa_get_mark() - Inquire whether this mark is set on this entry. 1677 * xa_get_mark() - Inquire whether this mark is set on this entry.
@@ -1732,7 +1711,7 @@ EXPORT_SYMBOL(xa_get_mark);
1732 * @index: Index of entry. 1711 * @index: Index of entry.
1733 * @mark: Mark number. 1712 * @mark: Mark number.
1734 * 1713 *
1735 * Attempting to set a mark on a NULL entry does not succeed. 1714 * Attempting to set a mark on a %NULL entry does not succeed.
1736 * 1715 *
1737 * Context: Process context. Takes and releases the xa_lock. 1716 * Context: Process context. Takes and releases the xa_lock.
1738 */ 1717 */
@@ -1829,6 +1808,8 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
1829 entry = xas_find_marked(&xas, max, filter); 1808 entry = xas_find_marked(&xas, max, filter);
1830 else 1809 else
1831 entry = xas_find(&xas, max); 1810 entry = xas_find(&xas, max);
1811 if (xas.xa_node == XAS_BOUNDS)
1812 break;
1832 if (xas.xa_shift) { 1813 if (xas.xa_shift) {
1833 if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) 1814 if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
1834 continue; 1815 continue;
@@ -1899,7 +1880,7 @@ static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
1899 * 1880 *
1900 * The @filter may be an XArray mark value, in which case entries which are 1881 * The @filter may be an XArray mark value, in which case entries which are
1901 * marked with that mark will be copied. It may also be %XA_PRESENT, in 1882 * marked with that mark will be copied. It may also be %XA_PRESENT, in
1902 * which case all entries which are not NULL will be copied. 1883 * which case all entries which are not %NULL will be copied.
1903 * 1884 *
1904 * The entries returned may not represent a snapshot of the XArray at a 1885 * The entries returned may not represent a snapshot of the XArray at a
1905 * moment in time. For example, if another thread stores to index 5, then 1886 * moment in time. For example, if another thread stores to index 5, then
diff --git a/mm/gup.c b/mm/gup.c
index f76e77a2d34b..8cb68a50dbdf 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -385,11 +385,17 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
385 * @vma: vm_area_struct mapping @address 385 * @vma: vm_area_struct mapping @address
386 * @address: virtual address to look up 386 * @address: virtual address to look up
387 * @flags: flags modifying lookup behaviour 387 * @flags: flags modifying lookup behaviour
388 * @page_mask: on output, *page_mask is set according to the size of the page 388 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
389 * pointer to output page_mask
389 * 390 *
390 * @flags can have FOLL_ flags set, defined in <linux/mm.h> 391 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
391 * 392 *
392 * Returns the mapped (struct page *), %NULL if no mapping exists, or 393 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
394 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
395 *
396 * On output, the @ctx->page_mask is set according to the size of the page.
397 *
398 * Return: the mapped (struct page *), %NULL if no mapping exists, or
393 * an error pointer if there is a mapping to something not represented 399 * an error pointer if there is a mapping to something not represented
394 * by a page descriptor (see also vm_normal_page()). 400 * by a page descriptor (see also vm_normal_page()).
395 */ 401 */
@@ -696,12 +702,11 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
696 if (!vma || start >= vma->vm_end) { 702 if (!vma || start >= vma->vm_end) {
697 vma = find_extend_vma(mm, start); 703 vma = find_extend_vma(mm, start);
698 if (!vma && in_gate_area(mm, start)) { 704 if (!vma && in_gate_area(mm, start)) {
699 int ret;
700 ret = get_gate_page(mm, start & PAGE_MASK, 705 ret = get_gate_page(mm, start & PAGE_MASK,
701 gup_flags, &vma, 706 gup_flags, &vma,
702 pages ? &pages[i] : NULL); 707 pages ? &pages[i] : NULL);
703 if (ret) 708 if (ret)
704 return i ? : ret; 709 goto out;
705 ctx.page_mask = 0; 710 ctx.page_mask = 0;
706 goto next_page; 711 goto next_page;
707 } 712 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 55478ab3c83b..f2d19e4fe854 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -632,37 +632,27 @@ release:
632static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr) 632static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
633{ 633{
634 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 634 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
635 gfp_t this_node = 0; 635 const gfp_t gfp_mask = GFP_TRANSHUGE_LIGHT | __GFP_THISNODE;
636
637#ifdef CONFIG_NUMA
638 struct mempolicy *pol;
639 /*
640 * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
641 * specified, to express a general desire to stay on the current
642 * node for optimistic allocation attempts. If the defrag mode
643 * and/or madvise hint requires the direct reclaim then we prefer
644 * to fallback to other node rather than node reclaim because that
645 * can lead to excessive reclaim even though there is free memory
646 * on other nodes. We expect that NUMA preferences are specified
647 * by memory policies.
648 */
649 pol = get_vma_policy(vma, addr);
650 if (pol->mode != MPOL_BIND)
651 this_node = __GFP_THISNODE;
652 mpol_cond_put(pol);
653#endif
654 636
637 /* Always do synchronous compaction */
655 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 638 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
656 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 639 return GFP_TRANSHUGE | __GFP_THISNODE |
640 (vma_madvised ? 0 : __GFP_NORETRY);
641
642 /* Kick kcompactd and fail quickly */
657 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 643 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
658 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node; 644 return gfp_mask | __GFP_KSWAPD_RECLAIM;
645
646 /* Synchronous compaction if madvised, otherwise kick kcompactd */
659 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 647 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
660 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 648 return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM :
661 __GFP_KSWAPD_RECLAIM | this_node); 649 __GFP_KSWAPD_RECLAIM);
650
651 /* Only do synchronous compaction if madvised */
662 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 652 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
663 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 653 return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
664 this_node); 654
665 return GFP_TRANSHUGE_LIGHT | this_node; 655 return gfp_mask;
666} 656}
667 657
668/* Caller must hold page table lock. */ 658/* Caller must hold page table lock. */
@@ -2350,7 +2340,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
2350 } 2340 }
2351} 2341}
2352 2342
2353static void freeze_page(struct page *page) 2343static void unmap_page(struct page *page)
2354{ 2344{
2355 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | 2345 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
2356 TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; 2346 TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
@@ -2365,7 +2355,7 @@ static void freeze_page(struct page *page)
2365 VM_BUG_ON_PAGE(!unmap_success, page); 2355 VM_BUG_ON_PAGE(!unmap_success, page);
2366} 2356}
2367 2357
2368static void unfreeze_page(struct page *page) 2358static void remap_page(struct page *page)
2369{ 2359{
2370 int i; 2360 int i;
2371 if (PageTransHuge(page)) { 2361 if (PageTransHuge(page)) {
@@ -2402,6 +2392,12 @@ static void __split_huge_page_tail(struct page *head, int tail,
2402 (1L << PG_unevictable) | 2392 (1L << PG_unevictable) |
2403 (1L << PG_dirty))); 2393 (1L << PG_dirty)));
2404 2394
2395 /* ->mapping in first tail page is compound_mapcount */
2396 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2397 page_tail);
2398 page_tail->mapping = head->mapping;
2399 page_tail->index = head->index + tail;
2400
2405 /* Page flags must be visible before we make the page non-compound. */ 2401 /* Page flags must be visible before we make the page non-compound. */
2406 smp_wmb(); 2402 smp_wmb();
2407 2403
@@ -2422,12 +2418,6 @@ static void __split_huge_page_tail(struct page *head, int tail,
2422 if (page_is_idle(head)) 2418 if (page_is_idle(head))
2423 set_page_idle(page_tail); 2419 set_page_idle(page_tail);
2424 2420
2425 /* ->mapping in first tail page is compound_mapcount */
2426 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2427 page_tail);
2428 page_tail->mapping = head->mapping;
2429
2430 page_tail->index = head->index + tail;
2431 page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); 2421 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2432 2422
2433 /* 2423 /*
@@ -2439,12 +2429,11 @@ static void __split_huge_page_tail(struct page *head, int tail,
2439} 2429}
2440 2430
2441static void __split_huge_page(struct page *page, struct list_head *list, 2431static void __split_huge_page(struct page *page, struct list_head *list,
2442 unsigned long flags) 2432 pgoff_t end, unsigned long flags)
2443{ 2433{
2444 struct page *head = compound_head(page); 2434 struct page *head = compound_head(page);
2445 struct zone *zone = page_zone(head); 2435 struct zone *zone = page_zone(head);
2446 struct lruvec *lruvec; 2436 struct lruvec *lruvec;
2447 pgoff_t end = -1;
2448 int i; 2437 int i;
2449 2438
2450 lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); 2439 lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
@@ -2452,9 +2441,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2452 /* complete memcg works before add pages to LRU */ 2441 /* complete memcg works before add pages to LRU */
2453 mem_cgroup_split_huge_fixup(head); 2442 mem_cgroup_split_huge_fixup(head);
2454 2443
2455 if (!PageAnon(page))
2456 end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
2457
2458 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 2444 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
2459 __split_huge_page_tail(head, i, lruvec, list); 2445 __split_huge_page_tail(head, i, lruvec, list);
2460 /* Some pages can be beyond i_size: drop them from page cache */ 2446 /* Some pages can be beyond i_size: drop them from page cache */
@@ -2483,7 +2469,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2483 2469
2484 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2470 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2485 2471
2486 unfreeze_page(head); 2472 remap_page(head);
2487 2473
2488 for (i = 0; i < HPAGE_PMD_NR; i++) { 2474 for (i = 0; i < HPAGE_PMD_NR; i++) {
2489 struct page *subpage = head + i; 2475 struct page *subpage = head + i;
@@ -2626,6 +2612,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2626 int count, mapcount, extra_pins, ret; 2612 int count, mapcount, extra_pins, ret;
2627 bool mlocked; 2613 bool mlocked;
2628 unsigned long flags; 2614 unsigned long flags;
2615 pgoff_t end;
2629 2616
2630 VM_BUG_ON_PAGE(is_huge_zero_page(page), page); 2617 VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
2631 VM_BUG_ON_PAGE(!PageLocked(page), page); 2618 VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -2648,6 +2635,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2648 ret = -EBUSY; 2635 ret = -EBUSY;
2649 goto out; 2636 goto out;
2650 } 2637 }
2638 end = -1;
2651 mapping = NULL; 2639 mapping = NULL;
2652 anon_vma_lock_write(anon_vma); 2640 anon_vma_lock_write(anon_vma);
2653 } else { 2641 } else {
@@ -2661,10 +2649,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2661 2649
2662 anon_vma = NULL; 2650 anon_vma = NULL;
2663 i_mmap_lock_read(mapping); 2651 i_mmap_lock_read(mapping);
2652
2653 /*
2654 *__split_huge_page() may need to trim off pages beyond EOF:
2655 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2656 * which cannot be nested inside the page tree lock. So note
2657 * end now: i_size itself may be changed at any moment, but
2658 * head page lock is good enough to serialize the trimming.
2659 */
2660 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2664 } 2661 }
2665 2662
2666 /* 2663 /*
2667 * Racy check if we can split the page, before freeze_page() will 2664 * Racy check if we can split the page, before unmap_page() will
2668 * split PMDs 2665 * split PMDs
2669 */ 2666 */
2670 if (!can_split_huge_page(head, &extra_pins)) { 2667 if (!can_split_huge_page(head, &extra_pins)) {
@@ -2673,7 +2670,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2673 } 2670 }
2674 2671
2675 mlocked = PageMlocked(page); 2672 mlocked = PageMlocked(page);
2676 freeze_page(head); 2673 unmap_page(head);
2677 VM_BUG_ON_PAGE(compound_mapcount(head), head); 2674 VM_BUG_ON_PAGE(compound_mapcount(head), head);
2678 2675
2679 /* Make sure the page is not on per-CPU pagevec as it takes pin */ 2676 /* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -2707,7 +2704,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2707 if (mapping) 2704 if (mapping)
2708 __dec_node_page_state(page, NR_SHMEM_THPS); 2705 __dec_node_page_state(page, NR_SHMEM_THPS);
2709 spin_unlock(&pgdata->split_queue_lock); 2706 spin_unlock(&pgdata->split_queue_lock);
2710 __split_huge_page(page, list, flags); 2707 __split_huge_page(page, list, end, flags);
2711 if (PageSwapCache(head)) { 2708 if (PageSwapCache(head)) {
2712 swp_entry_t entry = { .val = page_private(head) }; 2709 swp_entry_t entry = { .val = page_private(head) };
2713 2710
@@ -2727,7 +2724,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2727fail: if (mapping) 2724fail: if (mapping)
2728 xa_unlock(&mapping->i_pages); 2725 xa_unlock(&mapping->i_pages);
2729 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); 2726 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2730 unfreeze_page(head); 2727 remap_page(head);
2731 ret = -EBUSY; 2728 ret = -EBUSY;
2732 } 2729 }
2733 2730
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c007fb5fb8d5..705a3e9cc910 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3233,7 +3233,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3233int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 3233int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3234 struct vm_area_struct *vma) 3234 struct vm_area_struct *vma)
3235{ 3235{
3236 pte_t *src_pte, *dst_pte, entry; 3236 pte_t *src_pte, *dst_pte, entry, dst_entry;
3237 struct page *ptepage; 3237 struct page *ptepage;
3238 unsigned long addr; 3238 unsigned long addr;
3239 int cow; 3239 int cow;
@@ -3261,15 +3261,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3261 break; 3261 break;
3262 } 3262 }
3263 3263
3264 /* If the pagetables are shared don't copy or take references */ 3264 /*
3265 if (dst_pte == src_pte) 3265 * If the pagetables are shared don't copy or take references.
3266 * dst_pte == src_pte is the common case of src/dest sharing.
3267 *
3268 * However, src could have 'unshared' and dst shares with
3269 * another vma. If dst_pte !none, this implies sharing.
3270 * Check here before taking page table lock, and once again
3271 * after taking the lock below.
3272 */
3273 dst_entry = huge_ptep_get(dst_pte);
3274 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3266 continue; 3275 continue;
3267 3276
3268 dst_ptl = huge_pte_lock(h, dst, dst_pte); 3277 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3269 src_ptl = huge_pte_lockptr(h, src, src_pte); 3278 src_ptl = huge_pte_lockptr(h, src, src_pte);
3270 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 3279 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3271 entry = huge_ptep_get(src_pte); 3280 entry = huge_ptep_get(src_pte);
3272 if (huge_pte_none(entry)) { /* skip none entry */ 3281 dst_entry = huge_ptep_get(dst_pte);
3282 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3283 /*
3284 * Skip if src entry none. Also, skip in the
3285 * unlikely case dst entry !none as this implies
3286 * sharing with another vma.
3287 */
3273 ; 3288 ;
3274 } else if (unlikely(is_hugetlb_entry_migration(entry) || 3289 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3275 is_hugetlb_entry_hwpoisoned(entry))) { 3290 is_hugetlb_entry_hwpoisoned(entry))) {
@@ -4065,7 +4080,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4065 4080
4066 /* fallback to copy_from_user outside mmap_sem */ 4081 /* fallback to copy_from_user outside mmap_sem */
4067 if (unlikely(ret)) { 4082 if (unlikely(ret)) {
4068 ret = -EFAULT; 4083 ret = -ENOENT;
4069 *pagep = page; 4084 *pagep = page;
4070 /* don't free the page */ 4085 /* don't free the page */
4071 goto out; 4086 goto out;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c13625c1ad5e..8e2ff195ecb3 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1287 * collapse_shmem - collapse small tmpfs/shmem pages into huge one. 1287 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
1288 * 1288 *
1289 * Basic scheme is simple, details are more complex: 1289 * Basic scheme is simple, details are more complex:
1290 * - allocate and freeze a new huge page; 1290 * - allocate and lock a new huge page;
1291 * - scan page cache replacing old pages with the new one 1291 * - scan page cache replacing old pages with the new one
1292 * + swap in pages if necessary; 1292 * + swap in pages if necessary;
1293 * + fill in gaps; 1293 * + fill in gaps;
@@ -1295,11 +1295,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1295 * - if replacing succeeds: 1295 * - if replacing succeeds:
1296 * + copy data over; 1296 * + copy data over;
1297 * + free old pages; 1297 * + free old pages;
1298 * + unfreeze huge page; 1298 * + unlock huge page;
1299 * - if replacing failed; 1299 * - if replacing failed;
1300 * + put all pages back and unfreeze them; 1300 * + put all pages back and unfreeze them;
1301 * + restore gaps in the page cache; 1301 * + restore gaps in the page cache;
1302 * + free huge page; 1302 * + unlock and free huge page;
1303 */ 1303 */
1304static void collapse_shmem(struct mm_struct *mm, 1304static void collapse_shmem(struct mm_struct *mm,
1305 struct address_space *mapping, pgoff_t start, 1305 struct address_space *mapping, pgoff_t start,
@@ -1329,19 +1329,6 @@ static void collapse_shmem(struct mm_struct *mm,
1329 goto out; 1329 goto out;
1330 } 1330 }
1331 1331
1332 new_page->index = start;
1333 new_page->mapping = mapping;
1334 __SetPageSwapBacked(new_page);
1335 __SetPageLocked(new_page);
1336 BUG_ON(!page_ref_freeze(new_page, 1));
1337
1338 /*
1339 * At this point the new_page is 'frozen' (page_count() is zero),
1340 * locked and not up-to-date. It's safe to insert it into the page
1341 * cache, because nobody would be able to map it or use it in other
1342 * way until we unfreeze it.
1343 */
1344
1345 /* This will be less messy when we use multi-index entries */ 1332 /* This will be less messy when we use multi-index entries */
1346 do { 1333 do {
1347 xas_lock_irq(&xas); 1334 xas_lock_irq(&xas);
@@ -1349,19 +1336,44 @@ static void collapse_shmem(struct mm_struct *mm,
1349 if (!xas_error(&xas)) 1336 if (!xas_error(&xas))
1350 break; 1337 break;
1351 xas_unlock_irq(&xas); 1338 xas_unlock_irq(&xas);
1352 if (!xas_nomem(&xas, GFP_KERNEL)) 1339 if (!xas_nomem(&xas, GFP_KERNEL)) {
1340 mem_cgroup_cancel_charge(new_page, memcg, true);
1341 result = SCAN_FAIL;
1353 goto out; 1342 goto out;
1343 }
1354 } while (1); 1344 } while (1);
1355 1345
1346 __SetPageLocked(new_page);
1347 __SetPageSwapBacked(new_page);
1348 new_page->index = start;
1349 new_page->mapping = mapping;
1350
1351 /*
1352 * At this point the new_page is locked and not up-to-date.
1353 * It's safe to insert it into the page cache, because nobody would
1354 * be able to map it or use it in another way until we unlock it.
1355 */
1356
1356 xas_set(&xas, start); 1357 xas_set(&xas, start);
1357 for (index = start; index < end; index++) { 1358 for (index = start; index < end; index++) {
1358 struct page *page = xas_next(&xas); 1359 struct page *page = xas_next(&xas);
1359 1360
1360 VM_BUG_ON(index != xas.xa_index); 1361 VM_BUG_ON(index != xas.xa_index);
1361 if (!page) { 1362 if (!page) {
1363 /*
1364 * Stop if extent has been truncated or hole-punched,
1365 * and is now completely empty.
1366 */
1367 if (index == start) {
1368 if (!xas_next_entry(&xas, end - 1)) {
1369 result = SCAN_TRUNCATED;
1370 goto xa_locked;
1371 }
1372 xas_set(&xas, index);
1373 }
1362 if (!shmem_charge(mapping->host, 1)) { 1374 if (!shmem_charge(mapping->host, 1)) {
1363 result = SCAN_FAIL; 1375 result = SCAN_FAIL;
1364 break; 1376 goto xa_locked;
1365 } 1377 }
1366 xas_store(&xas, new_page + (index % HPAGE_PMD_NR)); 1378 xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
1367 nr_none++; 1379 nr_none++;
@@ -1376,13 +1388,12 @@ static void collapse_shmem(struct mm_struct *mm,
1376 result = SCAN_FAIL; 1388 result = SCAN_FAIL;
1377 goto xa_unlocked; 1389 goto xa_unlocked;
1378 } 1390 }
1379 xas_lock_irq(&xas);
1380 xas_set(&xas, index);
1381 } else if (trylock_page(page)) { 1391 } else if (trylock_page(page)) {
1382 get_page(page); 1392 get_page(page);
1393 xas_unlock_irq(&xas);
1383 } else { 1394 } else {
1384 result = SCAN_PAGE_LOCK; 1395 result = SCAN_PAGE_LOCK;
1385 break; 1396 goto xa_locked;
1386 } 1397 }
1387 1398
1388 /* 1399 /*
@@ -1391,17 +1402,24 @@ static void collapse_shmem(struct mm_struct *mm,
1391 */ 1402 */
1392 VM_BUG_ON_PAGE(!PageLocked(page), page); 1403 VM_BUG_ON_PAGE(!PageLocked(page), page);
1393 VM_BUG_ON_PAGE(!PageUptodate(page), page); 1404 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1394 VM_BUG_ON_PAGE(PageTransCompound(page), page); 1405
1406 /*
1407 * If file was truncated then extended, or hole-punched, before
1408 * we locked the first page, then a THP might be there already.
1409 */
1410 if (PageTransCompound(page)) {
1411 result = SCAN_PAGE_COMPOUND;
1412 goto out_unlock;
1413 }
1395 1414
1396 if (page_mapping(page) != mapping) { 1415 if (page_mapping(page) != mapping) {
1397 result = SCAN_TRUNCATED; 1416 result = SCAN_TRUNCATED;
1398 goto out_unlock; 1417 goto out_unlock;
1399 } 1418 }
1400 xas_unlock_irq(&xas);
1401 1419
1402 if (isolate_lru_page(page)) { 1420 if (isolate_lru_page(page)) {
1403 result = SCAN_DEL_PAGE_LRU; 1421 result = SCAN_DEL_PAGE_LRU;
1404 goto out_isolate_failed; 1422 goto out_unlock;
1405 } 1423 }
1406 1424
1407 if (page_mapped(page)) 1425 if (page_mapped(page))
@@ -1421,7 +1439,9 @@ static void collapse_shmem(struct mm_struct *mm,
1421 */ 1439 */
1422 if (!page_ref_freeze(page, 3)) { 1440 if (!page_ref_freeze(page, 3)) {
1423 result = SCAN_PAGE_COUNT; 1441 result = SCAN_PAGE_COUNT;
1424 goto out_lru; 1442 xas_unlock_irq(&xas);
1443 putback_lru_page(page);
1444 goto out_unlock;
1425 } 1445 }
1426 1446
1427 /* 1447 /*
@@ -1433,71 +1453,74 @@ static void collapse_shmem(struct mm_struct *mm,
1433 /* Finally, replace with the new page. */ 1453 /* Finally, replace with the new page. */
1434 xas_store(&xas, new_page + (index % HPAGE_PMD_NR)); 1454 xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
1435 continue; 1455 continue;
1436out_lru:
1437 xas_unlock_irq(&xas);
1438 putback_lru_page(page);
1439out_isolate_failed:
1440 unlock_page(page);
1441 put_page(page);
1442 goto xa_unlocked;
1443out_unlock: 1456out_unlock:
1444 unlock_page(page); 1457 unlock_page(page);
1445 put_page(page); 1458 put_page(page);
1446 break; 1459 goto xa_unlocked;
1447 } 1460 }
1448 xas_unlock_irq(&xas);
1449 1461
1462 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1463 if (nr_none) {
1464 struct zone *zone = page_zone(new_page);
1465
1466 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1467 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1468 }
1469
1470xa_locked:
1471 xas_unlock_irq(&xas);
1450xa_unlocked: 1472xa_unlocked:
1473
1451 if (result == SCAN_SUCCEED) { 1474 if (result == SCAN_SUCCEED) {
1452 struct page *page, *tmp; 1475 struct page *page, *tmp;
1453 struct zone *zone = page_zone(new_page);
1454 1476
1455 /* 1477 /*
1456 * Replacing old pages with new one has succeeded, now we 1478 * Replacing old pages with new one has succeeded, now we
1457 * need to copy the content and free the old pages. 1479 * need to copy the content and free the old pages.
1458 */ 1480 */
1481 index = start;
1459 list_for_each_entry_safe(page, tmp, &pagelist, lru) { 1482 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1483 while (index < page->index) {
1484 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1485 index++;
1486 }
1460 copy_highpage(new_page + (page->index % HPAGE_PMD_NR), 1487 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1461 page); 1488 page);
1462 list_del(&page->lru); 1489 list_del(&page->lru);
1463 unlock_page(page);
1464 page_ref_unfreeze(page, 1);
1465 page->mapping = NULL; 1490 page->mapping = NULL;
1491 page_ref_unfreeze(page, 1);
1466 ClearPageActive(page); 1492 ClearPageActive(page);
1467 ClearPageUnevictable(page); 1493 ClearPageUnevictable(page);
1494 unlock_page(page);
1468 put_page(page); 1495 put_page(page);
1496 index++;
1469 } 1497 }
1470 1498 while (index < end) {
1471 local_irq_disable(); 1499 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1472 __inc_node_page_state(new_page, NR_SHMEM_THPS); 1500 index++;
1473 if (nr_none) {
1474 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1475 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1476 } 1501 }
1477 local_irq_enable();
1478 1502
1479 /*
1480 * Remove pte page tables, so we can re-fault
1481 * the page as huge.
1482 */
1483 retract_page_tables(mapping, start);
1484
1485 /* Everything is ready, let's unfreeze the new_page */
1486 set_page_dirty(new_page);
1487 SetPageUptodate(new_page); 1503 SetPageUptodate(new_page);
1488 page_ref_unfreeze(new_page, HPAGE_PMD_NR); 1504 page_ref_add(new_page, HPAGE_PMD_NR - 1);
1505 set_page_dirty(new_page);
1489 mem_cgroup_commit_charge(new_page, memcg, false, true); 1506 mem_cgroup_commit_charge(new_page, memcg, false, true);
1490 lru_cache_add_anon(new_page); 1507 lru_cache_add_anon(new_page);
1491 unlock_page(new_page);
1492 1508
1509 /*
1510 * Remove pte page tables, so we can re-fault the page as huge.
1511 */
1512 retract_page_tables(mapping, start);
1493 *hpage = NULL; 1513 *hpage = NULL;
1494 1514
1495 khugepaged_pages_collapsed++; 1515 khugepaged_pages_collapsed++;
1496 } else { 1516 } else {
1497 struct page *page; 1517 struct page *page;
1518
1498 /* Something went wrong: roll back page cache changes */ 1519 /* Something went wrong: roll back page cache changes */
1499 shmem_uncharge(mapping->host, nr_none);
1500 xas_lock_irq(&xas); 1520 xas_lock_irq(&xas);
1521 mapping->nrpages -= nr_none;
1522 shmem_uncharge(mapping->host, nr_none);
1523
1501 xas_set(&xas, start); 1524 xas_set(&xas, start);
1502 xas_for_each(&xas, page, end - 1) { 1525 xas_for_each(&xas, page, end - 1) {
1503 page = list_first_entry_or_null(&pagelist, 1526 page = list_first_entry_or_null(&pagelist,
@@ -1519,19 +1542,18 @@ xa_unlocked:
1519 xas_store(&xas, page); 1542 xas_store(&xas, page);
1520 xas_pause(&xas); 1543 xas_pause(&xas);
1521 xas_unlock_irq(&xas); 1544 xas_unlock_irq(&xas);
1522 putback_lru_page(page);
1523 unlock_page(page); 1545 unlock_page(page);
1546 putback_lru_page(page);
1524 xas_lock_irq(&xas); 1547 xas_lock_irq(&xas);
1525 } 1548 }
1526 VM_BUG_ON(nr_none); 1549 VM_BUG_ON(nr_none);
1527 xas_unlock_irq(&xas); 1550 xas_unlock_irq(&xas);
1528 1551
1529 /* Unfreeze new_page, caller would take care about freeing it */
1530 page_ref_unfreeze(new_page, 1);
1531 mem_cgroup_cancel_charge(new_page, memcg, true); 1552 mem_cgroup_cancel_charge(new_page, memcg, true);
1532 unlock_page(new_page);
1533 new_page->mapping = NULL; 1553 new_page->mapping = NULL;
1534 } 1554 }
1555
1556 unlock_page(new_page);
1535out: 1557out:
1536 VM_BUG_ON(!list_empty(&pagelist)); 1558 VM_BUG_ON(!list_empty(&pagelist));
1537 /* TODO: tracepoints */ 1559 /* TODO: tracepoints */
diff --git a/mm/memblock.c b/mm/memblock.c
index 7df468c8ebc8..9a2d5ae81ae1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1179,7 +1179,7 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1179 1179
1180#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 1180#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1181/* 1181/*
1182 * Common iterator interface used to define for_each_mem_range(). 1182 * Common iterator interface used to define for_each_mem_pfn_range().
1183 */ 1183 */
1184void __init_memblock __next_mem_pfn_range(int *idx, int nid, 1184void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1185 unsigned long *out_start_pfn, 1185 unsigned long *out_start_pfn,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5837a067124d..69e278b469ef 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1662,7 +1662,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1662 * freeing by another task. It is the caller's responsibility to free the 1662 * freeing by another task. It is the caller's responsibility to free the
1663 * extra reference for shared policies. 1663 * extra reference for shared policies.
1664 */ 1664 */
1665struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1665static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1666 unsigned long addr) 1666 unsigned long addr)
1667{ 1667{
1668 struct mempolicy *pol = __get_vma_policy(vma, addr); 1668 struct mempolicy *pol = __get_vma_policy(vma, addr);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5cb3c8..2ec9cc407216 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4061,17 +4061,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4061 int reserve_flags; 4061 int reserve_flags;
4062 4062
4063 /* 4063 /*
4064 * In the slowpath, we sanity check order to avoid ever trying to
4065 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
4066 * be using allocators in order of preference for an area that is
4067 * too large.
4068 */
4069 if (order >= MAX_ORDER) {
4070 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4071 return NULL;
4072 }
4073
4074 /*
4075 * We also sanity check to catch abuse of atomic reserves being used by 4064 * We also sanity check to catch abuse of atomic reserves being used by
4076 * callers that are not in atomic context. 4065 * callers that are not in atomic context.
4077 */ 4066 */
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4364 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */ 4353 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
4365 struct alloc_context ac = { }; 4354 struct alloc_context ac = { };
4366 4355
4356 /*
4357 * There are several places where we assume that the order value is sane
4358 * so bail out early if the request is out of bound.
4359 */
4360 if (unlikely(order >= MAX_ORDER)) {
4361 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4362 return NULL;
4363 }
4364
4367 gfp_mask &= gfp_allowed_mask; 4365 gfp_mask &= gfp_allowed_mask;
4368 alloc_mask = gfp_mask; 4366 alloc_mask = gfp_mask;
4369 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) 4367 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
@@ -5815,8 +5813,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
5815 unsigned long size) 5813 unsigned long size)
5816{ 5814{
5817 struct pglist_data *pgdat = zone->zone_pgdat; 5815 struct pglist_data *pgdat = zone->zone_pgdat;
5816 int zone_idx = zone_idx(zone) + 1;
5818 5817
5819 pgdat->nr_zones = zone_idx(zone) + 1; 5818 if (zone_idx > pgdat->nr_zones)
5819 pgdat->nr_zones = zone_idx;
5820 5820
5821 zone->zone_start_pfn = zone_start_pfn; 5821 zone->zone_start_pfn = zone_start_pfn;
5822 5822
@@ -7789,6 +7789,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7789 goto unmovable; 7789 goto unmovable;
7790 7790
7791 /* 7791 /*
7792 * If the zone is movable and we have ruled out all reserved
7793 * pages then it should be reasonably safe to assume the rest
7794 * is movable.
7795 */
7796 if (zone_idx(zone) == ZONE_MOVABLE)
7797 continue;
7798
7799 /*
7792 * Hugepages are not in LRU lists, but they're movable. 7800 * Hugepages are not in LRU lists, but they're movable.
7793 * We need not scan over tail pages bacause we don't 7801 * We need not scan over tail pages bacause we don't
7794 * handle each tail page individually in migration. 7802 * handle each tail page individually in migration.
diff --git a/mm/rmap.c b/mm/rmap.c
index 1e79fac3186b..85b7f9423352 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1627,16 +1627,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1627 address + PAGE_SIZE); 1627 address + PAGE_SIZE);
1628 } else { 1628 } else {
1629 /* 1629 /*
1630 * We should not need to notify here as we reach this 1630 * This is a locked file-backed page, thus it cannot
1631 * case only from freeze_page() itself only call from 1631 * be removed from the page cache and replaced by a new
1632 * split_huge_page_to_list() so everything below must 1632 * page before mmu_notifier_invalidate_range_end, so no
1633 * be true:
1634 * - page is not anonymous
1635 * - page is locked
1636 *
1637 * So as it is a locked file back page thus it can not
1638 * be remove from the page cache and replace by a new
1639 * page before mmu_notifier_invalidate_range_end so no
1640 * concurrent thread might update its page table to 1633 * concurrent thread might update its page table to
1641 * point at new page while a device still is using this 1634 * point at new page while a device still is using this
1642 * page. 1635 * page.
diff --git a/mm/shmem.c b/mm/shmem.c
index ea26d7a0342d..cddc72ac44d8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -297,12 +297,14 @@ bool shmem_charge(struct inode *inode, long pages)
297 if (!shmem_inode_acct_block(inode, pages)) 297 if (!shmem_inode_acct_block(inode, pages))
298 return false; 298 return false;
299 299
300 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
301 inode->i_mapping->nrpages += pages;
302
300 spin_lock_irqsave(&info->lock, flags); 303 spin_lock_irqsave(&info->lock, flags);
301 info->alloced += pages; 304 info->alloced += pages;
302 inode->i_blocks += pages * BLOCKS_PER_PAGE; 305 inode->i_blocks += pages * BLOCKS_PER_PAGE;
303 shmem_recalc_inode(inode); 306 shmem_recalc_inode(inode);
304 spin_unlock_irqrestore(&info->lock, flags); 307 spin_unlock_irqrestore(&info->lock, flags);
305 inode->i_mapping->nrpages += pages;
306 308
307 return true; 309 return true;
308} 310}
@@ -312,6 +314,8 @@ void shmem_uncharge(struct inode *inode, long pages)
312 struct shmem_inode_info *info = SHMEM_I(inode); 314 struct shmem_inode_info *info = SHMEM_I(inode);
313 unsigned long flags; 315 unsigned long flags;
314 316
317 /* nrpages adjustment done by __delete_from_page_cache() or caller */
318
315 spin_lock_irqsave(&info->lock, flags); 319 spin_lock_irqsave(&info->lock, flags);
316 info->alloced -= pages; 320 info->alloced -= pages;
317 inode->i_blocks -= pages * BLOCKS_PER_PAGE; 321 inode->i_blocks -= pages * BLOCKS_PER_PAGE;
@@ -1509,11 +1513,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1509{ 1513{
1510 struct page *oldpage, *newpage; 1514 struct page *oldpage, *newpage;
1511 struct address_space *swap_mapping; 1515 struct address_space *swap_mapping;
1516 swp_entry_t entry;
1512 pgoff_t swap_index; 1517 pgoff_t swap_index;
1513 int error; 1518 int error;
1514 1519
1515 oldpage = *pagep; 1520 oldpage = *pagep;
1516 swap_index = page_private(oldpage); 1521 entry.val = page_private(oldpage);
1522 swap_index = swp_offset(entry);
1517 swap_mapping = page_mapping(oldpage); 1523 swap_mapping = page_mapping(oldpage);
1518 1524
1519 /* 1525 /*
@@ -1532,7 +1538,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1532 __SetPageLocked(newpage); 1538 __SetPageLocked(newpage);
1533 __SetPageSwapBacked(newpage); 1539 __SetPageSwapBacked(newpage);
1534 SetPageUptodate(newpage); 1540 SetPageUptodate(newpage);
1535 set_page_private(newpage, swap_index); 1541 set_page_private(newpage, entry.val);
1536 SetPageSwapCache(newpage); 1542 SetPageSwapCache(newpage);
1537 1543
1538 /* 1544 /*
@@ -2214,6 +2220,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2214 struct page *page; 2220 struct page *page;
2215 pte_t _dst_pte, *dst_pte; 2221 pte_t _dst_pte, *dst_pte;
2216 int ret; 2222 int ret;
2223 pgoff_t offset, max_off;
2217 2224
2218 ret = -ENOMEM; 2225 ret = -ENOMEM;
2219 if (!shmem_inode_acct_block(inode, 1)) 2226 if (!shmem_inode_acct_block(inode, 1))
@@ -2236,7 +2243,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2236 *pagep = page; 2243 *pagep = page;
2237 shmem_inode_unacct_blocks(inode, 1); 2244 shmem_inode_unacct_blocks(inode, 1);
2238 /* don't free the page */ 2245 /* don't free the page */
2239 return -EFAULT; 2246 return -ENOENT;
2240 } 2247 }
2241 } else { /* mfill_zeropage_atomic */ 2248 } else { /* mfill_zeropage_atomic */
2242 clear_highpage(page); 2249 clear_highpage(page);
@@ -2251,6 +2258,12 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2251 __SetPageSwapBacked(page); 2258 __SetPageSwapBacked(page);
2252 __SetPageUptodate(page); 2259 __SetPageUptodate(page);
2253 2260
2261 ret = -EFAULT;
2262 offset = linear_page_index(dst_vma, dst_addr);
2263 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2264 if (unlikely(offset >= max_off))
2265 goto out_release;
2266
2254 ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false); 2267 ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
2255 if (ret) 2268 if (ret)
2256 goto out_release; 2269 goto out_release;
@@ -2265,9 +2278,25 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2265 _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 2278 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2266 if (dst_vma->vm_flags & VM_WRITE) 2279 if (dst_vma->vm_flags & VM_WRITE)
2267 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 2280 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2281 else {
2282 /*
2283 * We don't set the pte dirty if the vma has no
2284 * VM_WRITE permission, so mark the page dirty or it
2285 * could be freed from under us. We could do it
2286 * unconditionally before unlock_page(), but doing it
2287 * only if VM_WRITE is not set is faster.
2288 */
2289 set_page_dirty(page);
2290 }
2268 2291
2269 ret = -EEXIST;
2270 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 2292 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2293
2294 ret = -EFAULT;
2295 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2296 if (unlikely(offset >= max_off))
2297 goto out_release_uncharge_unlock;
2298
2299 ret = -EEXIST;
2271 if (!pte_none(*dst_pte)) 2300 if (!pte_none(*dst_pte))
2272 goto out_release_uncharge_unlock; 2301 goto out_release_uncharge_unlock;
2273 2302
@@ -2285,13 +2314,15 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2285 2314
2286 /* No need to invalidate - it was non-present before */ 2315 /* No need to invalidate - it was non-present before */
2287 update_mmu_cache(dst_vma, dst_addr, dst_pte); 2316 update_mmu_cache(dst_vma, dst_addr, dst_pte);
2288 unlock_page(page);
2289 pte_unmap_unlock(dst_pte, ptl); 2317 pte_unmap_unlock(dst_pte, ptl);
2318 unlock_page(page);
2290 ret = 0; 2319 ret = 0;
2291out: 2320out:
2292 return ret; 2321 return ret;
2293out_release_uncharge_unlock: 2322out_release_uncharge_unlock:
2294 pte_unmap_unlock(dst_pte, ptl); 2323 pte_unmap_unlock(dst_pte, ptl);
2324 ClearPageDirty(page);
2325 delete_from_page_cache(page);
2295out_release_uncharge: 2326out_release_uncharge:
2296 mem_cgroup_cancel_charge(page, memcg, false); 2327 mem_cgroup_cancel_charge(page, memcg, false);
2297out_release: 2328out_release:
@@ -2563,9 +2594,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2563 inode_lock(inode); 2594 inode_lock(inode);
2564 /* We're holding i_mutex so we can access i_size directly */ 2595 /* We're holding i_mutex so we can access i_size directly */
2565 2596
2566 if (offset < 0) 2597 if (offset < 0 || offset >= inode->i_size)
2567 offset = -EINVAL;
2568 else if (offset >= inode->i_size)
2569 offset = -ENXIO; 2598 offset = -ENXIO;
2570 else { 2599 else {
2571 start = offset >> PAGE_SHIFT; 2600 start = offset >> PAGE_SHIFT;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 644f746e167a..8688ae65ef58 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2813,7 +2813,7 @@ static struct swap_info_struct *alloc_swap_info(void)
2813 unsigned int type; 2813 unsigned int type;
2814 int i; 2814 int i;
2815 2815
2816 p = kzalloc(sizeof(*p), GFP_KERNEL); 2816 p = kvzalloc(sizeof(*p), GFP_KERNEL);
2817 if (!p) 2817 if (!p)
2818 return ERR_PTR(-ENOMEM); 2818 return ERR_PTR(-ENOMEM);
2819 2819
@@ -2824,7 +2824,7 @@ static struct swap_info_struct *alloc_swap_info(void)
2824 } 2824 }
2825 if (type >= MAX_SWAPFILES) { 2825 if (type >= MAX_SWAPFILES) {
2826 spin_unlock(&swap_lock); 2826 spin_unlock(&swap_lock);
2827 kfree(p); 2827 kvfree(p);
2828 return ERR_PTR(-EPERM); 2828 return ERR_PTR(-EPERM);
2829 } 2829 }
2830 if (type >= nr_swapfiles) { 2830 if (type >= nr_swapfiles) {
@@ -2838,7 +2838,7 @@ static struct swap_info_struct *alloc_swap_info(void)
2838 smp_wmb(); 2838 smp_wmb();
2839 nr_swapfiles++; 2839 nr_swapfiles++;
2840 } else { 2840 } else {
2841 kfree(p); 2841 kvfree(p);
2842 p = swap_info[type]; 2842 p = swap_info[type];
2843 /* 2843 /*
2844 * Do not memset this entry: a racing procfs swap_next() 2844 * Do not memset this entry: a racing procfs swap_next()
diff --git a/mm/truncate.c b/mm/truncate.c
index 45d68e90b703..798e7ccfb030 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -517,9 +517,13 @@ void truncate_inode_pages_final(struct address_space *mapping)
517 */ 517 */
518 xa_lock_irq(&mapping->i_pages); 518 xa_lock_irq(&mapping->i_pages);
519 xa_unlock_irq(&mapping->i_pages); 519 xa_unlock_irq(&mapping->i_pages);
520
521 truncate_inode_pages(mapping, 0);
522 } 520 }
521
522 /*
523 * Cleancache needs notification even if there are no pages or shadow
524 * entries.
525 */
526 truncate_inode_pages(mapping, 0);
523} 527}
524EXPORT_SYMBOL(truncate_inode_pages_final); 528EXPORT_SYMBOL(truncate_inode_pages_final);
525 529
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 5029f241908f..458acda96f20 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -33,6 +33,8 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
33 void *page_kaddr; 33 void *page_kaddr;
34 int ret; 34 int ret;
35 struct page *page; 35 struct page *page;
36 pgoff_t offset, max_off;
37 struct inode *inode;
36 38
37 if (!*pagep) { 39 if (!*pagep) {
38 ret = -ENOMEM; 40 ret = -ENOMEM;
@@ -48,7 +50,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
48 50
49 /* fallback to copy_from_user outside mmap_sem */ 51 /* fallback to copy_from_user outside mmap_sem */
50 if (unlikely(ret)) { 52 if (unlikely(ret)) {
51 ret = -EFAULT; 53 ret = -ENOENT;
52 *pagep = page; 54 *pagep = page;
53 /* don't free the page */ 55 /* don't free the page */
54 goto out; 56 goto out;
@@ -73,8 +75,17 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
73 if (dst_vma->vm_flags & VM_WRITE) 75 if (dst_vma->vm_flags & VM_WRITE)
74 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 76 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
75 77
76 ret = -EEXIST;
77 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 78 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
79 if (dst_vma->vm_file) {
80 /* the shmem MAP_PRIVATE case requires checking the i_size */
81 inode = dst_vma->vm_file->f_inode;
82 offset = linear_page_index(dst_vma, dst_addr);
83 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
84 ret = -EFAULT;
85 if (unlikely(offset >= max_off))
86 goto out_release_uncharge_unlock;
87 }
88 ret = -EEXIST;
78 if (!pte_none(*dst_pte)) 89 if (!pte_none(*dst_pte))
79 goto out_release_uncharge_unlock; 90 goto out_release_uncharge_unlock;
80 91
@@ -108,11 +119,22 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm,
108 pte_t _dst_pte, *dst_pte; 119 pte_t _dst_pte, *dst_pte;
109 spinlock_t *ptl; 120 spinlock_t *ptl;
110 int ret; 121 int ret;
122 pgoff_t offset, max_off;
123 struct inode *inode;
111 124
112 _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr), 125 _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
113 dst_vma->vm_page_prot)); 126 dst_vma->vm_page_prot));
114 ret = -EEXIST;
115 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 127 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
128 if (dst_vma->vm_file) {
129 /* the shmem MAP_PRIVATE case requires checking the i_size */
130 inode = dst_vma->vm_file->f_inode;
131 offset = linear_page_index(dst_vma, dst_addr);
132 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
133 ret = -EFAULT;
134 if (unlikely(offset >= max_off))
135 goto out_unlock;
136 }
137 ret = -EEXIST;
116 if (!pte_none(*dst_pte)) 138 if (!pte_none(*dst_pte))
117 goto out_unlock; 139 goto out_unlock;
118 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 140 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
@@ -205,8 +227,9 @@ retry:
205 if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) 227 if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
206 goto out_unlock; 228 goto out_unlock;
207 /* 229 /*
208 * Only allow __mcopy_atomic_hugetlb on userfaultfd 230 * Check the vma is registered in uffd, this is
209 * registered ranges. 231 * required to enforce the VM_MAYWRITE check done at
232 * uffd registration time.
210 */ 233 */
211 if (!dst_vma->vm_userfaultfd_ctx.ctx) 234 if (!dst_vma->vm_userfaultfd_ctx.ctx)
212 goto out_unlock; 235 goto out_unlock;
@@ -274,7 +297,7 @@ retry:
274 297
275 cond_resched(); 298 cond_resched();
276 299
277 if (unlikely(err == -EFAULT)) { 300 if (unlikely(err == -ENOENT)) {
278 up_read(&dst_mm->mmap_sem); 301 up_read(&dst_mm->mmap_sem);
279 BUG_ON(!page); 302 BUG_ON(!page);
280 303
@@ -380,7 +403,17 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
380{ 403{
381 ssize_t err; 404 ssize_t err;
382 405
383 if (vma_is_anonymous(dst_vma)) { 406 /*
407 * The normal page fault path for a shmem will invoke the
408 * fault, fill the hole in the file and COW it right away. The
409 * result generates plain anonymous memory. So when we are
410 * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll
411 * generate anonymous memory directly without actually filling
412 * the hole. For the MAP_PRIVATE case the robustness check
413 * only happens in the pagetable (to verify it's still none)
414 * and not in the radix tree.
415 */
416 if (!(dst_vma->vm_flags & VM_SHARED)) {
384 if (!zeropage) 417 if (!zeropage)
385 err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, 418 err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
386 dst_addr, src_addr, page); 419 dst_addr, src_addr, page);
@@ -449,13 +482,9 @@ retry:
449 if (!dst_vma) 482 if (!dst_vma)
450 goto out_unlock; 483 goto out_unlock;
451 /* 484 /*
452 * Be strict and only allow __mcopy_atomic on userfaultfd 485 * Check the vma is registered in uffd, this is required to
453 * registered ranges to prevent userland errors going 486 * enforce the VM_MAYWRITE check done at uffd registration
454 * unnoticed. As far as the VM consistency is concerned, it 487 * time.
455 * would be perfectly safe to remove this check, but there's
456 * no useful usage for __mcopy_atomic ouside of userfaultfd
457 * registered ranges. This is after all why these are ioctls
458 * belonging to the userfaultfd and not syscalls.
459 */ 488 */
460 if (!dst_vma->vm_userfaultfd_ctx.ctx) 489 if (!dst_vma->vm_userfaultfd_ctx.ctx)
461 goto out_unlock; 490 goto out_unlock;
@@ -489,7 +518,8 @@ retry:
489 * dst_vma. 518 * dst_vma.
490 */ 519 */
491 err = -ENOMEM; 520 err = -ENOMEM;
492 if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma))) 521 if (!(dst_vma->vm_flags & VM_SHARED) &&
522 unlikely(anon_vma_prepare(dst_vma)))
493 goto out_unlock; 523 goto out_unlock;
494 524
495 while (src_addr < src_start + len) { 525 while (src_addr < src_start + len) {
@@ -530,7 +560,7 @@ retry:
530 src_addr, &page, zeropage); 560 src_addr, &page, zeropage);
531 cond_resched(); 561 cond_resched();
532 562
533 if (unlikely(err == -EFAULT)) { 563 if (unlikely(err == -ENOENT)) {
534 void *page_kaddr; 564 void *page_kaddr;
535 565
536 up_read(&dst_mm->mmap_sem); 566 up_read(&dst_mm->mmap_sem);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6038ce593ce3..9c624595e904 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1827,12 +1827,13 @@ static bool need_update(int cpu)
1827 1827
1828 /* 1828 /*
1829 * The fast way of checking if there are any vmstat diffs. 1829 * The fast way of checking if there are any vmstat diffs.
1830 * This works because the diffs are byte sized items.
1831 */ 1830 */
1832 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS)) 1831 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
1832 sizeof(p->vm_stat_diff[0])))
1833 return true; 1833 return true;
1834#ifdef CONFIG_NUMA 1834#ifdef CONFIG_NUMA
1835 if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS)) 1835 if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
1836 sizeof(p->vm_numa_stat_diff[0])))
1836 return true; 1837 return true;
1837#endif 1838#endif
1838 } 1839 }
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 4b366d181f35..aee9b0b8d907 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -99,6 +99,7 @@ struct z3fold_header {
99#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) 99#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
100 100
101#define BUDDY_MASK (0x3) 101#define BUDDY_MASK (0x3)
102#define BUDDY_SHIFT 2
102 103
103/** 104/**
104 * struct z3fold_pool - stores metadata for each z3fold pool 105 * struct z3fold_pool - stores metadata for each z3fold pool
@@ -145,7 +146,7 @@ enum z3fold_page_flags {
145 MIDDLE_CHUNK_MAPPED, 146 MIDDLE_CHUNK_MAPPED,
146 NEEDS_COMPACTING, 147 NEEDS_COMPACTING,
147 PAGE_STALE, 148 PAGE_STALE,
148 UNDER_RECLAIM 149 PAGE_CLAIMED, /* by either reclaim or free */
149}; 150};
150 151
151/***************** 152/*****************
@@ -174,7 +175,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
174 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); 175 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
175 clear_bit(NEEDS_COMPACTING, &page->private); 176 clear_bit(NEEDS_COMPACTING, &page->private);
176 clear_bit(PAGE_STALE, &page->private); 177 clear_bit(PAGE_STALE, &page->private);
177 clear_bit(UNDER_RECLAIM, &page->private); 178 clear_bit(PAGE_CLAIMED, &page->private);
178 179
179 spin_lock_init(&zhdr->page_lock); 180 spin_lock_init(&zhdr->page_lock);
180 kref_init(&zhdr->refcount); 181 kref_init(&zhdr->refcount);
@@ -223,8 +224,11 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
223 unsigned long handle; 224 unsigned long handle;
224 225
225 handle = (unsigned long)zhdr; 226 handle = (unsigned long)zhdr;
226 if (bud != HEADLESS) 227 if (bud != HEADLESS) {
227 handle += (bud + zhdr->first_num) & BUDDY_MASK; 228 handle |= (bud + zhdr->first_num) & BUDDY_MASK;
229 if (bud == LAST)
230 handle |= (zhdr->last_chunks << BUDDY_SHIFT);
231 }
228 return handle; 232 return handle;
229} 233}
230 234
@@ -234,6 +238,12 @@ static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
234 return (struct z3fold_header *)(handle & PAGE_MASK); 238 return (struct z3fold_header *)(handle & PAGE_MASK);
235} 239}
236 240
241/* only for LAST bud, returns zero otherwise */
242static unsigned short handle_to_chunks(unsigned long handle)
243{
244 return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
245}
246
237/* 247/*
238 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle 248 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
239 * but that doesn't matter. because the masking will result in the 249 * but that doesn't matter. because the masking will result in the
@@ -720,37 +730,39 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
720 page = virt_to_page(zhdr); 730 page = virt_to_page(zhdr);
721 731
722 if (test_bit(PAGE_HEADLESS, &page->private)) { 732 if (test_bit(PAGE_HEADLESS, &page->private)) {
723 /* HEADLESS page stored */ 733 /* if a headless page is under reclaim, just leave.
724 bud = HEADLESS; 734 * NB: we use test_and_set_bit for a reason: if the bit
725 } else { 735 * has not been set before, we release this page
726 z3fold_page_lock(zhdr); 736 * immediately so we don't care about its value any more.
727 bud = handle_to_buddy(handle); 737 */
728 738 if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
729 switch (bud) { 739 spin_lock(&pool->lock);
730 case FIRST: 740 list_del(&page->lru);
731 zhdr->first_chunks = 0; 741 spin_unlock(&pool->lock);
732 break; 742 free_z3fold_page(page);
733 case MIDDLE: 743 atomic64_dec(&pool->pages_nr);
734 zhdr->middle_chunks = 0;
735 zhdr->start_middle = 0;
736 break;
737 case LAST:
738 zhdr->last_chunks = 0;
739 break;
740 default:
741 pr_err("%s: unknown bud %d\n", __func__, bud);
742 WARN_ON(1);
743 z3fold_page_unlock(zhdr);
744 return;
745 } 744 }
745 return;
746 } 746 }
747 747
748 if (bud == HEADLESS) { 748 /* Non-headless case */
749 spin_lock(&pool->lock); 749 z3fold_page_lock(zhdr);
750 list_del(&page->lru); 750 bud = handle_to_buddy(handle);
751 spin_unlock(&pool->lock); 751
752 free_z3fold_page(page); 752 switch (bud) {
753 atomic64_dec(&pool->pages_nr); 753 case FIRST:
754 zhdr->first_chunks = 0;
755 break;
756 case MIDDLE:
757 zhdr->middle_chunks = 0;
758 break;
759 case LAST:
760 zhdr->last_chunks = 0;
761 break;
762 default:
763 pr_err("%s: unknown bud %d\n", __func__, bud);
764 WARN_ON(1);
765 z3fold_page_unlock(zhdr);
754 return; 766 return;
755 } 767 }
756 768
@@ -758,7 +770,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
758 atomic64_dec(&pool->pages_nr); 770 atomic64_dec(&pool->pages_nr);
759 return; 771 return;
760 } 772 }
761 if (test_bit(UNDER_RECLAIM, &page->private)) { 773 if (test_bit(PAGE_CLAIMED, &page->private)) {
762 z3fold_page_unlock(zhdr); 774 z3fold_page_unlock(zhdr);
763 return; 775 return;
764 } 776 }
@@ -836,20 +848,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
836 } 848 }
837 list_for_each_prev(pos, &pool->lru) { 849 list_for_each_prev(pos, &pool->lru) {
838 page = list_entry(pos, struct page, lru); 850 page = list_entry(pos, struct page, lru);
851
852 /* this bit could have been set by free, in which case
853 * we pass over to the next page in the pool.
854 */
855 if (test_and_set_bit(PAGE_CLAIMED, &page->private))
856 continue;
857
858 zhdr = page_address(page);
839 if (test_bit(PAGE_HEADLESS, &page->private)) 859 if (test_bit(PAGE_HEADLESS, &page->private))
840 /* candidate found */
841 break; 860 break;
842 861
843 zhdr = page_address(page); 862 if (!z3fold_page_trylock(zhdr)) {
844 if (!z3fold_page_trylock(zhdr)) 863 zhdr = NULL;
845 continue; /* can't evict at this point */ 864 continue; /* can't evict at this point */
865 }
846 kref_get(&zhdr->refcount); 866 kref_get(&zhdr->refcount);
847 list_del_init(&zhdr->buddy); 867 list_del_init(&zhdr->buddy);
848 zhdr->cpu = -1; 868 zhdr->cpu = -1;
849 set_bit(UNDER_RECLAIM, &page->private);
850 break; 869 break;
851 } 870 }
852 871
872 if (!zhdr)
873 break;
874
853 list_del_init(&page->lru); 875 list_del_init(&page->lru);
854 spin_unlock(&pool->lock); 876 spin_unlock(&pool->lock);
855 877
@@ -898,6 +920,7 @@ next:
898 if (test_bit(PAGE_HEADLESS, &page->private)) { 920 if (test_bit(PAGE_HEADLESS, &page->private)) {
899 if (ret == 0) { 921 if (ret == 0) {
900 free_z3fold_page(page); 922 free_z3fold_page(page);
923 atomic64_dec(&pool->pages_nr);
901 return 0; 924 return 0;
902 } 925 }
903 spin_lock(&pool->lock); 926 spin_lock(&pool->lock);
@@ -905,7 +928,7 @@ next:
905 spin_unlock(&pool->lock); 928 spin_unlock(&pool->lock);
906 } else { 929 } else {
907 z3fold_page_lock(zhdr); 930 z3fold_page_lock(zhdr);
908 clear_bit(UNDER_RECLAIM, &page->private); 931 clear_bit(PAGE_CLAIMED, &page->private);
909 if (kref_put(&zhdr->refcount, 932 if (kref_put(&zhdr->refcount,
910 release_z3fold_page_locked)) { 933 release_z3fold_page_locked)) {
911 atomic64_dec(&pool->pages_nr); 934 atomic64_dec(&pool->pages_nr);
@@ -964,7 +987,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
964 set_bit(MIDDLE_CHUNK_MAPPED, &page->private); 987 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
965 break; 988 break;
966 case LAST: 989 case LAST:
967 addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); 990 addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
968 break; 991 break;
969 default: 992 default:
970 pr_err("unknown buddy id %d\n", buddy); 993 pr_err("unknown buddy id %d\n", buddy);
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 9f481cfdf77d..e8090f099eb8 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -352,19 +352,21 @@ out:
352 */ 352 */
353int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface) 353int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
354{ 354{
355 static const size_t tvlv_padding = sizeof(__be32);
355 struct batadv_elp_packet *elp_packet; 356 struct batadv_elp_packet *elp_packet;
356 unsigned char *elp_buff; 357 unsigned char *elp_buff;
357 u32 random_seqno; 358 u32 random_seqno;
358 size_t size; 359 size_t size;
359 int res = -ENOMEM; 360 int res = -ENOMEM;
360 361
361 size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN; 362 size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
362 hard_iface->bat_v.elp_skb = dev_alloc_skb(size); 363 hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
363 if (!hard_iface->bat_v.elp_skb) 364 if (!hard_iface->bat_v.elp_skb)
364 goto out; 365 goto out;
365 366
366 skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN); 367 skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
367 elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN); 368 elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
369 BATADV_ELP_HLEN + tvlv_padding);
368 elp_packet = (struct batadv_elp_packet *)elp_buff; 370 elp_packet = (struct batadv_elp_packet *)elp_buff;
369 371
370 elp_packet->packet_type = BATADV_ELP; 372 elp_packet->packet_type = BATADV_ELP;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0fddc17106bd..5b71a289d04f 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
275 kfree(entry); 275 kfree(entry);
276 276
277 packet = (struct batadv_frag_packet *)skb_out->data; 277 packet = (struct batadv_frag_packet *)skb_out->data;
278 size = ntohs(packet->total_size); 278 size = ntohs(packet->total_size) + hdr_size;
279 279
280 /* Make room for the rest of the fragments. */ 280 /* Make room for the rest of the fragments. */
281 if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { 281 if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2920e06a5403..04c19a37e500 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -102,12 +102,18 @@ struct br_tunnel_info {
102 struct metadata_dst *tunnel_dst; 102 struct metadata_dst *tunnel_dst;
103}; 103};
104 104
105/* private vlan flags */
106enum {
107 BR_VLFLAG_PER_PORT_STATS = BIT(0),
108};
109
105/** 110/**
106 * struct net_bridge_vlan - per-vlan entry 111 * struct net_bridge_vlan - per-vlan entry
107 * 112 *
108 * @vnode: rhashtable member 113 * @vnode: rhashtable member
109 * @vid: VLAN id 114 * @vid: VLAN id
110 * @flags: bridge vlan flags 115 * @flags: bridge vlan flags
116 * @priv_flags: private (in-kernel) bridge vlan flags
111 * @stats: per-cpu VLAN statistics 117 * @stats: per-cpu VLAN statistics
112 * @br: if MASTER flag set, this points to a bridge struct 118 * @br: if MASTER flag set, this points to a bridge struct
113 * @port: if MASTER flag unset, this points to a port struct 119 * @port: if MASTER flag unset, this points to a port struct
@@ -127,6 +133,7 @@ struct net_bridge_vlan {
127 struct rhash_head tnode; 133 struct rhash_head tnode;
128 u16 vid; 134 u16 vid;
129 u16 flags; 135 u16 flags;
136 u16 priv_flags;
130 struct br_vlan_stats __percpu *stats; 137 struct br_vlan_stats __percpu *stats;
131 union { 138 union {
132 struct net_bridge *br; 139 struct net_bridge *br;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 8c9297a01947..e84be08b8285 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -197,7 +197,7 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu)
197 v = container_of(rcu, struct net_bridge_vlan, rcu); 197 v = container_of(rcu, struct net_bridge_vlan, rcu);
198 WARN_ON(br_vlan_is_master(v)); 198 WARN_ON(br_vlan_is_master(v));
199 /* if we had per-port stats configured then free them here */ 199 /* if we had per-port stats configured then free them here */
200 if (v->brvlan->stats != v->stats) 200 if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
201 free_percpu(v->stats); 201 free_percpu(v->stats);
202 v->stats = NULL; 202 v->stats = NULL;
203 kfree(v); 203 kfree(v);
@@ -264,6 +264,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
264 err = -ENOMEM; 264 err = -ENOMEM;
265 goto out_filt; 265 goto out_filt;
266 } 266 }
267 v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
267 } else { 268 } else {
268 v->stats = masterv->stats; 269 v->stats = masterv->stats;
269 } 270 }
diff --git a/net/can/raw.c b/net/can/raw.c
index 1051eee82581..3aab7664933f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
745 } else 745 } else
746 ifindex = ro->ifindex; 746 ifindex = ro->ifindex;
747 747
748 if (ro->fd_frames) { 748 dev = dev_get_by_index(sock_net(sk), ifindex);
749 if (!dev)
750 return -ENXIO;
751
752 err = -EINVAL;
753 if (ro->fd_frames && dev->mtu == CANFD_MTU) {
749 if (unlikely(size != CANFD_MTU && size != CAN_MTU)) 754 if (unlikely(size != CANFD_MTU && size != CAN_MTU))
750 return -EINVAL; 755 goto put_dev;
751 } else { 756 } else {
752 if (unlikely(size != CAN_MTU)) 757 if (unlikely(size != CAN_MTU))
753 return -EINVAL; 758 goto put_dev;
754 } 759 }
755 760
756 dev = dev_get_by_index(sock_net(sk), ifindex);
757 if (!dev)
758 return -ENXIO;
759
760 skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv), 761 skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
761 msg->msg_flags & MSG_DONTWAIT, &err); 762 msg->msg_flags & MSG_DONTWAIT, &err);
762 if (!skb) 763 if (!skb)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 57fcc6b4bf6e..2f126eff275d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -580,9 +580,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
580 struct bio_vec bvec; 580 struct bio_vec bvec;
581 int ret; 581 int ret;
582 582
583 /* sendpage cannot properly handle pages with page_count == 0, 583 /*
584 * we need to fallback to sendmsg if that's the case */ 584 * sendpage cannot properly handle pages with page_count == 0,
585 if (page_count(page) >= 1) 585 * we need to fall back to sendmsg if that's the case.
586 *
587 * Same goes for slab pages: skb_can_coalesce() allows
588 * coalescing neighboring slab objects into a single frag which
589 * triggers one of hardened usercopy checks.
590 */
591 if (page_count(page) >= 1 && !PageSlab(page))
586 return __ceph_tcp_sendpage(sock, page, offset, size, more); 592 return __ceph_tcp_sendpage(sock, page, offset, size, more);
587 593
588 bvec.bv_page = page; 594 bvec.bv_page = page;
diff --git a/net/core/dev.c b/net/core/dev.c
index 77d43ae2a7bb..ddc551f24ba2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3272,7 +3272,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
3272 } 3272 }
3273 3273
3274 skb = next; 3274 skb = next;
3275 if (netif_xmit_stopped(txq) && skb) { 3275 if (netif_tx_queue_stopped(txq) && skb) {
3276 rc = NETDEV_TX_BUSY; 3276 rc = NETDEV_TX_BUSY;
3277 break; 3277 break;
3278 } 3278 }
@@ -5655,6 +5655,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
5655 skb->vlan_tci = 0; 5655 skb->vlan_tci = 0;
5656 skb->dev = napi->dev; 5656 skb->dev = napi->dev;
5657 skb->skb_iif = 0; 5657 skb->skb_iif = 0;
5658
5659 /* eth_type_trans() assumes pkt_type is PACKET_HOST */
5660 skb->pkt_type = PACKET_HOST;
5661
5658 skb->encapsulation = 0; 5662 skb->encapsulation = 0;
5659 skb_shinfo(skb)->gso_type = 0; 5663 skb_shinfo(skb)->gso_type = 0;
5660 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 5664 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@ -5966,11 +5970,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
5966 if (work_done) 5970 if (work_done)
5967 timeout = n->dev->gro_flush_timeout; 5971 timeout = n->dev->gro_flush_timeout;
5968 5972
5973 /* When the NAPI instance uses a timeout and keeps postponing
5974 * it, we need to bound somehow the time packets are kept in
5975 * the GRO layer
5976 */
5977 napi_gro_flush(n, !!timeout);
5969 if (timeout) 5978 if (timeout)
5970 hrtimer_start(&n->timer, ns_to_ktime(timeout), 5979 hrtimer_start(&n->timer, ns_to_ktime(timeout),
5971 HRTIMER_MODE_REL_PINNED); 5980 HRTIMER_MODE_REL_PINNED);
5972 else
5973 napi_gro_flush(n, false);
5974 } 5981 }
5975 if (unlikely(!list_empty(&n->poll_list))) { 5982 if (unlikely(!list_empty(&n->poll_list))) {
5976 /* If n->poll_list is not empty, we need to mask irqs */ 5983 /* If n->poll_list is not empty, we need to mask irqs */
diff --git a/net/core/filter.c b/net/core/filter.c
index e521c5ebc7d1..9a1327eb25fa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4852,18 +4852,17 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
4852 } else { 4852 } else {
4853 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; 4853 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
4854 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; 4854 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
4855 u16 hnum = ntohs(tuple->ipv6.dport);
4856 int sdif = inet6_sdif(skb); 4855 int sdif = inet6_sdif(skb);
4857 4856
4858 if (proto == IPPROTO_TCP) 4857 if (proto == IPPROTO_TCP)
4859 sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0, 4858 sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
4860 src6, tuple->ipv6.sport, 4859 src6, tuple->ipv6.sport,
4861 dst6, hnum, 4860 dst6, ntohs(tuple->ipv6.dport),
4862 dif, sdif, &refcounted); 4861 dif, sdif, &refcounted);
4863 else if (likely(ipv6_bpf_stub)) 4862 else if (likely(ipv6_bpf_stub))
4864 sk = ipv6_bpf_stub->udp6_lib_lookup(net, 4863 sk = ipv6_bpf_stub->udp6_lib_lookup(net,
4865 src6, tuple->ipv6.sport, 4864 src6, tuple->ipv6.sport,
4866 dst6, hnum, 4865 dst6, tuple->ipv6.dport,
4867 dif, sdif, 4866 dif, sdif,
4868 &udp_table, skb); 4867 &udp_table, skb);
4869#endif 4868#endif
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 676f3ad629f9..588f475019d4 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1166,8 +1166,8 @@ ip_proto_again:
1166 break; 1166 break;
1167 } 1167 }
1168 1168
1169 if (dissector_uses_key(flow_dissector, 1169 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
1170 FLOW_DISSECTOR_KEY_PORTS)) { 1170 !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
1171 key_ports = skb_flow_dissector_target(flow_dissector, 1171 key_ports = skb_flow_dissector_target(flow_dissector,
1172 FLOW_DISSECTOR_KEY_PORTS, 1172 FLOW_DISSECTOR_KEY_PORTS,
1173 target_container); 1173 target_container);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5da9552b186b..2b9fdbc43205 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -717,7 +717,8 @@ int netpoll_setup(struct netpoll *np)
717 717
718 read_lock_bh(&idev->lock); 718 read_lock_bh(&idev->lock);
719 list_for_each_entry(ifp, &idev->addr_list, if_list) { 719 list_for_each_entry(ifp, &idev->addr_list, if_list) {
720 if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) 720 if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
721 !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
721 continue; 722 continue;
722 np->local_ip.in6 = ifp->addr; 723 np->local_ip.in6 = ifp->addr;
723 err = 0; 724 err = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e01274bd5e3e..33d9227a8b80 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3367,7 +3367,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3367 cb->seq = 0; 3367 cb->seq = 0;
3368 } 3368 }
3369 ret = dumpit(skb, cb); 3369 ret = dumpit(skb, cb);
3370 if (ret < 0) 3370 if (ret)
3371 break; 3371 break;
3372 } 3372 }
3373 cb->family = idx; 3373 cb->family = idx;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 946de0e24c87..a8217e221e19 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4854,6 +4854,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4854 nf_reset(skb); 4854 nf_reset(skb);
4855 nf_reset_trace(skb); 4855 nf_reset_trace(skb);
4856 4856
4857#ifdef CONFIG_NET_SWITCHDEV
4858 skb->offload_fwd_mark = 0;
4859 skb->offload_mr_fwd_mark = 0;
4860#endif
4861
4857 if (!xnet) 4862 if (!xnet)
4858 return; 4863 return;
4859 4864
@@ -4944,6 +4949,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4944 * 4949 *
4945 * This is a helper to do that correctly considering GSO_BY_FRAGS. 4950 * This is a helper to do that correctly considering GSO_BY_FRAGS.
4946 * 4951 *
4952 * @skb: GSO skb
4953 *
4947 * @seg_len: The segmented length (from skb_gso_*_seglen). In the 4954 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
4948 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. 4955 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
4949 * 4956 *
diff --git a/net/core/sock.c b/net/core/sock.c
index 6fcc4bc07d19..080a880a1761 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3279,6 +3279,7 @@ int sock_load_diag_module(int family, int protocol)
3279 3279
3280#ifdef CONFIG_INET 3280#ifdef CONFIG_INET
3281 if (family == AF_INET && 3281 if (family == AF_INET &&
3282 protocol != IPPROTO_RAW &&
3282 !rcu_access_pointer(inet_protos[protocol])) 3283 !rcu_access_pointer(inet_protos[protocol]))
3283 return -ENOENT; 3284 return -ENOENT;
3284#endif 3285#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index bcb11f3a27c0..760a9e52e02b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
178} 178}
179 179
180static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf, 180static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
181 void *arg) 181 void *arg,
182 struct inet_frag_queue **prev)
182{ 183{
183 struct inet_frags *f = nf->f; 184 struct inet_frags *f = nf->f;
184 struct inet_frag_queue *q; 185 struct inet_frag_queue *q;
185 int err;
186 186
187 q = inet_frag_alloc(nf, f, arg); 187 q = inet_frag_alloc(nf, f, arg);
188 if (!q) 188 if (!q) {
189 *prev = ERR_PTR(-ENOMEM);
189 return NULL; 190 return NULL;
190 191 }
191 mod_timer(&q->timer, jiffies + nf->timeout); 192 mod_timer(&q->timer, jiffies + nf->timeout);
192 193
193 err = rhashtable_insert_fast(&nf->rhashtable, &q->node, 194 *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
194 f->rhash_params); 195 &q->node, f->rhash_params);
195 if (err < 0) { 196 if (*prev) {
196 q->flags |= INET_FRAG_COMPLETE; 197 q->flags |= INET_FRAG_COMPLETE;
197 inet_frag_kill(q); 198 inet_frag_kill(q);
198 inet_frag_destroy(q); 199 inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
204/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */ 205/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
205struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key) 206struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
206{ 207{
207 struct inet_frag_queue *fq; 208 struct inet_frag_queue *fq = NULL, *prev;
208 209
209 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) 210 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
210 return NULL; 211 return NULL;
211 212
212 rcu_read_lock(); 213 rcu_read_lock();
213 214
214 fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); 215 prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
215 if (fq) { 216 if (!prev)
217 fq = inet_frag_create(nf, key, &prev);
218 if (prev && !IS_ERR(prev)) {
219 fq = prev;
216 if (!refcount_inc_not_zero(&fq->refcnt)) 220 if (!refcount_inc_not_zero(&fq->refcnt))
217 fq = NULL; 221 fq = NULL;
218 rcu_read_unlock();
219 return fq;
220 } 222 }
221 rcu_read_unlock(); 223 rcu_read_unlock();
222 224 return fq;
223 return inet_frag_create(nf, key);
224} 225}
225EXPORT_SYMBOL(inet_frag_find); 226EXPORT_SYMBOL(inet_frag_find);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 9b0158fa431f..d6ee343fdb86 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -722,10 +722,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
722 if (ip_is_fragment(&iph)) { 722 if (ip_is_fragment(&iph)) {
723 skb = skb_share_check(skb, GFP_ATOMIC); 723 skb = skb_share_check(skb, GFP_ATOMIC);
724 if (skb) { 724 if (skb) {
725 if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) 725 if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
726 return skb; 726 kfree_skb(skb);
727 if (pskb_trim_rcsum(skb, netoff + len)) 727 return NULL;
728 return skb; 728 }
729 if (pskb_trim_rcsum(skb, netoff + len)) {
730 kfree_skb(skb);
731 return NULL;
732 }
729 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 733 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
730 if (ip_defrag(net, skb, user)) 734 if (ip_defrag(net, skb, user))
731 return NULL; 735 return NULL;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c09219e7f230..5dbec21856f4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -939,7 +939,7 @@ static int __ip_append_data(struct sock *sk,
939 unsigned int fraglen; 939 unsigned int fraglen;
940 unsigned int fraggap; 940 unsigned int fraggap;
941 unsigned int alloclen; 941 unsigned int alloclen;
942 unsigned int pagedlen = 0; 942 unsigned int pagedlen;
943 struct sk_buff *skb_prev; 943 struct sk_buff *skb_prev;
944alloc_new_skb: 944alloc_new_skb:
945 skb_prev = skb; 945 skb_prev = skb;
@@ -956,6 +956,7 @@ alloc_new_skb:
956 if (datalen > mtu - fragheaderlen) 956 if (datalen > mtu - fragheaderlen)
957 datalen = maxfraglen - fragheaderlen; 957 datalen = maxfraglen - fragheaderlen;
958 fraglen = datalen + fragheaderlen; 958 fraglen = datalen + fragheaderlen;
959 pagedlen = 0;
959 960
960 if ((flags & MSG_MORE) && 961 if ((flags & MSG_MORE) &&
961 !(rt->dst.dev->features&NETIF_F_SG)) 962 !(rt->dst.dev->features&NETIF_F_SG))
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 26c36cccabdc..fffcc130900e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1246,7 +1246,7 @@ int ip_setsockopt(struct sock *sk, int level,
1246 return -ENOPROTOOPT; 1246 return -ENOPROTOOPT;
1247 1247
1248 err = do_ip_setsockopt(sk, level, optname, optval, optlen); 1248 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1249#ifdef CONFIG_BPFILTER 1249#if IS_ENABLED(CONFIG_BPFILTER_UMH)
1250 if (optname >= BPFILTER_IPT_SO_SET_REPLACE && 1250 if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
1251 optname < BPFILTER_IPT_SET_MAX) 1251 optname < BPFILTER_IPT_SET_MAX)
1252 err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen); 1252 err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
@@ -1559,7 +1559,7 @@ int ip_getsockopt(struct sock *sk, int level,
1559 int err; 1559 int err;
1560 1560
1561 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0); 1561 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
1562#ifdef CONFIG_BPFILTER 1562#if IS_ENABLED(CONFIG_BPFILTER_UMH)
1563 if (optname >= BPFILTER_IPT_SO_GET_INFO && 1563 if (optname >= BPFILTER_IPT_SO_GET_INFO &&
1564 optname < BPFILTER_IPT_GET_MAX) 1564 optname < BPFILTER_IPT_GET_MAX)
1565 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen); 1565 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
@@ -1596,7 +1596,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1596 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 1596 err = do_ip_getsockopt(sk, level, optname, optval, optlen,
1597 MSG_CMSG_COMPAT); 1597 MSG_CMSG_COMPAT);
1598 1598
1599#ifdef CONFIG_BPFILTER 1599#if IS_ENABLED(CONFIG_BPFILTER_UMH)
1600 if (optname >= BPFILTER_IPT_SO_GET_INFO && 1600 if (optname >= BPFILTER_IPT_SO_GET_INFO &&
1601 optname < BPFILTER_IPT_GET_MAX) 1601 optname < BPFILTER_IPT_GET_MAX)
1602 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen); 1602 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index dde671e97829..c248e0dccbe1 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
80 80
81 iph->version = 4; 81 iph->version = 4;
82 iph->ihl = sizeof(struct iphdr) >> 2; 82 iph->ihl = sizeof(struct iphdr) >> 2;
83 iph->frag_off = df; 83 iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
84 iph->protocol = proto; 84 iph->protocol = proto;
85 iph->tos = tos; 85 iph->tos = tos;
86 iph->daddr = dst; 86 iph->daddr = dst;
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index ce1512b02cb2..fd3f9e8a74da 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
81 int ret; 81 int ret;
82 82
83 ret = xt_register_target(&masquerade_tg_reg); 83 ret = xt_register_target(&masquerade_tg_reg);
84 if (ret)
85 return ret;
84 86
85 if (ret == 0) 87 ret = nf_nat_masquerade_ipv4_register_notifier();
86 nf_nat_masquerade_ipv4_register_notifier(); 88 if (ret)
89 xt_unregister_target(&masquerade_tg_reg);
87 90
88 return ret; 91 return ret;
89} 92}
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index a9d5e013e555..41327bb99093 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -147,28 +147,50 @@ static struct notifier_block masq_inet_notifier = {
147 .notifier_call = masq_inet_event, 147 .notifier_call = masq_inet_event,
148}; 148};
149 149
150static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); 150static int masq_refcnt;
151static DEFINE_MUTEX(masq_mutex);
151 152
152void nf_nat_masquerade_ipv4_register_notifier(void) 153int nf_nat_masquerade_ipv4_register_notifier(void)
153{ 154{
155 int ret = 0;
156
157 mutex_lock(&masq_mutex);
154 /* check if the notifier was already set */ 158 /* check if the notifier was already set */
155 if (atomic_inc_return(&masquerade_notifier_refcount) > 1) 159 if (++masq_refcnt > 1)
156 return; 160 goto out_unlock;
157 161
158 /* Register for device down reports */ 162 /* Register for device down reports */
159 register_netdevice_notifier(&masq_dev_notifier); 163 ret = register_netdevice_notifier(&masq_dev_notifier);
164 if (ret)
165 goto err_dec;
160 /* Register IP address change reports */ 166 /* Register IP address change reports */
161 register_inetaddr_notifier(&masq_inet_notifier); 167 ret = register_inetaddr_notifier(&masq_inet_notifier);
168 if (ret)
169 goto err_unregister;
170
171 mutex_unlock(&masq_mutex);
172 return ret;
173
174err_unregister:
175 unregister_netdevice_notifier(&masq_dev_notifier);
176err_dec:
177 masq_refcnt--;
178out_unlock:
179 mutex_unlock(&masq_mutex);
180 return ret;
162} 181}
163EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier); 182EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
164 183
165void nf_nat_masquerade_ipv4_unregister_notifier(void) 184void nf_nat_masquerade_ipv4_unregister_notifier(void)
166{ 185{
186 mutex_lock(&masq_mutex);
167 /* check if the notifier still has clients */ 187 /* check if the notifier still has clients */
168 if (atomic_dec_return(&masquerade_notifier_refcount) > 0) 188 if (--masq_refcnt > 0)
169 return; 189 goto out_unlock;
170 190
171 unregister_netdevice_notifier(&masq_dev_notifier); 191 unregister_netdevice_notifier(&masq_dev_notifier);
172 unregister_inetaddr_notifier(&masq_inet_notifier); 192 unregister_inetaddr_notifier(&masq_inet_notifier);
193out_unlock:
194 mutex_unlock(&masq_mutex);
173} 195}
174EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier); 196EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index f1193e1e928a..6847de1d1db8 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
69 if (ret < 0) 69 if (ret < 0)
70 return ret; 70 return ret;
71 71
72 nf_nat_masquerade_ipv4_register_notifier(); 72 ret = nf_nat_masquerade_ipv4_register_notifier();
73 if (ret)
74 nft_unregister_expr(&nft_masq_ipv4_type);
73 75
74 return ret; 76 return ret;
75} 77}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2868ef28ce52..a9d9555a973f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -579,10 +579,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
579 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 579 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
580 u32 delta_us; 580 u32 delta_us;
581 581
582 if (!delta) 582 if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
583 delta = 1; 583 if (!delta)
584 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 584 delta = 1;
585 tcp_rcv_rtt_update(tp, delta_us, 0); 585 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
586 tcp_rcv_rtt_update(tp, delta_us, 0);
587 }
586 } 588 }
587} 589}
588 590
@@ -2910,9 +2912,11 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
2910 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2912 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2911 flag & FLAG_ACKED) { 2913 flag & FLAG_ACKED) {
2912 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 2914 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
2913 u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
2914 2915
2915 seq_rtt_us = ca_rtt_us = delta_us; 2916 if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
2917 seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
2918 ca_rtt_us = seq_rtt_us;
2919 }
2916 } 2920 }
2917 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */ 2921 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
2918 if (seq_rtt_us < 0) 2922 if (seq_rtt_us < 0)
@@ -4268,7 +4272,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
4268 * If the sack array is full, forget about the last one. 4272 * If the sack array is full, forget about the last one.
4269 */ 4273 */
4270 if (this_sack >= TCP_NUM_SACKS) { 4274 if (this_sack >= TCP_NUM_SACKS) {
4271 if (tp->compressed_ack) 4275 if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
4272 tcp_send_ack(sk); 4276 tcp_send_ack(sk);
4273 this_sack--; 4277 this_sack--;
4274 tp->rx_opt.num_sacks--; 4278 tp->rx_opt.num_sacks--;
@@ -4363,6 +4367,7 @@ static bool tcp_try_coalesce(struct sock *sk,
4363 if (TCP_SKB_CB(from)->has_rxtstamp) { 4367 if (TCP_SKB_CB(from)->has_rxtstamp) {
4364 TCP_SKB_CB(to)->has_rxtstamp = true; 4368 TCP_SKB_CB(to)->has_rxtstamp = true;
4365 to->tstamp = from->tstamp; 4369 to->tstamp = from->tstamp;
4370 skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
4366 } 4371 }
4367 4372
4368 return true; 4373 return true;
@@ -5188,7 +5193,17 @@ send_now:
5188 if (!tcp_is_sack(tp) || 5193 if (!tcp_is_sack(tp) ||
5189 tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) 5194 tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
5190 goto send_now; 5195 goto send_now;
5191 tp->compressed_ack++; 5196
5197 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
5198 tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
5199 if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
5200 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
5201 tp->compressed_ack - TCP_FASTRETRANS_THRESH);
5202 tp->compressed_ack = 0;
5203 }
5204
5205 if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
5206 goto send_now;
5192 5207
5193 if (hrtimer_is_queued(&tp->compressed_ack_timer)) 5208 if (hrtimer_is_queued(&tp->compressed_ack_timer))
5194 return; 5209 return;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9c34b97d365d..3f510cad0b3e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
180{ 180{
181 struct tcp_sock *tp = tcp_sk(sk); 181 struct tcp_sock *tp = tcp_sk(sk);
182 182
183 if (unlikely(tp->compressed_ack)) { 183 if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
184 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, 184 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
185 tp->compressed_ack); 185 tp->compressed_ack - TCP_FASTRETRANS_THRESH);
186 tp->compressed_ack = 0; 186 tp->compressed_ack = TCP_FASTRETRANS_THRESH;
187 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) 187 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
188 __sock_put(sk); 188 __sock_put(sk);
189 } 189 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 676020663ce8..091c53925e4d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -40,15 +40,17 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
40{ 40{
41 struct inet_connection_sock *icsk = inet_csk(sk); 41 struct inet_connection_sock *icsk = inet_csk(sk);
42 u32 elapsed, start_ts; 42 u32 elapsed, start_ts;
43 s32 remaining;
43 44
44 start_ts = tcp_retransmit_stamp(sk); 45 start_ts = tcp_retransmit_stamp(sk);
45 if (!icsk->icsk_user_timeout || !start_ts) 46 if (!icsk->icsk_user_timeout || !start_ts)
46 return icsk->icsk_rto; 47 return icsk->icsk_rto;
47 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts; 48 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
48 if (elapsed >= icsk->icsk_user_timeout) 49 remaining = icsk->icsk_user_timeout - elapsed;
50 if (remaining <= 0)
49 return 1; /* user timeout has passed; fire ASAP */ 51 return 1; /* user timeout has passed; fire ASAP */
50 else 52
51 return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed)); 53 return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
52} 54}
53 55
54/** 56/**
@@ -209,7 +211,7 @@ static bool retransmits_timed_out(struct sock *sk,
209 (boundary - linear_backoff_thresh) * TCP_RTO_MAX; 211 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
210 timeout = jiffies_to_msecs(timeout); 212 timeout = jiffies_to_msecs(timeout);
211 } 213 }
212 return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout; 214 return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
213} 215}
214 216
215/* A write timeout has occurred. Process the after effects. */ 217/* A write timeout has occurred. Process the after effects. */
@@ -740,7 +742,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
740 742
741 bh_lock_sock(sk); 743 bh_lock_sock(sk);
742 if (!sock_owned_by_user(sk)) { 744 if (!sock_owned_by_user(sk)) {
743 if (tp->compressed_ack) 745 if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
744 tcp_send_ack(sk); 746 tcp_send_ack(sk);
745 } else { 747 } else {
746 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, 748 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 63a808d5af15..045597b9a7c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
179static void addrconf_dad_work(struct work_struct *w); 179static void addrconf_dad_work(struct work_struct *w);
180static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, 180static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
181 bool send_na); 181 bool send_na);
182static void addrconf_dad_run(struct inet6_dev *idev); 182static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
183static void addrconf_rs_timer(struct timer_list *t); 183static void addrconf_rs_timer(struct timer_list *t);
184static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 184static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
185static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 185static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3439 void *ptr) 3439 void *ptr)
3440{ 3440{
3441 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3441 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3442 struct netdev_notifier_change_info *change_info;
3442 struct netdev_notifier_changeupper_info *info; 3443 struct netdev_notifier_changeupper_info *info;
3443 struct inet6_dev *idev = __in6_dev_get(dev); 3444 struct inet6_dev *idev = __in6_dev_get(dev);
3444 struct net *net = dev_net(dev); 3445 struct net *net = dev_net(dev);
@@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3513 break; 3514 break;
3514 } 3515 }
3515 3516
3516 if (idev) { 3517 if (!IS_ERR_OR_NULL(idev)) {
3517 if (idev->if_flags & IF_READY) { 3518 if (idev->if_flags & IF_READY) {
3518 /* device is already configured - 3519 /* device is already configured -
3519 * but resend MLD reports, we might 3520 * but resend MLD reports, we might
@@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3521 * multicast snooping switches 3522 * multicast snooping switches
3522 */ 3523 */
3523 ipv6_mc_up(idev); 3524 ipv6_mc_up(idev);
3525 change_info = ptr;
3526 if (change_info->flags_changed & IFF_NOARP)
3527 addrconf_dad_run(idev, true);
3524 rt6_sync_up(dev, RTNH_F_LINKDOWN); 3528 rt6_sync_up(dev, RTNH_F_LINKDOWN);
3525 break; 3529 break;
3526 } 3530 }
@@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3555 3559
3556 if (!IS_ERR_OR_NULL(idev)) { 3560 if (!IS_ERR_OR_NULL(idev)) {
3557 if (run_pending) 3561 if (run_pending)
3558 addrconf_dad_run(idev); 3562 addrconf_dad_run(idev, false);
3559 3563
3560 /* Device has an address by now */ 3564 /* Device has an address by now */
3561 rt6_sync_up(dev, RTNH_F_DEAD); 3565 rt6_sync_up(dev, RTNH_F_DEAD);
@@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4173 addrconf_verify_rtnl(); 4177 addrconf_verify_rtnl();
4174} 4178}
4175 4179
4176static void addrconf_dad_run(struct inet6_dev *idev) 4180static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4177{ 4181{
4178 struct inet6_ifaddr *ifp; 4182 struct inet6_ifaddr *ifp;
4179 4183
4180 read_lock_bh(&idev->lock); 4184 read_lock_bh(&idev->lock);
4181 list_for_each_entry(ifp, &idev->addr_list, if_list) { 4185 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4182 spin_lock(&ifp->lock); 4186 spin_lock(&ifp->lock);
4183 if (ifp->flags & IFA_F_TENTATIVE && 4187 if ((ifp->flags & IFA_F_TENTATIVE &&
4184 ifp->state == INET6_IFADDR_STATE_DAD) 4188 ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4189 if (restart)
4190 ifp->state = INET6_IFADDR_STATE_PREDAD;
4185 addrconf_dad_kick(ifp); 4191 addrconf_dad_kick(ifp);
4192 }
4186 spin_unlock(&ifp->lock); 4193 spin_unlock(&ifp->lock);
4187 } 4194 }
4188 read_unlock_bh(&idev->lock); 4195 read_unlock_bh(&idev->lock);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3f4d61017a69..f0cd291034f0 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1001,6 +1001,9 @@ static int __init inet6_init(void)
1001 err = ip6_flowlabel_init(); 1001 err = ip6_flowlabel_init();
1002 if (err) 1002 if (err)
1003 goto ip6_flowlabel_fail; 1003 goto ip6_flowlabel_fail;
1004 err = ipv6_anycast_init();
1005 if (err)
1006 goto ipv6_anycast_fail;
1004 err = addrconf_init(); 1007 err = addrconf_init();
1005 if (err) 1008 if (err)
1006 goto addrconf_fail; 1009 goto addrconf_fail;
@@ -1091,6 +1094,8 @@ ipv6_frag_fail:
1091ipv6_exthdrs_fail: 1094ipv6_exthdrs_fail:
1092 addrconf_cleanup(); 1095 addrconf_cleanup();
1093addrconf_fail: 1096addrconf_fail:
1097 ipv6_anycast_cleanup();
1098ipv6_anycast_fail:
1094 ip6_flowlabel_cleanup(); 1099 ip6_flowlabel_cleanup();
1095ip6_flowlabel_fail: 1100ip6_flowlabel_fail:
1096 ndisc_late_cleanup(); 1101 ndisc_late_cleanup();
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 4e0ff7031edd..94999058e110 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -44,8 +44,22 @@
44 44
45#include <net/checksum.h> 45#include <net/checksum.h>
46 46
47#define IN6_ADDR_HSIZE_SHIFT 8
48#define IN6_ADDR_HSIZE BIT(IN6_ADDR_HSIZE_SHIFT)
49/* anycast address hash table
50 */
51static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
52static DEFINE_SPINLOCK(acaddr_hash_lock);
53
47static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr); 54static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
48 55
56static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
57{
58 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
59
60 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
61}
62
49/* 63/*
50 * socket join an anycast group 64 * socket join an anycast group
51 */ 65 */
@@ -204,16 +218,39 @@ void ipv6_sock_ac_close(struct sock *sk)
204 rtnl_unlock(); 218 rtnl_unlock();
205} 219}
206 220
221static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
222{
223 unsigned int hash = inet6_acaddr_hash(net, &aca->aca_addr);
224
225 spin_lock(&acaddr_hash_lock);
226 hlist_add_head_rcu(&aca->aca_addr_lst, &inet6_acaddr_lst[hash]);
227 spin_unlock(&acaddr_hash_lock);
228}
229
230static void ipv6_del_acaddr_hash(struct ifacaddr6 *aca)
231{
232 spin_lock(&acaddr_hash_lock);
233 hlist_del_init_rcu(&aca->aca_addr_lst);
234 spin_unlock(&acaddr_hash_lock);
235}
236
207static void aca_get(struct ifacaddr6 *aca) 237static void aca_get(struct ifacaddr6 *aca)
208{ 238{
209 refcount_inc(&aca->aca_refcnt); 239 refcount_inc(&aca->aca_refcnt);
210} 240}
211 241
242static void aca_free_rcu(struct rcu_head *h)
243{
244 struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu);
245
246 fib6_info_release(aca->aca_rt);
247 kfree(aca);
248}
249
212static void aca_put(struct ifacaddr6 *ac) 250static void aca_put(struct ifacaddr6 *ac)
213{ 251{
214 if (refcount_dec_and_test(&ac->aca_refcnt)) { 252 if (refcount_dec_and_test(&ac->aca_refcnt)) {
215 fib6_info_release(ac->aca_rt); 253 call_rcu(&ac->rcu, aca_free_rcu);
216 kfree(ac);
217 } 254 }
218} 255}
219 256
@@ -229,6 +266,7 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
229 aca->aca_addr = *addr; 266 aca->aca_addr = *addr;
230 fib6_info_hold(f6i); 267 fib6_info_hold(f6i);
231 aca->aca_rt = f6i; 268 aca->aca_rt = f6i;
269 INIT_HLIST_NODE(&aca->aca_addr_lst);
232 aca->aca_users = 1; 270 aca->aca_users = 1;
233 /* aca_tstamp should be updated upon changes */ 271 /* aca_tstamp should be updated upon changes */
234 aca->aca_cstamp = aca->aca_tstamp = jiffies; 272 aca->aca_cstamp = aca->aca_tstamp = jiffies;
@@ -285,6 +323,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
285 aca_get(aca); 323 aca_get(aca);
286 write_unlock_bh(&idev->lock); 324 write_unlock_bh(&idev->lock);
287 325
326 ipv6_add_acaddr_hash(net, aca);
327
288 ip6_ins_rt(net, f6i); 328 ip6_ins_rt(net, f6i);
289 329
290 addrconf_join_solict(idev->dev, &aca->aca_addr); 330 addrconf_join_solict(idev->dev, &aca->aca_addr);
@@ -325,6 +365,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
325 else 365 else
326 idev->ac_list = aca->aca_next; 366 idev->ac_list = aca->aca_next;
327 write_unlock_bh(&idev->lock); 367 write_unlock_bh(&idev->lock);
368 ipv6_del_acaddr_hash(aca);
328 addrconf_leave_solict(idev, &aca->aca_addr); 369 addrconf_leave_solict(idev, &aca->aca_addr);
329 370
330 ip6_del_rt(dev_net(idev->dev), aca->aca_rt); 371 ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -352,6 +393,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
352 idev->ac_list = aca->aca_next; 393 idev->ac_list = aca->aca_next;
353 write_unlock_bh(&idev->lock); 394 write_unlock_bh(&idev->lock);
354 395
396 ipv6_del_acaddr_hash(aca);
397
355 addrconf_leave_solict(idev, &aca->aca_addr); 398 addrconf_leave_solict(idev, &aca->aca_addr);
356 399
357 ip6_del_rt(dev_net(idev->dev), aca->aca_rt); 400 ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -390,17 +433,25 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
390bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, 433bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
391 const struct in6_addr *addr) 434 const struct in6_addr *addr)
392{ 435{
436 unsigned int hash = inet6_acaddr_hash(net, addr);
437 struct net_device *nh_dev;
438 struct ifacaddr6 *aca;
393 bool found = false; 439 bool found = false;
394 440
395 rcu_read_lock(); 441 rcu_read_lock();
396 if (dev) 442 if (dev)
397 found = ipv6_chk_acast_dev(dev, addr); 443 found = ipv6_chk_acast_dev(dev, addr);
398 else 444 else
399 for_each_netdev_rcu(net, dev) 445 hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
400 if (ipv6_chk_acast_dev(dev, addr)) { 446 aca_addr_lst) {
447 nh_dev = fib6_info_nh_dev(aca->aca_rt);
448 if (!nh_dev || !net_eq(dev_net(nh_dev), net))
449 continue;
450 if (ipv6_addr_equal(&aca->aca_addr, addr)) {
401 found = true; 451 found = true;
402 break; 452 break;
403 } 453 }
454 }
404 rcu_read_unlock(); 455 rcu_read_unlock();
405 return found; 456 return found;
406} 457}
@@ -540,3 +591,24 @@ void ac6_proc_exit(struct net *net)
540 remove_proc_entry("anycast6", net->proc_net); 591 remove_proc_entry("anycast6", net->proc_net);
541} 592}
542#endif 593#endif
594
595/* Init / cleanup code
596 */
597int __init ipv6_anycast_init(void)
598{
599 int i;
600
601 for (i = 0; i < IN6_ADDR_HSIZE; i++)
602 INIT_HLIST_HEAD(&inet6_acaddr_lst[i]);
603 return 0;
604}
605
606void ipv6_anycast_cleanup(void)
607{
608 int i;
609
610 spin_lock(&acaddr_hash_lock);
611 for (i = 0; i < IN6_ADDR_HSIZE; i++)
612 WARN_ON(!hlist_empty(&inet6_acaddr_lst[i]));
613 spin_unlock(&acaddr_hash_lock);
614}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1b8bc008b53b..ae3786132c23 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -591,7 +591,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
591 591
592 /* fib entries are never clones */ 592 /* fib entries are never clones */
593 if (arg.filter.flags & RTM_F_CLONED) 593 if (arg.filter.flags & RTM_F_CLONED)
594 return skb->len; 594 goto out;
595 595
596 w = (void *)cb->args[2]; 596 w = (void *)cb->args[2];
597 if (!w) { 597 if (!w) {
@@ -621,7 +621,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
621 tb = fib6_get_table(net, arg.filter.table_id); 621 tb = fib6_get_table(net, arg.filter.table_id);
622 if (!tb) { 622 if (!tb) {
623 if (arg.filter.dump_all_families) 623 if (arg.filter.dump_all_families)
624 return skb->len; 624 goto out;
625 625
626 NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist"); 626 NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
627 return -ENOENT; 627 return -ENOENT;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 89e0d5118afe..827a3f5ff3bb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1354,7 +1354,7 @@ emsgsize:
1354 unsigned int fraglen; 1354 unsigned int fraglen;
1355 unsigned int fraggap; 1355 unsigned int fraggap;
1356 unsigned int alloclen; 1356 unsigned int alloclen;
1357 unsigned int pagedlen = 0; 1357 unsigned int pagedlen;
1358alloc_new_skb: 1358alloc_new_skb:
1359 /* There's no room in the current skb */ 1359 /* There's no room in the current skb */
1360 if (skb) 1360 if (skb)
@@ -1378,6 +1378,7 @@ alloc_new_skb:
1378 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen) 1378 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1379 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len; 1379 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1380 fraglen = datalen + fragheaderlen; 1380 fraglen = datalen + fragheaderlen;
1381 pagedlen = 0;
1381 1382
1382 if ((flags & MSG_MORE) && 1383 if ((flags & MSG_MORE) &&
1383 !(rt->dst.dev->features&NETIF_F_SG)) 1384 !(rt->dst.dev->features&NETIF_F_SG))
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 5ae8e1c51079..8b075f0bc351 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -24,7 +24,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
24 unsigned int hh_len; 24 unsigned int hh_len;
25 struct dst_entry *dst; 25 struct dst_entry *dst;
26 struct flowi6 fl6 = { 26 struct flowi6 fl6 = {
27 .flowi6_oif = sk ? sk->sk_bound_dev_if : 0, 27 .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
28 rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
28 .flowi6_mark = skb->mark, 29 .flowi6_mark = skb->mark,
29 .flowi6_uid = sock_net_uid(net, sk), 30 .flowi6_uid = sock_net_uid(net, sk),
30 .daddr = iph->daddr, 31 .daddr = iph->daddr,
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 491f808e356a..29c7f1915a96 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -58,8 +58,12 @@ static int __init masquerade_tg6_init(void)
58 int err; 58 int err;
59 59
60 err = xt_register_target(&masquerade_tg6_reg); 60 err = xt_register_target(&masquerade_tg6_reg);
61 if (err == 0) 61 if (err)
62 nf_nat_masquerade_ipv6_register_notifier(); 62 return err;
63
64 err = nf_nat_masquerade_ipv6_register_notifier();
65 if (err)
66 xt_unregister_target(&masquerade_tg6_reg);
63 67
64 return err; 68 return err;
65} 69}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index b8ac369f98ad..d219979c3e52 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -587,11 +587,16 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
587 */ 587 */
588 ret = -EINPROGRESS; 588 ret = -EINPROGRESS;
589 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 589 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
590 fq->q.meat == fq->q.len && 590 fq->q.meat == fq->q.len) {
591 nf_ct_frag6_reasm(fq, skb, dev)) 591 unsigned long orefdst = skb->_skb_refdst;
592 ret = 0; 592
593 else 593 skb->_skb_refdst = 0UL;
594 if (nf_ct_frag6_reasm(fq, skb, dev))
595 ret = 0;
596 skb->_skb_refdst = orefdst;
597 } else {
594 skb_dst_drop(skb); 598 skb_dst_drop(skb);
599 }
595 600
596out_unlock: 601out_unlock:
597 spin_unlock_bh(&fq->q.lock); 602 spin_unlock_bh(&fq->q.lock);
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 3e4bf2286abe..0ad0da5a2600 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -132,8 +132,8 @@ static void iterate_cleanup_work(struct work_struct *work)
132 * of ipv6 addresses being deleted), we also need to add an upper 132 * of ipv6 addresses being deleted), we also need to add an upper
133 * limit to the number of queued work items. 133 * limit to the number of queued work items.
134 */ 134 */
135static int masq_inet_event(struct notifier_block *this, 135static int masq_inet6_event(struct notifier_block *this,
136 unsigned long event, void *ptr) 136 unsigned long event, void *ptr)
137{ 137{
138 struct inet6_ifaddr *ifa = ptr; 138 struct inet6_ifaddr *ifa = ptr;
139 const struct net_device *dev; 139 const struct net_device *dev;
@@ -171,30 +171,53 @@ static int masq_inet_event(struct notifier_block *this,
171 return NOTIFY_DONE; 171 return NOTIFY_DONE;
172} 172}
173 173
174static struct notifier_block masq_inet_notifier = { 174static struct notifier_block masq_inet6_notifier = {
175 .notifier_call = masq_inet_event, 175 .notifier_call = masq_inet6_event,
176}; 176};
177 177
178static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); 178static int masq_refcnt;
179static DEFINE_MUTEX(masq_mutex);
179 180
180void nf_nat_masquerade_ipv6_register_notifier(void) 181int nf_nat_masquerade_ipv6_register_notifier(void)
181{ 182{
183 int ret = 0;
184
185 mutex_lock(&masq_mutex);
182 /* check if the notifier is already set */ 186 /* check if the notifier is already set */
183 if (atomic_inc_return(&masquerade_notifier_refcount) > 1) 187 if (++masq_refcnt > 1)
184 return; 188 goto out_unlock;
189
190 ret = register_netdevice_notifier(&masq_dev_notifier);
191 if (ret)
192 goto err_dec;
193
194 ret = register_inet6addr_notifier(&masq_inet6_notifier);
195 if (ret)
196 goto err_unregister;
185 197
186 register_netdevice_notifier(&masq_dev_notifier); 198 mutex_unlock(&masq_mutex);
187 register_inet6addr_notifier(&masq_inet_notifier); 199 return ret;
200
201err_unregister:
202 unregister_netdevice_notifier(&masq_dev_notifier);
203err_dec:
204 masq_refcnt--;
205out_unlock:
206 mutex_unlock(&masq_mutex);
207 return ret;
188} 208}
189EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier); 209EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
190 210
191void nf_nat_masquerade_ipv6_unregister_notifier(void) 211void nf_nat_masquerade_ipv6_unregister_notifier(void)
192{ 212{
213 mutex_lock(&masq_mutex);
193 /* check if the notifier still has clients */ 214 /* check if the notifier still has clients */
194 if (atomic_dec_return(&masquerade_notifier_refcount) > 0) 215 if (--masq_refcnt > 0)
195 return; 216 goto out_unlock;
196 217
197 unregister_inet6addr_notifier(&masq_inet_notifier); 218 unregister_inet6addr_notifier(&masq_inet6_notifier);
198 unregister_netdevice_notifier(&masq_dev_notifier); 219 unregister_netdevice_notifier(&masq_dev_notifier);
220out_unlock:
221 mutex_unlock(&masq_mutex);
199} 222}
200EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier); 223EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index dd0122f3cffe..e06c82e9dfcd 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -70,7 +70,9 @@ static int __init nft_masq_ipv6_module_init(void)
70 if (ret < 0) 70 if (ret < 0)
71 return ret; 71 return ret;
72 72
73 nf_nat_masquerade_ipv6_register_notifier(); 73 ret = nf_nat_masquerade_ipv6_register_notifier();
74 if (ret)
75 nft_unregister_expr(&nft_masq_ipv6_type);
74 76
75 return ret; 77 return ret;
76} 78}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2a7423c39456..059f0531f7c1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2232,8 +2232,7 @@ static void ip6_link_failure(struct sk_buff *skb)
2232 if (rt) { 2232 if (rt) {
2233 rcu_read_lock(); 2233 rcu_read_lock();
2234 if (rt->rt6i_flags & RTF_CACHE) { 2234 if (rt->rt6i_flags & RTF_CACHE) {
2235 if (dst_hold_safe(&rt->dst)) 2235 rt6_remove_exception_rt(rt);
2236 rt6_remove_exception_rt(rt);
2237 } else { 2236 } else {
2238 struct fib6_info *from; 2237 struct fib6_info *from;
2239 struct fib6_node *fn; 2238 struct fib6_node *fn;
@@ -2360,10 +2359,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2360 2359
2361void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) 2360void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2362{ 2361{
2362 int oif = sk->sk_bound_dev_if;
2363 struct dst_entry *dst; 2363 struct dst_entry *dst;
2364 2364
2365 ip6_update_pmtu(skb, sock_net(sk), mtu, 2365 if (!oif && skb->dev)
2366 sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid); 2366 oif = l3mdev_master_ifindex(skb->dev);
2367
2368 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2367 2369
2368 dst = __sk_dst_get(sk); 2370 dst = __sk_dst_get(sk);
2369 if (!dst || !dst->obsolete || 2371 if (!dst || !dst->obsolete ||
@@ -3214,8 +3216,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3214 if (cfg->fc_flags & RTF_GATEWAY && 3216 if (cfg->fc_flags & RTF_GATEWAY &&
3215 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) 3217 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3216 goto out; 3218 goto out;
3217 if (dst_hold_safe(&rt->dst)) 3219
3218 rc = rt6_remove_exception_rt(rt); 3220 rc = rt6_remove_exception_rt(rt);
3219out: 3221out:
3220 return rc; 3222 return rc;
3221} 3223}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 82cdf9020b53..26f1d435696a 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1490 goto err_sock; 1490 goto err_sock;
1491 } 1491 }
1492 1492
1493 sk = sock->sk;
1494
1495 sock_hold(sk);
1496 tunnel->sock = sk;
1497 tunnel->l2tp_net = net; 1493 tunnel->l2tp_net = net;
1498
1499 pn = l2tp_pernet(net); 1494 pn = l2tp_pernet(net);
1500 1495
1501 spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1496 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1510 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); 1505 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1511 spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1506 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1512 1507
1508 sk = sock->sk;
1509 sock_hold(sk);
1510 tunnel->sock = sk;
1511
1513 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { 1512 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1514 struct udp_tunnel_sock_cfg udp_cfg = { 1513 struct udp_tunnel_sock_cfg udp_cfg = {
1515 .sk_user_data = tunnel, 1514 .sk_user_data = tunnel,
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index bc4bd247bb7d..1577f2f76060 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -55,11 +55,15 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
55MODULE_DESCRIPTION("core IP set support"); 55MODULE_DESCRIPTION("core IP set support");
56MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET); 56MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
57 57
58/* When the nfnl mutex is held: */ 58/* When the nfnl mutex or ip_set_ref_lock is held: */
59#define ip_set_dereference(p) \ 59#define ip_set_dereference(p) \
60 rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET)) 60 rcu_dereference_protected(p, \
61 lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
62 lockdep_is_held(&ip_set_ref_lock))
61#define ip_set(inst, id) \ 63#define ip_set(inst, id) \
62 ip_set_dereference((inst)->ip_set_list)[id] 64 ip_set_dereference((inst)->ip_set_list)[id]
65#define ip_set_ref_netlink(inst,id) \
66 rcu_dereference_raw((inst)->ip_set_list)[id]
63 67
64/* The set types are implemented in modules and registered set types 68/* The set types are implemented in modules and registered set types
65 * can be found in ip_set_type_list. Adding/deleting types is 69 * can be found in ip_set_type_list. Adding/deleting types is
@@ -693,21 +697,20 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
693EXPORT_SYMBOL_GPL(ip_set_put_byindex); 697EXPORT_SYMBOL_GPL(ip_set_put_byindex);
694 698
695/* Get the name of a set behind a set index. 699/* Get the name of a set behind a set index.
696 * We assume the set is referenced, so it does exist and 700 * Set itself is protected by RCU, but its name isn't: to protect against
697 * can't be destroyed. The set cannot be renamed due to 701 * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
698 * the referencing either. 702 * name.
699 *
700 */ 703 */
701const char * 704void
702ip_set_name_byindex(struct net *net, ip_set_id_t index) 705ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
703{ 706{
704 const struct ip_set *set = ip_set_rcu_get(net, index); 707 struct ip_set *set = ip_set_rcu_get(net, index);
705 708
706 BUG_ON(!set); 709 BUG_ON(!set);
707 BUG_ON(set->ref == 0);
708 710
709 /* Referenced, so it's safe */ 711 read_lock_bh(&ip_set_ref_lock);
710 return set->name; 712 strncpy(name, set->name, IPSET_MAXNAMELEN);
713 read_unlock_bh(&ip_set_ref_lock);
711} 714}
712EXPORT_SYMBOL_GPL(ip_set_name_byindex); 715EXPORT_SYMBOL_GPL(ip_set_name_byindex);
713 716
@@ -961,7 +964,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
961 /* Wraparound */ 964 /* Wraparound */
962 goto cleanup; 965 goto cleanup;
963 966
964 list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL); 967 list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
965 if (!list) 968 if (!list)
966 goto cleanup; 969 goto cleanup;
967 /* nfnl mutex is held, both lists are valid */ 970 /* nfnl mutex is held, both lists are valid */
@@ -973,7 +976,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
973 /* Use new list */ 976 /* Use new list */
974 index = inst->ip_set_max; 977 index = inst->ip_set_max;
975 inst->ip_set_max = i; 978 inst->ip_set_max = i;
976 kfree(tmp); 979 kvfree(tmp);
977 ret = 0; 980 ret = 0;
978 } else if (ret) { 981 } else if (ret) {
979 goto cleanup; 982 goto cleanup;
@@ -1153,7 +1156,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
1153 if (!set) 1156 if (!set)
1154 return -ENOENT; 1157 return -ENOENT;
1155 1158
1156 read_lock_bh(&ip_set_ref_lock); 1159 write_lock_bh(&ip_set_ref_lock);
1157 if (set->ref != 0) { 1160 if (set->ref != 0) {
1158 ret = -IPSET_ERR_REFERENCED; 1161 ret = -IPSET_ERR_REFERENCED;
1159 goto out; 1162 goto out;
@@ -1170,7 +1173,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
1170 strncpy(set->name, name2, IPSET_MAXNAMELEN); 1173 strncpy(set->name, name2, IPSET_MAXNAMELEN);
1171 1174
1172out: 1175out:
1173 read_unlock_bh(&ip_set_ref_lock); 1176 write_unlock_bh(&ip_set_ref_lock);
1174 return ret; 1177 return ret;
1175} 1178}
1176 1179
@@ -1252,7 +1255,7 @@ ip_set_dump_done(struct netlink_callback *cb)
1252 struct ip_set_net *inst = 1255 struct ip_set_net *inst =
1253 (struct ip_set_net *)cb->args[IPSET_CB_NET]; 1256 (struct ip_set_net *)cb->args[IPSET_CB_NET];
1254 ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX]; 1257 ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
1255 struct ip_set *set = ip_set(inst, index); 1258 struct ip_set *set = ip_set_ref_netlink(inst, index);
1256 1259
1257 if (set->variant->uref) 1260 if (set->variant->uref)
1258 set->variant->uref(set, cb, false); 1261 set->variant->uref(set, cb, false);
@@ -1441,7 +1444,7 @@ next_set:
1441release_refcount: 1444release_refcount:
1442 /* If there was an error or set is done, release set */ 1445 /* If there was an error or set is done, release set */
1443 if (ret || !cb->args[IPSET_CB_ARG0]) { 1446 if (ret || !cb->args[IPSET_CB_ARG0]) {
1444 set = ip_set(inst, index); 1447 set = ip_set_ref_netlink(inst, index);
1445 if (set->variant->uref) 1448 if (set->variant->uref)
1446 set->variant->uref(set, cb, false); 1449 set->variant->uref(set, cb, false);
1447 pr_debug("release set %s\n", set->name); 1450 pr_debug("release set %s\n", set->name);
@@ -2059,7 +2062,7 @@ ip_set_net_init(struct net *net)
2059 if (inst->ip_set_max >= IPSET_INVALID_ID) 2062 if (inst->ip_set_max >= IPSET_INVALID_ID)
2060 inst->ip_set_max = IPSET_INVALID_ID - 1; 2063 inst->ip_set_max = IPSET_INVALID_ID - 1;
2061 2064
2062 list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL); 2065 list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
2063 if (!list) 2066 if (!list)
2064 return -ENOMEM; 2067 return -ENOMEM;
2065 inst->is_deleted = false; 2068 inst->is_deleted = false;
@@ -2087,7 +2090,7 @@ ip_set_net_exit(struct net *net)
2087 } 2090 }
2088 } 2091 }
2089 nfnl_unlock(NFNL_SUBSYS_IPSET); 2092 nfnl_unlock(NFNL_SUBSYS_IPSET);
2090 kfree(rcu_dereference_protected(inst->ip_set_list, 1)); 2093 kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
2091} 2094}
2092 2095
2093static struct pernet_operations ip_set_net_ops = { 2096static struct pernet_operations ip_set_net_ops = {
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index d391485a6acd..613e18e720a4 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
213 213
214 if (tb[IPSET_ATTR_CIDR]) { 214 if (tb[IPSET_ATTR_CIDR]) {
215 e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]); 215 e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
216 if (!e.cidr[0] || e.cidr[0] > HOST_MASK) 216 if (e.cidr[0] > HOST_MASK)
217 return -IPSET_ERR_INVALID_CIDR; 217 return -IPSET_ERR_INVALID_CIDR;
218 } 218 }
219 219
220 if (tb[IPSET_ATTR_CIDR2]) { 220 if (tb[IPSET_ATTR_CIDR2]) {
221 e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]); 221 e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
222 if (!e.cidr[1] || e.cidr[1] > HOST_MASK) 222 if (e.cidr[1] > HOST_MASK)
223 return -IPSET_ERR_INVALID_CIDR; 223 return -IPSET_ERR_INVALID_CIDR;
224 } 224 }
225 225
@@ -493,13 +493,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
493 493
494 if (tb[IPSET_ATTR_CIDR]) { 494 if (tb[IPSET_ATTR_CIDR]) {
495 e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]); 495 e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
496 if (!e.cidr[0] || e.cidr[0] > HOST_MASK) 496 if (e.cidr[0] > HOST_MASK)
497 return -IPSET_ERR_INVALID_CIDR; 497 return -IPSET_ERR_INVALID_CIDR;
498 } 498 }
499 499
500 if (tb[IPSET_ATTR_CIDR2]) { 500 if (tb[IPSET_ATTR_CIDR2]) {
501 e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]); 501 e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
502 if (!e.cidr[1] || e.cidr[1] > HOST_MASK) 502 if (e.cidr[1] > HOST_MASK)
503 return -IPSET_ERR_INVALID_CIDR; 503 return -IPSET_ERR_INVALID_CIDR;
504 } 504 }
505 505
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 072a658fde04..4eef55da0878 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -148,9 +148,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
148{ 148{
149 struct set_elem *e = container_of(rcu, struct set_elem, rcu); 149 struct set_elem *e = container_of(rcu, struct set_elem, rcu);
150 struct ip_set *set = e->set; 150 struct ip_set *set = e->set;
151 struct list_set *map = set->data;
152 151
153 ip_set_put_byindex(map->net, e->id);
154 ip_set_ext_destroy(set, e); 152 ip_set_ext_destroy(set, e);
155 kfree(e); 153 kfree(e);
156} 154}
@@ -158,15 +156,21 @@ __list_set_del_rcu(struct rcu_head * rcu)
158static inline void 156static inline void
159list_set_del(struct ip_set *set, struct set_elem *e) 157list_set_del(struct ip_set *set, struct set_elem *e)
160{ 158{
159 struct list_set *map = set->data;
160
161 set->elements--; 161 set->elements--;
162 list_del_rcu(&e->list); 162 list_del_rcu(&e->list);
163 ip_set_put_byindex(map->net, e->id);
163 call_rcu(&e->rcu, __list_set_del_rcu); 164 call_rcu(&e->rcu, __list_set_del_rcu);
164} 165}
165 166
166static inline void 167static inline void
167list_set_replace(struct set_elem *e, struct set_elem *old) 168list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
168{ 169{
170 struct list_set *map = set->data;
171
169 list_replace_rcu(&old->list, &e->list); 172 list_replace_rcu(&old->list, &e->list);
173 ip_set_put_byindex(map->net, old->id);
170 call_rcu(&old->rcu, __list_set_del_rcu); 174 call_rcu(&old->rcu, __list_set_del_rcu);
171} 175}
172 176
@@ -298,7 +302,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
298 INIT_LIST_HEAD(&e->list); 302 INIT_LIST_HEAD(&e->list);
299 list_set_init_extensions(set, ext, e); 303 list_set_init_extensions(set, ext, e);
300 if (n) 304 if (n)
301 list_set_replace(e, n); 305 list_set_replace(set, e, n);
302 else if (next) 306 else if (next)
303 list_add_tail_rcu(&e->list, &next->list); 307 list_add_tail_rcu(&e->list, &next->list);
304 else if (prev) 308 else if (prev)
@@ -486,6 +490,7 @@ list_set_list(const struct ip_set *set,
486 const struct list_set *map = set->data; 490 const struct list_set *map = set->data;
487 struct nlattr *atd, *nested; 491 struct nlattr *atd, *nested;
488 u32 i = 0, first = cb->args[IPSET_CB_ARG0]; 492 u32 i = 0, first = cb->args[IPSET_CB_ARG0];
493 char name[IPSET_MAXNAMELEN];
489 struct set_elem *e; 494 struct set_elem *e;
490 int ret = 0; 495 int ret = 0;
491 496
@@ -504,8 +509,8 @@ list_set_list(const struct ip_set *set,
504 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 509 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
505 if (!nested) 510 if (!nested)
506 goto nla_put_failure; 511 goto nla_put_failure;
507 if (nla_put_string(skb, IPSET_ATTR_NAME, 512 ip_set_name_byindex(map->net, e->id, name);
508 ip_set_name_byindex(map->net, e->id))) 513 if (nla_put_string(skb, IPSET_ATTR_NAME, name))
509 goto nla_put_failure; 514 goto nla_put_failure;
510 if (ip_set_put_extensions(skb, set, e, true)) 515 if (ip_set_put_extensions(skb, set, e, true))
511 goto nla_put_failure; 516 goto nla_put_failure;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 83395bf6dc35..432141f04af3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3980,6 +3980,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
3980 3980
3981static struct notifier_block ip_vs_dst_notifier = { 3981static struct notifier_block ip_vs_dst_notifier = {
3982 .notifier_call = ip_vs_dst_event, 3982 .notifier_call = ip_vs_dst_event,
3983#ifdef CONFIG_IP_VS_IPV6
3984 .priority = ADDRCONF_NOTIFY_PRIORITY + 5,
3985#endif
3983}; 3986};
3984 3987
3985int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs) 3988int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 02ca7df793f5..b6d0f6deea86 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -49,6 +49,7 @@ struct nf_conncount_tuple {
49 struct nf_conntrack_zone zone; 49 struct nf_conntrack_zone zone;
50 int cpu; 50 int cpu;
51 u32 jiffies32; 51 u32 jiffies32;
52 bool dead;
52 struct rcu_head rcu_head; 53 struct rcu_head rcu_head;
53}; 54};
54 55
@@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
106 conn->zone = *zone; 107 conn->zone = *zone;
107 conn->cpu = raw_smp_processor_id(); 108 conn->cpu = raw_smp_processor_id();
108 conn->jiffies32 = (u32)jiffies; 109 conn->jiffies32 = (u32)jiffies;
109 spin_lock(&list->list_lock); 110 conn->dead = false;
111 spin_lock_bh(&list->list_lock);
110 if (list->dead == true) { 112 if (list->dead == true) {
111 kmem_cache_free(conncount_conn_cachep, conn); 113 kmem_cache_free(conncount_conn_cachep, conn);
112 spin_unlock(&list->list_lock); 114 spin_unlock_bh(&list->list_lock);
113 return NF_CONNCOUNT_SKIP; 115 return NF_CONNCOUNT_SKIP;
114 } 116 }
115 list_add_tail(&conn->node, &list->head); 117 list_add_tail(&conn->node, &list->head);
116 list->count++; 118 list->count++;
117 spin_unlock(&list->list_lock); 119 spin_unlock_bh(&list->list_lock);
118 return NF_CONNCOUNT_ADDED; 120 return NF_CONNCOUNT_ADDED;
119} 121}
120EXPORT_SYMBOL_GPL(nf_conncount_add); 122EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
132{ 134{
133 bool free_entry = false; 135 bool free_entry = false;
134 136
135 spin_lock(&list->list_lock); 137 spin_lock_bh(&list->list_lock);
136 138
137 if (list->count == 0) { 139 if (conn->dead) {
138 spin_unlock(&list->list_lock); 140 spin_unlock_bh(&list->list_lock);
139 return free_entry; 141 return free_entry;
140 } 142 }
141 143
142 list->count--; 144 list->count--;
145 conn->dead = true;
143 list_del_rcu(&conn->node); 146 list_del_rcu(&conn->node);
144 if (list->count == 0) 147 if (list->count == 0) {
148 list->dead = true;
145 free_entry = true; 149 free_entry = true;
150 }
146 151
147 spin_unlock(&list->list_lock); 152 spin_unlock_bh(&list->list_lock);
148 call_rcu(&conn->rcu_head, __conn_free); 153 call_rcu(&conn->rcu_head, __conn_free);
149 return free_entry; 154 return free_entry;
150} 155}
@@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
245{ 250{
246 spin_lock_init(&list->list_lock); 251 spin_lock_init(&list->list_lock);
247 INIT_LIST_HEAD(&list->head); 252 INIT_LIST_HEAD(&list->head);
248 list->count = 1; 253 list->count = 0;
249 list->dead = false; 254 list->dead = false;
250} 255}
251EXPORT_SYMBOL_GPL(nf_conncount_list_init); 256EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
259 struct nf_conn *found_ct; 264 struct nf_conn *found_ct;
260 unsigned int collected = 0; 265 unsigned int collected = 0;
261 bool free_entry = false; 266 bool free_entry = false;
267 bool ret = false;
262 268
263 list_for_each_entry_safe(conn, conn_n, &list->head, node) { 269 list_for_each_entry_safe(conn, conn_n, &list->head, node) {
264 found = find_or_evict(net, list, conn, &free_entry); 270 found = find_or_evict(net, list, conn, &free_entry);
@@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
288 if (collected > CONNCOUNT_GC_MAX_NODES) 294 if (collected > CONNCOUNT_GC_MAX_NODES)
289 return false; 295 return false;
290 } 296 }
291 return false; 297
298 spin_lock_bh(&list->list_lock);
299 if (!list->count) {
300 list->dead = true;
301 ret = true;
302 }
303 spin_unlock_bh(&list->list_lock);
304
305 return ret;
292} 306}
293EXPORT_SYMBOL_GPL(nf_conncount_gc_list); 307EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
294 308
@@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
309 while (gc_count) { 323 while (gc_count) {
310 rbconn = gc_nodes[--gc_count]; 324 rbconn = gc_nodes[--gc_count];
311 spin_lock(&rbconn->list.list_lock); 325 spin_lock(&rbconn->list.list_lock);
312 if (rbconn->list.count == 0 && rbconn->list.dead == false) { 326 rb_erase(&rbconn->node, root);
313 rbconn->list.dead = true; 327 call_rcu(&rbconn->rcu_head, __tree_nodes_free);
314 rb_erase(&rbconn->node, root);
315 call_rcu(&rbconn->rcu_head, __tree_nodes_free);
316 }
317 spin_unlock(&rbconn->list.list_lock); 328 spin_unlock(&rbconn->list.list_lock);
318 } 329 }
319} 330}
@@ -414,6 +425,7 @@ insert_tree(struct net *net,
414 nf_conncount_list_init(&rbconn->list); 425 nf_conncount_list_init(&rbconn->list);
415 list_add(&conn->node, &rbconn->list.head); 426 list_add(&conn->node, &rbconn->list.head);
416 count = 1; 427 count = 1;
428 rbconn->list.count = count;
417 429
418 rb_link_node(&rbconn->node, parent, rbnode); 430 rb_link_node(&rbconn->node, parent, rbnode);
419 rb_insert_color(&rbconn->node, root); 431 rb_insert_color(&rbconn->node, root);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ca1168d67fac..e92e749aff53 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1073,19 +1073,22 @@ static unsigned int early_drop_list(struct net *net,
1073 return drops; 1073 return drops;
1074} 1074}
1075 1075
1076static noinline int early_drop(struct net *net, unsigned int _hash) 1076static noinline int early_drop(struct net *net, unsigned int hash)
1077{ 1077{
1078 unsigned int i; 1078 unsigned int i, bucket;
1079 1079
1080 for (i = 0; i < NF_CT_EVICTION_RANGE; i++) { 1080 for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
1081 struct hlist_nulls_head *ct_hash; 1081 struct hlist_nulls_head *ct_hash;
1082 unsigned int hash, hsize, drops; 1082 unsigned int hsize, drops;
1083 1083
1084 rcu_read_lock(); 1084 rcu_read_lock();
1085 nf_conntrack_get_ht(&ct_hash, &hsize); 1085 nf_conntrack_get_ht(&ct_hash, &hsize);
1086 hash = reciprocal_scale(_hash++, hsize); 1086 if (!i)
1087 bucket = reciprocal_scale(hash, hsize);
1088 else
1089 bucket = (bucket + 1) % hsize;
1087 1090
1088 drops = early_drop_list(net, &ct_hash[hash]); 1091 drops = early_drop_list(net, &ct_hash[bucket]);
1089 rcu_read_unlock(); 1092 rcu_read_unlock();
1090 1093
1091 if (drops) { 1094 if (drops) {
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 171e9e122e5f..023c1445bc39 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -384,11 +384,6 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
384 }, 384 },
385}; 385};
386 386
387static inline struct nf_dccp_net *dccp_pernet(struct net *net)
388{
389 return &net->ct.nf_ct_proto.dccp;
390}
391
392static noinline bool 387static noinline bool
393dccp_new(struct nf_conn *ct, const struct sk_buff *skb, 388dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
394 const struct dccp_hdr *dh) 389 const struct dccp_hdr *dh)
@@ -401,7 +396,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
401 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE]; 396 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
402 switch (state) { 397 switch (state) {
403 default: 398 default:
404 dn = dccp_pernet(net); 399 dn = nf_dccp_pernet(net);
405 if (dn->dccp_loose == 0) { 400 if (dn->dccp_loose == 0) {
406 msg = "not picking up existing connection "; 401 msg = "not picking up existing connection ";
407 goto out_invalid; 402 goto out_invalid;
@@ -568,7 +563,7 @@ static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
568 563
569 timeouts = nf_ct_timeout_lookup(ct); 564 timeouts = nf_ct_timeout_lookup(ct);
570 if (!timeouts) 565 if (!timeouts)
571 timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout; 566 timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
572 nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); 567 nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
573 568
574 return NF_ACCEPT; 569 return NF_ACCEPT;
@@ -681,7 +676,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
681static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], 676static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
682 struct net *net, void *data) 677 struct net *net, void *data)
683{ 678{
684 struct nf_dccp_net *dn = dccp_pernet(net); 679 struct nf_dccp_net *dn = nf_dccp_pernet(net);
685 unsigned int *timeouts = data; 680 unsigned int *timeouts = data;
686 int i; 681 int i;
687 682
@@ -814,7 +809,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
814 809
815static int dccp_init_net(struct net *net) 810static int dccp_init_net(struct net *net)
816{ 811{
817 struct nf_dccp_net *dn = dccp_pernet(net); 812 struct nf_dccp_net *dn = nf_dccp_pernet(net);
818 struct nf_proto_net *pn = &dn->pn; 813 struct nf_proto_net *pn = &dn->pn;
819 814
820 if (!pn->users) { 815 if (!pn->users) {
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index e10e867e0b55..5da19d5fbc76 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -27,11 +27,6 @@ static bool nf_generic_should_process(u8 proto)
27 } 27 }
28} 28}
29 29
30static inline struct nf_generic_net *generic_pernet(struct net *net)
31{
32 return &net->ct.nf_ct_proto.generic;
33}
34
35static bool generic_pkt_to_tuple(const struct sk_buff *skb, 30static bool generic_pkt_to_tuple(const struct sk_buff *skb,
36 unsigned int dataoff, 31 unsigned int dataoff,
37 struct net *net, struct nf_conntrack_tuple *tuple) 32 struct net *net, struct nf_conntrack_tuple *tuple)
@@ -58,7 +53,7 @@ static int generic_packet(struct nf_conn *ct,
58 } 53 }
59 54
60 if (!timeout) 55 if (!timeout)
61 timeout = &generic_pernet(nf_ct_net(ct))->timeout; 56 timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
62 57
63 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); 58 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
64 return NF_ACCEPT; 59 return NF_ACCEPT;
@@ -72,7 +67,7 @@ static int generic_packet(struct nf_conn *ct,
72static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], 67static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
73 struct net *net, void *data) 68 struct net *net, void *data)
74{ 69{
75 struct nf_generic_net *gn = generic_pernet(net); 70 struct nf_generic_net *gn = nf_generic_pernet(net);
76 unsigned int *timeout = data; 71 unsigned int *timeout = data;
77 72
78 if (!timeout) 73 if (!timeout)
@@ -138,7 +133,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
138 133
139static int generic_init_net(struct net *net) 134static int generic_init_net(struct net *net)
140{ 135{
141 struct nf_generic_net *gn = generic_pernet(net); 136 struct nf_generic_net *gn = nf_generic_pernet(net);
142 struct nf_proto_net *pn = &gn->pn; 137 struct nf_proto_net *pn = &gn->pn;
143 138
144 gn->timeout = nf_ct_generic_timeout; 139 gn->timeout = nf_ct_generic_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 9b48dc8b4b88..2a5e56c6d8d9 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -43,24 +43,12 @@
43#include <linux/netfilter/nf_conntrack_proto_gre.h> 43#include <linux/netfilter/nf_conntrack_proto_gre.h>
44#include <linux/netfilter/nf_conntrack_pptp.h> 44#include <linux/netfilter/nf_conntrack_pptp.h>
45 45
46enum grep_conntrack {
47 GRE_CT_UNREPLIED,
48 GRE_CT_REPLIED,
49 GRE_CT_MAX
50};
51
52static const unsigned int gre_timeouts[GRE_CT_MAX] = { 46static const unsigned int gre_timeouts[GRE_CT_MAX] = {
53 [GRE_CT_UNREPLIED] = 30*HZ, 47 [GRE_CT_UNREPLIED] = 30*HZ,
54 [GRE_CT_REPLIED] = 180*HZ, 48 [GRE_CT_REPLIED] = 180*HZ,
55}; 49};
56 50
57static unsigned int proto_gre_net_id __read_mostly; 51static unsigned int proto_gre_net_id __read_mostly;
58struct netns_proto_gre {
59 struct nf_proto_net nf;
60 rwlock_t keymap_lock;
61 struct list_head keymap_list;
62 unsigned int gre_timeouts[GRE_CT_MAX];
63};
64 52
65static inline struct netns_proto_gre *gre_pernet(struct net *net) 53static inline struct netns_proto_gre *gre_pernet(struct net *net)
66{ 54{
@@ -402,6 +390,8 @@ static int __init nf_ct_proto_gre_init(void)
402{ 390{
403 int ret; 391 int ret;
404 392
393 BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
394
405 ret = register_pernet_subsys(&proto_gre_net_ops); 395 ret = register_pernet_subsys(&proto_gre_net_ops);
406 if (ret < 0) 396 if (ret < 0)
407 goto out_pernet; 397 goto out_pernet;
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 3598520bd19b..de64d8a5fdfd 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -25,11 +25,6 @@
25 25
26static const unsigned int nf_ct_icmp_timeout = 30*HZ; 26static const unsigned int nf_ct_icmp_timeout = 30*HZ;
27 27
28static inline struct nf_icmp_net *icmp_pernet(struct net *net)
29{
30 return &net->ct.nf_ct_proto.icmp;
31}
32
33static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 28static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
34 struct net *net, struct nf_conntrack_tuple *tuple) 29 struct net *net, struct nf_conntrack_tuple *tuple)
35{ 30{
@@ -103,7 +98,7 @@ static int icmp_packet(struct nf_conn *ct,
103 } 98 }
104 99
105 if (!timeout) 100 if (!timeout)
106 timeout = &icmp_pernet(nf_ct_net(ct))->timeout; 101 timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout;
107 102
108 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); 103 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
109 return NF_ACCEPT; 104 return NF_ACCEPT;
@@ -275,7 +270,7 @@ static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
275 struct net *net, void *data) 270 struct net *net, void *data)
276{ 271{
277 unsigned int *timeout = data; 272 unsigned int *timeout = data;
278 struct nf_icmp_net *in = icmp_pernet(net); 273 struct nf_icmp_net *in = nf_icmp_pernet(net);
279 274
280 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { 275 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
281 if (!timeout) 276 if (!timeout)
@@ -337,7 +332,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
337 332
338static int icmp_init_net(struct net *net) 333static int icmp_init_net(struct net *net)
339{ 334{
340 struct nf_icmp_net *in = icmp_pernet(net); 335 struct nf_icmp_net *in = nf_icmp_pernet(net);
341 struct nf_proto_net *pn = &in->pn; 336 struct nf_proto_net *pn = &in->pn;
342 337
343 in->timeout = nf_ct_icmp_timeout; 338 in->timeout = nf_ct_icmp_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index 378618feed5d..a15eefb8e317 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -30,11 +30,6 @@
30 30
31static const unsigned int nf_ct_icmpv6_timeout = 30*HZ; 31static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
32 32
33static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
34{
35 return &net->ct.nf_ct_proto.icmpv6;
36}
37
38static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, 33static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
39 unsigned int dataoff, 34 unsigned int dataoff,
40 struct net *net, 35 struct net *net,
@@ -87,7 +82,7 @@ static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
87 82
88static unsigned int *icmpv6_get_timeouts(struct net *net) 83static unsigned int *icmpv6_get_timeouts(struct net *net)
89{ 84{
90 return &icmpv6_pernet(net)->timeout; 85 return &nf_icmpv6_pernet(net)->timeout;
91} 86}
92 87
93/* Returns verdict for packet, or -1 for invalid. */ 88/* Returns verdict for packet, or -1 for invalid. */
@@ -286,7 +281,7 @@ static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
286 struct net *net, void *data) 281 struct net *net, void *data)
287{ 282{
288 unsigned int *timeout = data; 283 unsigned int *timeout = data;
289 struct nf_icmp_net *in = icmpv6_pernet(net); 284 struct nf_icmp_net *in = nf_icmpv6_pernet(net);
290 285
291 if (!timeout) 286 if (!timeout)
292 timeout = icmpv6_get_timeouts(net); 287 timeout = icmpv6_get_timeouts(net);
@@ -348,7 +343,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
348 343
349static int icmpv6_init_net(struct net *net) 344static int icmpv6_init_net(struct net *net)
350{ 345{
351 struct nf_icmp_net *in = icmpv6_pernet(net); 346 struct nf_icmp_net *in = nf_icmpv6_pernet(net);
352 struct nf_proto_net *pn = &in->pn; 347 struct nf_proto_net *pn = &in->pn;
353 348
354 in->timeout = nf_ct_icmpv6_timeout; 349 in->timeout = nf_ct_icmpv6_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 3d719d3eb9a3..d53e3e78f605 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -146,11 +146,6 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
146 } 146 }
147}; 147};
148 148
149static inline struct nf_sctp_net *sctp_pernet(struct net *net)
150{
151 return &net->ct.nf_ct_proto.sctp;
152}
153
154#ifdef CONFIG_NF_CONNTRACK_PROCFS 149#ifdef CONFIG_NF_CONNTRACK_PROCFS
155/* Print out the private part of the conntrack. */ 150/* Print out the private part of the conntrack. */
156static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) 151static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -480,7 +475,7 @@ static int sctp_packet(struct nf_conn *ct,
480 475
481 timeouts = nf_ct_timeout_lookup(ct); 476 timeouts = nf_ct_timeout_lookup(ct);
482 if (!timeouts) 477 if (!timeouts)
483 timeouts = sctp_pernet(nf_ct_net(ct))->timeouts; 478 timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
484 479
485 nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); 480 nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
486 481
@@ -599,7 +594,7 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
599 struct net *net, void *data) 594 struct net *net, void *data)
600{ 595{
601 unsigned int *timeouts = data; 596 unsigned int *timeouts = data;
602 struct nf_sctp_net *sn = sctp_pernet(net); 597 struct nf_sctp_net *sn = nf_sctp_pernet(net);
603 int i; 598 int i;
604 599
605 /* set default SCTP timeouts. */ 600 /* set default SCTP timeouts. */
@@ -736,7 +731,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
736 731
737static int sctp_init_net(struct net *net) 732static int sctp_init_net(struct net *net)
738{ 733{
739 struct nf_sctp_net *sn = sctp_pernet(net); 734 struct nf_sctp_net *sn = nf_sctp_pernet(net);
740 struct nf_proto_net *pn = &sn->pn; 735 struct nf_proto_net *pn = &sn->pn;
741 736
742 if (!pn->users) { 737 if (!pn->users) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 1bcf9984d45e..4dcbd51a8e97 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -272,11 +272,6 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
272 } 272 }
273}; 273};
274 274
275static inline struct nf_tcp_net *tcp_pernet(struct net *net)
276{
277 return &net->ct.nf_ct_proto.tcp;
278}
279
280#ifdef CONFIG_NF_CONNTRACK_PROCFS 275#ifdef CONFIG_NF_CONNTRACK_PROCFS
281/* Print out the private part of the conntrack. */ 276/* Print out the private part of the conntrack. */
282static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) 277static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -475,7 +470,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
475 const struct tcphdr *tcph) 470 const struct tcphdr *tcph)
476{ 471{
477 struct net *net = nf_ct_net(ct); 472 struct net *net = nf_ct_net(ct);
478 struct nf_tcp_net *tn = tcp_pernet(net); 473 struct nf_tcp_net *tn = nf_tcp_pernet(net);
479 struct ip_ct_tcp_state *sender = &state->seen[dir]; 474 struct ip_ct_tcp_state *sender = &state->seen[dir];
480 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 475 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
481 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 476 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -767,7 +762,7 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
767{ 762{
768 enum tcp_conntrack new_state; 763 enum tcp_conntrack new_state;
769 struct net *net = nf_ct_net(ct); 764 struct net *net = nf_ct_net(ct);
770 const struct nf_tcp_net *tn = tcp_pernet(net); 765 const struct nf_tcp_net *tn = nf_tcp_pernet(net);
771 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0]; 766 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
772 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1]; 767 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
773 768
@@ -841,7 +836,7 @@ static int tcp_packet(struct nf_conn *ct,
841 const struct nf_hook_state *state) 836 const struct nf_hook_state *state)
842{ 837{
843 struct net *net = nf_ct_net(ct); 838 struct net *net = nf_ct_net(ct);
844 struct nf_tcp_net *tn = tcp_pernet(net); 839 struct nf_tcp_net *tn = nf_tcp_pernet(net);
845 struct nf_conntrack_tuple *tuple; 840 struct nf_conntrack_tuple *tuple;
846 enum tcp_conntrack new_state, old_state; 841 enum tcp_conntrack new_state, old_state;
847 unsigned int index, *timeouts; 842 unsigned int index, *timeouts;
@@ -1283,7 +1278,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
1283static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], 1278static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1284 struct net *net, void *data) 1279 struct net *net, void *data)
1285{ 1280{
1286 struct nf_tcp_net *tn = tcp_pernet(net); 1281 struct nf_tcp_net *tn = nf_tcp_pernet(net);
1287 unsigned int *timeouts = data; 1282 unsigned int *timeouts = data;
1288 int i; 1283 int i;
1289 1284
@@ -1508,7 +1503,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
1508 1503
1509static int tcp_init_net(struct net *net) 1504static int tcp_init_net(struct net *net)
1510{ 1505{
1511 struct nf_tcp_net *tn = tcp_pernet(net); 1506 struct nf_tcp_net *tn = nf_tcp_pernet(net);
1512 struct nf_proto_net *pn = &tn->pn; 1507 struct nf_proto_net *pn = &tn->pn;
1513 1508
1514 if (!pn->users) { 1509 if (!pn->users) {
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index a7aa70370913..c879d8d78cfd 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -32,14 +32,9 @@ static const unsigned int udp_timeouts[UDP_CT_MAX] = {
32 [UDP_CT_REPLIED] = 180*HZ, 32 [UDP_CT_REPLIED] = 180*HZ,
33}; 33};
34 34
35static inline struct nf_udp_net *udp_pernet(struct net *net)
36{
37 return &net->ct.nf_ct_proto.udp;
38}
39
40static unsigned int *udp_get_timeouts(struct net *net) 35static unsigned int *udp_get_timeouts(struct net *net)
41{ 36{
42 return udp_pernet(net)->timeouts; 37 return nf_udp_pernet(net)->timeouts;
43} 38}
44 39
45static void udp_error_log(const struct sk_buff *skb, 40static void udp_error_log(const struct sk_buff *skb,
@@ -212,7 +207,7 @@ static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
212 struct net *net, void *data) 207 struct net *net, void *data)
213{ 208{
214 unsigned int *timeouts = data; 209 unsigned int *timeouts = data;
215 struct nf_udp_net *un = udp_pernet(net); 210 struct nf_udp_net *un = nf_udp_pernet(net);
216 211
217 if (!timeouts) 212 if (!timeouts)
218 timeouts = un->timeouts; 213 timeouts = un->timeouts;
@@ -292,7 +287,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
292 287
293static int udp_init_net(struct net *net) 288static int udp_init_net(struct net *net)
294{ 289{
295 struct nf_udp_net *un = udp_pernet(net); 290 struct nf_udp_net *un = nf_udp_pernet(net);
296 struct nf_proto_net *pn = &un->pn; 291 struct nf_proto_net *pn = &un->pn;
297 292
298 if (!pn->users) { 293 if (!pn->users) {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 42487d01a3ed..2e61aab6ed73 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2457,7 +2457,7 @@ err:
2457static void nf_tables_rule_destroy(const struct nft_ctx *ctx, 2457static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2458 struct nft_rule *rule) 2458 struct nft_rule *rule)
2459{ 2459{
2460 struct nft_expr *expr; 2460 struct nft_expr *expr, *next;
2461 2461
2462 /* 2462 /*
2463 * Careful: some expressions might not be initialized in case this 2463 * Careful: some expressions might not be initialized in case this
@@ -2465,8 +2465,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2465 */ 2465 */
2466 expr = nft_expr_first(rule); 2466 expr = nft_expr_first(rule);
2467 while (expr != nft_expr_last(rule) && expr->ops) { 2467 while (expr != nft_expr_last(rule) && expr->ops) {
2468 next = nft_expr_next(expr);
2468 nf_tables_expr_destroy(ctx, expr); 2469 nf_tables_expr_destroy(ctx, expr);
2469 expr = nft_expr_next(expr); 2470 expr = next;
2470 } 2471 }
2471 kfree(rule); 2472 kfree(rule);
2472} 2473}
@@ -2589,17 +2590,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2589 2590
2590 if (chain->use == UINT_MAX) 2591 if (chain->use == UINT_MAX)
2591 return -EOVERFLOW; 2592 return -EOVERFLOW;
2592 }
2593
2594 if (nla[NFTA_RULE_POSITION]) {
2595 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
2596 return -EOPNOTSUPP;
2597 2593
2598 pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION])); 2594 if (nla[NFTA_RULE_POSITION]) {
2599 old_rule = __nft_rule_lookup(chain, pos_handle); 2595 pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
2600 if (IS_ERR(old_rule)) { 2596 old_rule = __nft_rule_lookup(chain, pos_handle);
2601 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]); 2597 if (IS_ERR(old_rule)) {
2602 return PTR_ERR(old_rule); 2598 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
2599 return PTR_ERR(old_rule);
2600 }
2603 } 2601 }
2604 } 2602 }
2605 2603
@@ -2669,21 +2667,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2669 } 2667 }
2670 2668
2671 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 2669 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
2672 if (!nft_is_active_next(net, old_rule)) { 2670 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
2673 err = -ENOENT;
2674 goto err2;
2675 }
2676 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
2677 old_rule);
2678 if (trans == NULL) { 2671 if (trans == NULL) {
2679 err = -ENOMEM; 2672 err = -ENOMEM;
2680 goto err2; 2673 goto err2;
2681 } 2674 }
2682 nft_deactivate_next(net, old_rule); 2675 err = nft_delrule(&ctx, old_rule);
2683 chain->use--; 2676 if (err < 0) {
2684 2677 nft_trans_destroy(trans);
2685 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
2686 err = -ENOMEM;
2687 goto err2; 2678 goto err2;
2688 } 2679 }
2689 2680
@@ -6324,7 +6315,7 @@ static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
6324 call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old); 6315 call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
6325} 6316}
6326 6317
6327static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *chain) 6318static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
6328{ 6319{
6329 struct nft_rule **g0, **g1; 6320 struct nft_rule **g0, **g1;
6330 bool next_genbit; 6321 bool next_genbit;
@@ -6441,11 +6432,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
6441 6432
6442 /* step 2. Make rules_gen_X visible to packet path */ 6433 /* step 2. Make rules_gen_X visible to packet path */
6443 list_for_each_entry(table, &net->nft.tables, list) { 6434 list_for_each_entry(table, &net->nft.tables, list) {
6444 list_for_each_entry(chain, &table->chains, list) { 6435 list_for_each_entry(chain, &table->chains, list)
6445 if (!nft_is_active_next(net, chain)) 6436 nf_tables_commit_chain(net, chain);
6446 continue;
6447 nf_tables_commit_chain_active(net, chain);
6448 }
6449 } 6437 }
6450 6438
6451 /* 6439 /*
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index e7a50af1b3d6..109b0d27345a 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -382,7 +382,8 @@ err:
382static int 382static int
383cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid, 383cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
384 u32 seq, u32 type, int event, u16 l3num, 384 u32 seq, u32 type, int event, u16 l3num,
385 const struct nf_conntrack_l4proto *l4proto) 385 const struct nf_conntrack_l4proto *l4proto,
386 const unsigned int *timeouts)
386{ 387{
387 struct nlmsghdr *nlh; 388 struct nlmsghdr *nlh;
388 struct nfgenmsg *nfmsg; 389 struct nfgenmsg *nfmsg;
@@ -408,7 +409,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
408 if (!nest_parms) 409 if (!nest_parms)
409 goto nla_put_failure; 410 goto nla_put_failure;
410 411
411 ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL); 412 ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
412 if (ret < 0) 413 if (ret < 0)
413 goto nla_put_failure; 414 goto nla_put_failure;
414 415
@@ -430,6 +431,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
430 struct netlink_ext_ack *extack) 431 struct netlink_ext_ack *extack)
431{ 432{
432 const struct nf_conntrack_l4proto *l4proto; 433 const struct nf_conntrack_l4proto *l4proto;
434 unsigned int *timeouts = NULL;
433 struct sk_buff *skb2; 435 struct sk_buff *skb2;
434 int ret, err; 436 int ret, err;
435 __u16 l3num; 437 __u16 l3num;
@@ -442,12 +444,55 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
442 l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]); 444 l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
443 l4proto = nf_ct_l4proto_find_get(l4num); 445 l4proto = nf_ct_l4proto_find_get(l4num);
444 446
445 /* This protocol is not supported, skip. */ 447 err = -EOPNOTSUPP;
446 if (l4proto->l4proto != l4num) { 448 if (l4proto->l4proto != l4num)
447 err = -EOPNOTSUPP;
448 goto err; 449 goto err;
450
451 switch (l4proto->l4proto) {
452 case IPPROTO_ICMP:
453 timeouts = &nf_icmp_pernet(net)->timeout;
454 break;
455 case IPPROTO_TCP:
456 timeouts = nf_tcp_pernet(net)->timeouts;
457 break;
458 case IPPROTO_UDP: /* fallthrough */
459 case IPPROTO_UDPLITE:
460 timeouts = nf_udp_pernet(net)->timeouts;
461 break;
462 case IPPROTO_DCCP:
463#ifdef CONFIG_NF_CT_PROTO_DCCP
464 timeouts = nf_dccp_pernet(net)->dccp_timeout;
465#endif
466 break;
467 case IPPROTO_ICMPV6:
468 timeouts = &nf_icmpv6_pernet(net)->timeout;
469 break;
470 case IPPROTO_SCTP:
471#ifdef CONFIG_NF_CT_PROTO_SCTP
472 timeouts = nf_sctp_pernet(net)->timeouts;
473#endif
474 break;
475 case IPPROTO_GRE:
476#ifdef CONFIG_NF_CT_PROTO_GRE
477 if (l4proto->net_id) {
478 struct netns_proto_gre *net_gre;
479
480 net_gre = net_generic(net, *l4proto->net_id);
481 timeouts = net_gre->gre_timeouts;
482 }
483#endif
484 break;
485 case 255:
486 timeouts = &nf_generic_pernet(net)->timeout;
487 break;
488 default:
489 WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
490 break;
449 } 491 }
450 492
493 if (!timeouts)
494 goto err;
495
451 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 496 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
452 if (skb2 == NULL) { 497 if (skb2 == NULL) {
453 err = -ENOMEM; 498 err = -ENOMEM;
@@ -458,8 +503,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
458 nlh->nlmsg_seq, 503 nlh->nlmsg_seq,
459 NFNL_MSG_TYPE(nlh->nlmsg_type), 504 NFNL_MSG_TYPE(nlh->nlmsg_type),
460 IPCTNL_MSG_TIMEOUT_DEFAULT_SET, 505 IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
461 l3num, 506 l3num, l4proto, timeouts);
462 l4proto);
463 if (ret <= 0) { 507 if (ret <= 0) {
464 kfree_skb(skb2); 508 kfree_skb(skb2);
465 err = -ENOMEM; 509 err = -ENOMEM;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 768292eac2a4..7334e0b80a5e 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -54,9 +54,11 @@ static bool nft_xt_put(struct nft_xt *xt)
54 return false; 54 return false;
55} 55}
56 56
57static int nft_compat_chain_validate_dependency(const char *tablename, 57static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
58 const struct nft_chain *chain) 58 const char *tablename)
59{ 59{
60 enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
61 const struct nft_chain *chain = ctx->chain;
60 const struct nft_base_chain *basechain; 62 const struct nft_base_chain *basechain;
61 63
62 if (!tablename || 64 if (!tablename ||
@@ -64,9 +66,12 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
64 return 0; 66 return 0;
65 67
66 basechain = nft_base_chain(chain); 68 basechain = nft_base_chain(chain);
67 if (strcmp(tablename, "nat") == 0 && 69 if (strcmp(tablename, "nat") == 0) {
68 basechain->type->type != NFT_CHAIN_T_NAT) 70 if (ctx->family != NFPROTO_BRIDGE)
69 return -EINVAL; 71 type = NFT_CHAIN_T_NAT;
72 if (basechain->type->type != type)
73 return -EINVAL;
74 }
70 75
71 return 0; 76 return 0;
72} 77}
@@ -342,8 +347,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
342 if (target->hooks && !(hook_mask & target->hooks)) 347 if (target->hooks && !(hook_mask & target->hooks))
343 return -EINVAL; 348 return -EINVAL;
344 349
345 ret = nft_compat_chain_validate_dependency(target->table, 350 ret = nft_compat_chain_validate_dependency(ctx, target->table);
346 ctx->chain);
347 if (ret < 0) 351 if (ret < 0)
348 return ret; 352 return ret;
349 } 353 }
@@ -516,6 +520,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
516 void *info) 520 void *info)
517{ 521{
518 struct xt_match *match = expr->ops->data; 522 struct xt_match *match = expr->ops->data;
523 struct module *me = match->me;
519 struct xt_mtdtor_param par; 524 struct xt_mtdtor_param par;
520 525
521 par.net = ctx->net; 526 par.net = ctx->net;
@@ -526,7 +531,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
526 par.match->destroy(&par); 531 par.match->destroy(&par);
527 532
528 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) 533 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
529 module_put(match->me); 534 module_put(me);
530} 535}
531 536
532static void 537static void
@@ -590,8 +595,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
590 if (match->hooks && !(hook_mask & match->hooks)) 595 if (match->hooks && !(hook_mask & match->hooks))
591 return -EINVAL; 596 return -EINVAL;
592 597
593 ret = nft_compat_chain_validate_dependency(match->table, 598 ret = nft_compat_chain_validate_dependency(ctx, match->table);
594 ctx->chain);
595 if (ret < 0) 599 if (ret < 0)
596 return ret; 600 return ret;
597 } 601 }
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index e82d9a966c45..974525eb92df 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -214,7 +214,9 @@ static int __init nft_flow_offload_module_init(void)
214{ 214{
215 int err; 215 int err;
216 216
217 register_netdevice_notifier(&flow_offload_netdev_notifier); 217 err = register_netdevice_notifier(&flow_offload_netdev_notifier);
218 if (err)
219 goto err;
218 220
219 err = nft_register_expr(&nft_flow_offload_type); 221 err = nft_register_expr(&nft_flow_offload_type);
220 if (err < 0) 222 if (err < 0)
@@ -224,6 +226,7 @@ static int __init nft_flow_offload_module_init(void)
224 226
225register_expr: 227register_expr:
226 unregister_netdevice_notifier(&flow_offload_netdev_notifier); 228 unregister_netdevice_notifier(&flow_offload_netdev_notifier);
229err:
227 return err; 230 return err;
228} 231}
229 232
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 649d1700ec5b..3cc1b3dc3c3c 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -24,7 +24,6 @@ struct nft_ng_inc {
24 u32 modulus; 24 u32 modulus;
25 atomic_t counter; 25 atomic_t counter;
26 u32 offset; 26 u32 offset;
27 struct nft_set *map;
28}; 27};
29 28
30static u32 nft_ng_inc_gen(struct nft_ng_inc *priv) 29static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
@@ -48,34 +47,11 @@ static void nft_ng_inc_eval(const struct nft_expr *expr,
48 regs->data[priv->dreg] = nft_ng_inc_gen(priv); 47 regs->data[priv->dreg] = nft_ng_inc_gen(priv);
49} 48}
50 49
51static void nft_ng_inc_map_eval(const struct nft_expr *expr,
52 struct nft_regs *regs,
53 const struct nft_pktinfo *pkt)
54{
55 struct nft_ng_inc *priv = nft_expr_priv(expr);
56 const struct nft_set *map = priv->map;
57 const struct nft_set_ext *ext;
58 u32 result;
59 bool found;
60
61 result = nft_ng_inc_gen(priv);
62 found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
63
64 if (!found)
65 return;
66
67 nft_data_copy(&regs->data[priv->dreg],
68 nft_set_ext_data(ext), map->dlen);
69}
70
71static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = { 50static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
72 [NFTA_NG_DREG] = { .type = NLA_U32 }, 51 [NFTA_NG_DREG] = { .type = NLA_U32 },
73 [NFTA_NG_MODULUS] = { .type = NLA_U32 }, 52 [NFTA_NG_MODULUS] = { .type = NLA_U32 },
74 [NFTA_NG_TYPE] = { .type = NLA_U32 }, 53 [NFTA_NG_TYPE] = { .type = NLA_U32 },
75 [NFTA_NG_OFFSET] = { .type = NLA_U32 }, 54 [NFTA_NG_OFFSET] = { .type = NLA_U32 },
76 [NFTA_NG_SET_NAME] = { .type = NLA_STRING,
77 .len = NFT_SET_MAXNAMELEN - 1 },
78 [NFTA_NG_SET_ID] = { .type = NLA_U32 },
79}; 55};
80 56
81static int nft_ng_inc_init(const struct nft_ctx *ctx, 57static int nft_ng_inc_init(const struct nft_ctx *ctx,
@@ -101,22 +77,6 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
101 NFT_DATA_VALUE, sizeof(u32)); 77 NFT_DATA_VALUE, sizeof(u32));
102} 78}
103 79
104static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
105 const struct nft_expr *expr,
106 const struct nlattr * const tb[])
107{
108 struct nft_ng_inc *priv = nft_expr_priv(expr);
109 u8 genmask = nft_genmask_next(ctx->net);
110
111 nft_ng_inc_init(ctx, expr, tb);
112
113 priv->map = nft_set_lookup_global(ctx->net, ctx->table,
114 tb[NFTA_NG_SET_NAME],
115 tb[NFTA_NG_SET_ID], genmask);
116
117 return PTR_ERR_OR_ZERO(priv->map);
118}
119
120static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg, 80static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
121 u32 modulus, enum nft_ng_types type, u32 offset) 81 u32 modulus, enum nft_ng_types type, u32 offset)
122{ 82{
@@ -143,27 +103,10 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
143 priv->offset); 103 priv->offset);
144} 104}
145 105
146static int nft_ng_inc_map_dump(struct sk_buff *skb,
147 const struct nft_expr *expr)
148{
149 const struct nft_ng_inc *priv = nft_expr_priv(expr);
150
151 if (nft_ng_dump(skb, priv->dreg, priv->modulus,
152 NFT_NG_INCREMENTAL, priv->offset) ||
153 nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
154 goto nla_put_failure;
155
156 return 0;
157
158nla_put_failure:
159 return -1;
160}
161
162struct nft_ng_random { 106struct nft_ng_random {
163 enum nft_registers dreg:8; 107 enum nft_registers dreg:8;
164 u32 modulus; 108 u32 modulus;
165 u32 offset; 109 u32 offset;
166 struct nft_set *map;
167}; 110};
168 111
169static u32 nft_ng_random_gen(struct nft_ng_random *priv) 112static u32 nft_ng_random_gen(struct nft_ng_random *priv)
@@ -183,25 +126,6 @@ static void nft_ng_random_eval(const struct nft_expr *expr,
183 regs->data[priv->dreg] = nft_ng_random_gen(priv); 126 regs->data[priv->dreg] = nft_ng_random_gen(priv);
184} 127}
185 128
186static void nft_ng_random_map_eval(const struct nft_expr *expr,
187 struct nft_regs *regs,
188 const struct nft_pktinfo *pkt)
189{
190 struct nft_ng_random *priv = nft_expr_priv(expr);
191 const struct nft_set *map = priv->map;
192 const struct nft_set_ext *ext;
193 u32 result;
194 bool found;
195
196 result = nft_ng_random_gen(priv);
197 found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
198 if (!found)
199 return;
200
201 nft_data_copy(&regs->data[priv->dreg],
202 nft_set_ext_data(ext), map->dlen);
203}
204
205static int nft_ng_random_init(const struct nft_ctx *ctx, 129static int nft_ng_random_init(const struct nft_ctx *ctx,
206 const struct nft_expr *expr, 130 const struct nft_expr *expr,
207 const struct nlattr * const tb[]) 131 const struct nlattr * const tb[])
@@ -226,21 +150,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
226 NFT_DATA_VALUE, sizeof(u32)); 150 NFT_DATA_VALUE, sizeof(u32));
227} 151}
228 152
229static int nft_ng_random_map_init(const struct nft_ctx *ctx,
230 const struct nft_expr *expr,
231 const struct nlattr * const tb[])
232{
233 struct nft_ng_random *priv = nft_expr_priv(expr);
234 u8 genmask = nft_genmask_next(ctx->net);
235
236 nft_ng_random_init(ctx, expr, tb);
237 priv->map = nft_set_lookup_global(ctx->net, ctx->table,
238 tb[NFTA_NG_SET_NAME],
239 tb[NFTA_NG_SET_ID], genmask);
240
241 return PTR_ERR_OR_ZERO(priv->map);
242}
243
244static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr) 153static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
245{ 154{
246 const struct nft_ng_random *priv = nft_expr_priv(expr); 155 const struct nft_ng_random *priv = nft_expr_priv(expr);
@@ -249,22 +158,6 @@ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
249 priv->offset); 158 priv->offset);
250} 159}
251 160
252static int nft_ng_random_map_dump(struct sk_buff *skb,
253 const struct nft_expr *expr)
254{
255 const struct nft_ng_random *priv = nft_expr_priv(expr);
256
257 if (nft_ng_dump(skb, priv->dreg, priv->modulus,
258 NFT_NG_RANDOM, priv->offset) ||
259 nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
260 goto nla_put_failure;
261
262 return 0;
263
264nla_put_failure:
265 return -1;
266}
267
268static struct nft_expr_type nft_ng_type; 161static struct nft_expr_type nft_ng_type;
269static const struct nft_expr_ops nft_ng_inc_ops = { 162static const struct nft_expr_ops nft_ng_inc_ops = {
270 .type = &nft_ng_type, 163 .type = &nft_ng_type,
@@ -274,14 +167,6 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
274 .dump = nft_ng_inc_dump, 167 .dump = nft_ng_inc_dump,
275}; 168};
276 169
277static const struct nft_expr_ops nft_ng_inc_map_ops = {
278 .type = &nft_ng_type,
279 .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
280 .eval = nft_ng_inc_map_eval,
281 .init = nft_ng_inc_map_init,
282 .dump = nft_ng_inc_map_dump,
283};
284
285static const struct nft_expr_ops nft_ng_random_ops = { 170static const struct nft_expr_ops nft_ng_random_ops = {
286 .type = &nft_ng_type, 171 .type = &nft_ng_type,
287 .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)), 172 .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
@@ -290,14 +175,6 @@ static const struct nft_expr_ops nft_ng_random_ops = {
290 .dump = nft_ng_random_dump, 175 .dump = nft_ng_random_dump,
291}; 176};
292 177
293static const struct nft_expr_ops nft_ng_random_map_ops = {
294 .type = &nft_ng_type,
295 .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
296 .eval = nft_ng_random_map_eval,
297 .init = nft_ng_random_map_init,
298 .dump = nft_ng_random_map_dump,
299};
300
301static const struct nft_expr_ops * 178static const struct nft_expr_ops *
302nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) 179nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
303{ 180{
@@ -312,12 +189,8 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
312 189
313 switch (type) { 190 switch (type) {
314 case NFT_NG_INCREMENTAL: 191 case NFT_NG_INCREMENTAL:
315 if (tb[NFTA_NG_SET_NAME])
316 return &nft_ng_inc_map_ops;
317 return &nft_ng_inc_ops; 192 return &nft_ng_inc_ops;
318 case NFT_NG_RANDOM: 193 case NFT_NG_RANDOM:
319 if (tb[NFTA_NG_SET_NAME])
320 return &nft_ng_random_map_ops;
321 return &nft_ng_random_ops; 194 return &nft_ng_random_ops;
322 } 195 }
323 196
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index ca5e5d8c5ef8..b13618c764ec 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -50,7 +50,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
50 int err; 50 int err;
51 u8 ttl; 51 u8 ttl;
52 52
53 if (nla_get_u8(tb[NFTA_OSF_TTL])) { 53 if (tb[NFTA_OSF_TTL]) {
54 ttl = nla_get_u8(tb[NFTA_OSF_TTL]); 54 ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
55 if (ttl > 2) 55 if (ttl > 2)
56 return -EINVAL; 56 return -EINVAL;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index c6acfc2d9c84..eb4cbd244c3d 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -114,6 +114,22 @@ static void idletimer_tg_expired(struct timer_list *t)
114 schedule_work(&timer->work); 114 schedule_work(&timer->work);
115} 115}
116 116
117static int idletimer_check_sysfs_name(const char *name, unsigned int size)
118{
119 int ret;
120
121 ret = xt_check_proc_name(name, size);
122 if (ret < 0)
123 return ret;
124
125 if (!strcmp(name, "power") ||
126 !strcmp(name, "subsystem") ||
127 !strcmp(name, "uevent"))
128 return -EINVAL;
129
130 return 0;
131}
132
117static int idletimer_tg_create(struct idletimer_tg_info *info) 133static int idletimer_tg_create(struct idletimer_tg_info *info)
118{ 134{
119 int ret; 135 int ret;
@@ -124,6 +140,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
124 goto out; 140 goto out;
125 } 141 }
126 142
143 ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
144 if (ret < 0)
145 goto out_free_timer;
146
127 sysfs_attr_init(&info->timer->attr.attr); 147 sysfs_attr_init(&info->timer->attr.attr);
128 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); 148 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
129 if (!info->timer->attr.attr.name) { 149 if (!info->timer->attr.attr.name) {
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index dec843cadf46..9e05c86ba5c4 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -201,18 +201,8 @@ static __net_init int xt_rateest_net_init(struct net *net)
201 return 0; 201 return 0;
202} 202}
203 203
204static void __net_exit xt_rateest_net_exit(struct net *net)
205{
206 struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
207 int i;
208
209 for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
210 WARN_ON_ONCE(!hlist_empty(&xn->hash[i]));
211}
212
213static struct pernet_operations xt_rateest_net_ops = { 204static struct pernet_operations xt_rateest_net_ops = {
214 .init = xt_rateest_net_init, 205 .init = xt_rateest_net_init,
215 .exit = xt_rateest_net_exit,
216 .id = &xt_rateest_id, 206 .id = &xt_rateest_id,
217 .size = sizeof(struct xt_rateest_net), 207 .size = sizeof(struct xt_rateest_net),
218}; 208};
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 3e7d259e5d8d..1ad4017f9b73 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
295 295
296 /* copy match config into hashtable config */ 296 /* copy match config into hashtable config */
297 ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3); 297 ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
298 298 if (ret) {
299 if (ret) 299 vfree(hinfo);
300 return ret; 300 return ret;
301 }
301 302
302 hinfo->cfg.size = size; 303 hinfo->cfg.size = size;
303 if (hinfo->cfg.max == 0) 304 if (hinfo->cfg.max == 0)
@@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
814 int ret; 815 int ret;
815 816
816 ret = cfg_copy(&cfg, (void *)&info->cfg, 1); 817 ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
817
818 if (ret) 818 if (ret)
819 return ret; 819 return ret;
820 820
@@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
830 int ret; 830 int ret;
831 831
832 ret = cfg_copy(&cfg, (void *)&info->cfg, 2); 832 ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
833
834 if (ret) 833 if (ret)
835 return ret; 834 return ret;
836 835
@@ -921,7 +920,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
921 return ret; 920 return ret;
922 921
923 ret = cfg_copy(&cfg, (void *)&info->cfg, 1); 922 ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
924
925 if (ret) 923 if (ret)
926 return ret; 924 return ret;
927 925
@@ -940,7 +938,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
940 return ret; 938 return ret;
941 939
942 ret = cfg_copy(&cfg, (void *)&info->cfg, 2); 940 ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
943
944 if (ret) 941 if (ret)
945 return ret; 942 return ret;
946 943
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6bec37ab4472..a4660c48ff01 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1203,7 +1203,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
1203 &info->labels.mask); 1203 &info->labels.mask);
1204 if (err) 1204 if (err)
1205 return err; 1205 return err;
1206 } else if (labels_nonzero(&info->labels.mask)) { 1206 } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
1207 labels_nonzero(&info->labels.mask)) {
1207 err = ovs_ct_set_labels(ct, key, &info->labels.value, 1208 err = ovs_ct_set_labels(ct, key, &info->labels.value,
1208 &info->labels.mask); 1209 &info->labels.mask);
1209 if (err) 1210 if (err)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ec3095f13aae..a74650e98f42 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
2394 void *ph; 2394 void *ph;
2395 __u32 ts; 2395 __u32 ts;
2396 2396
2397 ph = skb_shinfo(skb)->destructor_arg; 2397 ph = skb_zcopy_get_nouarg(skb);
2398 packet_dec_pending(&po->tx_ring); 2398 packet_dec_pending(&po->tx_ring);
2399 2399
2400 ts = __packet_set_timestamp(po, ph, skb); 2400 ts = __packet_set_timestamp(po, ph, skb);
@@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2461 skb->mark = po->sk.sk_mark; 2461 skb->mark = po->sk.sk_mark;
2462 skb->tstamp = sockc->transmit_time; 2462 skb->tstamp = sockc->transmit_time;
2463 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); 2463 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
2464 skb_shinfo(skb)->destructor_arg = ph.raw; 2464 skb_zcopy_set_nouarg(skb, ph.raw);
2465 2465
2466 skb_reserve(skb, hlen); 2466 skb_reserve(skb, hlen);
2467 skb_reset_network_header(skb); 2467 skb_reset_network_header(skb);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 64362d078da8..a2522f9d71e2 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -375,17 +375,36 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
375 * getting ACKs from the server. Returns a number representing the life state 375 * getting ACKs from the server. Returns a number representing the life state
376 * which can be compared to that returned by a previous call. 376 * which can be compared to that returned by a previous call.
377 * 377 *
378 * If this is a client call, ping ACKs will be sent to the server to find out 378 * If the life state stalls, rxrpc_kernel_probe_life() should be called and
379 * whether it's still responsive and whether the call is still alive on the 379 * then 2RTT waited.
380 * server.
381 */ 380 */
382u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call) 381u32 rxrpc_kernel_check_life(const struct socket *sock,
382 const struct rxrpc_call *call)
383{ 383{
384 return call->acks_latest; 384 return call->acks_latest;
385} 385}
386EXPORT_SYMBOL(rxrpc_kernel_check_life); 386EXPORT_SYMBOL(rxrpc_kernel_check_life);
387 387
388/** 388/**
389 * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
390 * @sock: The socket the call is on
391 * @call: The call to check
392 *
393 * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
394 * find out whether a call is still alive by pinging it. This should cause the
395 * life state to be bumped in about 2*RTT.
396 *
397 * The must be called in TASK_RUNNING state on pain of might_sleep() objecting.
398 */
399void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
400{
401 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
402 rxrpc_propose_ack_ping_for_check_life);
403 rxrpc_send_ack_packet(call, true, NULL);
404}
405EXPORT_SYMBOL(rxrpc_kernel_probe_life);
406
407/**
389 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call. 408 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
390 * @sock: The socket the call is on 409 * @sock: The socket the call is on
391 * @call: The call to query 410 * @call: The call to query
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 382196e57a26..bc628acf4f4f 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -611,6 +611,7 @@ struct rxrpc_call {
611 * not hard-ACK'd packet follows this. 611 * not hard-ACK'd packet follows this.
612 */ 612 */
613 rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */ 613 rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
614 u16 tx_backoff; /* Delay to insert due to Tx failure */
614 615
615 /* TCP-style slow-start congestion control [RFC5681]. Since the SMSS 616 /* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
616 * is fixed, we keep these numbers in terms of segments (ie. DATA 617 * is fixed, we keep these numbers in terms of segments (ie. DATA
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 8e7434e92097..468efc3660c0 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
123 else 123 else
124 ack_at = expiry; 124 ack_at = expiry;
125 125
126 ack_at += READ_ONCE(call->tx_backoff);
126 ack_at += now; 127 ack_at += now;
127 if (time_before(ack_at, call->ack_at)) { 128 if (time_before(ack_at, call->ack_at)) {
128 WRITE_ONCE(call->ack_at, ack_at); 129 WRITE_ONCE(call->ack_at, ack_at);
@@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_struct *work)
311 container_of(work, struct rxrpc_call, processor); 312 container_of(work, struct rxrpc_call, processor);
312 rxrpc_serial_t *send_ack; 313 rxrpc_serial_t *send_ack;
313 unsigned long now, next, t; 314 unsigned long now, next, t;
315 unsigned int iterations = 0;
314 316
315 rxrpc_see_call(call); 317 rxrpc_see_call(call);
316 318
@@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_struct *work)
319 call->debug_id, rxrpc_call_states[call->state], call->events); 321 call->debug_id, rxrpc_call_states[call->state], call->events);
320 322
321recheck_state: 323recheck_state:
324 /* Limit the number of times we do this before returning to the manager */
325 iterations++;
326 if (iterations > 5)
327 goto requeue;
328
322 if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) { 329 if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
323 rxrpc_send_abort_packet(call); 330 rxrpc_send_abort_packet(call);
324 goto recheck_state; 331 goto recheck_state;
@@ -447,13 +454,16 @@ recheck_state:
447 rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart); 454 rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
448 455
449 /* other events may have been raised since we started checking */ 456 /* other events may have been raised since we started checking */
450 if (call->events && call->state < RXRPC_CALL_COMPLETE) { 457 if (call->events && call->state < RXRPC_CALL_COMPLETE)
451 __rxrpc_queue_call(call); 458 goto requeue;
452 goto out;
453 }
454 459
455out_put: 460out_put:
456 rxrpc_put_call(call, rxrpc_call_put); 461 rxrpc_put_call(call, rxrpc_call_put);
457out: 462out:
458 _leave(""); 463 _leave("");
464 return;
465
466requeue:
467 __rxrpc_queue_call(call);
468 goto out;
459} 469}
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 189418888839..736aa9281100 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -35,6 +35,21 @@ struct rxrpc_abort_buffer {
35static const char rxrpc_keepalive_string[] = ""; 35static const char rxrpc_keepalive_string[] = "";
36 36
37/* 37/*
38 * Increase Tx backoff on transmission failure and clear it on success.
39 */
40static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
41{
42 if (ret < 0) {
43 u16 tx_backoff = READ_ONCE(call->tx_backoff);
44
45 if (tx_backoff < HZ)
46 WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
47 } else {
48 WRITE_ONCE(call->tx_backoff, 0);
49 }
50}
51
52/*
38 * Arrange for a keepalive ping a certain time after we last transmitted. This 53 * Arrange for a keepalive ping a certain time after we last transmitted. This
39 * lets the far side know we're still interested in this call and helps keep 54 * lets the far side know we're still interested in this call and helps keep
40 * the route through any intervening firewall open. 55 * the route through any intervening firewall open.
@@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
210 else 225 else
211 trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr, 226 trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
212 rxrpc_tx_point_call_ack); 227 rxrpc_tx_point_call_ack);
228 rxrpc_tx_backoff(call, ret);
213 229
214 if (call->state < RXRPC_CALL_COMPLETE) { 230 if (call->state < RXRPC_CALL_COMPLETE) {
215 if (ret < 0) { 231 if (ret < 0) {
@@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
218 rxrpc_propose_ACK(call, pkt->ack.reason, 234 rxrpc_propose_ACK(call, pkt->ack.reason,
219 ntohs(pkt->ack.maxSkew), 235 ntohs(pkt->ack.maxSkew),
220 ntohl(pkt->ack.serial), 236 ntohl(pkt->ack.serial),
221 true, true, 237 false, true,
222 rxrpc_propose_ack_retry_tx); 238 rxrpc_propose_ack_retry_tx);
223 } else { 239 } else {
224 spin_lock_bh(&call->lock); 240 spin_lock_bh(&call->lock);
@@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
300 else 316 else
301 trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr, 317 trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
302 rxrpc_tx_point_call_abort); 318 rxrpc_tx_point_call_abort);
303 319 rxrpc_tx_backoff(call, ret);
304 320
305 rxrpc_put_connection(conn); 321 rxrpc_put_connection(conn);
306 return ret; 322 return ret;
@@ -413,6 +429,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
413 else 429 else
414 trace_rxrpc_tx_packet(call->debug_id, &whdr, 430 trace_rxrpc_tx_packet(call->debug_id, &whdr,
415 rxrpc_tx_point_call_data_nofrag); 431 rxrpc_tx_point_call_data_nofrag);
432 rxrpc_tx_backoff(call, ret);
416 if (ret == -EMSGSIZE) 433 if (ret == -EMSGSIZE)
417 goto send_fragmentable; 434 goto send_fragmentable;
418 435
@@ -445,9 +462,18 @@ done:
445 rxrpc_reduce_call_timer(call, expect_rx_by, nowj, 462 rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
446 rxrpc_timer_set_for_normal); 463 rxrpc_timer_set_for_normal);
447 } 464 }
448 }
449 465
450 rxrpc_set_keepalive(call); 466 rxrpc_set_keepalive(call);
467 } else {
468 /* Cancel the call if the initial transmission fails,
469 * particularly if that's due to network routing issues that
470 * aren't going away anytime soon. The layer above can arrange
471 * the retransmission.
472 */
473 if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
474 rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
475 RX_USER_ABORT, ret);
476 }
451 477
452 _leave(" = %d [%u]", ret, call->peer->maxdata); 478 _leave(" = %d [%u]", ret, call->peer->maxdata);
453 return ret; 479 return ret;
@@ -506,6 +532,7 @@ send_fragmentable:
506 else 532 else
507 trace_rxrpc_tx_packet(call->debug_id, &whdr, 533 trace_rxrpc_tx_packet(call->debug_id, &whdr,
508 rxrpc_tx_point_call_data_frag); 534 rxrpc_tx_point_call_data_frag);
535 rxrpc_tx_backoff(call, ret);
509 536
510 up_write(&conn->params.local->defrag_sem); 537 up_write(&conn->params.local->defrag_sem);
511 goto done; 538 goto done;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 1dae5f2b358f..c8cf4d10c435 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -258,7 +258,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
258 if (is_redirect) { 258 if (is_redirect) {
259 skb2->tc_redirected = 1; 259 skb2->tc_redirected = 1;
260 skb2->tc_from_ingress = skb2->tc_at_ingress; 260 skb2->tc_from_ingress = skb2->tc_at_ingress;
261 261 if (skb2->tc_from_ingress)
262 skb2->tstamp = 0;
262 /* let's the caller reinsert the packet, if possible */ 263 /* let's the caller reinsert the packet, if possible */
263 if (use_reinsert) { 264 if (use_reinsert) {
264 res->ingress = want_ingress; 265 res->ingress = want_ingress;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index da3dd0f68cc2..2b372a06b432 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
201 goto out_release; 201 goto out_release;
202 } 202 }
203 } else { 203 } else {
204 return err; 204 ret = err;
205 goto out_free;
205 } 206 }
206 207
207 p = to_pedit(*a); 208 p = to_pedit(*a);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 052855d47354..37c9b8f0e10f 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -27,10 +27,7 @@ struct tcf_police_params {
27 u32 tcfp_ewma_rate; 27 u32 tcfp_ewma_rate;
28 s64 tcfp_burst; 28 s64 tcfp_burst;
29 u32 tcfp_mtu; 29 u32 tcfp_mtu;
30 s64 tcfp_toks;
31 s64 tcfp_ptoks;
32 s64 tcfp_mtu_ptoks; 30 s64 tcfp_mtu_ptoks;
33 s64 tcfp_t_c;
34 struct psched_ratecfg rate; 31 struct psched_ratecfg rate;
35 bool rate_present; 32 bool rate_present;
36 struct psched_ratecfg peak; 33 struct psched_ratecfg peak;
@@ -41,6 +38,11 @@ struct tcf_police_params {
41struct tcf_police { 38struct tcf_police {
42 struct tc_action common; 39 struct tc_action common;
43 struct tcf_police_params __rcu *params; 40 struct tcf_police_params __rcu *params;
41
42 spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
43 s64 tcfp_toks;
44 s64 tcfp_ptoks;
45 s64 tcfp_t_c;
44}; 46};
45 47
46#define to_police(pc) ((struct tcf_police *)pc) 48#define to_police(pc) ((struct tcf_police *)pc)
@@ -122,6 +124,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
122 return ret; 124 return ret;
123 } 125 }
124 ret = ACT_P_CREATED; 126 ret = ACT_P_CREATED;
127 spin_lock_init(&(to_police(*a)->tcfp_lock));
125 } else if (!ovr) { 128 } else if (!ovr) {
126 tcf_idr_release(*a, bind); 129 tcf_idr_release(*a, bind);
127 return -EEXIST; 130 return -EEXIST;
@@ -186,12 +189,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
186 } 189 }
187 190
188 new->tcfp_burst = PSCHED_TICKS2NS(parm->burst); 191 new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
189 new->tcfp_toks = new->tcfp_burst; 192 if (new->peak_present)
190 if (new->peak_present) {
191 new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak, 193 new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
192 new->tcfp_mtu); 194 new->tcfp_mtu);
193 new->tcfp_ptoks = new->tcfp_mtu_ptoks;
194 }
195 195
196 if (tb[TCA_POLICE_AVRATE]) 196 if (tb[TCA_POLICE_AVRATE])
197 new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); 197 new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
@@ -207,7 +207,12 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
207 } 207 }
208 208
209 spin_lock_bh(&police->tcf_lock); 209 spin_lock_bh(&police->tcf_lock);
210 new->tcfp_t_c = ktime_get_ns(); 210 spin_lock_bh(&police->tcfp_lock);
211 police->tcfp_t_c = ktime_get_ns();
212 police->tcfp_toks = new->tcfp_burst;
213 if (new->peak_present)
214 police->tcfp_ptoks = new->tcfp_mtu_ptoks;
215 spin_unlock_bh(&police->tcfp_lock);
211 police->tcf_action = parm->action; 216 police->tcf_action = parm->action;
212 rcu_swap_protected(police->params, 217 rcu_swap_protected(police->params,
213 new, 218 new,
@@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
257 } 262 }
258 263
259 now = ktime_get_ns(); 264 now = ktime_get_ns();
260 toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst); 265 spin_lock_bh(&police->tcfp_lock);
266 toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
261 if (p->peak_present) { 267 if (p->peak_present) {
262 ptoks = toks + p->tcfp_ptoks; 268 ptoks = toks + police->tcfp_ptoks;
263 if (ptoks > p->tcfp_mtu_ptoks) 269 if (ptoks > p->tcfp_mtu_ptoks)
264 ptoks = p->tcfp_mtu_ptoks; 270 ptoks = p->tcfp_mtu_ptoks;
265 ptoks -= (s64)psched_l2t_ns(&p->peak, 271 ptoks -= (s64)psched_l2t_ns(&p->peak,
266 qdisc_pkt_len(skb)); 272 qdisc_pkt_len(skb));
267 } 273 }
268 toks += p->tcfp_toks; 274 toks += police->tcfp_toks;
269 if (toks > p->tcfp_burst) 275 if (toks > p->tcfp_burst)
270 toks = p->tcfp_burst; 276 toks = p->tcfp_burst;
271 toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb)); 277 toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
272 if ((toks|ptoks) >= 0) { 278 if ((toks|ptoks) >= 0) {
273 p->tcfp_t_c = now; 279 police->tcfp_t_c = now;
274 p->tcfp_toks = toks; 280 police->tcfp_toks = toks;
275 p->tcfp_ptoks = ptoks; 281 police->tcfp_ptoks = ptoks;
282 spin_unlock_bh(&police->tcfp_lock);
276 ret = p->tcfp_result; 283 ret = p->tcfp_result;
277 goto inc_drops; 284 goto inc_drops;
278 } 285 }
286 spin_unlock_bh(&police->tcfp_lock);
279 } 287 }
280 288
281inc_overlimits: 289inc_overlimits:
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9aada2d0ef06..c6c327874abc 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
709 struct netlink_ext_ack *extack) 709 struct netlink_ext_ack *extack)
710{ 710{
711 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL; 711 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
712 int option_len, key_depth, msk_depth = 0; 712 int err, option_len, key_depth, msk_depth = 0;
713
714 err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
715 TCA_FLOWER_KEY_ENC_OPTS_MAX,
716 enc_opts_policy, extack);
717 if (err)
718 return err;
713 719
714 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]); 720 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
715 721
716 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) { 722 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
723 err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
724 TCA_FLOWER_KEY_ENC_OPTS_MAX,
725 enc_opts_policy, extack);
726 if (err)
727 return err;
728
717 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 729 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
718 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 730 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
719 } 731 }
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 4b1af706896c..25a7cf6d380f 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -469,22 +469,29 @@ begin:
469 goto begin; 469 goto begin;
470 } 470 }
471 prefetch(&skb->end); 471 prefetch(&skb->end);
472 f->credit -= qdisc_pkt_len(skb); 472 plen = qdisc_pkt_len(skb);
473 f->credit -= plen;
473 474
474 if (ktime_to_ns(skb->tstamp) || !q->rate_enable) 475 if (!q->rate_enable)
475 goto out; 476 goto out;
476 477
477 rate = q->flow_max_rate; 478 rate = q->flow_max_rate;
478 if (skb->sk) 479
479 rate = min(skb->sk->sk_pacing_rate, rate); 480 /* If EDT time was provided for this skb, we need to
480 481 * update f->time_next_packet only if this qdisc enforces
481 if (rate <= q->low_rate_threshold) { 482 * a flow max rate.
482 f->credit = 0; 483 */
483 plen = qdisc_pkt_len(skb); 484 if (!skb->tstamp) {
484 } else { 485 if (skb->sk)
485 plen = max(qdisc_pkt_len(skb), q->quantum); 486 rate = min(skb->sk->sk_pacing_rate, rate);
486 if (f->credit > 0) 487
487 goto out; 488 if (rate <= q->low_rate_threshold) {
489 f->credit = 0;
490 } else {
491 plen = max(plen, q->quantum);
492 if (f->credit > 0)
493 goto out;
494 }
488 } 495 }
489 if (rate != ~0UL) { 496 if (rate != ~0UL) {
490 u64 len = (u64)plen * NSEC_PER_SEC; 497 u64 len = (u64)plen * NSEC_PER_SEC;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 57b3ad9394ad..2c38e3d07924 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -648,15 +648,6 @@ deliver:
648 */ 648 */
649 skb->dev = qdisc_dev(sch); 649 skb->dev = qdisc_dev(sch);
650 650
651#ifdef CONFIG_NET_CLS_ACT
652 /*
653 * If it's at ingress let's pretend the delay is
654 * from the network (tstamp will be updated).
655 */
656 if (skb->tc_redirected && skb->tc_from_ingress)
657 skb->tstamp = 0;
658#endif
659
660 if (q->slot.slot_next) { 651 if (q->slot.slot_next) {
661 q->slot.packets_left--; 652 q->slot.packets_left--;
662 q->slot.bytes_left -= qdisc_pkt_len(skb); 653 q->slot.bytes_left -= qdisc_pkt_len(skb);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 67939ad99c01..025f48e14a91 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
118 sctp_transport_route(tp, NULL, sp); 118 sctp_transport_route(tp, NULL, sp);
119 if (asoc->param_flags & SPP_PMTUD_ENABLE) 119 if (asoc->param_flags & SPP_PMTUD_ENABLE)
120 sctp_assoc_sync_pmtu(asoc); 120 sctp_assoc_sync_pmtu(asoc);
121 } else if (!sctp_transport_pmtu_check(tp)) {
122 if (asoc->param_flags & SPP_PMTUD_ENABLE)
123 sctp_assoc_sync_pmtu(asoc);
121 } 124 }
122 125
123 if (asoc->pmtu_pending) { 126 if (asoc->pmtu_pending) {
@@ -396,25 +399,6 @@ finish:
396 return retval; 399 return retval;
397} 400}
398 401
399static void sctp_packet_release_owner(struct sk_buff *skb)
400{
401 sk_free(skb->sk);
402}
403
404static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
405{
406 skb_orphan(skb);
407 skb->sk = sk;
408 skb->destructor = sctp_packet_release_owner;
409
410 /*
411 * The data chunks have already been accounted for in sctp_sendmsg(),
412 * therefore only reserve a single byte to keep socket around until
413 * the packet has been transmitted.
414 */
415 refcount_inc(&sk->sk_wmem_alloc);
416}
417
418static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) 402static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
419{ 403{
420 if (SCTP_OUTPUT_CB(head)->last == head) 404 if (SCTP_OUTPUT_CB(head)->last == head)
@@ -426,6 +410,7 @@ static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
426 head->truesize += skb->truesize; 410 head->truesize += skb->truesize;
427 head->data_len += skb->len; 411 head->data_len += skb->len;
428 head->len += skb->len; 412 head->len += skb->len;
413 refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
429 414
430 __skb_header_release(skb); 415 __skb_header_release(skb);
431} 416}
@@ -601,7 +586,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
601 if (!head) 586 if (!head)
602 goto out; 587 goto out;
603 skb_reserve(head, packet->overhead + MAX_HEADER); 588 skb_reserve(head, packet->overhead + MAX_HEADER);
604 sctp_packet_set_owner_w(head, sk); 589 skb_set_owner_w(head, sk);
605 590
606 /* set sctp header */ 591 /* set sctp header */
607 sh = skb_push(head, sizeof(struct sctphdr)); 592 sh = skb_push(head, sizeof(struct sctphdr));
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 9cb854b05342..c37e1c2dec9d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
212 INIT_LIST_HEAD(&q->retransmit); 212 INIT_LIST_HEAD(&q->retransmit);
213 INIT_LIST_HEAD(&q->sacked); 213 INIT_LIST_HEAD(&q->sacked);
214 INIT_LIST_HEAD(&q->abandoned); 214 INIT_LIST_HEAD(&q->abandoned);
215 sctp_sched_set_sched(asoc, SCTP_SS_FCFS); 215 sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
216} 216}
217 217
218/* Free the outqueue structure and any related pending chunks. 218/* Free the outqueue structure and any related pending chunks.
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 739f3e50120d..bf618d1b41fd 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3940,32 +3940,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
3940 unsigned int optlen) 3940 unsigned int optlen)
3941{ 3941{
3942 struct sctp_assoc_value params; 3942 struct sctp_assoc_value params;
3943 struct sctp_association *asoc;
3944 int retval = -EINVAL;
3945 3943
3946 if (optlen != sizeof(params)) 3944 if (optlen != sizeof(params))
3947 goto out; 3945 return -EINVAL;
3948
3949 if (copy_from_user(&params, optval, optlen)) {
3950 retval = -EFAULT;
3951 goto out;
3952 }
3953
3954 asoc = sctp_id2assoc(sk, params.assoc_id);
3955 if (asoc) {
3956 asoc->prsctp_enable = !!params.assoc_value;
3957 } else if (!params.assoc_id) {
3958 struct sctp_sock *sp = sctp_sk(sk);
3959 3946
3960 sp->ep->prsctp_enable = !!params.assoc_value; 3947 if (copy_from_user(&params, optval, optlen))
3961 } else { 3948 return -EFAULT;
3962 goto out;
3963 }
3964 3949
3965 retval = 0; 3950 sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
3966 3951
3967out: 3952 return 0;
3968 return retval;
3969} 3953}
3970 3954
3971static int sctp_setsockopt_default_prinfo(struct sock *sk, 3955static int sctp_setsockopt_default_prinfo(struct sock *sk,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index ffb940d3b57c..3892e7630f3a 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
535 goto out; 535 goto out;
536 } 536 }
537 537
538 stream->incnt = incnt;
539 stream->outcnt = outcnt; 538 stream->outcnt = outcnt;
540 539
541 asoc->strreset_outstanding = !!out + !!in; 540 asoc->strreset_outstanding = !!out + !!in;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 80e2119f1c70..5fbaf1901571 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -127,6 +127,8 @@ static int smc_release(struct socket *sock)
127 smc = smc_sk(sk); 127 smc = smc_sk(sk);
128 128
129 /* cleanup for a dangling non-blocking connect */ 129 /* cleanup for a dangling non-blocking connect */
130 if (smc->connect_info && sk->sk_state == SMC_INIT)
131 tcp_abort(smc->clcsock->sk, ECONNABORTED);
130 flush_work(&smc->connect_work); 132 flush_work(&smc->connect_work);
131 kfree(smc->connect_info); 133 kfree(smc->connect_info);
132 smc->connect_info = NULL; 134 smc->connect_info = NULL;
@@ -547,7 +549,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
547 549
548 mutex_lock(&smc_create_lgr_pending); 550 mutex_lock(&smc_create_lgr_pending);
549 local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, 551 local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
550 ibport, &aclc->lcl, NULL, 0); 552 ibport, ntoh24(aclc->qpn), &aclc->lcl,
553 NULL, 0);
551 if (local_contact < 0) { 554 if (local_contact < 0) {
552 if (local_contact == -ENOMEM) 555 if (local_contact == -ENOMEM)
553 reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ 556 reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -618,7 +621,7 @@ static int smc_connect_ism(struct smc_sock *smc,
618 int rc = 0; 621 int rc = 0;
619 622
620 mutex_lock(&smc_create_lgr_pending); 623 mutex_lock(&smc_create_lgr_pending);
621 local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 624 local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
622 NULL, ismdev, aclc->gid); 625 NULL, ismdev, aclc->gid);
623 if (local_contact < 0) 626 if (local_contact < 0)
624 return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); 627 return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
@@ -1083,7 +1086,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
1083 int *local_contact) 1086 int *local_contact)
1084{ 1087{
1085 /* allocate connection / link group */ 1088 /* allocate connection / link group */
1086 *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 1089 *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
1087 &pclc->lcl, NULL, 0); 1090 &pclc->lcl, NULL, 0);
1088 if (*local_contact < 0) { 1091 if (*local_contact < 0) {
1089 if (*local_contact == -ENOMEM) 1092 if (*local_contact == -ENOMEM)
@@ -1107,7 +1110,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
1107 struct smc_clc_msg_smcd *pclc_smcd; 1110 struct smc_clc_msg_smcd *pclc_smcd;
1108 1111
1109 pclc_smcd = smc_get_clc_msg_smcd(pclc); 1112 pclc_smcd = smc_get_clc_msg_smcd(pclc);
1110 *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL, 1113 *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
1111 ismdev, pclc_smcd->gid); 1114 ismdev, pclc_smcd->gid);
1112 if (*local_contact < 0) { 1115 if (*local_contact < 0) {
1113 if (*local_contact == -ENOMEM) 1116 if (*local_contact == -ENOMEM)
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index ed5dcf03fe0b..db83332ac1c8 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
81 sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, 81 sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
82 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); 82 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
83 BUILD_BUG_ON_MSG( 83 BUILD_BUG_ON_MSG(
84 sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE, 84 offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
85 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); 85 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
86 BUILD_BUG_ON_MSG( 86 BUILD_BUG_ON_MSG(
87 sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, 87 sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
@@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
177int smcd_cdc_msg_send(struct smc_connection *conn) 177int smcd_cdc_msg_send(struct smc_connection *conn)
178{ 178{
179 struct smc_sock *smc = container_of(conn, struct smc_sock, conn); 179 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
180 union smc_host_cursor curs;
180 struct smcd_cdc_msg cdc; 181 struct smcd_cdc_msg cdc;
181 int rc, diff; 182 int rc, diff;
182 183
183 memset(&cdc, 0, sizeof(cdc)); 184 memset(&cdc, 0, sizeof(cdc));
184 cdc.common.type = SMC_CDC_MSG_TYPE; 185 cdc.common.type = SMC_CDC_MSG_TYPE;
185 cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap; 186 curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
186 cdc.prod_count = conn->local_tx_ctrl.prod.count; 187 cdc.prod.wrap = curs.wrap;
187 188 cdc.prod.count = curs.count;
188 cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap; 189 curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
189 cdc.cons_count = conn->local_tx_ctrl.cons.count; 190 cdc.cons.wrap = curs.wrap;
190 cdc.prod_flags = conn->local_tx_ctrl.prod_flags; 191 cdc.cons.count = curs.count;
191 cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; 192 cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
193 cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
192 rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); 194 rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
193 if (rc) 195 if (rc)
194 return rc; 196 return rc;
195 smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons, 197 smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
196 conn);
197 /* Calculate transmitted data and increment free send buffer space */ 198 /* Calculate transmitted data and increment free send buffer space */
198 diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, 199 diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
199 &conn->tx_curs_sent); 200 &conn->tx_curs_sent);
@@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
331static void smcd_cdc_rx_tsklet(unsigned long data) 332static void smcd_cdc_rx_tsklet(unsigned long data)
332{ 333{
333 struct smc_connection *conn = (struct smc_connection *)data; 334 struct smc_connection *conn = (struct smc_connection *)data;
335 struct smcd_cdc_msg *data_cdc;
334 struct smcd_cdc_msg cdc; 336 struct smcd_cdc_msg cdc;
335 struct smc_sock *smc; 337 struct smc_sock *smc;
336 338
337 if (!conn) 339 if (!conn)
338 return; 340 return;
339 341
340 memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc)); 342 data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
343 smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
344 smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
341 smc = container_of(conn, struct smc_sock, conn); 345 smc = container_of(conn, struct smc_sock, conn);
342 smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); 346 smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
343} 347}
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 934df4473a7c..b5bfe38c7f9b 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -48,21 +48,31 @@ struct smc_cdc_msg {
48 struct smc_cdc_producer_flags prod_flags; 48 struct smc_cdc_producer_flags prod_flags;
49 struct smc_cdc_conn_state_flags conn_state_flags; 49 struct smc_cdc_conn_state_flags conn_state_flags;
50 u8 reserved[18]; 50 u8 reserved[18];
51} __packed; /* format defined in RFC7609 */ 51};
52
53/* SMC-D cursor format */
54union smcd_cdc_cursor {
55 struct {
56 u16 wrap;
57 u32 count;
58 struct smc_cdc_producer_flags prod_flags;
59 struct smc_cdc_conn_state_flags conn_state_flags;
60 } __packed;
61#ifdef KERNEL_HAS_ATOMIC64
62 atomic64_t acurs; /* for atomic processing */
63#else
64 u64 acurs; /* for atomic processing */
65#endif
66} __aligned(8);
52 67
53/* CDC message for SMC-D */ 68/* CDC message for SMC-D */
54struct smcd_cdc_msg { 69struct smcd_cdc_msg {
55 struct smc_wr_rx_hdr common; /* Type = 0xFE */ 70 struct smc_wr_rx_hdr common; /* Type = 0xFE */
56 u8 res1[7]; 71 u8 res1[7];
57 u16 prod_wrap; 72 union smcd_cdc_cursor prod;
58 u32 prod_count; 73 union smcd_cdc_cursor cons;
59 u8 res2[2];
60 u16 cons_wrap;
61 u32 cons_count;
62 struct smc_cdc_producer_flags prod_flags;
63 struct smc_cdc_conn_state_flags conn_state_flags;
64 u8 res3[8]; 74 u8 res3[8];
65} __packed; 75} __aligned(8);
66 76
67static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) 77static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
68{ 78{
@@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
135#endif 145#endif
136} 146}
137 147
148static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
149 union smcd_cdc_cursor *src,
150 struct smc_connection *conn)
151{
152#ifndef KERNEL_HAS_ATOMIC64
153 unsigned long flags;
154
155 spin_lock_irqsave(&conn->acurs_lock, flags);
156 tgt->acurs = src->acurs;
157 spin_unlock_irqrestore(&conn->acurs_lock, flags);
158#else
159 atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
160#endif
161}
162
138/* calculate cursor difference between old and new, where old <= new */ 163/* calculate cursor difference between old and new, where old <= new */
139static inline int smc_curs_diff(unsigned int size, 164static inline int smc_curs_diff(unsigned int size,
140 union smc_host_cursor *old, 165 union smc_host_cursor *old,
@@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
222static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local, 247static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
223 struct smcd_cdc_msg *peer) 248 struct smcd_cdc_msg *peer)
224{ 249{
225 local->prod.wrap = peer->prod_wrap; 250 union smc_host_cursor temp;
226 local->prod.count = peer->prod_count; 251
227 local->cons.wrap = peer->cons_wrap; 252 temp.wrap = peer->prod.wrap;
228 local->cons.count = peer->cons_count; 253 temp.count = peer->prod.count;
229 local->prod_flags = peer->prod_flags; 254 atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));
230 local->conn_state_flags = peer->conn_state_flags; 255
256 temp.wrap = peer->cons.wrap;
257 temp.count = peer->cons.count;
258 atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
259 local->prod_flags = peer->cons.prod_flags;
260 local->conn_state_flags = peer->cons.conn_state_flags;
231} 261}
232 262
233static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, 263static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 18daebcef181..1c9fa7f0261a 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -184,6 +184,8 @@ free:
184 184
185 if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) 185 if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
186 smc_llc_link_inactive(lnk); 186 smc_llc_link_inactive(lnk);
187 if (lgr->is_smcd)
188 smc_ism_signal_shutdown(lgr);
187 smc_lgr_free(lgr); 189 smc_lgr_free(lgr);
188 } 190 }
189} 191}
@@ -485,7 +487,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
485} 487}
486 488
487/* Called when SMC-D device is terminated or peer is lost */ 489/* Called when SMC-D device is terminated or peer is lost */
488void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) 490void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
489{ 491{
490 struct smc_link_group *lgr, *l; 492 struct smc_link_group *lgr, *l;
491 LIST_HEAD(lgr_free_list); 493 LIST_HEAD(lgr_free_list);
@@ -495,7 +497,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
495 list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { 497 list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
496 if (lgr->is_smcd && lgr->smcd == dev && 498 if (lgr->is_smcd && lgr->smcd == dev &&
497 (!peer_gid || lgr->peer_gid == peer_gid) && 499 (!peer_gid || lgr->peer_gid == peer_gid) &&
498 !list_empty(&lgr->list)) { 500 (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
499 __smc_lgr_terminate(lgr); 501 __smc_lgr_terminate(lgr);
500 list_move(&lgr->list, &lgr_free_list); 502 list_move(&lgr->list, &lgr_free_list);
501 } 503 }
@@ -506,6 +508,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
506 list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { 508 list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
507 list_del_init(&lgr->list); 509 list_del_init(&lgr->list);
508 cancel_delayed_work_sync(&lgr->free_work); 510 cancel_delayed_work_sync(&lgr->free_work);
511 if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
512 smc_ism_signal_shutdown(lgr);
509 smc_lgr_free(lgr); 513 smc_lgr_free(lgr);
510 } 514 }
511} 515}
@@ -559,7 +563,7 @@ out:
559 563
560static bool smcr_lgr_match(struct smc_link_group *lgr, 564static bool smcr_lgr_match(struct smc_link_group *lgr,
561 struct smc_clc_msg_local *lcl, 565 struct smc_clc_msg_local *lcl,
562 enum smc_lgr_role role) 566 enum smc_lgr_role role, u32 clcqpn)
563{ 567{
564 return !memcmp(lgr->peer_systemid, lcl->id_for_peer, 568 return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
565 SMC_SYSTEMID_LEN) && 569 SMC_SYSTEMID_LEN) &&
@@ -567,7 +571,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
567 SMC_GID_SIZE) && 571 SMC_GID_SIZE) &&
568 !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, 572 !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
569 sizeof(lcl->mac)) && 573 sizeof(lcl->mac)) &&
570 lgr->role == role; 574 lgr->role == role &&
575 (lgr->role == SMC_SERV ||
576 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
571} 577}
572 578
573static bool smcd_lgr_match(struct smc_link_group *lgr, 579static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -578,7 +584,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
578 584
579/* create a new SMC connection (and a new link group if necessary) */ 585/* create a new SMC connection (and a new link group if necessary) */
580int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, 586int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
581 struct smc_ib_device *smcibdev, u8 ibport, 587 struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
582 struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, 588 struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
583 u64 peer_gid) 589 u64 peer_gid)
584{ 590{
@@ -603,7 +609,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
603 list_for_each_entry(lgr, &smc_lgr_list.list, list) { 609 list_for_each_entry(lgr, &smc_lgr_list.list, list) {
604 write_lock_bh(&lgr->conns_lock); 610 write_lock_bh(&lgr->conns_lock);
605 if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) : 611 if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
606 smcr_lgr_match(lgr, lcl, role)) && 612 smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
607 !lgr->sync_err && 613 !lgr->sync_err &&
608 lgr->vlan_id == vlan_id && 614 lgr->vlan_id == vlan_id &&
609 (role == SMC_CLNT || 615 (role == SMC_CLNT ||
@@ -1024,6 +1030,8 @@ void smc_core_exit(void)
1024 smc_llc_link_inactive(lnk); 1030 smc_llc_link_inactive(lnk);
1025 } 1031 }
1026 cancel_delayed_work_sync(&lgr->free_work); 1032 cancel_delayed_work_sync(&lgr->free_work);
1033 if (lgr->is_smcd)
1034 smc_ism_signal_shutdown(lgr);
1027 smc_lgr_free(lgr); /* free link group */ 1035 smc_lgr_free(lgr); /* free link group */
1028 } 1036 }
1029} 1037}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index c156674733c9..cf98f4d6093e 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -247,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr);
247void smc_lgr_forget(struct smc_link_group *lgr); 247void smc_lgr_forget(struct smc_link_group *lgr);
248void smc_lgr_terminate(struct smc_link_group *lgr); 248void smc_lgr_terminate(struct smc_link_group *lgr);
249void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); 249void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
250void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid); 250void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
251 unsigned short vlan);
251int smc_buf_create(struct smc_sock *smc, bool is_smcd); 252int smc_buf_create(struct smc_sock *smc, bool is_smcd);
252int smc_uncompress_bufsize(u8 compressed); 253int smc_uncompress_bufsize(u8 compressed);
253int smc_rmb_rtoken_handling(struct smc_connection *conn, 254int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -262,7 +263,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
262 263
263void smc_conn_free(struct smc_connection *conn); 264void smc_conn_free(struct smc_connection *conn);
264int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, 265int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
265 struct smc_ib_device *smcibdev, u8 ibport, 266 struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
266 struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, 267 struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
267 u64 peer_gid); 268 u64 peer_gid);
268void smcd_conn_free(struct smc_connection *conn); 269void smcd_conn_free(struct smc_connection *conn);
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index e36f21ce7252..2fff79db1a59 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -187,22 +187,28 @@ struct smc_ism_event_work {
187#define ISM_EVENT_REQUEST 0x0001 187#define ISM_EVENT_REQUEST 0x0001
188#define ISM_EVENT_RESPONSE 0x0002 188#define ISM_EVENT_RESPONSE 0x0002
189#define ISM_EVENT_REQUEST_IR 0x00000001 189#define ISM_EVENT_REQUEST_IR 0x00000001
190#define ISM_EVENT_CODE_SHUTDOWN 0x80
190#define ISM_EVENT_CODE_TESTLINK 0x83 191#define ISM_EVENT_CODE_TESTLINK 0x83
191 192
193union smcd_sw_event_info {
194 u64 info;
195 struct {
196 u8 uid[SMC_LGR_ID_SIZE];
197 unsigned short vlan_id;
198 u16 code;
199 };
200};
201
192static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) 202static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
193{ 203{
194 union { 204 union smcd_sw_event_info ev_info;
195 u64 info;
196 struct {
197 u32 uid;
198 unsigned short vlanid;
199 u16 code;
200 };
201 } ev_info;
202 205
206 ev_info.info = wrk->event.info;
203 switch (wrk->event.code) { 207 switch (wrk->event.code) {
208 case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */
209 smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
210 break;
204 case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ 211 case ISM_EVENT_CODE_TESTLINK: /* Activity timer */
205 ev_info.info = wrk->event.info;
206 if (ev_info.code == ISM_EVENT_REQUEST) { 212 if (ev_info.code == ISM_EVENT_REQUEST) {
207 ev_info.code = ISM_EVENT_RESPONSE; 213 ev_info.code = ISM_EVENT_RESPONSE;
208 wrk->smcd->ops->signal_event(wrk->smcd, 214 wrk->smcd->ops->signal_event(wrk->smcd,
@@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
215 } 221 }
216} 222}
217 223
224int smc_ism_signal_shutdown(struct smc_link_group *lgr)
225{
226 int rc;
227 union smcd_sw_event_info ev_info;
228
229 memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
230 ev_info.vlan_id = lgr->vlan_id;
231 ev_info.code = ISM_EVENT_REQUEST;
232 rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
233 ISM_EVENT_REQUEST_IR,
234 ISM_EVENT_CODE_SHUTDOWN,
235 ev_info.info);
236 return rc;
237}
238
218/* worker for SMC-D events */ 239/* worker for SMC-D events */
219static void smc_ism_event_work(struct work_struct *work) 240static void smc_ism_event_work(struct work_struct *work)
220{ 241{
@@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work)
223 244
224 switch (wrk->event.type) { 245 switch (wrk->event.type) {
225 case ISM_EVENT_GID: /* GID event, token is peer GID */ 246 case ISM_EVENT_GID: /* GID event, token is peer GID */
226 smc_smcd_terminate(wrk->smcd, wrk->event.tok); 247 smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
227 break; 248 break;
228 case ISM_EVENT_DMB: 249 case ISM_EVENT_DMB:
229 break; 250 break;
@@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
289 spin_unlock(&smcd_dev_list.lock); 310 spin_unlock(&smcd_dev_list.lock);
290 flush_workqueue(smcd->event_wq); 311 flush_workqueue(smcd->event_wq);
291 destroy_workqueue(smcd->event_wq); 312 destroy_workqueue(smcd->event_wq);
292 smc_smcd_terminate(smcd, 0); 313 smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
293 314
294 device_del(&smcd->dev); 315 device_del(&smcd->dev);
295} 316}
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index aee45b860b79..4da946cbfa29 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
45int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); 45int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
46int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, 46int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
47 void *data, size_t len); 47 void *data, size_t len);
48int smc_ism_signal_shutdown(struct smc_link_group *lgr);
48#endif 49#endif
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 3c458d279855..c2694750a6a8 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link,
215 215
216 pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); 216 pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
217 if (pend->idx < link->wr_tx_cnt) { 217 if (pend->idx < link->wr_tx_cnt) {
218 u32 idx = pend->idx;
219
218 /* clear the full struct smc_wr_tx_pend including .priv */ 220 /* clear the full struct smc_wr_tx_pend including .priv */
219 memset(&link->wr_tx_pends[pend->idx], 0, 221 memset(&link->wr_tx_pends[pend->idx], 0,
220 sizeof(link->wr_tx_pends[pend->idx])); 222 sizeof(link->wr_tx_pends[pend->idx]));
221 memset(&link->wr_tx_bufs[pend->idx], 0, 223 memset(&link->wr_tx_bufs[pend->idx], 0,
222 sizeof(link->wr_tx_bufs[pend->idx])); 224 sizeof(link->wr_tx_bufs[pend->idx]));
223 test_and_clear_bit(pend->idx, link->wr_tx_mask); 225 test_and_clear_bit(idx, link->wr_tx_mask);
224 return 1; 226 return 1;
225 } 227 }
226 228
diff --git a/net/socket.c b/net/socket.c
index 593826e11a53..334fcc617ef2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -853,7 +853,7 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
853 struct socket *sock = file->private_data; 853 struct socket *sock = file->private_data;
854 854
855 if (unlikely(!sock->ops->splice_read)) 855 if (unlikely(!sock->ops->splice_read))
856 return -EINVAL; 856 return generic_file_splice_read(file, ppos, pipe, len, flags);
857 857
858 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 858 return sock->ops->splice_read(sock, ppos, pipe, len, flags);
859} 859}
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index d8831b988b1e..ab4a3be1542a 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
281{ 281{
282 struct auth_cred *acred = &container_of(cred, struct generic_cred, 282 struct auth_cred *acred = &container_of(cred, struct generic_cred,
283 gc_base)->acred; 283 gc_base)->acred;
284 bool ret; 284 return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
285
286 get_rpccred(cred);
287 ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
288 put_rpccred(cred);
289
290 return ret;
291} 285}
292 286
293static const struct rpc_credops generic_credops = { 287static const struct rpc_credops generic_credops = {
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 30f970cdc7f6..5d3f252659f1 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1239,36 +1239,59 @@ gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1239 return &gss_auth->rpc_auth; 1239 return &gss_auth->rpc_auth;
1240} 1240}
1241 1241
1242static struct gss_cred *
1243gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
1244{
1245 struct gss_cred *new;
1246
1247 /* Make a copy of the cred so that we can reference count it */
1248 new = kzalloc(sizeof(*gss_cred), GFP_NOIO);
1249 if (new) {
1250 struct auth_cred acred = {
1251 .uid = gss_cred->gc_base.cr_uid,
1252 };
1253 struct gss_cl_ctx *ctx =
1254 rcu_dereference_protected(gss_cred->gc_ctx, 1);
1255
1256 rpcauth_init_cred(&new->gc_base, &acred,
1257 &gss_auth->rpc_auth,
1258 &gss_nullops);
1259 new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
1260 new->gc_service = gss_cred->gc_service;
1261 new->gc_principal = gss_cred->gc_principal;
1262 kref_get(&gss_auth->kref);
1263 rcu_assign_pointer(new->gc_ctx, ctx);
1264 gss_get_ctx(ctx);
1265 }
1266 return new;
1267}
1268
1242/* 1269/*
1243 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call 1270 * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
1244 * to the server with the GSS control procedure field set to 1271 * to the server with the GSS control procedure field set to
1245 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release 1272 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
1246 * all RPCSEC_GSS state associated with that context. 1273 * all RPCSEC_GSS state associated with that context.
1247 */ 1274 */
1248static int 1275static void
1249gss_destroying_context(struct rpc_cred *cred) 1276gss_send_destroy_context(struct rpc_cred *cred)
1250{ 1277{
1251 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 1278 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1252 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 1279 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1253 struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1); 1280 struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1281 struct gss_cred *new;
1254 struct rpc_task *task; 1282 struct rpc_task *task;
1255 1283
1256 if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) 1284 new = gss_dup_cred(gss_auth, gss_cred);
1257 return 0; 1285 if (new) {
1258 1286 ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1259 ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1260 cred->cr_ops = &gss_nullops;
1261
1262 /* Take a reference to ensure the cred will be destroyed either
1263 * by the RPC call or by the put_rpccred() below */
1264 get_rpccred(cred);
1265 1287
1266 task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT); 1288 task = rpc_call_null(gss_auth->client, &new->gc_base,
1267 if (!IS_ERR(task)) 1289 RPC_TASK_ASYNC|RPC_TASK_SOFT);
1268 rpc_put_task(task); 1290 if (!IS_ERR(task))
1291 rpc_put_task(task);
1269 1292
1270 put_rpccred(cred); 1293 put_rpccred(&new->gc_base);
1271 return 1; 1294 }
1272} 1295}
1273 1296
1274/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure 1297/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
@@ -1330,8 +1353,8 @@ static void
1330gss_destroy_cred(struct rpc_cred *cred) 1353gss_destroy_cred(struct rpc_cred *cred)
1331{ 1354{
1332 1355
1333 if (gss_destroying_context(cred)) 1356 if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
1334 return; 1357 gss_send_destroy_context(cred);
1335 gss_destroy_nullcred(cred); 1358 gss_destroy_nullcred(cred);
1336} 1359}
1337 1360
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2bbb8d38d2bf..f302c6eb8779 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode);
546static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, 546static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
547 size_t nbytes) 547 size_t nbytes)
548{ 548{
549 static __be32 *p; 549 __be32 *p;
550 int space_left; 550 int space_left;
551 int frag1bytes, frag2bytes; 551 int frag1bytes, frag2bytes;
552 552
@@ -673,11 +673,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
673 WARN_ON_ONCE(xdr->iov); 673 WARN_ON_ONCE(xdr->iov);
674 return; 674 return;
675 } 675 }
676 if (fraglen) { 676 if (fraglen)
677 xdr->end = head->iov_base + head->iov_len; 677 xdr->end = head->iov_base + head->iov_len;
678 xdr->page_ptr--;
679 }
680 /* (otherwise assume xdr->end is already set) */ 678 /* (otherwise assume xdr->end is already set) */
679 xdr->page_ptr--;
681 head->iov_len = len; 680 head->iov_len = len;
682 buf->len = len; 681 buf->len = len;
683 xdr->p = head->iov_base + head->iov_len; 682 xdr->p = head->iov_base + head->iov_len;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 2830709957bd..c138d68e8a69 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
166 166
167 /* Apply trial address if we just left trial period */ 167 /* Apply trial address if we just left trial period */
168 if (!trial && !self) { 168 if (!trial && !self) {
169 tipc_net_finalize(net, tn->trial_addr); 169 tipc_sched_net_finalize(net, tn->trial_addr);
170 msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
170 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); 171 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
171 } 172 }
172 173
@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t)
300 goto exit; 301 goto exit;
301 } 302 }
302 303
303 /* Trial period over ? */ 304 /* Did we just leave trial period ? */
304 if (!time_before(jiffies, tn->addr_trial_end)) { 305 if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
305 /* Did we just leave it ? */ 306 mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
306 if (!tipc_own_addr(net)) 307 spin_unlock_bh(&d->lock);
307 tipc_net_finalize(net, tn->trial_addr); 308 tipc_sched_net_finalize(net, tn->trial_addr);
308 309 return;
309 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
310 msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
311 } 310 }
312 311
313 /* Adjust timeout interval according to discovery phase */ 312 /* Adjust timeout interval according to discovery phase */
@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t)
319 d->timer_intv = TIPC_DISC_SLOW; 318 d->timer_intv = TIPC_DISC_SLOW;
320 else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST) 319 else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
321 d->timer_intv = TIPC_DISC_FAST; 320 d->timer_intv = TIPC_DISC_FAST;
321 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
322 msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
322 } 323 }
323 324
324 mod_timer(&d->timer, jiffies + d->timer_intv); 325 mod_timer(&d->timer, jiffies + d->timer_intv);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 201c3b5bc96b..836727e363c4 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1594 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) 1594 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1595 l->priority = peers_prio; 1595 l->priority = peers_prio;
1596 1596
1597 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ 1597 /* If peer is going down we want full re-establish cycle */
1598 if (msg_peer_stopping(hdr)) 1598 if (msg_peer_stopping(hdr)) {
1599 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1599 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1600 else if ((mtyp == RESET_MSG) || !link_is_up(l)) 1600 break;
1601 }
1602 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1603 if (mtyp == RESET_MSG || !link_is_up(l))
1601 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); 1604 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1602 1605
1603 /* ACTIVATE_MSG takes up link if it was already locally reset */ 1606 /* ACTIVATE_MSG takes up link if it was already locally reset */
1604 if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING)) 1607 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
1605 rc = TIPC_LINK_UP_EVT; 1608 rc = TIPC_LINK_UP_EVT;
1606 1609
1607 l->peer_session = msg_session(hdr); 1610 l->peer_session = msg_session(hdr);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 62199cf5a56c..f076edb74338 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -104,6 +104,14 @@
104 * - A local spin_lock protecting the queue of subscriber events. 104 * - A local spin_lock protecting the queue of subscriber events.
105*/ 105*/
106 106
107struct tipc_net_work {
108 struct work_struct work;
109 struct net *net;
110 u32 addr;
111};
112
113static void tipc_net_finalize(struct net *net, u32 addr);
114
107int tipc_net_init(struct net *net, u8 *node_id, u32 addr) 115int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
108{ 116{
109 if (tipc_own_id(net)) { 117 if (tipc_own_id(net)) {
@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
119 return 0; 127 return 0;
120} 128}
121 129
122void tipc_net_finalize(struct net *net, u32 addr) 130static void tipc_net_finalize(struct net *net, u32 addr)
123{ 131{
124 struct tipc_net *tn = tipc_net(net); 132 struct tipc_net *tn = tipc_net(net);
125 133
126 if (!cmpxchg(&tn->node_addr, 0, addr)) { 134 if (cmpxchg(&tn->node_addr, 0, addr))
127 tipc_set_node_addr(net, addr); 135 return;
128 tipc_named_reinit(net); 136 tipc_set_node_addr(net, addr);
129 tipc_sk_reinit(net); 137 tipc_named_reinit(net);
130 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, 138 tipc_sk_reinit(net);
131 TIPC_CLUSTER_SCOPE, 0, addr); 139 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
132 } 140 TIPC_CLUSTER_SCOPE, 0, addr);
141}
142
143static void tipc_net_finalize_work(struct work_struct *work)
144{
145 struct tipc_net_work *fwork;
146
147 fwork = container_of(work, struct tipc_net_work, work);
148 tipc_net_finalize(fwork->net, fwork->addr);
149 kfree(fwork);
150}
151
152void tipc_sched_net_finalize(struct net *net, u32 addr)
153{
154 struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
155
156 if (!fwork)
157 return;
158 INIT_WORK(&fwork->work, tipc_net_finalize_work);
159 fwork->net = net;
160 fwork->addr = addr;
161 schedule_work(&fwork->work);
133} 162}
134 163
135void tipc_net_stop(struct net *net) 164void tipc_net_stop(struct net *net)
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 09ad02b50bb1..b7f2e364eb99 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -42,7 +42,7 @@
42extern const struct nla_policy tipc_nl_net_policy[]; 42extern const struct nla_policy tipc_nl_net_policy[];
43 43
44int tipc_net_init(struct net *net, u8 *node_id, u32 addr); 44int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
45void tipc_net_finalize(struct net *net, u32 addr); 45void tipc_sched_net_finalize(struct net *net, u32 addr);
46void tipc_net_stop(struct net *net); 46void tipc_net_stop(struct net *net);
47int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); 47int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
48int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); 48int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2afc4f8c37a7..488019766433 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -584,12 +584,15 @@ static void tipc_node_clear_links(struct tipc_node *node)
584/* tipc_node_cleanup - delete nodes that does not 584/* tipc_node_cleanup - delete nodes that does not
585 * have active links for NODE_CLEANUP_AFTER time 585 * have active links for NODE_CLEANUP_AFTER time
586 */ 586 */
587static int tipc_node_cleanup(struct tipc_node *peer) 587static bool tipc_node_cleanup(struct tipc_node *peer)
588{ 588{
589 struct tipc_net *tn = tipc_net(peer->net); 589 struct tipc_net *tn = tipc_net(peer->net);
590 bool deleted = false; 590 bool deleted = false;
591 591
592 spin_lock_bh(&tn->node_list_lock); 592 /* If lock held by tipc_node_stop() the node will be deleted anyway */
593 if (!spin_trylock_bh(&tn->node_list_lock))
594 return false;
595
593 tipc_node_write_lock(peer); 596 tipc_node_write_lock(peer);
594 597
595 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) { 598 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 636e6131769d..b57b1be7252b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1555,16 +1555,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1555/** 1555/**
1556 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1556 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1557 * @m: descriptor for message info 1557 * @m: descriptor for message info
1558 * @msg: received message header 1558 * @skb: received message buffer
1559 * @tsk: TIPC port associated with message 1559 * @tsk: TIPC port associated with message
1560 * 1560 *
1561 * Note: Ancillary data is not captured if not requested by receiver. 1561 * Note: Ancillary data is not captured if not requested by receiver.
1562 * 1562 *
1563 * Returns 0 if successful, otherwise errno 1563 * Returns 0 if successful, otherwise errno
1564 */ 1564 */
1565static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1565static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1566 struct tipc_sock *tsk) 1566 struct tipc_sock *tsk)
1567{ 1567{
1568 struct tipc_msg *msg;
1568 u32 anc_data[3]; 1569 u32 anc_data[3];
1569 u32 err; 1570 u32 err;
1570 u32 dest_type; 1571 u32 dest_type;
@@ -1573,6 +1574,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1573 1574
1574 if (likely(m->msg_controllen == 0)) 1575 if (likely(m->msg_controllen == 0))
1575 return 0; 1576 return 0;
1577 msg = buf_msg(skb);
1576 1578
1577 /* Optionally capture errored message object(s) */ 1579 /* Optionally capture errored message object(s) */
1578 err = msg ? msg_errcode(msg) : 0; 1580 err = msg ? msg_errcode(msg) : 0;
@@ -1583,6 +1585,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1583 if (res) 1585 if (res)
1584 return res; 1586 return res;
1585 if (anc_data[1]) { 1587 if (anc_data[1]) {
1588 if (skb_linearize(skb))
1589 return -ENOMEM;
1590 msg = buf_msg(skb);
1586 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 1591 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1587 msg_data(msg)); 1592 msg_data(msg));
1588 if (res) 1593 if (res)
@@ -1744,9 +1749,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1744 1749
1745 /* Collect msg meta data, including error code and rejected data */ 1750 /* Collect msg meta data, including error code and rejected data */
1746 tipc_sk_set_orig_addr(m, skb); 1751 tipc_sk_set_orig_addr(m, skb);
1747 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1752 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1748 if (unlikely(rc)) 1753 if (unlikely(rc))
1749 goto exit; 1754 goto exit;
1755 hdr = buf_msg(skb);
1750 1756
1751 /* Capture data if non-error msg, otherwise just set return value */ 1757 /* Capture data if non-error msg, otherwise just set return value */
1752 if (likely(!err)) { 1758 if (likely(!err)) {
@@ -1856,9 +1862,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1856 /* Collect msg meta data, incl. error code and rejected data */ 1862 /* Collect msg meta data, incl. error code and rejected data */
1857 if (!copied) { 1863 if (!copied) {
1858 tipc_sk_set_orig_addr(m, skb); 1864 tipc_sk_set_orig_addr(m, skb);
1859 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1865 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1860 if (rc) 1866 if (rc)
1861 break; 1867 break;
1868 hdr = buf_msg(skb);
1862 } 1869 }
1863 1870
1864 /* Copy data if msg ok, otherwise return error/partial data */ 1871 /* Copy data if msg ok, otherwise return error/partial data */
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a8e7ba9f73e8..6a6be9f440cf 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -236,10 +236,8 @@ ifdef CONFIG_GCOV_KERNEL
236objtool_args += --no-unreachable 236objtool_args += --no-unreachable
237endif 237endif
238ifdef CONFIG_RETPOLINE 238ifdef CONFIG_RETPOLINE
239ifneq ($(RETPOLINE_CFLAGS),)
240 objtool_args += --retpoline 239 objtool_args += --retpoline
241endif 240endif
242endif
243 241
244 242
245ifdef CONFIG_MODVERSIONS 243ifdef CONFIG_MODVERSIONS
diff --git a/scripts/faddr2line b/scripts/faddr2line
index a0149db00be7..6c6439f69a72 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -71,7 +71,7 @@ die() {
71 71
72# Try to figure out the source directory prefix so we can remove it from the 72# Try to figure out the source directory prefix so we can remove it from the
73# addr2line output. HACK ALERT: This assumes that start_kernel() is in 73# addr2line output. HACK ALERT: This assumes that start_kernel() is in
74# kernel/init.c! This only works for vmlinux. Otherwise it falls back to 74# init/main.c! This only works for vmlinux. Otherwise it falls back to
75# printing the absolute path. 75# printing the absolute path.
76find_dir_prefix() { 76find_dir_prefix() {
77 local objfile=$1 77 local objfile=$1
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index da66e7742282..0ef906499646 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -102,7 +102,8 @@ if [ ! -r "$INITFILE" ]; then
102fi 102fi
103 103
104MERGE_LIST=$* 104MERGE_LIST=$*
105SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p" 105SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p"
106SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p"
106 107
107TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) 108TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
108 109
@@ -116,7 +117,7 @@ for MERGE_FILE in $MERGE_LIST ; do
116 echo "The merge file '$MERGE_FILE' does not exist. Exit." >&2 117 echo "The merge file '$MERGE_FILE' does not exist. Exit." >&2
117 exit 1 118 exit 1
118 fi 119 fi
119 CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE) 120 CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE)
120 121
121 for CFG in $CFG_LIST ; do 122 for CFG in $CFG_LIST ; do
122 grep -q -w $CFG $TMP_FILE || continue 123 grep -q -w $CFG $TMP_FILE || continue
@@ -159,7 +160,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET
159 160
160 161
161# Check all specified config values took (might have missed-dependency issues) 162# Check all specified config values took (might have missed-dependency issues)
162for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do 163for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do
163 164
164 REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE) 165 REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
165 ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG") 166 ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG")
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 90c9a8ac7adb..f43a274f4f1d 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -81,11 +81,11 @@ else
81 cp System.map "$tmpdir/boot/System.map-$version" 81 cp System.map "$tmpdir/boot/System.map-$version"
82 cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version" 82 cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
83fi 83fi
84cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path" 84cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
85 85
86if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then 86if grep -q "^CONFIG_OF_EARLY_FLATTREE=y" $KCONFIG_CONFIG ; then
87 # Only some architectures with OF support have this target 87 # Only some architectures with OF support have this target
88 if grep -q dtbs_install "${srctree}/arch/$SRCARCH/Makefile"; then 88 if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
89 $MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install 89 $MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
90 fi 90 fi
91fi 91fi
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 663a7f343b42..edcad61fe3cd 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -88,6 +88,7 @@ set_debarch() {
88version=$KERNELRELEASE 88version=$KERNELRELEASE
89if [ -n "$KDEB_PKGVERSION" ]; then 89if [ -n "$KDEB_PKGVERSION" ]; then
90 packageversion=$KDEB_PKGVERSION 90 packageversion=$KDEB_PKGVERSION
91 revision=${packageversion##*-}
91else 92else
92 revision=$(cat .version 2>/dev/null||echo 1) 93 revision=$(cat .version 2>/dev/null||echo 1)
93 packageversion=$version-$revision 94 packageversion=$version-$revision
@@ -205,10 +206,12 @@ cat <<EOF > debian/rules
205#!$(command -v $MAKE) -f 206#!$(command -v $MAKE) -f
206 207
207build: 208build:
208 \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= 209 \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
210 KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
209 211
210binary-arch: 212binary-arch:
211 \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= intdeb-pkg 213 \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
214 KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
212 215
213clean: 216clean:
214 rm -rf debian/*tmp debian/files 217 rm -rf debian/*tmp debian/files
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index e05646dc24dc..009147d4718e 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -12,6 +12,7 @@
12# how we were called determines which rpms we build and how we build them 12# how we were called determines which rpms we build and how we build them
13if [ "$1" = prebuilt ]; then 13if [ "$1" = prebuilt ]; then
14 S=DEL 14 S=DEL
15 MAKE="$MAKE -f $srctree/Makefile"
15else 16else
16 S= 17 S=
17fi 18fi
@@ -78,19 +79,19 @@ $S %prep
78$S %setup -q 79$S %setup -q
79$S 80$S
80$S %build 81$S %build
81$S make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release} 82$S $MAKE %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
82$S 83$S
83 %install 84 %install
84 mkdir -p %{buildroot}/boot 85 mkdir -p %{buildroot}/boot
85 %ifarch ia64 86 %ifarch ia64
86 mkdir -p %{buildroot}/boot/efi 87 mkdir -p %{buildroot}/boot/efi
87 cp \$(make image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE 88 cp \$($MAKE image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
88 ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/ 89 ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/
89 %else 90 %else
90 cp \$(make image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE 91 cp \$($MAKE image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
91 %endif 92 %endif
92$M make %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} KBUILD_SRC= modules_install 93$M $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install
93 make %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr KBUILD_SRC= headers_install 94 $MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
94 cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE 95 cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE
95 cp .config %{buildroot}/boot/config-$KERNELRELEASE 96 cp .config %{buildroot}/boot/config-$KERNELRELEASE
96 bzip2 -9 --keep vmlinux 97 bzip2 -9 --keep vmlinux
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 79f7dd57d571..71f39410691b 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -74,7 +74,7 @@ scm_version()
74 fi 74 fi
75 75
76 # Check for uncommitted changes 76 # Check for uncommitted changes
77 if git status -uno --porcelain | grep -qv '^.. scripts/package'; then 77 if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
78 printf '%s' -dirty 78 printf '%s' -dirty
79 fi 79 fi
80 80
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 839e190bbd7a..5056fb3b897d 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -168,7 +168,6 @@ class id_parser(object):
168 self.curline = 0 168 self.curline = 0
169 try: 169 try:
170 for line in fd: 170 for line in fd:
171 line = line.decode(locale.getpreferredencoding(False), errors='ignore')
172 self.curline += 1 171 self.curline += 1
173 if self.curline > maxlines: 172 if self.curline > maxlines:
174 break 173 break
diff --git a/scripts/unifdef.c b/scripts/unifdef.c
index 7493c0ee51cc..db00e3e30a59 100644
--- a/scripts/unifdef.c
+++ b/scripts/unifdef.c
@@ -395,7 +395,7 @@ usage(void)
395 * When we have processed a group that starts off with a known-false 395 * When we have processed a group that starts off with a known-false
396 * #if/#elif sequence (which has therefore been deleted) followed by a 396 * #if/#elif sequence (which has therefore been deleted) followed by a
397 * #elif that we don't understand and therefore must keep, we edit the 397 * #elif that we don't understand and therefore must keep, we edit the
398 * latter into a #if to keep the nesting correct. We use strncpy() to 398 * latter into a #if to keep the nesting correct. We use memcpy() to
399 * overwrite the 4 byte token "elif" with "if " without a '\0' byte. 399 * overwrite the 4 byte token "elif" with "if " without a '\0' byte.
400 * 400 *
401 * When we find a true #elif in a group, the following block will 401 * When we find a true #elif in a group, the following block will
@@ -450,7 +450,7 @@ static void Idrop (void) { Fdrop(); ignoreon(); }
450static void Itrue (void) { Ftrue(); ignoreon(); } 450static void Itrue (void) { Ftrue(); ignoreon(); }
451static void Ifalse(void) { Ffalse(); ignoreon(); } 451static void Ifalse(void) { Ffalse(); ignoreon(); }
452/* modify this line */ 452/* modify this line */
453static void Mpass (void) { strncpy(keyword, "if ", 4); Pelif(); } 453static void Mpass (void) { memcpy(keyword, "if ", 4); Pelif(); }
454static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); } 454static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); }
455static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); } 455static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); }
456static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); } 456static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); }
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index 6dc075144508..d775e03fbbcc 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -106,6 +106,7 @@ int asymmetric_verify(struct key *keyring, const char *sig,
106 106
107 pks.pkey_algo = "rsa"; 107 pks.pkey_algo = "rsa";
108 pks.hash_algo = hash_algo_name[hdr->hash_algo]; 108 pks.hash_algo = hash_algo_name[hdr->hash_algo];
109 pks.encoding = "pkcs1";
109 pks.digest = (u8 *)data; 110 pks.digest = (u8 *)data;
110 pks.digest_size = datalen; 111 pks.digest_size = datalen;
111 pks.s = hdr->sig; 112 pks.s = hdr->sig;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 7ce683259357..a67459eb62d5 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5318,6 +5318,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
5318 addr_buf = address; 5318 addr_buf = address;
5319 5319
5320 while (walk_size < addrlen) { 5320 while (walk_size < addrlen) {
5321 if (walk_size + sizeof(sa_family_t) > addrlen)
5322 return -EINVAL;
5323
5321 addr = addr_buf; 5324 addr = addr_buf;
5322 switch (addr->sa_family) { 5325 switch (addr->sa_family) {
5323 case AF_UNSPEC: 5326 case AF_UNSPEC:
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 74b951f55608..9cec81209617 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -80,6 +80,9 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
80 { RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ }, 80 { RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
81 { RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ }, 81 { RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
82 { RTM_NEWCACHEREPORT, NETLINK_ROUTE_SOCKET__NLMSG_READ }, 82 { RTM_NEWCACHEREPORT, NETLINK_ROUTE_SOCKET__NLMSG_READ },
83 { RTM_NEWCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
84 { RTM_DELCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
85 { RTM_GETCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
83}; 86};
84 87
85static const struct nlmsg_perm nlmsg_tcpdiag_perms[] = 88static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -158,7 +161,11 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
158 161
159 switch (sclass) { 162 switch (sclass) {
160 case SECCLASS_NETLINK_ROUTE_SOCKET: 163 case SECCLASS_NETLINK_ROUTE_SOCKET:
161 /* RTM_MAX always point to RTM_SETxxxx, ie RTM_NEWxxx + 3 */ 164 /* RTM_MAX always points to RTM_SETxxxx, ie RTM_NEWxxx + 3.
165 * If the BUILD_BUG_ON() below fails you must update the
166 * structures at the top of this file with the new mappings
167 * before updating the BUILD_BUG_ON() macro!
168 */
162 BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3)); 169 BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3));
163 err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms, 170 err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
164 sizeof(nlmsg_route_perms)); 171 sizeof(nlmsg_route_perms));
@@ -170,6 +177,10 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
170 break; 177 break;
171 178
172 case SECCLASS_NETLINK_XFRM_SOCKET: 179 case SECCLASS_NETLINK_XFRM_SOCKET:
180 /* If the BUILD_BUG_ON() below fails you must update the
181 * structures at the top of this file with the new mappings
182 * before updating the BUILD_BUG_ON() macro!
183 */
173 BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING); 184 BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
174 err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms, 185 err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
175 sizeof(nlmsg_xfrm_perms)); 186 sizeof(nlmsg_xfrm_perms));
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 2fe459df3c85..b7efa2296969 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -245,9 +245,13 @@ int mls_context_to_sid(struct policydb *pol,
245 char *rangep[2]; 245 char *rangep[2];
246 246
247 if (!pol->mls_enabled) { 247 if (!pol->mls_enabled) {
248 if ((def_sid != SECSID_NULL && oldc) || (*scontext) == '\0') 248 /*
249 return 0; 249 * With no MLS, only return -EINVAL if there is a MLS field
250 return -EINVAL; 250 * and it did not come from an xattr.
251 */
252 if (oldc && def_sid == SECSID_NULL)
253 return -EINVAL;
254 return 0;
251 } 255 }
252 256
253 /* 257 /*
diff --git a/sound/core/control.c b/sound/core/control.c
index 9aa15bfc7936..649d3217590e 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -348,6 +348,40 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
348 return 0; 348 return 0;
349} 349}
350 350
351/* add a new kcontrol object; call with card->controls_rwsem locked */
352static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
353{
354 struct snd_ctl_elem_id id;
355 unsigned int idx;
356 unsigned int count;
357
358 id = kcontrol->id;
359 if (id.index > UINT_MAX - kcontrol->count)
360 return -EINVAL;
361
362 if (snd_ctl_find_id(card, &id)) {
363 dev_err(card->dev,
364 "control %i:%i:%i:%s:%i is already present\n",
365 id.iface, id.device, id.subdevice, id.name, id.index);
366 return -EBUSY;
367 }
368
369 if (snd_ctl_find_hole(card, kcontrol->count) < 0)
370 return -ENOMEM;
371
372 list_add_tail(&kcontrol->list, &card->controls);
373 card->controls_count += kcontrol->count;
374 kcontrol->id.numid = card->last_numid + 1;
375 card->last_numid += kcontrol->count;
376
377 id = kcontrol->id;
378 count = kcontrol->count;
379 for (idx = 0; idx < count; idx++, id.index++, id.numid++)
380 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
381
382 return 0;
383}
384
351/** 385/**
352 * snd_ctl_add - add the control instance to the card 386 * snd_ctl_add - add the control instance to the card
353 * @card: the card instance 387 * @card: the card instance
@@ -364,45 +398,18 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
364 */ 398 */
365int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol) 399int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
366{ 400{
367 struct snd_ctl_elem_id id;
368 unsigned int idx;
369 unsigned int count;
370 int err = -EINVAL; 401 int err = -EINVAL;
371 402
372 if (! kcontrol) 403 if (! kcontrol)
373 return err; 404 return err;
374 if (snd_BUG_ON(!card || !kcontrol->info)) 405 if (snd_BUG_ON(!card || !kcontrol->info))
375 goto error; 406 goto error;
376 id = kcontrol->id;
377 if (id.index > UINT_MAX - kcontrol->count)
378 goto error;
379 407
380 down_write(&card->controls_rwsem); 408 down_write(&card->controls_rwsem);
381 if (snd_ctl_find_id(card, &id)) { 409 err = __snd_ctl_add(card, kcontrol);
382 up_write(&card->controls_rwsem);
383 dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
384 id.iface,
385 id.device,
386 id.subdevice,
387 id.name,
388 id.index);
389 err = -EBUSY;
390 goto error;
391 }
392 if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
393 up_write(&card->controls_rwsem);
394 err = -ENOMEM;
395 goto error;
396 }
397 list_add_tail(&kcontrol->list, &card->controls);
398 card->controls_count += kcontrol->count;
399 kcontrol->id.numid = card->last_numid + 1;
400 card->last_numid += kcontrol->count;
401 id = kcontrol->id;
402 count = kcontrol->count;
403 up_write(&card->controls_rwsem); 410 up_write(&card->controls_rwsem);
404 for (idx = 0; idx < count; idx++, id.index++, id.numid++) 411 if (err < 0)
405 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); 412 goto error;
406 return 0; 413 return 0;
407 414
408 error: 415 error:
@@ -1361,9 +1368,12 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1361 kctl->tlv.c = snd_ctl_elem_user_tlv; 1368 kctl->tlv.c = snd_ctl_elem_user_tlv;
1362 1369
1363 /* This function manage to free the instance on failure. */ 1370 /* This function manage to free the instance on failure. */
1364 err = snd_ctl_add(card, kctl); 1371 down_write(&card->controls_rwsem);
1365 if (err < 0) 1372 err = __snd_ctl_add(card, kctl);
1366 return err; 1373 if (err < 0) {
1374 snd_ctl_free_one(kctl);
1375 goto unlock;
1376 }
1367 offset = snd_ctl_get_ioff(kctl, &info->id); 1377 offset = snd_ctl_get_ioff(kctl, &info->id);
1368 snd_ctl_build_ioff(&info->id, kctl, offset); 1378 snd_ctl_build_ioff(&info->id, kctl, offset);
1369 /* 1379 /*
@@ -1374,10 +1384,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1374 * which locks the element. 1384 * which locks the element.
1375 */ 1385 */
1376 1386
1377 down_write(&card->controls_rwsem);
1378 card->user_ctl_count++; 1387 card->user_ctl_count++;
1379 up_write(&card->controls_rwsem);
1380 1388
1389 unlock:
1390 up_write(&card->controls_rwsem);
1381 return 0; 1391 return 0;
1382} 1392}
1383 1393
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index f8d4a419f3af..467039b342b5 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1062,8 +1062,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
1062 runtime->oss.channels = params_channels(params); 1062 runtime->oss.channels = params_channels(params);
1063 runtime->oss.rate = params_rate(params); 1063 runtime->oss.rate = params_rate(params);
1064 1064
1065 vfree(runtime->oss.buffer); 1065 kvfree(runtime->oss.buffer);
1066 runtime->oss.buffer = vmalloc(runtime->oss.period_bytes); 1066 runtime->oss.buffer = kvzalloc(runtime->oss.period_bytes, GFP_KERNEL);
1067 if (!runtime->oss.buffer) { 1067 if (!runtime->oss.buffer) {
1068 err = -ENOMEM; 1068 err = -ENOMEM;
1069 goto failure; 1069 goto failure;
@@ -2328,7 +2328,7 @@ static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
2328{ 2328{
2329 struct snd_pcm_runtime *runtime; 2329 struct snd_pcm_runtime *runtime;
2330 runtime = substream->runtime; 2330 runtime = substream->runtime;
2331 vfree(runtime->oss.buffer); 2331 kvfree(runtime->oss.buffer);
2332 runtime->oss.buffer = NULL; 2332 runtime->oss.buffer = NULL;
2333#ifdef CONFIG_SND_PCM_OSS_PLUGINS 2333#ifdef CONFIG_SND_PCM_OSS_PLUGINS
2334 snd_pcm_oss_plugin_clear(substream); 2334 snd_pcm_oss_plugin_clear(substream);
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 141c5f3a9575..31cb2acf8afc 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -66,8 +66,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t
66 return -ENXIO; 66 return -ENXIO;
67 size /= 8; 67 size /= 8;
68 if (plugin->buf_frames < frames) { 68 if (plugin->buf_frames < frames) {
69 vfree(plugin->buf); 69 kvfree(plugin->buf);
70 plugin->buf = vmalloc(size); 70 plugin->buf = kvzalloc(size, GFP_KERNEL);
71 plugin->buf_frames = frames; 71 plugin->buf_frames = frames;
72 } 72 }
73 if (!plugin->buf) { 73 if (!plugin->buf) {
@@ -191,7 +191,7 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
191 if (plugin->private_free) 191 if (plugin->private_free)
192 plugin->private_free(plugin); 192 plugin->private_free(plugin);
193 kfree(plugin->buf_channels); 193 kfree(plugin->buf_channels);
194 vfree(plugin->buf); 194 kvfree(plugin->buf);
195 kfree(plugin); 195 kfree(plugin);
196 return 0; 196 return 0;
197} 197}
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 32453f81b95a..3a5008837576 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
1531 if (err < 0) { 1531 if (err < 0) {
1532 if (chip->release_dma) 1532 if (chip->release_dma)
1533 chip->release_dma(chip, chip->dma_private_data, chip->dma1); 1533 chip->release_dma(chip, chip->dma_private_data, chip->dma1);
1534 snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1535 return err; 1534 return err;
1536 } 1535 }
1537 chip->playback_substream = substream; 1536 chip->playback_substream = substream;
@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
1572 if (err < 0) { 1571 if (err < 0) {
1573 if (chip->release_dma) 1572 if (chip->release_dma)
1574 chip->release_dma(chip, chip->dma_private_data, chip->dma2); 1573 chip->release_dma(chip, chip->dma_private_data, chip->dma2);
1575 snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1576 return err; 1574 return err;
1577 } 1575 }
1578 chip->capture_substream = substream; 1576 chip->capture_substream = substream;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index f4459d1a9d67..27b468f057dd 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
824{ 824{
825 struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); 825 struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
826 int reg = kcontrol->private_value & 0xff; 826 int reg = kcontrol->private_value & 0xff;
827 int shift = (kcontrol->private_value >> 8) & 0xff; 827 int shift = (kcontrol->private_value >> 8) & 0x0f;
828 int mask = (kcontrol->private_value >> 16) & 0xff; 828 int mask = (kcontrol->private_value >> 16) & 0xff;
829 // int invert = (kcontrol->private_value >> 24) & 0xff; 829 // int invert = (kcontrol->private_value >> 24) & 0xff;
830 unsigned short value, old, new; 830 unsigned short value, old, new;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index d8eb2b5f51ae..0bbdf1a01e76 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2169,6 +2169,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
2169 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2169 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2170 SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0), 2170 SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0),
2171 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2171 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2172 SND_PCI_QUIRK(0x1849, 0x0397, "Asrock N68C-S UCC", 0),
2173 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2172 SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0), 2174 SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
2173 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2175 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2174 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), 2176 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0a24037184c3..0a567634e5fa 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1177,6 +1177,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
1177 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), 1177 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
1178 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), 1178 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
1179 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), 1179 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
1180 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
1180 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), 1181 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
1181 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), 1182 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
1182 SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), 1183 SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
@@ -8413,7 +8414,7 @@ static void ca0132_free(struct hda_codec *codec)
8413 8414
8414 snd_hda_power_down(codec); 8415 snd_hda_power_down(codec);
8415 if (spec->mem_base) 8416 if (spec->mem_base)
8416 iounmap(spec->mem_base); 8417 pci_iounmap(codec->bus->pci, spec->mem_base);
8417 kfree(spec->spec_init_verbs); 8418 kfree(spec->spec_init_verbs);
8418 kfree(codec->spec); 8419 kfree(codec->spec);
8419} 8420}
@@ -8488,7 +8489,7 @@ static void ca0132_config(struct hda_codec *codec)
8488 break; 8489 break;
8489 case QUIRK_AE5: 8490 case QUIRK_AE5:
8490 codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__); 8491 codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__);
8491 snd_hda_apply_pincfgs(codec, r3di_pincfgs); 8492 snd_hda_apply_pincfgs(codec, ae5_pincfgs);
8492 break; 8493 break;
8493 } 8494 }
8494 8495
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index fa61674a5605..06f93032d0cc 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -388,6 +388,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
388 case 0x10ec0285: 388 case 0x10ec0285:
389 case 0x10ec0298: 389 case 0x10ec0298:
390 case 0x10ec0289: 390 case 0x10ec0289:
391 case 0x10ec0300:
391 alc_update_coef_idx(codec, 0x10, 1<<9, 0); 392 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
392 break; 393 break;
393 case 0x10ec0275: 394 case 0x10ec0275:
@@ -2830,6 +2831,7 @@ enum {
2830 ALC269_TYPE_ALC215, 2831 ALC269_TYPE_ALC215,
2831 ALC269_TYPE_ALC225, 2832 ALC269_TYPE_ALC225,
2832 ALC269_TYPE_ALC294, 2833 ALC269_TYPE_ALC294,
2834 ALC269_TYPE_ALC300,
2833 ALC269_TYPE_ALC700, 2835 ALC269_TYPE_ALC700,
2834}; 2836};
2835 2837
@@ -2864,6 +2866,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
2864 case ALC269_TYPE_ALC215: 2866 case ALC269_TYPE_ALC215:
2865 case ALC269_TYPE_ALC225: 2867 case ALC269_TYPE_ALC225:
2866 case ALC269_TYPE_ALC294: 2868 case ALC269_TYPE_ALC294:
2869 case ALC269_TYPE_ALC300:
2867 case ALC269_TYPE_ALC700: 2870 case ALC269_TYPE_ALC700:
2868 ssids = alc269_ssids; 2871 ssids = alc269_ssids;
2869 break; 2872 break;
@@ -5358,6 +5361,16 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
5358 spec->gen.preferred_dacs = preferred_pairs; 5361 spec->gen.preferred_dacs = preferred_pairs;
5359} 5362}
5360 5363
5364/* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
5365static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
5366 const struct hda_fixup *fix, int action)
5367{
5368 if (action != HDA_FIXUP_ACT_PRE_PROBE)
5369 return;
5370
5371 snd_hda_override_wcaps(codec, 0x03, 0);
5372}
5373
5361/* for hda_fixup_thinkpad_acpi() */ 5374/* for hda_fixup_thinkpad_acpi() */
5362#include "thinkpad_helper.c" 5375#include "thinkpad_helper.c"
5363 5376
@@ -5495,6 +5508,8 @@ enum {
5495 ALC255_FIXUP_DELL_HEADSET_MIC, 5508 ALC255_FIXUP_DELL_HEADSET_MIC,
5496 ALC295_FIXUP_HP_X360, 5509 ALC295_FIXUP_HP_X360,
5497 ALC221_FIXUP_HP_HEADSET_MIC, 5510 ALC221_FIXUP_HP_HEADSET_MIC,
5511 ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
5512 ALC295_FIXUP_HP_AUTO_MUTE,
5498}; 5513};
5499 5514
5500static const struct hda_fixup alc269_fixups[] = { 5515static const struct hda_fixup alc269_fixups[] = {
@@ -5659,6 +5674,8 @@ static const struct hda_fixup alc269_fixups[] = {
5659 [ALC269_FIXUP_HP_MUTE_LED_MIC3] = { 5674 [ALC269_FIXUP_HP_MUTE_LED_MIC3] = {
5660 .type = HDA_FIXUP_FUNC, 5675 .type = HDA_FIXUP_FUNC,
5661 .v.func = alc269_fixup_hp_mute_led_mic3, 5676 .v.func = alc269_fixup_hp_mute_led_mic3,
5677 .chained = true,
5678 .chain_id = ALC295_FIXUP_HP_AUTO_MUTE
5662 }, 5679 },
5663 [ALC269_FIXUP_HP_GPIO_LED] = { 5680 [ALC269_FIXUP_HP_GPIO_LED] = {
5664 .type = HDA_FIXUP_FUNC, 5681 .type = HDA_FIXUP_FUNC,
@@ -6362,6 +6379,14 @@ static const struct hda_fixup alc269_fixups[] = {
6362 .chained = true, 6379 .chained = true,
6363 .chain_id = ALC269_FIXUP_HEADSET_MIC 6380 .chain_id = ALC269_FIXUP_HEADSET_MIC
6364 }, 6381 },
6382 [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
6383 .type = HDA_FIXUP_FUNC,
6384 .v.func = alc285_fixup_invalidate_dacs,
6385 },
6386 [ALC295_FIXUP_HP_AUTO_MUTE] = {
6387 .type = HDA_FIXUP_FUNC,
6388 .v.func = alc_fixup_auto_mute_via_amp,
6389 },
6365}; 6390};
6366 6391
6367static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6392static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6481,6 +6506,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6481 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 6506 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
6482 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 6507 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
6483 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), 6508 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
6509 SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6484 SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC), 6510 SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
6485 SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360), 6511 SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
6486 SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6512 SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
@@ -6531,6 +6557,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6531 SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), 6557 SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
6532 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 6558 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
6533 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), 6559 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
6560 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
6534 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), 6561 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
6535 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 6562 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
6536 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 6563 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -7033,6 +7060,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7033 {0x12, 0x90a60130}, 7060 {0x12, 0x90a60130},
7034 {0x19, 0x03a11020}, 7061 {0x19, 0x03a11020},
7035 {0x21, 0x0321101f}), 7062 {0x21, 0x0321101f}),
7063 SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
7064 {0x12, 0x90a60130},
7065 {0x14, 0x90170110},
7066 {0x19, 0x04a11040},
7067 {0x21, 0x04211020}),
7036 SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, 7068 SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
7037 {0x12, 0x90a60120}, 7069 {0x12, 0x90a60120},
7038 {0x14, 0x90170110}, 7070 {0x14, 0x90170110},
@@ -7294,6 +7326,10 @@ static int patch_alc269(struct hda_codec *codec)
7294 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ 7326 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
7295 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ 7327 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
7296 break; 7328 break;
7329 case 0x10ec0300:
7330 spec->codec_variant = ALC269_TYPE_ALC300;
7331 spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
7332 break;
7297 case 0x10ec0700: 7333 case 0x10ec0700:
7298 case 0x10ec0701: 7334 case 0x10ec0701:
7299 case 0x10ec0703: 7335 case 0x10ec0703:
@@ -8404,6 +8440,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
8404 HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269), 8440 HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
8405 HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269), 8441 HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
8406 HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269), 8442 HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
8443 HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
8407 HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861), 8444 HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
8408 HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd), 8445 HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
8409 HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861), 8446 HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 97f49b751e6e..568575b72f2f 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -58,8 +58,8 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
58 removefunc = false; 58 removefunc = false;
59 } 59 }
60 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 && 60 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
61 snd_hda_gen_add_micmute_led(codec, 61 !snd_hda_gen_add_micmute_led(codec,
62 update_tpacpi_micmute) > 0) 62 update_tpacpi_micmute))
63 removefunc = false; 63 removefunc = false;
64 } 64 }
65 65
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 4e9854889a95..e63d6e33df48 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -2187,11 +2187,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
2187 */ 2187 */
2188 snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE, 2188 snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
2189 AC_PWRST_D3); 2189 AC_PWRST_D3);
2190 err = snd_hdac_display_power(bus, false);
2191 if (err < 0) {
2192 dev_err(dev, "Cannot turn on display power on i915\n");
2193 return err;
2194 }
2195 2190
2196 hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev)); 2191 hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
2197 if (!hlink) { 2192 if (!hlink) {
@@ -2201,7 +2196,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
2201 2196
2202 snd_hdac_ext_bus_link_put(bus, hlink); 2197 snd_hdac_ext_bus_link_put(bus, hlink);
2203 2198
2204 return 0; 2199 err = snd_hdac_display_power(bus, false);
2200 if (err < 0)
2201 dev_err(dev, "Cannot turn off display power on i915\n");
2202
2203 return err;
2205} 2204}
2206 2205
2207static int hdac_hdmi_runtime_resume(struct device *dev) 2206static int hdac_hdmi_runtime_resume(struct device *dev)
diff --git a/sound/soc/codecs/pcm186x.h b/sound/soc/codecs/pcm186x.h
index 2c6ba55bf394..bb3f0c42a1cd 100644
--- a/sound/soc/codecs/pcm186x.h
+++ b/sound/soc/codecs/pcm186x.h
@@ -139,7 +139,7 @@ enum pcm186x_type {
139#define PCM186X_MAX_REGISTER PCM186X_CURR_TRIM_CTRL 139#define PCM186X_MAX_REGISTER PCM186X_CURR_TRIM_CTRL
140 140
141/* PCM186X_PAGE */ 141/* PCM186X_PAGE */
142#define PCM186X_RESET 0xff 142#define PCM186X_RESET 0xfe
143 143
144/* PCM186X_ADCX_INPUT_SEL_X */ 144/* PCM186X_ADCX_INPUT_SEL_X */
145#define PCM186X_ADC_INPUT_SEL_POL BIT(7) 145#define PCM186X_ADC_INPUT_SEL_POL BIT(7)
diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c
index 494d9d662be8..771b46e1974b 100644
--- a/sound/soc/codecs/pcm3060.c
+++ b/sound/soc/codecs/pcm3060.c
@@ -198,20 +198,16 @@ static const struct snd_kcontrol_new pcm3060_dapm_controls[] = {
198}; 198};
199 199
200static const struct snd_soc_dapm_widget pcm3060_dapm_widgets[] = { 200static const struct snd_soc_dapm_widget pcm3060_dapm_widgets[] = {
201 SND_SOC_DAPM_OUTPUT("OUTL+"), 201 SND_SOC_DAPM_OUTPUT("OUTL"),
202 SND_SOC_DAPM_OUTPUT("OUTR+"), 202 SND_SOC_DAPM_OUTPUT("OUTR"),
203 SND_SOC_DAPM_OUTPUT("OUTL-"),
204 SND_SOC_DAPM_OUTPUT("OUTR-"),
205 203
206 SND_SOC_DAPM_INPUT("INL"), 204 SND_SOC_DAPM_INPUT("INL"),
207 SND_SOC_DAPM_INPUT("INR"), 205 SND_SOC_DAPM_INPUT("INR"),
208}; 206};
209 207
210static const struct snd_soc_dapm_route pcm3060_dapm_map[] = { 208static const struct snd_soc_dapm_route pcm3060_dapm_map[] = {
211 { "OUTL+", NULL, "Playback" }, 209 { "OUTL", NULL, "Playback" },
212 { "OUTR+", NULL, "Playback" }, 210 { "OUTR", NULL, "Playback" },
213 { "OUTL-", NULL, "Playback" },
214 { "OUTR-", NULL, "Playback" },
215 211
216 { "Capture", NULL, "INL" }, 212 { "Capture", NULL, "INL" },
217 { "Capture", NULL, "INR" }, 213 { "Capture", NULL, "INR" },
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index a53dc174bbf0..66501b8dc46f 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -765,38 +765,41 @@ static unsigned int wm_adsp_region_to_reg(struct wm_adsp_region const *mem,
765 765
766static void wm_adsp2_show_fw_status(struct wm_adsp *dsp) 766static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
767{ 767{
768 u16 scratch[4]; 768 unsigned int scratch[4];
769 unsigned int addr = dsp->base + ADSP2_SCRATCH0;
770 unsigned int i;
769 int ret; 771 int ret;
770 772
771 ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2_SCRATCH0, 773 for (i = 0; i < ARRAY_SIZE(scratch); ++i) {
772 scratch, sizeof(scratch)); 774 ret = regmap_read(dsp->regmap, addr + i, &scratch[i]);
773 if (ret) { 775 if (ret) {
774 adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret); 776 adsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
775 return; 777 return;
778 }
776 } 779 }
777 780
778 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n", 781 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
779 be16_to_cpu(scratch[0]), 782 scratch[0], scratch[1], scratch[2], scratch[3]);
780 be16_to_cpu(scratch[1]),
781 be16_to_cpu(scratch[2]),
782 be16_to_cpu(scratch[3]));
783} 783}
784 784
785static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp) 785static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
786{ 786{
787 u32 scratch[2]; 787 unsigned int scratch[2];
788 int ret; 788 int ret;
789 789
790 ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1, 790 ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
791 scratch, sizeof(scratch)); 791 &scratch[0]);
792
793 if (ret) { 792 if (ret) {
794 adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret); 793 adsp_err(dsp, "Failed to read SCRATCH0_1: %d\n", ret);
795 return; 794 return;
796 } 795 }
797 796
798 scratch[0] = be32_to_cpu(scratch[0]); 797 ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH2_3,
799 scratch[1] = be32_to_cpu(scratch[1]); 798 &scratch[1]);
799 if (ret) {
800 adsp_err(dsp, "Failed to read SCRATCH2_3: %d\n", ret);
801 return;
802 }
800 803
801 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n", 804 adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
802 scratch[0] & 0xFFFF, 805 scratch[0] & 0xFFFF,
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 0caa1f4eb94d..18e717703685 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -101,22 +101,42 @@ config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
101 codec, then enable this option by saying Y or m. This is a 101 codec, then enable this option by saying Y or m. This is a
102 recommended option 102 recommended option
103 103
104config SND_SOC_INTEL_SKYLAKE_SSP_CLK
105 tristate
106
107config SND_SOC_INTEL_SKYLAKE 104config SND_SOC_INTEL_SKYLAKE
108 tristate "SKL/BXT/KBL/GLK/CNL... Platforms" 105 tristate "SKL/BXT/KBL/GLK/CNL... Platforms"
109 depends on PCI && ACPI 106 depends on PCI && ACPI
107 select SND_SOC_INTEL_SKYLAKE_COMMON
108 help
109 If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
110 GeminiLake or CannonLake platform with the DSP enabled in the BIOS
111 then enable this option by saying Y or m.
112
113if SND_SOC_INTEL_SKYLAKE
114
115config SND_SOC_INTEL_SKYLAKE_SSP_CLK
116 tristate
117
118config SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
119 bool "HDAudio codec support"
120 help
121 If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
122 GeminiLake or CannonLake platform with an HDaudio codec
123 then enable this option by saying Y
124
125config SND_SOC_INTEL_SKYLAKE_COMMON
126 tristate
110 select SND_HDA_EXT_CORE 127 select SND_HDA_EXT_CORE
111 select SND_HDA_DSP_LOADER 128 select SND_HDA_DSP_LOADER
112 select SND_SOC_TOPOLOGY 129 select SND_SOC_TOPOLOGY
113 select SND_SOC_INTEL_SST 130 select SND_SOC_INTEL_SST
131 select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
114 select SND_SOC_ACPI_INTEL_MATCH 132 select SND_SOC_ACPI_INTEL_MATCH
115 help 133 help
116 If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/ 134 If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
117 GeminiLake or CannonLake platform with the DSP enabled in the BIOS 135 GeminiLake or CannonLake platform with the DSP enabled in the BIOS
118 then enable this option by saying Y or m. 136 then enable this option by saying Y or m.
119 137
138endif ## SND_SOC_INTEL_SKYLAKE
139
120config SND_SOC_ACPI_INTEL_MATCH 140config SND_SOC_ACPI_INTEL_MATCH
121 tristate 141 tristate
122 select SND_SOC_ACPI if ACPI 142 select SND_SOC_ACPI if ACPI
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 73ca1350aa31..b177db2a0dbb 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -293,16 +293,6 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
293 Say Y if you have such a device. 293 Say Y if you have such a device.
294 If unsure select "N". 294 If unsure select "N".
295 295
296config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
297 tristate "SKL/KBL/BXT/APL with HDA Codecs"
298 select SND_SOC_HDAC_HDMI
299 select SND_SOC_HDAC_HDA
300 help
301 This adds support for ASoC machine driver for Intel platforms
302 SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
303 Say Y or m if you have such a device. This is a recommended option.
304 If unsure select "N".
305
306config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH 296config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
307 tristate "GLK with RT5682 and MAX98357A in I2S Mode" 297 tristate "GLK with RT5682 and MAX98357A in I2S Mode"
308 depends on MFD_INTEL_LPSS && I2C && ACPI 298 depends on MFD_INTEL_LPSS && I2C && ACPI
@@ -319,4 +309,18 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
319 309
320endif ## SND_SOC_INTEL_SKYLAKE 310endif ## SND_SOC_INTEL_SKYLAKE
321 311
312if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
313
314config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
315 tristate "SKL/KBL/BXT/APL with HDA Codecs"
316 select SND_SOC_HDAC_HDMI
317 # SND_SOC_HDAC_HDA is already selected
318 help
319 This adds support for ASoC machine driver for Intel platforms
320 SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
321 Say Y or m if you have such a device. This is a recommended option.
322 If unsure select "N".
323
324endif ## SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
325
322endif ## SND_SOC_INTEL_MACH 326endif ## SND_SOC_INTEL_MACH
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index db6976f4ddaa..9d9f6e41d81c 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -19,6 +19,7 @@
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */ 20 */
21 21
22#include <linux/dmi.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -35,6 +36,8 @@
35#define CHT_PLAT_CLK_3_HZ 19200000 36#define CHT_PLAT_CLK_3_HZ 19200000
36#define CHT_CODEC_DAI "HiFi" 37#define CHT_CODEC_DAI "HiFi"
37 38
39#define QUIRK_PMC_PLT_CLK_0 0x01
40
38struct cht_mc_private { 41struct cht_mc_private {
39 struct clk *mclk; 42 struct clk *mclk;
40 struct snd_soc_jack jack; 43 struct snd_soc_jack jack;
@@ -385,11 +388,29 @@ static struct snd_soc_card snd_soc_card_cht = {
385 .num_controls = ARRAY_SIZE(cht_mc_controls), 388 .num_controls = ARRAY_SIZE(cht_mc_controls),
386}; 389};
387 390
391static const struct dmi_system_id cht_max98090_quirk_table[] = {
392 {
393 /* Swanky model Chromebook (Toshiba Chromebook 2) */
394 .matches = {
395 DMI_MATCH(DMI_PRODUCT_NAME, "Swanky"),
396 },
397 .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
398 },
399 {}
400};
401
388static int snd_cht_mc_probe(struct platform_device *pdev) 402static int snd_cht_mc_probe(struct platform_device *pdev)
389{ 403{
404 const struct dmi_system_id *dmi_id;
390 struct device *dev = &pdev->dev; 405 struct device *dev = &pdev->dev;
391 int ret_val = 0; 406 int ret_val = 0;
392 struct cht_mc_private *drv; 407 struct cht_mc_private *drv;
408 const char *mclk_name;
409 int quirks = 0;
410
411 dmi_id = dmi_first_match(cht_max98090_quirk_table);
412 if (dmi_id)
413 quirks = (unsigned long)dmi_id->driver_data;
393 414
394 drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); 415 drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
395 if (!drv) 416 if (!drv)
@@ -411,11 +432,16 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
411 snd_soc_card_cht.dev = &pdev->dev; 432 snd_soc_card_cht.dev = &pdev->dev;
412 snd_soc_card_set_drvdata(&snd_soc_card_cht, drv); 433 snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
413 434
414 drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3"); 435 if (quirks & QUIRK_PMC_PLT_CLK_0)
436 mclk_name = "pmc_plt_clk_0";
437 else
438 mclk_name = "pmc_plt_clk_3";
439
440 drv->mclk = devm_clk_get(&pdev->dev, mclk_name);
415 if (IS_ERR(drv->mclk)) { 441 if (IS_ERR(drv->mclk)) {
416 dev_err(&pdev->dev, 442 dev_err(&pdev->dev,
417 "Failed to get MCLK from pmc_plt_clk_3: %ld\n", 443 "Failed to get MCLK from %s: %ld\n",
418 PTR_ERR(drv->mclk)); 444 mclk_name, PTR_ERR(drv->mclk));
419 return PTR_ERR(drv->mclk); 445 return PTR_ERR(drv->mclk);
420 } 446 }
421 447
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 29225623b4b4..7487f388e65d 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -37,7 +37,9 @@
37#include "skl.h" 37#include "skl.h"
38#include "skl-sst-dsp.h" 38#include "skl-sst-dsp.h"
39#include "skl-sst-ipc.h" 39#include "skl-sst-ipc.h"
40#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
40#include "../../../soc/codecs/hdac_hda.h" 41#include "../../../soc/codecs/hdac_hda.h"
42#endif
41 43
42/* 44/*
43 * initialize the PCI registers 45 * initialize the PCI registers
@@ -658,6 +660,8 @@ static void skl_clock_device_unregister(struct skl *skl)
658 platform_device_unregister(skl->clk_dev); 660 platform_device_unregister(skl->clk_dev);
659} 661}
660 662
663#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
664
661#define IDISP_INTEL_VENDOR_ID 0x80860000 665#define IDISP_INTEL_VENDOR_ID 0x80860000
662 666
663/* 667/*
@@ -676,6 +680,8 @@ static void load_codec_module(struct hda_codec *codec)
676#endif 680#endif
677} 681}
678 682
683#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
684
679/* 685/*
680 * Probe the given codec address 686 * Probe the given codec address
681 */ 687 */
@@ -685,9 +691,11 @@ static int probe_codec(struct hdac_bus *bus, int addr)
685 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; 691 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
686 unsigned int res = -1; 692 unsigned int res = -1;
687 struct skl *skl = bus_to_skl(bus); 693 struct skl *skl = bus_to_skl(bus);
694#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
688 struct hdac_hda_priv *hda_codec; 695 struct hdac_hda_priv *hda_codec;
689 struct hdac_device *hdev;
690 int err; 696 int err;
697#endif
698 struct hdac_device *hdev;
691 699
692 mutex_lock(&bus->cmd_mutex); 700 mutex_lock(&bus->cmd_mutex);
693 snd_hdac_bus_send_cmd(bus, cmd); 701 snd_hdac_bus_send_cmd(bus, cmd);
@@ -697,6 +705,7 @@ static int probe_codec(struct hdac_bus *bus, int addr)
697 return -EIO; 705 return -EIO;
698 dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res); 706 dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res);
699 707
708#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
700 hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec), 709 hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec),
701 GFP_KERNEL); 710 GFP_KERNEL);
702 if (!hda_codec) 711 if (!hda_codec)
@@ -715,6 +724,13 @@ static int probe_codec(struct hdac_bus *bus, int addr)
715 load_codec_module(&hda_codec->codec); 724 load_codec_module(&hda_codec->codec);
716 } 725 }
717 return 0; 726 return 0;
727#else
728 hdev = devm_kzalloc(&skl->pci->dev, sizeof(*hdev), GFP_KERNEL);
729 if (!hdev)
730 return -ENOMEM;
731
732 return snd_hdac_ext_bus_device_init(bus, addr, hdev);
733#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
718} 734}
719 735
720/* Codec initialization */ 736/* Codec initialization */
@@ -815,6 +831,12 @@ static void skl_probe_work(struct work_struct *work)
815 } 831 }
816 } 832 }
817 833
834 /*
835 * we are done probing so decrement link counts
836 */
837 list_for_each_entry(hlink, &bus->hlink_list, list)
838 snd_hdac_ext_bus_link_put(bus, hlink);
839
818 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 840 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
819 err = snd_hdac_display_power(bus, false); 841 err = snd_hdac_display_power(bus, false);
820 if (err < 0) { 842 if (err < 0) {
@@ -824,12 +846,6 @@ static void skl_probe_work(struct work_struct *work)
824 } 846 }
825 } 847 }
826 848
827 /*
828 * we are done probing so decrement link counts
829 */
830 list_for_each_entry(hlink, &bus->hlink_list, list)
831 snd_hdac_ext_bus_link_put(bus, hlink);
832
833 /* configure PM */ 849 /* configure PM */
834 pm_runtime_put_noidle(bus->dev); 850 pm_runtime_put_noidle(bus->dev);
835 pm_runtime_allow(bus->dev); 851 pm_runtime_allow(bus->dev);
@@ -870,7 +886,7 @@ static int skl_create(struct pci_dev *pci,
870 hbus = skl_to_hbus(skl); 886 hbus = skl_to_hbus(skl);
871 bus = skl_to_bus(skl); 887 bus = skl_to_bus(skl);
872 888
873#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDA) 889#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
874 ext_ops = snd_soc_hdac_hda_get_ops(); 890 ext_ops = snd_soc_hdac_hda_get_ops();
875#endif 891#endif
876 snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, ext_ops); 892 snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, ext_ops);
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index d5ae9eb8c756..fed45b41f9d3 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -36,6 +36,8 @@
36#include "../codecs/twl6040.h" 36#include "../codecs/twl6040.h"
37 37
38struct abe_twl6040 { 38struct abe_twl6040 {
39 struct snd_soc_card card;
40 struct snd_soc_dai_link dai_links[2];
39 int jack_detection; /* board can detect jack events */ 41 int jack_detection; /* board can detect jack events */
40 int mclk_freq; /* MCLK frequency speed for twl6040 */ 42 int mclk_freq; /* MCLK frequency speed for twl6040 */
41}; 43};
@@ -208,40 +210,10 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
208 ARRAY_SIZE(dmic_audio_map)); 210 ARRAY_SIZE(dmic_audio_map));
209} 211}
210 212
211/* Digital audio interface glue - connects codec <--> CPU */
212static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
213 {
214 .name = "TWL6040",
215 .stream_name = "TWL6040",
216 .codec_dai_name = "twl6040-legacy",
217 .codec_name = "twl6040-codec",
218 .init = omap_abe_twl6040_init,
219 .ops = &omap_abe_ops,
220 },
221 {
222 .name = "DMIC",
223 .stream_name = "DMIC Capture",
224 .codec_dai_name = "dmic-hifi",
225 .codec_name = "dmic-codec",
226 .init = omap_abe_dmic_init,
227 .ops = &omap_abe_dmic_ops,
228 },
229};
230
231/* Audio machine driver */
232static struct snd_soc_card omap_abe_card = {
233 .owner = THIS_MODULE,
234
235 .dapm_widgets = twl6040_dapm_widgets,
236 .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
237 .dapm_routes = audio_map,
238 .num_dapm_routes = ARRAY_SIZE(audio_map),
239};
240
241static int omap_abe_probe(struct platform_device *pdev) 213static int omap_abe_probe(struct platform_device *pdev)
242{ 214{
243 struct device_node *node = pdev->dev.of_node; 215 struct device_node *node = pdev->dev.of_node;
244 struct snd_soc_card *card = &omap_abe_card; 216 struct snd_soc_card *card;
245 struct device_node *dai_node; 217 struct device_node *dai_node;
246 struct abe_twl6040 *priv; 218 struct abe_twl6040 *priv;
247 int num_links = 0; 219 int num_links = 0;
@@ -252,12 +224,18 @@ static int omap_abe_probe(struct platform_device *pdev)
252 return -ENODEV; 224 return -ENODEV;
253 } 225 }
254 226
255 card->dev = &pdev->dev;
256
257 priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL); 227 priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
258 if (priv == NULL) 228 if (priv == NULL)
259 return -ENOMEM; 229 return -ENOMEM;
260 230
231 card = &priv->card;
232 card->dev = &pdev->dev;
233 card->owner = THIS_MODULE;
234 card->dapm_widgets = twl6040_dapm_widgets;
235 card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
236 card->dapm_routes = audio_map;
237 card->num_dapm_routes = ARRAY_SIZE(audio_map);
238
261 if (snd_soc_of_parse_card_name(card, "ti,model")) { 239 if (snd_soc_of_parse_card_name(card, "ti,model")) {
262 dev_err(&pdev->dev, "Card name is not provided\n"); 240 dev_err(&pdev->dev, "Card name is not provided\n");
263 return -ENODEV; 241 return -ENODEV;
@@ -274,14 +252,27 @@ static int omap_abe_probe(struct platform_device *pdev)
274 dev_err(&pdev->dev, "McPDM node is not provided\n"); 252 dev_err(&pdev->dev, "McPDM node is not provided\n");
275 return -EINVAL; 253 return -EINVAL;
276 } 254 }
277 abe_twl6040_dai_links[0].cpu_of_node = dai_node; 255
278 abe_twl6040_dai_links[0].platform_of_node = dai_node; 256 priv->dai_links[0].name = "DMIC";
257 priv->dai_links[0].stream_name = "TWL6040";
258 priv->dai_links[0].cpu_of_node = dai_node;
259 priv->dai_links[0].platform_of_node = dai_node;
260 priv->dai_links[0].codec_dai_name = "twl6040-legacy";
261 priv->dai_links[0].codec_name = "twl6040-codec";
262 priv->dai_links[0].init = omap_abe_twl6040_init;
263 priv->dai_links[0].ops = &omap_abe_ops;
279 264
280 dai_node = of_parse_phandle(node, "ti,dmic", 0); 265 dai_node = of_parse_phandle(node, "ti,dmic", 0);
281 if (dai_node) { 266 if (dai_node) {
282 num_links = 2; 267 num_links = 2;
283 abe_twl6040_dai_links[1].cpu_of_node = dai_node; 268 priv->dai_links[1].name = "TWL6040";
284 abe_twl6040_dai_links[1].platform_of_node = dai_node; 269 priv->dai_links[1].stream_name = "DMIC Capture";
270 priv->dai_links[1].cpu_of_node = dai_node;
271 priv->dai_links[1].platform_of_node = dai_node;
272 priv->dai_links[1].codec_dai_name = "dmic-hifi";
273 priv->dai_links[1].codec_name = "dmic-codec";
274 priv->dai_links[1].init = omap_abe_dmic_init;
275 priv->dai_links[1].ops = &omap_abe_dmic_ops;
285 } else { 276 } else {
286 num_links = 1; 277 num_links = 1;
287 } 278 }
@@ -300,7 +291,7 @@ static int omap_abe_probe(struct platform_device *pdev)
300 return -ENODEV; 291 return -ENODEV;
301 } 292 }
302 293
303 card->dai_link = abe_twl6040_dai_links; 294 card->dai_link = priv->dai_links;
304 card->num_links = num_links; 295 card->num_links = num_links;
305 296
306 snd_soc_card_set_drvdata(card, priv); 297 snd_soc_card_set_drvdata(card, priv);
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index fe966272bd0c..cba9645b6487 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -48,6 +48,8 @@ struct omap_dmic {
48 struct device *dev; 48 struct device *dev;
49 void __iomem *io_base; 49 void __iomem *io_base;
50 struct clk *fclk; 50 struct clk *fclk;
51 struct pm_qos_request pm_qos_req;
52 int latency;
51 int fclk_freq; 53 int fclk_freq;
52 int out_freq; 54 int out_freq;
53 int clk_div; 55 int clk_div;
@@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
124 126
125 mutex_lock(&dmic->mutex); 127 mutex_lock(&dmic->mutex);
126 128
129 pm_qos_remove_request(&dmic->pm_qos_req);
130
127 if (!dai->active) 131 if (!dai->active)
128 dmic->active = 0; 132 dmic->active = 0;
129 133
@@ -228,6 +232,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
228 /* packet size is threshold * channels */ 232 /* packet size is threshold * channels */
229 dma_data = snd_soc_dai_get_dma_data(dai, substream); 233 dma_data = snd_soc_dai_get_dma_data(dai, substream);
230 dma_data->maxburst = dmic->threshold * channels; 234 dma_data->maxburst = dmic->threshold * channels;
235 dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
236 params_rate(params);
231 237
232 return 0; 238 return 0;
233} 239}
@@ -238,6 +244,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
238 struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai); 244 struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
239 u32 ctrl; 245 u32 ctrl;
240 246
247 if (pm_qos_request_active(&dmic->pm_qos_req))
248 pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
249
241 /* Configure uplink threshold */ 250 /* Configure uplink threshold */
242 omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold); 251 omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
243 252
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index d0ebb6b9bfac..2d6decbfc99e 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -308,9 +308,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
308 pkt_size = channels; 308 pkt_size = channels;
309 } 309 }
310 310
311 latency = ((((buffer_size - pkt_size) / channels) * 1000) 311 latency = (buffer_size - pkt_size) / channels;
312 / (params->rate_num / params->rate_den)); 312 latency = latency * USEC_PER_SEC /
313 313 (params->rate_num / params->rate_den);
314 mcbsp->latency[substream->stream] = latency; 314 mcbsp->latency[substream->stream] = latency;
315 315
316 omap_mcbsp_set_threshold(substream, pkt_size); 316 omap_mcbsp_set_threshold(substream, pkt_size);
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 4c1be36c2207..7d5bdc5a2890 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -54,6 +54,8 @@ struct omap_mcpdm {
54 unsigned long phys_base; 54 unsigned long phys_base;
55 void __iomem *io_base; 55 void __iomem *io_base;
56 int irq; 56 int irq;
57 struct pm_qos_request pm_qos_req;
58 int latency[2];
57 59
58 struct mutex mutex; 60 struct mutex mutex;
59 61
@@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
277 struct snd_soc_dai *dai) 279 struct snd_soc_dai *dai)
278{ 280{
279 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); 281 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
282 int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
283 int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
284 int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
280 285
281 mutex_lock(&mcpdm->mutex); 286 mutex_lock(&mcpdm->mutex);
282 287
@@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
289 } 294 }
290 } 295 }
291 296
297 if (mcpdm->latency[stream2])
298 pm_qos_update_request(&mcpdm->pm_qos_req,
299 mcpdm->latency[stream2]);
300 else if (mcpdm->latency[stream1])
301 pm_qos_remove_request(&mcpdm->pm_qos_req);
302
303 mcpdm->latency[stream1] = 0;
304
292 mutex_unlock(&mcpdm->mutex); 305 mutex_unlock(&mcpdm->mutex);
293} 306}
294 307
@@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
300 int stream = substream->stream; 313 int stream = substream->stream;
301 struct snd_dmaengine_dai_dma_data *dma_data; 314 struct snd_dmaengine_dai_dma_data *dma_data;
302 u32 threshold; 315 u32 threshold;
303 int channels; 316 int channels, latency;
304 int link_mask = 0; 317 int link_mask = 0;
305 318
306 channels = params_channels(params); 319 channels = params_channels(params);
@@ -344,14 +357,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
344 357
345 dma_data->maxburst = 358 dma_data->maxburst =
346 (MCPDM_DN_THRES_MAX - threshold) * channels; 359 (MCPDM_DN_THRES_MAX - threshold) * channels;
360 latency = threshold;
347 } else { 361 } else {
348 /* If playback is not running assume a stereo stream to come */ 362 /* If playback is not running assume a stereo stream to come */
349 if (!mcpdm->config[!stream].link_mask) 363 if (!mcpdm->config[!stream].link_mask)
350 mcpdm->config[!stream].link_mask = (0x3 << 3); 364 mcpdm->config[!stream].link_mask = (0x3 << 3);
351 365
352 dma_data->maxburst = threshold * channels; 366 dma_data->maxburst = threshold * channels;
367 latency = (MCPDM_DN_THRES_MAX - threshold);
353 } 368 }
354 369
370 /*
371 * The DMA must act to a DMA request within latency time (usec) to avoid
372 * under/overflow
373 */
374 mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
375
376 if (!mcpdm->latency[stream])
377 mcpdm->latency[stream] = 10;
378
355 /* Check if we need to restart McPDM with this stream */ 379 /* Check if we need to restart McPDM with this stream */
356 if (mcpdm->config[stream].link_mask && 380 if (mcpdm->config[stream].link_mask &&
357 mcpdm->config[stream].link_mask != link_mask) 381 mcpdm->config[stream].link_mask != link_mask)
@@ -366,6 +390,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
366 struct snd_soc_dai *dai) 390 struct snd_soc_dai *dai)
367{ 391{
368 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); 392 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
393 struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
394 int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
395 int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
396 int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
397 int latency = mcpdm->latency[stream2];
398
399 /* Prevent omap hardware from hitting off between FIFO fills */
400 if (!latency || mcpdm->latency[stream1] < latency)
401 latency = mcpdm->latency[stream1];
402
403 if (pm_qos_request_active(pm_qos_req))
404 pm_qos_update_request(pm_qos_req, latency);
405 else if (latency)
406 pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
369 407
370 if (!omap_mcpdm_active(mcpdm)) { 408 if (!omap_mcpdm_active(mcpdm)) {
371 omap_mcpdm_start(mcpdm); 409 omap_mcpdm_start(mcpdm);
@@ -427,6 +465,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
427 free_irq(mcpdm->irq, (void *)mcpdm); 465 free_irq(mcpdm->irq, (void *)mcpdm);
428 pm_runtime_disable(mcpdm->dev); 466 pm_runtime_disable(mcpdm->dev);
429 467
468 if (pm_qos_request_active(&mcpdm->pm_qos_req))
469 pm_qos_remove_request(&mcpdm->pm_qos_req);
470
430 return 0; 471 return 0;
431} 472}
432 473
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index eb1b9da05dd4..4715527054e5 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -13,6 +13,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
13 struct device_node *cpu = NULL; 13 struct device_node *cpu = NULL;
14 struct device *dev = card->dev; 14 struct device *dev = card->dev;
15 struct snd_soc_dai_link *link; 15 struct snd_soc_dai_link *link;
16 struct of_phandle_args args;
16 int ret, num_links; 17 int ret, num_links;
17 18
18 ret = snd_soc_of_parse_card_name(card, "model"); 19 ret = snd_soc_of_parse_card_name(card, "model");
@@ -47,12 +48,14 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
47 goto err; 48 goto err;
48 } 49 }
49 50
50 link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0); 51 ret = of_parse_phandle_with_args(cpu, "sound-dai",
51 if (!link->cpu_of_node) { 52 "#sound-dai-cells", 0, &args);
53 if (ret) {
52 dev_err(card->dev, "error getting cpu phandle\n"); 54 dev_err(card->dev, "error getting cpu phandle\n");
53 ret = -EINVAL;
54 goto err; 55 goto err;
55 } 56 }
57 link->cpu_of_node = args.np;
58 link->id = args.args[0];
56 59
57 ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name); 60 ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
58 if (ret) { 61 if (ret) {
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index 60ff4a2d3577..8f6c8fc073a9 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -1112,204 +1112,204 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
1112} 1112}
1113 1113
1114static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = { 1114static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
1115 SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0), 1115 SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
1116 SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0), 1116 SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
1117 SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0), 1117 SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
1118 SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0), 1118 SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
1119 SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0), 1119 SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
1120 SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0), 1120 SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
1121 SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0), 1121 SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
1122 SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0), 1122 SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
1123 SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0), 1123 SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
1124 SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0), 1124 SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
1125 SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0), 1125 SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
1126 SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0), 1126 SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
1127 SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0), 1127 SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
1128 SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0), 1128 SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
1129 SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0), 1129 SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
1130 SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback", 1130 SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
1131 0, 0, 0, 0), 1131 0, 0, 0, 0),
1132 SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture", 1132 SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
1133 0, 0, 0, 0), 1133 0, 0, 0, 0),
1134 SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback", 1134 SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
1135 0, 0, 0, 0), 1135 0, 0, 0, 0),
1136 SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture", 1136 SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
1137 0, 0, 0, 0), 1137 0, 0, 0, 0),
1138 SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback", 1138 SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
1139 0, 0, 0, 0), 1139 0, 0, 0, 0),
1140 SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture", 1140 SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
1141 0, 0, 0, 0), 1141 0, 0, 0, 0),
1142 SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1", 1142 SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
1143 "Secondary MI2S Playback SD1", 1143 "Secondary MI2S Playback SD1",
1144 0, 0, 0, 0), 1144 0, 0, 0, 0),
1145 SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback", 1145 SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
1146 0, 0, 0, 0), 1146 0, 0, 0, 0),
1147 SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture", 1147 SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
1148 0, 0, 0, 0), 1148 0, 0, 0, 0),
1149 1149
1150 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback", 1150 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
1151 0, 0, 0, 0), 1151 0, 0, 0, 0),
1152 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback", 1152 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
1153 0, 0, 0, 0), 1153 0, 0, 0, 0),
1154 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback", 1154 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
1155 0, 0, 0, 0), 1155 0, 0, 0, 0),
1156 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback", 1156 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
1157 0, 0, 0, 0), 1157 0, 0, 0, 0),
1158 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback", 1158 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
1159 0, 0, 0, 0), 1159 0, 0, 0, 0),
1160 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback", 1160 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
1161 0, 0, 0, 0), 1161 0, 0, 0, 0),
1162 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback", 1162 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
1163 0, 0, 0, 0), 1163 0, 0, 0, 0),
1164 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback", 1164 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
1165 0, 0, 0, 0), 1165 0, 0, 0, 0),
1166 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture", 1166 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
1167 0, 0, 0, 0), 1167 0, 0, 0, 0),
1168 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture", 1168 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
1169 0, 0, 0, 0), 1169 0, 0, 0, 0),
1170 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture", 1170 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
1171 0, 0, 0, 0), 1171 0, 0, 0, 0),
1172 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture", 1172 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
1173 0, 0, 0, 0), 1173 0, 0, 0, 0),
1174 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture", 1174 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
1175 0, 0, 0, 0), 1175 0, 0, 0, 0),
1176 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture", 1176 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
1177 0, 0, 0, 0), 1177 0, 0, 0, 0),
1178 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture", 1178 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
1179 0, 0, 0, 0), 1179 0, 0, 0, 0),
1180 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture", 1180 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
1181 0, 0, 0, 0), 1181 0, 0, 0, 0),
1182 1182
1183 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback", 1183 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
1184 0, 0, 0, 0), 1184 0, 0, 0, 0),
1185 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback", 1185 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
1186 0, 0, 0, 0), 1186 0, 0, 0, 0),
1187 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback", 1187 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
1188 0, 0, 0, 0), 1188 0, 0, 0, 0),
1189 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback", 1189 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
1190 0, 0, 0, 0), 1190 0, 0, 0, 0),
1191 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback", 1191 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
1192 0, 0, 0, 0), 1192 0, 0, 0, 0),
1193 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback", 1193 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
1194 0, 0, 0, 0), 1194 0, 0, 0, 0),
1195 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback", 1195 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
1196 0, 0, 0, 0), 1196 0, 0, 0, 0),
1197 SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback", 1197 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
1198 0, 0, 0, 0), 1198 0, 0, 0, 0),
1199 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture", 1199 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
1200 0, 0, 0, 0), 1200 0, 0, 0, 0),
1201 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture", 1201 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
1202 0, 0, 0, 0), 1202 0, 0, 0, 0),
1203 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture", 1203 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
1204 0, 0, 0, 0), 1204 0, 0, 0, 0),
1205 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture", 1205 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
1206 0, 0, 0, 0), 1206 0, 0, 0, 0),
1207 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture", 1207 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
1208 0, 0, 0, 0), 1208 0, 0, 0, 0),
1209 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture", 1209 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
1210 0, 0, 0, 0), 1210 0, 0, 0, 0),
1211 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture", 1211 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
1212 0, 0, 0, 0), 1212 0, 0, 0, 0),
1213 SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture", 1213 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
1214 0, 0, 0, 0), 1214 0, 0, 0, 0),
1215 1215
1216 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback", 1216 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
1217 0, 0, 0, 0), 1217 0, 0, 0, 0),
1218 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback", 1218 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
1219 0, 0, 0, 0), 1219 0, 0, 0, 0),
1220 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback", 1220 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
1221 0, 0, 0, 0), 1221 0, 0, 0, 0),
1222 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback", 1222 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
1223 0, 0, 0, 0), 1223 0, 0, 0, 0),
1224 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback", 1224 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
1225 0, 0, 0, 0), 1225 0, 0, 0, 0),
1226 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback", 1226 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
1227 0, 0, 0, 0), 1227 0, 0, 0, 0),
1228 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback", 1228 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
1229 0, 0, 0, 0), 1229 0, 0, 0, 0),
1230 SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback", 1230 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
1231 0, 0, 0, 0), 1231 0, 0, 0, 0),
1232 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture", 1232 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
1233 0, 0, 0, 0), 1233 0, 0, 0, 0),
1234 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture", 1234 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
1235 0, 0, 0, 0), 1235 0, 0, 0, 0),
1236 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture", 1236 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
1237 0, 0, 0, 0), 1237 0, 0, 0, 0),
1238 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture", 1238 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
1239 0, 0, 0, 0), 1239 0, 0, 0, 0),
1240 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture", 1240 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
1241 0, 0, 0, 0), 1241 0, 0, 0, 0),
1242 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture", 1242 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
1243 0, 0, 0, 0), 1243 0, 0, 0, 0),
1244 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture", 1244 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
1245 0, 0, 0, 0), 1245 0, 0, 0, 0),
1246 SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture", 1246 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
1247 0, 0, 0, 0), 1247 0, 0, 0, 0),
1248 1248
1249 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback", 1249 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
1250 0, 0, 0, 0), 1250 0, 0, 0, 0),
1251 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback", 1251 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
1252 0, 0, 0, 0), 1252 0, 0, 0, 0),
1253 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback", 1253 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
1254 0, 0, 0, 0), 1254 0, 0, 0, 0),
1255 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback", 1255 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
1256 0, 0, 0, 0), 1256 0, 0, 0, 0),
1257 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback", 1257 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
1258 0, 0, 0, 0), 1258 0, 0, 0, 0),
1259 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback", 1259 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
1260 0, 0, 0, 0), 1260 0, 0, 0, 0),
1261 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback", 1261 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
1262 0, 0, 0, 0), 1262 0, 0, 0, 0),
1263 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback", 1263 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
1264 0, 0, 0, 0), 1264 0, 0, 0, 0),
1265 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture", 1265 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
1266 0, 0, 0, 0), 1266 0, 0, 0, 0),
1267 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture", 1267 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
1268 0, 0, 0, 0), 1268 0, 0, 0, 0),
1269 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture", 1269 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
1270 0, 0, 0, 0), 1270 0, 0, 0, 0),
1271 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture", 1271 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
1272 0, 0, 0, 0), 1272 0, 0, 0, 0),
1273 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture", 1273 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
1274 0, 0, 0, 0), 1274 0, 0, 0, 0),
1275 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture", 1275 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
1276 0, 0, 0, 0), 1276 0, 0, 0, 0),
1277 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture", 1277 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
1278 0, 0, 0, 0), 1278 0, 0, 0, 0),
1279 SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture", 1279 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
1280 0, 0, 0, 0), 1280 0, 0, 0, 0),
1281 1281
1282 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback", 1282 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
1283 0, 0, 0, 0), 1283 0, 0, 0, 0),
1284 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback", 1284 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
1285 0, 0, 0, 0), 1285 0, 0, 0, 0),
1286 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback", 1286 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
1287 0, 0, 0, 0), 1287 0, 0, 0, 0),
1288 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback", 1288 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
1289 0, 0, 0, 0), 1289 0, 0, 0, 0),
1290 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback", 1290 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
1291 0, 0, 0, 0), 1291 0, 0, 0, 0),
1292 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback", 1292 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
1293 0, 0, 0, 0), 1293 0, 0, 0, 0),
1294 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback", 1294 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
1295 0, 0, 0, 0), 1295 0, 0, 0, 0),
1296 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback", 1296 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
1297 0, 0, 0, 0), 1297 0, 0, 0, 0),
1298 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture", 1298 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
1299 0, 0, 0, 0), 1299 0, 0, 0, 0),
1300 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture", 1300 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
1301 0, 0, 0, 0), 1301 0, 0, 0, 0),
1302 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture", 1302 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
1303 0, 0, 0, 0), 1303 0, 0, 0, 0),
1304 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture", 1304 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
1305 0, 0, 0, 0), 1305 0, 0, 0, 0),
1306 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture", 1306 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
1307 0, 0, 0, 0), 1307 0, 0, 0, 0),
1308 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture", 1308 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
1309 0, 0, 0, 0), 1309 0, 0, 0, 0),
1310 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture", 1310 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
1311 0, 0, 0, 0), 1311 0, 0, 0, 0),
1312 SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture", 1312 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
1313 0, 0, 0, 0), 1313 0, 0, 0, 0),
1314}; 1314};
1315 1315
diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index 000775b4bba8..829b5e987b2a 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -49,14 +49,14 @@
49#define AFE_PORT_I2S_SD1 0x2 49#define AFE_PORT_I2S_SD1 0x2
50#define AFE_PORT_I2S_SD2 0x3 50#define AFE_PORT_I2S_SD2 0x3
51#define AFE_PORT_I2S_SD3 0x4 51#define AFE_PORT_I2S_SD3 0x4
52#define AFE_PORT_I2S_SD0_MASK BIT(0x1) 52#define AFE_PORT_I2S_SD0_MASK BIT(0x0)
53#define AFE_PORT_I2S_SD1_MASK BIT(0x2) 53#define AFE_PORT_I2S_SD1_MASK BIT(0x1)
54#define AFE_PORT_I2S_SD2_MASK BIT(0x3) 54#define AFE_PORT_I2S_SD2_MASK BIT(0x2)
55#define AFE_PORT_I2S_SD3_MASK BIT(0x4) 55#define AFE_PORT_I2S_SD3_MASK BIT(0x3)
56#define AFE_PORT_I2S_SD0_1_MASK GENMASK(2, 1) 56#define AFE_PORT_I2S_SD0_1_MASK GENMASK(1, 0)
57#define AFE_PORT_I2S_SD2_3_MASK GENMASK(4, 3) 57#define AFE_PORT_I2S_SD2_3_MASK GENMASK(3, 2)
58#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(3, 1) 58#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(2, 0)
59#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(4, 1) 59#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
60#define AFE_PORT_I2S_QUAD01 0x5 60#define AFE_PORT_I2S_QUAD01 0x5
61#define AFE_PORT_I2S_QUAD23 0x6 61#define AFE_PORT_I2S_QUAD23 0x6
62#define AFE_PORT_I2S_6CHS 0x7 62#define AFE_PORT_I2S_6CHS 0x7
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index a16c71c03058..86115de5c1b2 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -122,7 +122,6 @@ static struct snd_pcm_hardware q6asm_dai_hardware_playback = {
122 .rate_max = 48000, \ 122 .rate_max = 48000, \
123 }, \ 123 }, \
124 .name = "MultiMedia"#num, \ 124 .name = "MultiMedia"#num, \
125 .probe = fe_dai_probe, \
126 .id = MSM_FRONTEND_DAI_MULTIMEDIA##num, \ 125 .id = MSM_FRONTEND_DAI_MULTIMEDIA##num, \
127 } 126 }
128 127
@@ -511,38 +510,6 @@ static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
511 } 510 }
512} 511}
513 512
514static const struct snd_soc_dapm_route afe_pcm_routes[] = {
515 {"MM_DL1", NULL, "MultiMedia1 Playback" },
516 {"MM_DL2", NULL, "MultiMedia2 Playback" },
517 {"MM_DL3", NULL, "MultiMedia3 Playback" },
518 {"MM_DL4", NULL, "MultiMedia4 Playback" },
519 {"MM_DL5", NULL, "MultiMedia5 Playback" },
520 {"MM_DL6", NULL, "MultiMedia6 Playback" },
521 {"MM_DL7", NULL, "MultiMedia7 Playback" },
522 {"MM_DL7", NULL, "MultiMedia8 Playback" },
523 {"MultiMedia1 Capture", NULL, "MM_UL1"},
524 {"MultiMedia2 Capture", NULL, "MM_UL2"},
525 {"MultiMedia3 Capture", NULL, "MM_UL3"},
526 {"MultiMedia4 Capture", NULL, "MM_UL4"},
527 {"MultiMedia5 Capture", NULL, "MM_UL5"},
528 {"MultiMedia6 Capture", NULL, "MM_UL6"},
529 {"MultiMedia7 Capture", NULL, "MM_UL7"},
530 {"MultiMedia8 Capture", NULL, "MM_UL8"},
531
532};
533
534static int fe_dai_probe(struct snd_soc_dai *dai)
535{
536 struct snd_soc_dapm_context *dapm;
537
538 dapm = snd_soc_component_get_dapm(dai->component);
539 snd_soc_dapm_add_routes(dapm, afe_pcm_routes,
540 ARRAY_SIZE(afe_pcm_routes));
541
542 return 0;
543}
544
545
546static const struct snd_soc_component_driver q6asm_fe_dai_component = { 513static const struct snd_soc_component_driver q6asm_fe_dai_component = {
547 .name = DRV_NAME, 514 .name = DRV_NAME,
548 .ops = &q6asm_dai_ops, 515 .ops = &q6asm_dai_ops,
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index c6b51571be94..d61b8404f7da 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -909,6 +909,25 @@ static const struct snd_soc_dapm_route intercon[] = {
909 {"MM_UL6", NULL, "MultiMedia6 Mixer"}, 909 {"MM_UL6", NULL, "MultiMedia6 Mixer"},
910 {"MM_UL7", NULL, "MultiMedia7 Mixer"}, 910 {"MM_UL7", NULL, "MultiMedia7 Mixer"},
911 {"MM_UL8", NULL, "MultiMedia8 Mixer"}, 911 {"MM_UL8", NULL, "MultiMedia8 Mixer"},
912
913 {"MM_DL1", NULL, "MultiMedia1 Playback" },
914 {"MM_DL2", NULL, "MultiMedia2 Playback" },
915 {"MM_DL3", NULL, "MultiMedia3 Playback" },
916 {"MM_DL4", NULL, "MultiMedia4 Playback" },
917 {"MM_DL5", NULL, "MultiMedia5 Playback" },
918 {"MM_DL6", NULL, "MultiMedia6 Playback" },
919 {"MM_DL7", NULL, "MultiMedia7 Playback" },
920 {"MM_DL8", NULL, "MultiMedia8 Playback" },
921
922 {"MultiMedia1 Capture", NULL, "MM_UL1"},
923 {"MultiMedia2 Capture", NULL, "MM_UL2"},
924 {"MultiMedia3 Capture", NULL, "MM_UL3"},
925 {"MultiMedia4 Capture", NULL, "MM_UL4"},
926 {"MultiMedia5 Capture", NULL, "MM_UL5"},
927 {"MultiMedia6 Capture", NULL, "MM_UL6"},
928 {"MultiMedia7 Capture", NULL, "MM_UL7"},
929 {"MultiMedia8 Capture", NULL, "MM_UL8"},
930
912}; 931};
913 932
914static int routing_hw_params(struct snd_pcm_substream *substream, 933static int routing_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
index 9e7b5fa4cf59..4ac78d7a4b2d 100644
--- a/sound/soc/rockchip/rockchip_pcm.c
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -33,6 +33,7 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
33 33
34static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = { 34static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = {
35 .pcm_hardware = &snd_rockchip_hardware, 35 .pcm_hardware = &snd_rockchip_hardware,
36 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
36 .prealloc_buffer_size = 32 * 1024, 37 .prealloc_buffer_size = 32 * 1024,
37}; 38};
38 39
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index fcb4df23248c..6ec78f3096dd 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -306,7 +306,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
306 if (rsnd_ssi_is_multi_slave(mod, io)) 306 if (rsnd_ssi_is_multi_slave(mod, io))
307 return 0; 307 return 0;
308 308
309 if (ssi->rate) { 309 if (ssi->usrcnt > 1) {
310 if (ssi->rate != rate) { 310 if (ssi->rate != rate) {
311 dev_err(dev, "SSI parent/child should use same rate\n"); 311 dev_err(dev, "SSI parent/child should use same rate\n");
312 return -EINVAL; 312 return -EINVAL;
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index b8e72b52db30..4fb29f0e561e 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -10,11 +10,17 @@ struct snd_soc_acpi_mach *
10snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines) 10snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
11{ 11{
12 struct snd_soc_acpi_mach *mach; 12 struct snd_soc_acpi_mach *mach;
13 struct snd_soc_acpi_mach *mach_alt;
13 14
14 for (mach = machines; mach->id[0]; mach++) { 15 for (mach = machines; mach->id[0]; mach++) {
15 if (acpi_dev_present(mach->id, NULL, -1)) { 16 if (acpi_dev_present(mach->id, NULL, -1)) {
16 if (mach->machine_quirk) 17 if (mach->machine_quirk) {
17 mach = mach->machine_quirk(mach); 18 mach_alt = mach->machine_quirk(mach);
19 if (!mach_alt)
20 continue; /* not full match, ignore */
21 mach = mach_alt;
22 }
23
18 return mach; 24 return mach;
19 } 25 }
20 } 26 }
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 6ddcf12bc030..b29d0f65611e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2131,6 +2131,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
2131 } 2131 }
2132 2132
2133 card->instantiated = 1; 2133 card->instantiated = 1;
2134 dapm_mark_endpoints_dirty(card);
2134 snd_soc_dapm_sync(&card->dapm); 2135 snd_soc_dapm_sync(&card->dapm);
2135 mutex_unlock(&card->mutex); 2136 mutex_unlock(&card->mutex);
2136 mutex_unlock(&client_mutex); 2137 mutex_unlock(&client_mutex);
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index ea05cc91aa05..211589b0b2ef 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -390,7 +390,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
390 char *mclk_name, *p, *s = (char *)pname; 390 char *mclk_name, *p, *s = (char *)pname;
391 int ret, i = 0; 391 int ret, i = 0;
392 392
393 mclk = devm_kzalloc(dev, sizeof(mclk), GFP_KERNEL); 393 mclk = devm_kzalloc(dev, sizeof(*mclk), GFP_KERNEL);
394 if (!mclk) 394 if (!mclk)
395 return -ENOMEM; 395 return -ENOMEM;
396 396
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index 66aad0d3f9c7..8134c3c94229 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -31,7 +31,7 @@ config SND_SUN8I_CODEC_ANALOG
31config SND_SUN50I_CODEC_ANALOG 31config SND_SUN50I_CODEC_ANALOG
32 tristate "Allwinner sun50i Codec Analog Controls Support" 32 tristate "Allwinner sun50i Codec Analog Controls Support"
33 depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST 33 depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
34 select SND_SUNXI_ADDA_PR_REGMAP 34 select SND_SUN8I_ADDA_PR_REGMAP
35 help 35 help
36 Say Y or M if you want to add support for the analog controls for 36 Say Y or M if you want to add support for the analog controls for
37 the codec embedded in Allwinner A64 SoC. 37 the codec embedded in Allwinner A64 SoC.
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index 522a72fde78d..92c5de026c43 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -481,7 +481,11 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
481 { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch", 481 { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
482 "AIF1 Slot 0 Right"}, 482 "AIF1 Slot 0 Right"},
483 483
484 /* ADC routes */ 484 /* ADC Routes */
485 { "AIF1 Slot 0 Right ADC", NULL, "ADC" },
486 { "AIF1 Slot 0 Left ADC", NULL, "ADC" },
487
488 /* ADC Mixer Routes */
485 { "Left Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch", 489 { "Left Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
486 "AIF1 Slot 0 Left ADC" }, 490 "AIF1 Slot 0 Left ADC" },
487 { "Right Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch", 491 { "Right Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
@@ -605,16 +609,10 @@ err_pm_disable:
605 609
606static int sun8i_codec_remove(struct platform_device *pdev) 610static int sun8i_codec_remove(struct platform_device *pdev)
607{ 611{
608 struct snd_soc_card *card = platform_get_drvdata(pdev);
609 struct sun8i_codec *scodec = snd_soc_card_get_drvdata(card);
610
611 pm_runtime_disable(&pdev->dev); 612 pm_runtime_disable(&pdev->dev);
612 if (!pm_runtime_status_suspended(&pdev->dev)) 613 if (!pm_runtime_status_suspended(&pdev->dev))
613 sun8i_codec_runtime_suspend(&pdev->dev); 614 sun8i_codec_runtime_suspend(&pdev->dev);
614 615
615 clk_disable_unprepare(scodec->clk_module);
616 clk_disable_unprepare(scodec->clk_bus);
617
618 return 0; 616 return 0;
619} 617}
620 618
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index e73c962590eb..079063d8038d 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
1146 runtime->hw = snd_cs4231_playback; 1146 runtime->hw = snd_cs4231_playback;
1147 1147
1148 err = snd_cs4231_open(chip, CS4231_MODE_PLAY); 1148 err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
1149 if (err < 0) { 1149 if (err < 0)
1150 snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1151 return err; 1150 return err;
1152 }
1153 chip->playback_substream = substream; 1151 chip->playback_substream = substream;
1154 chip->p_periods_sent = 0; 1152 chip->p_periods_sent = 0;
1155 snd_pcm_set_sync(substream); 1153 snd_pcm_set_sync(substream);
@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
1167 runtime->hw = snd_cs4231_capture; 1165 runtime->hw = snd_cs4231_capture;
1168 1166
1169 err = snd_cs4231_open(chip, CS4231_MODE_RECORD); 1167 err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
1170 if (err < 0) { 1168 if (err < 0)
1171 snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1172 return err; 1169 return err;
1173 }
1174 chip->capture_substream = substream; 1170 chip->capture_substream = substream;
1175 chip->c_periods_sent = 0; 1171 chip->c_periods_sent = 0;
1176 snd_pcm_set_sync(substream); 1172 snd_pcm_set_sync(substream);
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 849953e5775c..37fc0447c071 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3382,5 +3382,15 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
3382 .ifnum = QUIRK_NO_INTERFACE 3382 .ifnum = QUIRK_NO_INTERFACE
3383 } 3383 }
3384}, 3384},
3385/* Dell WD19 Dock */
3386{
3387 USB_DEVICE(0x0bda, 0x402e),
3388 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
3389 .vendor_name = "Dell",
3390 .product_name = "WD19 Dock",
3391 .profile_name = "Dell-WD15-Dock",
3392 .ifnum = QUIRK_NO_INTERFACE
3393 }
3394},
3385 3395
3386#undef USB_DEVICE_VENDOR_SPEC 3396#undef USB_DEVICE_VENDOR_SPEC
diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 12835ea0e417..378c051fa177 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,74 +14,75 @@
14#define wmb() asm volatile("dmb ishst" ::: "memory") 14#define wmb() asm volatile("dmb ishst" ::: "memory")
15#define rmb() asm volatile("dmb ishld" ::: "memory") 15#define rmb() asm volatile("dmb ishld" ::: "memory")
16 16
17#define smp_store_release(p, v) \ 17#define smp_store_release(p, v) \
18do { \ 18do { \
19 union { typeof(*p) __val; char __c[1]; } __u = \ 19 union { typeof(*p) __val; char __c[1]; } __u = \
20 { .__val = (__force typeof(*p)) (v) }; \ 20 { .__val = (v) }; \
21 \ 21 \
22 switch (sizeof(*p)) { \ 22 switch (sizeof(*p)) { \
23 case 1: \ 23 case 1: \
24 asm volatile ("stlrb %w1, %0" \ 24 asm volatile ("stlrb %w1, %0" \
25 : "=Q" (*p) \ 25 : "=Q" (*p) \
26 : "r" (*(__u8 *)__u.__c) \ 26 : "r" (*(__u8_alias_t *)__u.__c) \
27 : "memory"); \ 27 : "memory"); \
28 break; \ 28 break; \
29 case 2: \ 29 case 2: \
30 asm volatile ("stlrh %w1, %0" \ 30 asm volatile ("stlrh %w1, %0" \
31 : "=Q" (*p) \ 31 : "=Q" (*p) \
32 : "r" (*(__u16 *)__u.__c) \ 32 : "r" (*(__u16_alias_t *)__u.__c) \
33 : "memory"); \ 33 : "memory"); \
34 break; \ 34 break; \
35 case 4: \ 35 case 4: \
36 asm volatile ("stlr %w1, %0" \ 36 asm volatile ("stlr %w1, %0" \
37 : "=Q" (*p) \ 37 : "=Q" (*p) \
38 : "r" (*(__u32 *)__u.__c) \ 38 : "r" (*(__u32_alias_t *)__u.__c) \
39 : "memory"); \ 39 : "memory"); \
40 break; \ 40 break; \
41 case 8: \ 41 case 8: \
42 asm volatile ("stlr %1, %0" \ 42 asm volatile ("stlr %1, %0" \
43 : "=Q" (*p) \ 43 : "=Q" (*p) \
44 : "r" (*(__u64 *)__u.__c) \ 44 : "r" (*(__u64_alias_t *)__u.__c) \
45 : "memory"); \ 45 : "memory"); \
46 break; \ 46 break; \
47 default: \ 47 default: \
48 /* Only to shut up gcc ... */ \ 48 /* Only to shut up gcc ... */ \
49 mb(); \ 49 mb(); \
50 break; \ 50 break; \
51 } \ 51 } \
52} while (0) 52} while (0)
53 53
54#define smp_load_acquire(p) \ 54#define smp_load_acquire(p) \
55({ \ 55({ \
56 union { typeof(*p) __val; char __c[1]; } __u; \ 56 union { typeof(*p) __val; char __c[1]; } __u = \
57 \ 57 { .__c = { 0 } }; \
58 switch (sizeof(*p)) { \ 58 \
59 case 1: \ 59 switch (sizeof(*p)) { \
60 asm volatile ("ldarb %w0, %1" \ 60 case 1: \
61 : "=r" (*(__u8 *)__u.__c) \ 61 asm volatile ("ldarb %w0, %1" \
62 : "Q" (*p) : "memory"); \ 62 : "=r" (*(__u8_alias_t *)__u.__c) \
63 break; \ 63 : "Q" (*p) : "memory"); \
64 case 2: \ 64 break; \
65 asm volatile ("ldarh %w0, %1" \ 65 case 2: \
66 : "=r" (*(__u16 *)__u.__c) \ 66 asm volatile ("ldarh %w0, %1" \
67 : "Q" (*p) : "memory"); \ 67 : "=r" (*(__u16_alias_t *)__u.__c) \
68 break; \ 68 : "Q" (*p) : "memory"); \
69 case 4: \ 69 break; \
70 asm volatile ("ldar %w0, %1" \ 70 case 4: \
71 : "=r" (*(__u32 *)__u.__c) \ 71 asm volatile ("ldar %w0, %1" \
72 : "Q" (*p) : "memory"); \ 72 : "=r" (*(__u32_alias_t *)__u.__c) \
73 break; \ 73 : "Q" (*p) : "memory"); \
74 case 8: \ 74 break; \
75 asm volatile ("ldar %0, %1" \ 75 case 8: \
76 : "=r" (*(__u64 *)__u.__c) \ 76 asm volatile ("ldar %0, %1" \
77 : "Q" (*p) : "memory"); \ 77 : "=r" (*(__u64_alias_t *)__u.__c) \
78 break; \ 78 : "Q" (*p) : "memory"); \
79 default: \ 79 break; \
80 /* Only to shut up gcc ... */ \ 80 default: \
81 mb(); \ 81 /* Only to shut up gcc ... */ \
82 break; \ 82 mb(); \
83 } \ 83 break; \
84 __u.__val; \ 84 } \
85 __u.__val; \
85}) 86})
86 87
87#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */ 88#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 89a048c2faec..28c4a502b419 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -331,6 +331,8 @@
331#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 331#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
332#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 332#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
333#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ 333#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
334#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
335#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
334 336
335/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ 337/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
336#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ 338#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index edbe81534c6d..d07ccf8a23f7 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -137,4 +137,10 @@ EXAMPLES
137 137
138SEE ALSO 138SEE ALSO
139======== 139========
140 **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8) 140 **bpf**\ (2),
141 **bpf-helpers**\ (7),
142 **bpftool**\ (8),
143 **bpftool-prog**\ (8),
144 **bpftool-map**\ (8),
145 **bpftool-net**\ (8),
146 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index f55a2daed59b..7bb787cfa971 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -171,4 +171,10 @@ The following three commands are equivalent:
171 171
172SEE ALSO 172SEE ALSO
173======== 173========
174 **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8) 174 **bpf**\ (2),
175 **bpf-helpers**\ (7),
176 **bpftool**\ (8),
177 **bpftool-prog**\ (8),
178 **bpftool-cgroup**\ (8),
179 **bpftool-net**\ (8),
180 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index 408ec30d8872..ed87c9b619ad 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -136,4 +136,10 @@ EXAMPLES
136 136
137SEE ALSO 137SEE ALSO
138======== 138========
139 **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8) 139 **bpf**\ (2),
140 **bpf-helpers**\ (7),
141 **bpftool**\ (8),
142 **bpftool-prog**\ (8),
143 **bpftool-map**\ (8),
144 **bpftool-cgroup**\ (8),
145 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index e3eb0eab7641..f4c5e5538bb8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -78,4 +78,10 @@ EXAMPLES
78 78
79SEE ALSO 79SEE ALSO
80======== 80========
81 **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8) 81 **bpf**\ (2),
82 **bpf-helpers**\ (7),
83 **bpftool**\ (8),
84 **bpftool-prog**\ (8),
85 **bpftool-map**\ (8),
86 **bpftool-cgroup**\ (8),
87 **bpftool-net**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index ac4e904b10fb..ecf618807125 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -124,7 +124,8 @@ OPTIONS
124 Generate human-readable JSON output. Implies **-j**. 124 Generate human-readable JSON output. Implies **-j**.
125 125
126 -f, --bpffs 126 -f, --bpffs
127 Show file names of pinned programs. 127 When showing BPF programs, show file names of pinned
128 programs.
128 129
129EXAMPLES 130EXAMPLES
130======== 131========
@@ -206,4 +207,10 @@ EXAMPLES
206 207
207SEE ALSO 208SEE ALSO
208======== 209========
209 **bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8) 210 **bpf**\ (2),
211 **bpf-helpers**\ (7),
212 **bpftool**\ (8),
213 **bpftool-map**\ (8),
214 **bpftool-cgroup**\ (8),
215 **bpftool-net**\ (8),
216 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 04cd4f92ab89..129b7a9c0f9b 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -63,5 +63,10 @@ OPTIONS
63 63
64SEE ALSO 64SEE ALSO
65======== 65========
66 **bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8) 66 **bpf**\ (2),
67 **bpftool-perf**\ (8), **bpftool-net**\ (8) 67 **bpf-helpers**\ (7),
68 **bpftool-prog**\ (8),
69 **bpftool-map**\ (8),
70 **bpftool-cgroup**\ (8),
71 **bpftool-net**\ (8),
72 **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 25af85304ebe..70fd48d79f61 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -130,16 +130,17 @@ static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
130 return 0; 130 return 0;
131} 131}
132 132
133int open_obj_pinned(char *path) 133int open_obj_pinned(char *path, bool quiet)
134{ 134{
135 int fd; 135 int fd;
136 136
137 fd = bpf_obj_get(path); 137 fd = bpf_obj_get(path);
138 if (fd < 0) { 138 if (fd < 0) {
139 p_err("bpf obj get (%s): %s", path, 139 if (!quiet)
140 errno == EACCES && !is_bpffs(dirname(path)) ? 140 p_err("bpf obj get (%s): %s", path,
141 "directory not in bpf file system (bpffs)" : 141 errno == EACCES && !is_bpffs(dirname(path)) ?
142 strerror(errno)); 142 "directory not in bpf file system (bpffs)" :
143 strerror(errno));
143 return -1; 144 return -1;
144 } 145 }
145 146
@@ -151,7 +152,7 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
151 enum bpf_obj_type type; 152 enum bpf_obj_type type;
152 int fd; 153 int fd;
153 154
154 fd = open_obj_pinned(path); 155 fd = open_obj_pinned(path, false);
155 if (fd < 0) 156 if (fd < 0)
156 return -1; 157 return -1;
157 158
@@ -304,7 +305,7 @@ char *get_fdinfo(int fd, const char *key)
304 return NULL; 305 return NULL;
305 } 306 }
306 307
307 while ((n = getline(&line, &line_n, fdi))) { 308 while ((n = getline(&line, &line_n, fdi)) > 0) {
308 char *value; 309 char *value;
309 int len; 310 int len;
310 311
@@ -384,7 +385,7 @@ int build_pinned_obj_table(struct pinned_obj_table *tab,
384 while ((ftse = fts_read(fts))) { 385 while ((ftse = fts_read(fts))) {
385 if (!(ftse->fts_info & FTS_F)) 386 if (!(ftse->fts_info & FTS_F))
386 continue; 387 continue;
387 fd = open_obj_pinned(ftse->fts_path); 388 fd = open_obj_pinned(ftse->fts_path, true);
388 if (fd < 0) 389 if (fd < 0)
389 continue; 390 continue;
390 391
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 28322ace2856..a8bf1e2d9818 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -127,7 +127,7 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
127int get_fd_type(int fd); 127int get_fd_type(int fd);
128const char *get_fd_type_name(enum bpf_obj_type type); 128const char *get_fd_type_name(enum bpf_obj_type type);
129char *get_fdinfo(int fd, const char *key); 129char *get_fdinfo(int fd, const char *key);
130int open_obj_pinned(char *path); 130int open_obj_pinned(char *path, bool quiet);
131int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type); 131int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
132int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)); 132int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
133int do_pin_fd(int fd, const char *name); 133int do_pin_fd(int fd, const char *name);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 5302ee282409..ccee180dfb76 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -357,10 +357,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
357 if (!hash_empty(prog_table.table)) { 357 if (!hash_empty(prog_table.table)) {
358 struct pinned_obj *obj; 358 struct pinned_obj *obj;
359 359
360 printf("\n");
361 hash_for_each_possible(prog_table.table, obj, hash, info->id) { 360 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
362 if (obj->id == info->id) 361 if (obj->id == info->id)
363 printf("\tpinned %s\n", obj->path); 362 printf("\n\tpinned %s", obj->path);
364 } 363 }
365 } 364 }
366 365
@@ -845,6 +844,7 @@ static int do_load(int argc, char **argv)
845 } 844 }
846 NEXT_ARG(); 845 NEXT_ARG();
847 } else if (is_prefix(*argv, "map")) { 846 } else if (is_prefix(*argv, "map")) {
847 void *new_map_replace;
848 char *endptr, *name; 848 char *endptr, *name;
849 int fd; 849 int fd;
850 850
@@ -878,12 +878,15 @@ static int do_load(int argc, char **argv)
878 if (fd < 0) 878 if (fd < 0)
879 goto err_free_reuse_maps; 879 goto err_free_reuse_maps;
880 880
881 map_replace = reallocarray(map_replace, old_map_fds + 1, 881 new_map_replace = reallocarray(map_replace,
882 sizeof(*map_replace)); 882 old_map_fds + 1,
883 if (!map_replace) { 883 sizeof(*map_replace));
884 if (!new_map_replace) {
884 p_err("mem alloc failed"); 885 p_err("mem alloc failed");
885 goto err_free_reuse_maps; 886 goto err_free_reuse_maps;
886 } 887 }
888 map_replace = new_map_replace;
889
887 map_replace[old_map_fds].idx = idx; 890 map_replace[old_map_fds].idx = idx;
888 map_replace[old_map_fds].name = name; 891 map_replace[old_map_fds].name = name;
889 map_replace[old_map_fds].fd = fd; 892 map_replace[old_map_fds].fd = fd;
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index f216b2f5c3d7..d74bb9414d7c 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -33,6 +33,7 @@ FEATURE_TESTS_BASIC := \
33 dwarf_getlocations \ 33 dwarf_getlocations \
34 fortify-source \ 34 fortify-source \
35 sync-compare-and-swap \ 35 sync-compare-and-swap \
36 get_current_dir_name \
36 glibc \ 37 glibc \
37 gtk2 \ 38 gtk2 \
38 gtk2-infobar \ 39 gtk2-infobar \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 0516259be70f..304b984f11b9 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -7,6 +7,7 @@ FILES= \
7 test-dwarf_getlocations.bin \ 7 test-dwarf_getlocations.bin \
8 test-fortify-source.bin \ 8 test-fortify-source.bin \
9 test-sync-compare-and-swap.bin \ 9 test-sync-compare-and-swap.bin \
10 test-get_current_dir_name.bin \
10 test-glibc.bin \ 11 test-glibc.bin \
11 test-gtk2.bin \ 12 test-gtk2.bin \
12 test-gtk2-infobar.bin \ 13 test-gtk2-infobar.bin \
@@ -101,6 +102,9 @@ $(OUTPUT)test-bionic.bin:
101$(OUTPUT)test-libelf.bin: 102$(OUTPUT)test-libelf.bin:
102 $(BUILD) -lelf 103 $(BUILD) -lelf
103 104
105$(OUTPUT)test-get_current_dir_name.bin:
106 $(BUILD)
107
104$(OUTPUT)test-glibc.bin: 108$(OUTPUT)test-glibc.bin:
105 $(BUILD) 109 $(BUILD)
106 110
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 8dc20a61341f..56722bfe6bdd 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -34,6 +34,10 @@
34# include "test-libelf-mmap.c" 34# include "test-libelf-mmap.c"
35#undef main 35#undef main
36 36
37#define main main_test_get_current_dir_name
38# include "test-get_current_dir_name.c"
39#undef main
40
37#define main main_test_glibc 41#define main main_test_glibc
38# include "test-glibc.c" 42# include "test-glibc.c"
39#undef main 43#undef main
@@ -174,6 +178,7 @@ int main(int argc, char *argv[])
174 main_test_hello(); 178 main_test_hello();
175 main_test_libelf(); 179 main_test_libelf();
176 main_test_libelf_mmap(); 180 main_test_libelf_mmap();
181 main_test_get_current_dir_name();
177 main_test_glibc(); 182 main_test_glibc();
178 main_test_dwarf(); 183 main_test_dwarf();
179 main_test_dwarf_getlocations(); 184 main_test_dwarf_getlocations();
diff --git a/tools/build/feature/test-get_current_dir_name.c b/tools/build/feature/test-get_current_dir_name.c
new file mode 100644
index 000000000000..573000f93212
--- /dev/null
+++ b/tools/build/feature/test-get_current_dir_name.c
@@ -0,0 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
2#define _GNU_SOURCE
3#include <unistd.h>
4#include <stdlib.h>
5
6int main(void)
7{
8 free(get_current_dir_name());
9 return 0;
10}
diff --git a/tools/include/uapi/asm-generic/ioctls.h b/tools/include/uapi/asm-generic/ioctls.h
index 040651735662..cdc9f4ca8c27 100644
--- a/tools/include/uapi/asm-generic/ioctls.h
+++ b/tools/include/uapi/asm-generic/ioctls.h
@@ -79,6 +79,8 @@
79#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ 79#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
80#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */ 80#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
81#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */ 81#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
82#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816)
83#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816)
82 84
83#define FIONCLEX 0x5450 85#define FIONCLEX 0x5450
84#define FIOCLEX 0x5451 86#define FIOCLEX 0x5451
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 7f5634ce8e88..a4446f452040 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -529,6 +529,28 @@ typedef struct drm_i915_irq_wait {
529 */ 529 */
530#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51 530#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
531 531
532/*
533 * Once upon a time we supposed that writes through the GGTT would be
534 * immediately in physical memory (once flushed out of the CPU path). However,
535 * on a few different processors and chipsets, this is not necessarily the case
536 * as the writes appear to be buffered internally. Thus a read of the backing
537 * storage (physical memory) via a different path (with different physical tags
538 * to the indirect write via the GGTT) will see stale values from before
539 * the GGTT write. Inside the kernel, we can for the most part keep track of
540 * the different read/write domains in use (e.g. set-domain), but the assumption
541 * of coherency is baked into the ABI, hence reporting its true state in this
542 * parameter.
543 *
544 * Reports true when writes via mmap_gtt are immediately visible following an
545 * lfence to flush the WCB.
546 *
547 * Reports false when writes via mmap_gtt are indeterminately delayed in an in
548 * internal buffer and are _not_ immediately visible to third parties accessing
549 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
550 * communications channel when reporting false is strongly disadvised.
551 */
552#define I915_PARAM_MMAP_GTT_COHERENT 52
553
532typedef struct drm_i915_getparam { 554typedef struct drm_i915_getparam {
533 __s32 param; 555 __s32 param;
534 /* 556 /*
diff --git a/tools/include/uapi/linux/pkt_cls.h b/tools/include/uapi/linux/pkt_cls.h
new file mode 100644
index 000000000000..401d0c1e612d
--- /dev/null
+++ b/tools/include/uapi/linux/pkt_cls.h
@@ -0,0 +1,612 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __LINUX_PKT_CLS_H
3#define __LINUX_PKT_CLS_H
4
5#include <linux/types.h>
6#include <linux/pkt_sched.h>
7
8#define TC_COOKIE_MAX_SIZE 16
9
10/* Action attributes */
11enum {
12 TCA_ACT_UNSPEC,
13 TCA_ACT_KIND,
14 TCA_ACT_OPTIONS,
15 TCA_ACT_INDEX,
16 TCA_ACT_STATS,
17 TCA_ACT_PAD,
18 TCA_ACT_COOKIE,
19 __TCA_ACT_MAX
20};
21
22#define TCA_ACT_MAX __TCA_ACT_MAX
23#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
24#define TCA_ACT_MAX_PRIO 32
25#define TCA_ACT_BIND 1
26#define TCA_ACT_NOBIND 0
27#define TCA_ACT_UNBIND 1
28#define TCA_ACT_NOUNBIND 0
29#define TCA_ACT_REPLACE 1
30#define TCA_ACT_NOREPLACE 0
31
32#define TC_ACT_UNSPEC (-1)
33#define TC_ACT_OK 0
34#define TC_ACT_RECLASSIFY 1
35#define TC_ACT_SHOT 2
36#define TC_ACT_PIPE 3
37#define TC_ACT_STOLEN 4
38#define TC_ACT_QUEUED 5
39#define TC_ACT_REPEAT 6
40#define TC_ACT_REDIRECT 7
41#define TC_ACT_TRAP 8 /* For hw path, this means "trap to cpu"
42 * and don't further process the frame
43 * in hardware. For sw path, this is
44 * equivalent of TC_ACT_STOLEN - drop
45 * the skb and act like everything
46 * is alright.
47 */
48#define TC_ACT_VALUE_MAX TC_ACT_TRAP
49
50/* There is a special kind of actions called "extended actions",
51 * which need a value parameter. These have a local opcode located in
52 * the highest nibble, starting from 1. The rest of the bits
53 * are used to carry the value. These two parts together make
54 * a combined opcode.
55 */
56#define __TC_ACT_EXT_SHIFT 28
57#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
58#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
59#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
60#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
61
62#define TC_ACT_JUMP __TC_ACT_EXT(1)
63#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
64#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
65
66/* Action type identifiers*/
67enum {
68 TCA_ID_UNSPEC=0,
69 TCA_ID_POLICE=1,
70 /* other actions go here */
71 __TCA_ID_MAX=255
72};
73
74#define TCA_ID_MAX __TCA_ID_MAX
75
76struct tc_police {
77 __u32 index;
78 int action;
79#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
80#define TC_POLICE_OK TC_ACT_OK
81#define TC_POLICE_RECLASSIFY TC_ACT_RECLASSIFY
82#define TC_POLICE_SHOT TC_ACT_SHOT
83#define TC_POLICE_PIPE TC_ACT_PIPE
84
85 __u32 limit;
86 __u32 burst;
87 __u32 mtu;
88 struct tc_ratespec rate;
89 struct tc_ratespec peakrate;
90 int refcnt;
91 int bindcnt;
92 __u32 capab;
93};
94
95struct tcf_t {
96 __u64 install;
97 __u64 lastuse;
98 __u64 expires;
99 __u64 firstuse;
100};
101
102struct tc_cnt {
103 int refcnt;
104 int bindcnt;
105};
106
107#define tc_gen \
108 __u32 index; \
109 __u32 capab; \
110 int action; \
111 int refcnt; \
112 int bindcnt
113
114enum {
115 TCA_POLICE_UNSPEC,
116 TCA_POLICE_TBF,
117 TCA_POLICE_RATE,
118 TCA_POLICE_PEAKRATE,
119 TCA_POLICE_AVRATE,
120 TCA_POLICE_RESULT,
121 TCA_POLICE_TM,
122 TCA_POLICE_PAD,
123 __TCA_POLICE_MAX
124#define TCA_POLICE_RESULT TCA_POLICE_RESULT
125};
126
127#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
128
129/* tca flags definitions */
130#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) /* don't offload filter to HW */
131#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */
132#define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */
133#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
134#define TCA_CLS_FLAGS_VERBOSE (1 << 4) /* verbose logging */
135
136/* U32 filters */
137
138#define TC_U32_HTID(h) ((h)&0xFFF00000)
139#define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20)
140#define TC_U32_HASH(h) (((h)>>12)&0xFF)
141#define TC_U32_NODE(h) ((h)&0xFFF)
142#define TC_U32_KEY(h) ((h)&0xFFFFF)
143#define TC_U32_UNSPEC 0
144#define TC_U32_ROOT (0xFFF00000)
145
146enum {
147 TCA_U32_UNSPEC,
148 TCA_U32_CLASSID,
149 TCA_U32_HASH,
150 TCA_U32_LINK,
151 TCA_U32_DIVISOR,
152 TCA_U32_SEL,
153 TCA_U32_POLICE,
154 TCA_U32_ACT,
155 TCA_U32_INDEV,
156 TCA_U32_PCNT,
157 TCA_U32_MARK,
158 TCA_U32_FLAGS,
159 TCA_U32_PAD,
160 __TCA_U32_MAX
161};
162
163#define TCA_U32_MAX (__TCA_U32_MAX - 1)
164
165struct tc_u32_key {
166 __be32 mask;
167 __be32 val;
168 int off;
169 int offmask;
170};
171
172struct tc_u32_sel {
173 unsigned char flags;
174 unsigned char offshift;
175 unsigned char nkeys;
176
177 __be16 offmask;
178 __u16 off;
179 short offoff;
180
181 short hoff;
182 __be32 hmask;
183 struct tc_u32_key keys[0];
184};
185
186struct tc_u32_mark {
187 __u32 val;
188 __u32 mask;
189 __u32 success;
190};
191
192struct tc_u32_pcnt {
193 __u64 rcnt;
194 __u64 rhit;
195 __u64 kcnts[0];
196};
197
198/* Flags */
199
200#define TC_U32_TERMINAL 1
201#define TC_U32_OFFSET 2
202#define TC_U32_VAROFFSET 4
203#define TC_U32_EAT 8
204
205#define TC_U32_MAXDEPTH 8
206
207
208/* RSVP filter */
209
210enum {
211 TCA_RSVP_UNSPEC,
212 TCA_RSVP_CLASSID,
213 TCA_RSVP_DST,
214 TCA_RSVP_SRC,
215 TCA_RSVP_PINFO,
216 TCA_RSVP_POLICE,
217 TCA_RSVP_ACT,
218 __TCA_RSVP_MAX
219};
220
221#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
222
223struct tc_rsvp_gpi {
224 __u32 key;
225 __u32 mask;
226 int offset;
227};
228
229struct tc_rsvp_pinfo {
230 struct tc_rsvp_gpi dpi;
231 struct tc_rsvp_gpi spi;
232 __u8 protocol;
233 __u8 tunnelid;
234 __u8 tunnelhdr;
235 __u8 pad;
236};
237
238/* ROUTE filter */
239
240enum {
241 TCA_ROUTE4_UNSPEC,
242 TCA_ROUTE4_CLASSID,
243 TCA_ROUTE4_TO,
244 TCA_ROUTE4_FROM,
245 TCA_ROUTE4_IIF,
246 TCA_ROUTE4_POLICE,
247 TCA_ROUTE4_ACT,
248 __TCA_ROUTE4_MAX
249};
250
251#define TCA_ROUTE4_MAX (__TCA_ROUTE4_MAX - 1)
252
253
254/* FW filter */
255
256enum {
257 TCA_FW_UNSPEC,
258 TCA_FW_CLASSID,
259 TCA_FW_POLICE,
260 TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */
261 TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
262 TCA_FW_MASK,
263 __TCA_FW_MAX
264};
265
266#define TCA_FW_MAX (__TCA_FW_MAX - 1)
267
268/* TC index filter */
269
270enum {
271 TCA_TCINDEX_UNSPEC,
272 TCA_TCINDEX_HASH,
273 TCA_TCINDEX_MASK,
274 TCA_TCINDEX_SHIFT,
275 TCA_TCINDEX_FALL_THROUGH,
276 TCA_TCINDEX_CLASSID,
277 TCA_TCINDEX_POLICE,
278 TCA_TCINDEX_ACT,
279 __TCA_TCINDEX_MAX
280};
281
282#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)
283
284/* Flow filter */
285
286enum {
287 FLOW_KEY_SRC,
288 FLOW_KEY_DST,
289 FLOW_KEY_PROTO,
290 FLOW_KEY_PROTO_SRC,
291 FLOW_KEY_PROTO_DST,
292 FLOW_KEY_IIF,
293 FLOW_KEY_PRIORITY,
294 FLOW_KEY_MARK,
295 FLOW_KEY_NFCT,
296 FLOW_KEY_NFCT_SRC,
297 FLOW_KEY_NFCT_DST,
298 FLOW_KEY_NFCT_PROTO_SRC,
299 FLOW_KEY_NFCT_PROTO_DST,
300 FLOW_KEY_RTCLASSID,
301 FLOW_KEY_SKUID,
302 FLOW_KEY_SKGID,
303 FLOW_KEY_VLAN_TAG,
304 FLOW_KEY_RXHASH,
305 __FLOW_KEY_MAX,
306};
307
308#define FLOW_KEY_MAX (__FLOW_KEY_MAX - 1)
309
310enum {
311 FLOW_MODE_MAP,
312 FLOW_MODE_HASH,
313};
314
315enum {
316 TCA_FLOW_UNSPEC,
317 TCA_FLOW_KEYS,
318 TCA_FLOW_MODE,
319 TCA_FLOW_BASECLASS,
320 TCA_FLOW_RSHIFT,
321 TCA_FLOW_ADDEND,
322 TCA_FLOW_MASK,
323 TCA_FLOW_XOR,
324 TCA_FLOW_DIVISOR,
325 TCA_FLOW_ACT,
326 TCA_FLOW_POLICE,
327 TCA_FLOW_EMATCHES,
328 TCA_FLOW_PERTURB,
329 __TCA_FLOW_MAX
330};
331
332#define TCA_FLOW_MAX (__TCA_FLOW_MAX - 1)
333
334/* Basic filter */
335
336enum {
337 TCA_BASIC_UNSPEC,
338 TCA_BASIC_CLASSID,
339 TCA_BASIC_EMATCHES,
340 TCA_BASIC_ACT,
341 TCA_BASIC_POLICE,
342 __TCA_BASIC_MAX
343};
344
345#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
346
347
348/* Cgroup classifier */
349
350enum {
351 TCA_CGROUP_UNSPEC,
352 TCA_CGROUP_ACT,
353 TCA_CGROUP_POLICE,
354 TCA_CGROUP_EMATCHES,
355 __TCA_CGROUP_MAX,
356};
357
358#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
359
360/* BPF classifier */
361
362#define TCA_BPF_FLAG_ACT_DIRECT (1 << 0)
363
364enum {
365 TCA_BPF_UNSPEC,
366 TCA_BPF_ACT,
367 TCA_BPF_POLICE,
368 TCA_BPF_CLASSID,
369 TCA_BPF_OPS_LEN,
370 TCA_BPF_OPS,
371 TCA_BPF_FD,
372 TCA_BPF_NAME,
373 TCA_BPF_FLAGS,
374 TCA_BPF_FLAGS_GEN,
375 TCA_BPF_TAG,
376 TCA_BPF_ID,
377 __TCA_BPF_MAX,
378};
379
380#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
381
382/* Flower classifier */
383
384enum {
385 TCA_FLOWER_UNSPEC,
386 TCA_FLOWER_CLASSID,
387 TCA_FLOWER_INDEV,
388 TCA_FLOWER_ACT,
389 TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
390 TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
391 TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
392 TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
393 TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
394 TCA_FLOWER_KEY_IP_PROTO, /* u8 */
395 TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
396 TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
397 TCA_FLOWER_KEY_IPV4_DST, /* be32 */
398 TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
399 TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
400 TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
401 TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
402 TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
403 TCA_FLOWER_KEY_TCP_SRC, /* be16 */
404 TCA_FLOWER_KEY_TCP_DST, /* be16 */
405 TCA_FLOWER_KEY_UDP_SRC, /* be16 */
406 TCA_FLOWER_KEY_UDP_DST, /* be16 */
407
408 TCA_FLOWER_FLAGS,
409 TCA_FLOWER_KEY_VLAN_ID, /* be16 */
410 TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
411 TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
412
413 TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
414 TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
415 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
416 TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
417 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
418 TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
419 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
420 TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
421 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
422
423 TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
424 TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
425 TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
426 TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
427 TCA_FLOWER_KEY_SCTP_SRC_MASK, /* be16 */
428 TCA_FLOWER_KEY_SCTP_DST_MASK, /* be16 */
429
430 TCA_FLOWER_KEY_SCTP_SRC, /* be16 */
431 TCA_FLOWER_KEY_SCTP_DST, /* be16 */
432
433 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, /* be16 */
434 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, /* be16 */
435 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, /* be16 */
436 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, /* be16 */
437
438 TCA_FLOWER_KEY_FLAGS, /* be32 */
439 TCA_FLOWER_KEY_FLAGS_MASK, /* be32 */
440
441 TCA_FLOWER_KEY_ICMPV4_CODE, /* u8 */
442 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,/* u8 */
443 TCA_FLOWER_KEY_ICMPV4_TYPE, /* u8 */
444 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,/* u8 */
445 TCA_FLOWER_KEY_ICMPV6_CODE, /* u8 */
446 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,/* u8 */
447 TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */
448 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
449
450 TCA_FLOWER_KEY_ARP_SIP, /* be32 */
451 TCA_FLOWER_KEY_ARP_SIP_MASK, /* be32 */
452 TCA_FLOWER_KEY_ARP_TIP, /* be32 */
453 TCA_FLOWER_KEY_ARP_TIP_MASK, /* be32 */
454 TCA_FLOWER_KEY_ARP_OP, /* u8 */
455 TCA_FLOWER_KEY_ARP_OP_MASK, /* u8 */
456 TCA_FLOWER_KEY_ARP_SHA, /* ETH_ALEN */
457 TCA_FLOWER_KEY_ARP_SHA_MASK, /* ETH_ALEN */
458 TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */
459 TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */
460
461 TCA_FLOWER_KEY_MPLS_TTL, /* u8 - 8 bits */
462 TCA_FLOWER_KEY_MPLS_BOS, /* u8 - 1 bit */
463 TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */
464 TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */
465
466 TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */
467 TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */
468
469 TCA_FLOWER_KEY_IP_TOS, /* u8 */
470 TCA_FLOWER_KEY_IP_TOS_MASK, /* u8 */
471 TCA_FLOWER_KEY_IP_TTL, /* u8 */
472 TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
473
474 TCA_FLOWER_KEY_CVLAN_ID, /* be16 */
475 TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */
476 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */
477
478 TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */
479 TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
480 TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */
481 TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
482
483 TCA_FLOWER_KEY_ENC_OPTS,
484 TCA_FLOWER_KEY_ENC_OPTS_MASK,
485
486 TCA_FLOWER_IN_HW_COUNT,
487
488 __TCA_FLOWER_MAX,
489};
490
491#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
492
493enum {
494 TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
495 TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
496 * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
497 * attributes
498 */
499 __TCA_FLOWER_KEY_ENC_OPTS_MAX,
500};
501
502#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
503
504enum {
505 TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
506 TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */
507 TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
508 TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
509
510 __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
511};
512
513#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
514 (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
515
516enum {
517 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
518 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
519};
520
521/* Match-all classifier */
522
523enum {
524 TCA_MATCHALL_UNSPEC,
525 TCA_MATCHALL_CLASSID,
526 TCA_MATCHALL_ACT,
527 TCA_MATCHALL_FLAGS,
528 __TCA_MATCHALL_MAX,
529};
530
531#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1)
532
533/* Extended Matches */
534
535struct tcf_ematch_tree_hdr {
536 __u16 nmatches;
537 __u16 progid;
538};
539
540enum {
541 TCA_EMATCH_TREE_UNSPEC,
542 TCA_EMATCH_TREE_HDR,
543 TCA_EMATCH_TREE_LIST,
544 __TCA_EMATCH_TREE_MAX
545};
546#define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1)
547
548struct tcf_ematch_hdr {
549 __u16 matchid;
550 __u16 kind;
551 __u16 flags;
552 __u16 pad; /* currently unused */
553};
554
555/* 0 1
556 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
557 * +-----------------------+-+-+---+
558 * | Unused |S|I| R |
559 * +-----------------------+-+-+---+
560 *
561 * R(2) ::= relation to next ematch
562 * where: 0 0 END (last ematch)
563 * 0 1 AND
564 * 1 0 OR
565 * 1 1 Unused (invalid)
566 * I(1) ::= invert result
567 * S(1) ::= simple payload
568 */
569#define TCF_EM_REL_END 0
570#define TCF_EM_REL_AND (1<<0)
571#define TCF_EM_REL_OR (1<<1)
572#define TCF_EM_INVERT (1<<2)
573#define TCF_EM_SIMPLE (1<<3)
574
575#define TCF_EM_REL_MASK 3
576#define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK)
577
578enum {
579 TCF_LAYER_LINK,
580 TCF_LAYER_NETWORK,
581 TCF_LAYER_TRANSPORT,
582 __TCF_LAYER_MAX
583};
584#define TCF_LAYER_MAX (__TCF_LAYER_MAX - 1)
585
586/* Ematch type assignments
587 * 1..32767 Reserved for ematches inside kernel tree
588 * 32768..65535 Free to use, not reliable
589 */
590#define TCF_EM_CONTAINER 0
591#define TCF_EM_CMP 1
592#define TCF_EM_NBYTE 2
593#define TCF_EM_U32 3
594#define TCF_EM_META 4
595#define TCF_EM_TEXT 5
596#define TCF_EM_VLAN 6
597#define TCF_EM_CANID 7
598#define TCF_EM_IPSET 8
599#define TCF_EM_IPT 9
600#define TCF_EM_MAX 9
601
602enum {
603 TCF_EM_PROG_TC
604};
605
606enum {
607 TCF_EM_OPND_EQ,
608 TCF_EM_OPND_GT,
609 TCF_EM_OPND_LT
610};
611
612#endif
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b17201edfa09 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
212#define PR_SET_SPECULATION_CTRL 53 212#define PR_SET_SPECULATION_CTRL 53
213/* Speculation control variants */ 213/* Speculation control variants */
214# define PR_SPEC_STORE_BYPASS 0 214# define PR_SPEC_STORE_BYPASS 0
215# define PR_SPEC_INDIRECT_BRANCH 1
215/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ 216/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
216# define PR_SPEC_NOT_AFFECTED 0 217# define PR_SPEC_NOT_AFFECTED 0
217# define PR_SPEC_PRCTL (1UL << 0) 218# define PR_SPEC_PRCTL (1UL << 0)
diff --git a/tools/include/uapi/linux/tc_act/tc_bpf.h b/tools/include/uapi/linux/tc_act/tc_bpf.h
new file mode 100644
index 000000000000..6e89a5df49a4
--- /dev/null
+++ b/tools/include/uapi/linux/tc_act/tc_bpf.h
@@ -0,0 +1,37 @@
1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2/*
3 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef __LINUX_TC_BPF_H
12#define __LINUX_TC_BPF_H
13
14#include <linux/pkt_cls.h>
15
16#define TCA_ACT_BPF 13
17
18struct tc_act_bpf {
19 tc_gen;
20};
21
22enum {
23 TCA_ACT_BPF_UNSPEC,
24 TCA_ACT_BPF_TM,
25 TCA_ACT_BPF_PARMS,
26 TCA_ACT_BPF_OPS_LEN,
27 TCA_ACT_BPF_OPS,
28 TCA_ACT_BPF_FD,
29 TCA_ACT_BPF_NAME,
30 TCA_ACT_BPF_PAD,
31 TCA_ACT_BPF_TAG,
32 TCA_ACT_BPF_ID,
33 __TCA_ACT_BPF_MAX,
34};
35#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
36
37#endif
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 6dbb9fae0f9d..b8f3cca8e58b 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -31,6 +31,8 @@
31#include "elf.h" 31#include "elf.h"
32#include "warn.h" 32#include "warn.h"
33 33
34#define MAX_NAME_LEN 128
35
34struct section *find_section_by_name(struct elf *elf, const char *name) 36struct section *find_section_by_name(struct elf *elf, const char *name)
35{ 37{
36 struct section *sec; 38 struct section *sec;
@@ -298,6 +300,8 @@ static int read_symbols(struct elf *elf)
298 /* Create parent/child links for any cold subfunctions */ 300 /* Create parent/child links for any cold subfunctions */
299 list_for_each_entry(sec, &elf->sections, list) { 301 list_for_each_entry(sec, &elf->sections, list) {
300 list_for_each_entry(sym, &sec->symbol_list, list) { 302 list_for_each_entry(sym, &sec->symbol_list, list) {
303 char pname[MAX_NAME_LEN + 1];
304 size_t pnamelen;
301 if (sym->type != STT_FUNC) 305 if (sym->type != STT_FUNC)
302 continue; 306 continue;
303 sym->pfunc = sym->cfunc = sym; 307 sym->pfunc = sym->cfunc = sym;
@@ -305,14 +309,21 @@ static int read_symbols(struct elf *elf)
305 if (!coldstr) 309 if (!coldstr)
306 continue; 310 continue;
307 311
308 coldstr[0] = '\0'; 312 pnamelen = coldstr - sym->name;
309 pfunc = find_symbol_by_name(elf, sym->name); 313 if (pnamelen > MAX_NAME_LEN) {
310 coldstr[0] = '.'; 314 WARN("%s(): parent function name exceeds maximum length of %d characters",
315 sym->name, MAX_NAME_LEN);
316 return -1;
317 }
318
319 strncpy(pname, sym->name, pnamelen);
320 pname[pnamelen] = '\0';
321 pfunc = find_symbol_by_name(elf, pname);
311 322
312 if (!pfunc) { 323 if (!pfunc) {
313 WARN("%s(): can't find parent function", 324 WARN("%s(): can't find parent function",
314 sym->name); 325 sym->name);
315 goto err; 326 return -1;
316 } 327 }
317 328
318 sym->pfunc = pfunc; 329 sym->pfunc = pfunc;
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 236b9b97dfdb..667c14e56031 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -55,7 +55,6 @@ counted. The following modifiers exist:
55 S - read sample value (PERF_SAMPLE_READ) 55 S - read sample value (PERF_SAMPLE_READ)
56 D - pin the event to the PMU 56 D - pin the event to the PMU
57 W - group is weak and will fallback to non-group if not schedulable, 57 W - group is weak and will fallback to non-group if not schedulable,
58 only supported in 'perf stat' for now.
59 58
60The 'p' modifier can be used for specifying how precise the instruction 59The 'p' modifier can be used for specifying how precise the instruction
61address should be. The 'p' modifier can be specified multiple times: 60address should be. The 'p' modifier can be specified multiple times:
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index e30d20fb482d..a0e8c23f9125 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -299,6 +299,11 @@ ifndef NO_BIONIC
299 endif 299 endif
300endif 300endif
301 301
302ifeq ($(feature-get_current_dir_name), 1)
303 CFLAGS += -DHAVE_GET_CURRENT_DIR_NAME
304endif
305
306
302ifdef NO_LIBELF 307ifdef NO_LIBELF
303 NO_DWARF := 1 308 NO_DWARF := 1
304 NO_DEMANGLE := 1 309 NO_DEMANGLE := 1
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 3ccb4f0bf088..d95655489f7e 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -387,7 +387,7 @@ SHELL = $(SHELL_PATH)
387 387
388linux_uapi_dir := $(srctree)/tools/include/uapi/linux 388linux_uapi_dir := $(srctree)/tools/include/uapi/linux
389asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic 389asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
390arch_asm_uapi_dir := $(srctree)/tools/arch/$(ARCH)/include/uapi/asm/ 390arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
391 391
392beauty_outdir := $(OUTPUT)trace/beauty/generated 392beauty_outdir := $(OUTPUT)trace/beauty/generated
393beauty_ioctl_outdir := $(beauty_outdir)/ioctl 393beauty_ioctl_outdir := $(beauty_outdir)/ioctl
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 10cf889c6d75..488779bc4c8d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -391,7 +391,12 @@ try_again:
391 ui__warning("%s\n", msg); 391 ui__warning("%s\n", msg);
392 goto try_again; 392 goto try_again;
393 } 393 }
394 394 if ((errno == EINVAL || errno == EBADF) &&
395 pos->leader != pos &&
396 pos->weak_group) {
397 pos = perf_evlist__reset_weak_group(evlist, pos);
398 goto try_again;
399 }
395 rc = -errno; 400 rc = -errno;
396 perf_evsel__open_strerror(pos, &opts->target, 401 perf_evsel__open_strerror(pos, &opts->target,
397 errno, msg, sizeof(msg)); 402 errno, msg, sizeof(msg));
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d1028d7755bb..a635abfa77b6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -383,32 +383,6 @@ static bool perf_evsel__should_store_id(struct perf_evsel *counter)
383 return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID; 383 return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
384} 384}
385 385
386static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
387{
388 struct perf_evsel *c2, *leader;
389 bool is_open = true;
390
391 leader = evsel->leader;
392 pr_debug("Weak group for %s/%d failed\n",
393 leader->name, leader->nr_members);
394
395 /*
396 * for_each_group_member doesn't work here because it doesn't
397 * include the first entry.
398 */
399 evlist__for_each_entry(evsel_list, c2) {
400 if (c2 == evsel)
401 is_open = false;
402 if (c2->leader == leader) {
403 if (is_open)
404 perf_evsel__close(c2);
405 c2->leader = c2;
406 c2->nr_members = 0;
407 }
408 }
409 return leader;
410}
411
412static bool is_target_alive(struct target *_target, 386static bool is_target_alive(struct target *_target,
413 struct thread_map *threads) 387 struct thread_map *threads)
414{ 388{
@@ -477,7 +451,7 @@ try_again:
477 if ((errno == EINVAL || errno == EBADF) && 451 if ((errno == EINVAL || errno == EBADF) &&
478 counter->leader != counter && 452 counter->leader != counter &&
479 counter->weak_group) { 453 counter->weak_group) {
480 counter = perf_evsel__reset_weak_group(counter); 454 counter = perf_evlist__reset_weak_group(evsel_list, counter);
481 goto try_again; 455 goto try_again;
482 } 456 }
483 457
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b2838de13de0..aa0c73e57924 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1429,6 +1429,9 @@ int cmd_top(int argc, const char **argv)
1429 } 1429 }
1430 } 1430 }
1431 1431
1432 if (opts->branch_stack && callchain_param.enabled)
1433 symbol_conf.show_branchflag_count = true;
1434
1432 sort__mode = SORT_MODE__TOP; 1435 sort__mode = SORT_MODE__TOP;
1433 /* display thread wants entries to be collapsed in a different tree */ 1436 /* display thread wants entries to be collapsed in a different tree */
1434 perf_hpp_list.need_collapse = 1; 1437 perf_hpp_list.need_collapse = 1;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index dc8a6c4986ce..835619476370 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -108,6 +108,7 @@ struct trace {
108 } stats; 108 } stats;
109 unsigned int max_stack; 109 unsigned int max_stack;
110 unsigned int min_stack; 110 unsigned int min_stack;
111 bool raw_augmented_syscalls;
111 bool not_ev_qualifier; 112 bool not_ev_qualifier;
112 bool live; 113 bool live;
113 bool full_time; 114 bool full_time;
@@ -1724,13 +1725,28 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
1724 return printed; 1725 return printed;
1725} 1726}
1726 1727
1727static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size) 1728static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
1728{ 1729{
1729 void *augmented_args = NULL; 1730 void *augmented_args = NULL;
1731 /*
1732 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
1733 * and there we get all 6 syscall args plus the tracepoint common
1734 * fields (sizeof(long)) and the syscall_nr (another long). So we check
1735 * if that is the case and if so don't look after the sc->args_size,
1736 * but always after the full raw_syscalls:sys_enter payload, which is
1737 * fixed.
1738 *
1739 * We'll revisit this later to pass s->args_size to the BPF augmenter
1740 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
1741 * copies only what we need for each syscall, like what happens when we
1742 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
1743 * traffic to just what is needed for each syscall.
1744 */
1745 int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
1730 1746
1731 *augmented_args_size = sample->raw_size - sc->args_size; 1747 *augmented_args_size = sample->raw_size - args_size;
1732 if (*augmented_args_size > 0) 1748 if (*augmented_args_size > 0)
1733 augmented_args = sample->raw_data + sc->args_size; 1749 augmented_args = sample->raw_data + args_size;
1734 1750
1735 return augmented_args; 1751 return augmented_args;
1736} 1752}
@@ -1780,7 +1796,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
1780 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one. 1796 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
1781 */ 1797 */
1782 if (evsel != trace->syscalls.events.sys_enter) 1798 if (evsel != trace->syscalls.events.sys_enter)
1783 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size); 1799 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
1784 ttrace->entry_time = sample->time; 1800 ttrace->entry_time = sample->time;
1785 msg = ttrace->entry_str; 1801 msg = ttrace->entry_str;
1786 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name); 1802 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
@@ -1833,7 +1849,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
1833 goto out_put; 1849 goto out_put;
1834 1850
1835 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 1851 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
1836 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size); 1852 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
1837 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); 1853 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
1838 fprintf(trace->output, "%s", msg); 1854 fprintf(trace->output, "%s", msg);
1839 err = 0; 1855 err = 0;
@@ -3501,7 +3517,15 @@ int cmd_trace(int argc, const char **argv)
3501 evsel->handler = trace__sys_enter; 3517 evsel->handler = trace__sys_enter;
3502 3518
3503 evlist__for_each_entry(trace.evlist, evsel) { 3519 evlist__for_each_entry(trace.evlist, evsel) {
3520 bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
3521
3522 if (raw_syscalls_sys_exit) {
3523 trace.raw_augmented_syscalls = true;
3524 goto init_augmented_syscall_tp;
3525 }
3526
3504 if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) { 3527 if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
3528init_augmented_syscall_tp:
3505 perf_evsel__init_augmented_syscall_tp(evsel); 3529 perf_evsel__init_augmented_syscall_tp(evsel);
3506 perf_evsel__init_augmented_syscall_tp_ret(evsel); 3530 perf_evsel__init_augmented_syscall_tp_ret(evsel);
3507 evsel->handler = trace__sys_exit; 3531 evsel->handler = trace__sys_exit;
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
new file mode 100644
index 000000000000..90a19336310b
--- /dev/null
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -0,0 +1,131 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
4 *
5 * Test it with:
6 *
7 * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
8 *
9 * This exactly matches what is marshalled into the raw_syscall:sys_enter
10 * payload expected by the 'perf trace' beautifiers.
11 *
12 * For now it just uses the existing tracepoint augmentation code in 'perf
13 * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
14 * code that will combine entry/exit in a strace like way.
15 */
16
17#include <stdio.h>
18#include <linux/socket.h>
19
20/* bpf-output associated map */
21struct bpf_map SEC("maps") __augmented_syscalls__ = {
22 .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
23 .key_size = sizeof(int),
24 .value_size = sizeof(u32),
25 .max_entries = __NR_CPUS__,
26};
27
28struct syscall_enter_args {
29 unsigned long long common_tp_fields;
30 long syscall_nr;
31 unsigned long args[6];
32};
33
34struct syscall_exit_args {
35 unsigned long long common_tp_fields;
36 long syscall_nr;
37 long ret;
38};
39
40struct augmented_filename {
41 unsigned int size;
42 int reserved;
43 char value[256];
44};
45
46#define SYS_OPEN 2
47#define SYS_OPENAT 257
48
49SEC("raw_syscalls:sys_enter")
50int sys_enter(struct syscall_enter_args *args)
51{
52 struct {
53 struct syscall_enter_args args;
54 struct augmented_filename filename;
55 } augmented_args;
56 unsigned int len = sizeof(augmented_args);
57 const void *filename_arg = NULL;
58
59 probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
60 /*
61 * Yonghong and Edward Cree sayz:
62 *
63 * https://www.spinics.net/lists/netdev/msg531645.html
64 *
65 * >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
66 * >> 10: (bf) r1 = r6
67 * >> 11: (07) r1 += 16
68 * >> 12: (05) goto pc+2
69 * >> 15: (79) r3 = *(u64 *)(r1 +0)
70 * >> dereference of modified ctx ptr R1 off=16 disallowed
71 * > Aha, we at least got a different error message this time.
72 * > And indeed llvm has done that optimisation, rather than the more obvious
73 * > 11: r3 = *(u64 *)(r1 +16)
74 * > because it wants to have lots of reads share a single insn. You may be able
75 * > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
76 * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
77 * > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
78 *
79 * The optimization mostly likes below:
80 *
81 * br1:
82 * ...
83 * r1 += 16
84 * goto merge
85 * br2:
86 * ...
87 * r1 += 20
88 * goto merge
89 * merge:
90 * *(u64 *)(r1 + 0)
91 *
92 * The compiler tries to merge common loads. There is no easy way to
93 * stop this compiler optimization without turning off a lot of other
94 * optimizations. The easiest way is to add barriers:
95 *
96 * __asm__ __volatile__("": : :"memory")
97 *
98 * after the ctx memory access to prevent their down stream merging.
99 */
100 switch (augmented_args.args.syscall_nr) {
101 case SYS_OPEN: filename_arg = (const void *)args->args[0];
102 __asm__ __volatile__("": : :"memory");
103 break;
104 case SYS_OPENAT: filename_arg = (const void *)args->args[1];
105 break;
106 }
107
108 if (filename_arg != NULL) {
109 augmented_args.filename.reserved = 0;
110 augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
111 sizeof(augmented_args.filename.value),
112 filename_arg);
113 if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
114 len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
115 len &= sizeof(augmented_args.filename.value) - 1;
116 }
117 } else {
118 len = sizeof(augmented_args.args);
119 }
120
121 perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
122 return 0;
123}
124
125SEC("raw_syscalls:sys_exit")
126int sys_exit(struct syscall_exit_args *args)
127{
128 return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
129}
130
131license(GPL);
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index ac1bcdc17dae..f7eb63cbbc65 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -125,7 +125,7 @@ perf_get_timestamp(void)
125} 125}
126 126
127static int 127static int
128debug_cache_init(void) 128create_jit_cache_dir(void)
129{ 129{
130 char str[32]; 130 char str[32];
131 char *base, *p; 131 char *base, *p;
@@ -144,8 +144,13 @@ debug_cache_init(void)
144 144
145 strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm); 145 strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
146 146
147 snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base); 147 ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
148 148 if (ret >= PATH_MAX) {
149 warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
150 " is too long, please check the cwd, JITDUMPDIR, and"
151 " HOME variables", base);
152 return -1;
153 }
149 ret = mkdir(jit_path, 0755); 154 ret = mkdir(jit_path, 0755);
150 if (ret == -1) { 155 if (ret == -1) {
151 if (errno != EEXIST) { 156 if (errno != EEXIST) {
@@ -154,20 +159,32 @@ debug_cache_init(void)
154 } 159 }
155 } 160 }
156 161
157 snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base); 162 ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
163 if (ret >= PATH_MAX) {
164 warnx("jvmti: cannot generate jit cache dir because"
165 " %s/.debug/jit is too long, please check the cwd,"
166 " JITDUMPDIR, and HOME variables", base);
167 return -1;
168 }
158 ret = mkdir(jit_path, 0755); 169 ret = mkdir(jit_path, 0755);
159 if (ret == -1) { 170 if (ret == -1) {
160 if (errno != EEXIST) { 171 if (errno != EEXIST) {
161 warn("cannot create jit cache dir %s", jit_path); 172 warn("jvmti: cannot create jit cache dir %s", jit_path);
162 return -1; 173 return -1;
163 } 174 }
164 } 175 }
165 176
166 snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str); 177 ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
167 178 if (ret >= PATH_MAX) {
179 warnx("jvmti: cannot generate jit cache dir because"
180 " %s/.debug/jit/%s.XXXXXXXX is too long, please check"
181 " the cwd, JITDUMPDIR, and HOME variables",
182 base, str);
183 return -1;
184 }
168 p = mkdtemp(jit_path); 185 p = mkdtemp(jit_path);
169 if (p != jit_path) { 186 if (p != jit_path) {
170 warn("cannot create jit cache dir %s", jit_path); 187 warn("jvmti: cannot create jit cache dir %s", jit_path);
171 return -1; 188 return -1;
172 } 189 }
173 190
@@ -228,7 +245,7 @@ void *jvmti_open(void)
228{ 245{
229 char dump_path[PATH_MAX]; 246 char dump_path[PATH_MAX];
230 struct jitheader header; 247 struct jitheader header;
231 int fd; 248 int fd, ret;
232 FILE *fp; 249 FILE *fp;
233 250
234 init_arch_timestamp(); 251 init_arch_timestamp();
@@ -245,12 +262,22 @@ void *jvmti_open(void)
245 262
246 memset(&header, 0, sizeof(header)); 263 memset(&header, 0, sizeof(header));
247 264
248 debug_cache_init(); 265 /*
266 * jitdump file dir
267 */
268 if (create_jit_cache_dir() < 0)
269 return NULL;
249 270
250 /* 271 /*
251 * jitdump file name 272 * jitdump file name
252 */ 273 */
253 scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); 274 ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
275 if (ret >= PATH_MAX) {
276 warnx("jvmti: cannot generate jitdump file full path because"
277 " %s/jit-%i.dump is too long, please check the cwd,"
278 " JITDUMPDIR, and HOME variables", jit_path, getpid());
279 return NULL;
280 }
254 281
255 fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666); 282 fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
256 if (fd == -1) 283 if (fd == -1)
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 24cb0bd56afa..f278ce5ebab7 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -119,6 +119,14 @@ def dsoname(name):
119 return "[kernel]" 119 return "[kernel]"
120 return name 120 return name
121 121
122def findnth(s, sub, n, offs=0):
123 pos = s.find(sub)
124 if pos < 0:
125 return pos
126 if n <= 1:
127 return offs + pos
128 return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
129
122# Percent to one decimal place 130# Percent to one decimal place
123 131
124def PercentToOneDP(n, d): 132def PercentToOneDP(n, d):
@@ -1464,6 +1472,317 @@ class BranchWindow(QMdiSubWindow):
1464 else: 1472 else:
1465 self.find_bar.NotFound() 1473 self.find_bar.NotFound()
1466 1474
1475# Dialog data item converted and validated using a SQL table
1476
1477class SQLTableDialogDataItem():
1478
1479 def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
1480 self.glb = glb
1481 self.label = label
1482 self.placeholder_text = placeholder_text
1483 self.table_name = table_name
1484 self.match_column = match_column
1485 self.column_name1 = column_name1
1486 self.column_name2 = column_name2
1487 self.parent = parent
1488
1489 self.value = ""
1490
1491 self.widget = QLineEdit()
1492 self.widget.editingFinished.connect(self.Validate)
1493 self.widget.textChanged.connect(self.Invalidate)
1494 self.red = False
1495 self.error = ""
1496 self.validated = True
1497
1498 self.last_id = 0
1499 self.first_time = 0
1500 self.last_time = 2 ** 64
1501 if self.table_name == "<timeranges>":
1502 query = QSqlQuery(self.glb.db)
1503 QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
1504 if query.next():
1505 self.last_id = int(query.value(0))
1506 self.last_time = int(query.value(1))
1507 QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
1508 if query.next():
1509 self.first_time = int(query.value(0))
1510 if placeholder_text:
1511 placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
1512
1513 if placeholder_text:
1514 self.widget.setPlaceholderText(placeholder_text)
1515
1516 def ValueToIds(self, value):
1517 ids = []
1518 query = QSqlQuery(self.glb.db)
1519 stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
1520 ret = query.exec_(stmt)
1521 if ret:
1522 while query.next():
1523 ids.append(str(query.value(0)))
1524 return ids
1525
1526 def IdBetween(self, query, lower_id, higher_id, order):
1527 QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
1528 if query.next():
1529 return True, int(query.value(0))
1530 else:
1531 return False, 0
1532
1533 def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
1534 query = QSqlQuery(self.glb.db)
1535 while True:
1536 next_id = int((lower_id + higher_id) / 2)
1537 QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
1538 if not query.next():
1539 ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
1540 if not ok:
1541 ok, dbid = self.IdBetween(query, next_id, higher_id, "")
1542 if not ok:
1543 return str(higher_id)
1544 next_id = dbid
1545 QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
1546 next_time = int(query.value(0))
1547 if get_floor:
1548 if target_time > next_time:
1549 lower_id = next_id
1550 else:
1551 higher_id = next_id
1552 if higher_id <= lower_id + 1:
1553 return str(higher_id)
1554 else:
1555 if target_time >= next_time:
1556 lower_id = next_id
1557 else:
1558 higher_id = next_id
1559 if higher_id <= lower_id + 1:
1560 return str(lower_id)
1561
1562 def ConvertRelativeTime(self, val):
1563 print "val ", val
1564 mult = 1
1565 suffix = val[-2:]
1566 if suffix == "ms":
1567 mult = 1000000
1568 elif suffix == "us":
1569 mult = 1000
1570 elif suffix == "ns":
1571 mult = 1
1572 else:
1573 return val
1574 val = val[:-2].strip()
1575 if not self.IsNumber(val):
1576 return val
1577 val = int(val) * mult
1578 if val >= 0:
1579 val += self.first_time
1580 else:
1581 val += self.last_time
1582 return str(val)
1583
1584 def ConvertTimeRange(self, vrange):
1585 print "vrange ", vrange
1586 if vrange[0] == "":
1587 vrange[0] = str(self.first_time)
1588 if vrange[1] == "":
1589 vrange[1] = str(self.last_time)
1590 vrange[0] = self.ConvertRelativeTime(vrange[0])
1591 vrange[1] = self.ConvertRelativeTime(vrange[1])
1592 print "vrange2 ", vrange
1593 if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
1594 return False
1595 print "ok1"
1596 beg_range = max(int(vrange[0]), self.first_time)
1597 end_range = min(int(vrange[1]), self.last_time)
1598 if beg_range > self.last_time or end_range < self.first_time:
1599 return False
1600 print "ok2"
1601 vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
1602 vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
1603 print "vrange3 ", vrange
1604 return True
1605
1606 def AddTimeRange(self, value, ranges):
1607 print "value ", value
1608 n = value.count("-")
1609 if n == 1:
1610 pass
1611 elif n == 2:
1612 if value.split("-")[1].strip() == "":
1613 n = 1
1614 elif n == 3:
1615 n = 2
1616 else:
1617 return False
1618 pos = findnth(value, "-", n)
1619 vrange = [value[:pos].strip() ,value[pos+1:].strip()]
1620 if self.ConvertTimeRange(vrange):
1621 ranges.append(vrange)
1622 return True
1623 return False
1624
1625 def InvalidValue(self, value):
1626 self.value = ""
1627 palette = QPalette()
1628 palette.setColor(QPalette.Text,Qt.red)
1629 self.widget.setPalette(palette)
1630 self.red = True
1631 self.error = self.label + " invalid value '" + value + "'"
1632 self.parent.ShowMessage(self.error)
1633
1634 def IsNumber(self, value):
1635 try:
1636 x = int(value)
1637 except:
1638 x = 0
1639 return str(x) == value
1640
1641 def Invalidate(self):
1642 self.validated = False
1643
1644 def Validate(self):
1645 input_string = self.widget.text()
1646 self.validated = True
1647 if self.red:
1648 palette = QPalette()
1649 self.widget.setPalette(palette)
1650 self.red = False
1651 if not len(input_string.strip()):
1652 self.error = ""
1653 self.value = ""
1654 return
1655 if self.table_name == "<timeranges>":
1656 ranges = []
1657 for value in [x.strip() for x in input_string.split(",")]:
1658 if not self.AddTimeRange(value, ranges):
1659 return self.InvalidValue(value)
1660 ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
1661 self.value = " OR ".join(ranges)
1662 elif self.table_name == "<ranges>":
1663 singles = []
1664 ranges = []
1665 for value in [x.strip() for x in input_string.split(",")]:
1666 if "-" in value:
1667 vrange = value.split("-")
1668 if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
1669 return self.InvalidValue(value)
1670 ranges.append(vrange)
1671 else:
1672 if not self.IsNumber(value):
1673 return self.InvalidValue(value)
1674 singles.append(value)
1675 ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
1676 if len(singles):
1677 ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
1678 self.value = " OR ".join(ranges)
1679 elif self.table_name:
1680 all_ids = []
1681 for value in [x.strip() for x in input_string.split(",")]:
1682 ids = self.ValueToIds(value)
1683 if len(ids):
1684 all_ids.extend(ids)
1685 else:
1686 return self.InvalidValue(value)
1687 self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
1688 if self.column_name2:
1689 self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
1690 else:
1691 self.value = input_string.strip()
1692 self.error = ""
1693 self.parent.ClearMessage()
1694
1695 def IsValid(self):
1696 if not self.validated:
1697 self.Validate()
1698 if len(self.error):
1699 self.parent.ShowMessage(self.error)
1700 return False
1701 return True
1702
1703# Selected branch report creation dialog
1704
1705class SelectedBranchDialog(QDialog):
1706
1707 def __init__(self, glb, parent=None):
1708 super(SelectedBranchDialog, self).__init__(parent)
1709
1710 self.glb = glb
1711
1712 self.name = ""
1713 self.where_clause = ""
1714
1715 self.setWindowTitle("Selected Branches")
1716 self.setMinimumWidth(600)
1717
1718 items = (
1719 ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
1720 ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
1721 ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
1722 ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
1723 ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
1724 ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
1725 ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
1726 ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
1727 ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
1728 )
1729 self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
1730
1731 self.grid = QGridLayout()
1732
1733 for row in xrange(len(self.data_items)):
1734 self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
1735 self.grid.addWidget(self.data_items[row].widget, row, 1)
1736
1737 self.status = QLabel()
1738
1739 self.ok_button = QPushButton("Ok", self)
1740 self.ok_button.setDefault(True)
1741 self.ok_button.released.connect(self.Ok)
1742 self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
1743
1744 self.cancel_button = QPushButton("Cancel", self)
1745 self.cancel_button.released.connect(self.reject)
1746 self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
1747
1748 self.hbox = QHBoxLayout()
1749 #self.hbox.addStretch()
1750 self.hbox.addWidget(self.status)
1751 self.hbox.addWidget(self.ok_button)
1752 self.hbox.addWidget(self.cancel_button)
1753
1754 self.vbox = QVBoxLayout()
1755 self.vbox.addLayout(self.grid)
1756 self.vbox.addLayout(self.hbox)
1757
1758 self.setLayout(self.vbox);
1759
1760 def Ok(self):
1761 self.name = self.data_items[0].value
1762 if not self.name:
1763 self.ShowMessage("Report name is required")
1764 return
1765 for d in self.data_items:
1766 if not d.IsValid():
1767 return
1768 for d in self.data_items[1:]:
1769 if len(d.value):
1770 if len(self.where_clause):
1771 self.where_clause += " AND "
1772 self.where_clause += d.value
1773 if len(self.where_clause):
1774 self.where_clause = " AND ( " + self.where_clause + " ) "
1775 else:
1776 self.ShowMessage("No selection")
1777 return
1778 self.accept()
1779
1780 def ShowMessage(self, msg):
1781 self.status.setText("<font color=#FF0000>" + msg)
1782
1783 def ClearMessage(self):
1784 self.status.setText("")
1785
1467# Event list 1786# Event list
1468 1787
1469def GetEventList(db): 1788def GetEventList(db):
@@ -1656,7 +1975,7 @@ class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
1656 def FindDone(self, row): 1975 def FindDone(self, row):
1657 self.find_bar.Idle() 1976 self.find_bar.Idle()
1658 if row >= 0: 1977 if row >= 0:
1659 self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex())) 1978 self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
1660 else: 1979 else:
1661 self.find_bar.NotFound() 1980 self.find_bar.NotFound()
1662 1981
@@ -1765,6 +2084,149 @@ class WindowMenu():
1765 def setActiveSubWindow(self, nr): 2084 def setActiveSubWindow(self, nr):
1766 self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1]) 2085 self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
1767 2086
2087# Help text
2088
2089glb_help_text = """
2090<h1>Contents</h1>
2091<style>
2092p.c1 {
2093 text-indent: 40px;
2094}
2095p.c2 {
2096 text-indent: 80px;
2097}
2098}
2099</style>
2100<p class=c1><a href=#reports>1. Reports</a></p>
2101<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
2102<p class=c2><a href=#allbranches>1.2 All branches</a></p>
2103<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
2104<p class=c1><a href=#tables>2. Tables</a></p>
2105<h1 id=reports>1. Reports</h1>
2106<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
2107The result is a GUI window with a tree representing a context-sensitive
2108call-graph. Expanding a couple of levels of the tree and adjusting column
2109widths to suit will display something like:
2110<pre>
2111 Call Graph: pt_example
2112Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
2113v- ls
2114 v- 2638:2638
2115 v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
2116 |- unknown unknown 1 13198 0.1 1 0.0
2117 >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
2118 >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
2119 v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
2120 >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
2121 >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
2122 >- __libc_csu_init ls 1 10354 0.1 10 0.0
2123 |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
2124 v- main ls 1 8182043 99.6 180254 99.9
2125</pre>
2126<h3>Points to note:</h3>
2127<ul>
2128<li>The top level is a command name (comm)</li>
2129<li>The next level is a thread (pid:tid)</li>
2130<li>Subsequent levels are functions</li>
2131<li>'Count' is the number of calls</li>
2132<li>'Time' is the elapsed time until the function returns</li>
2133<li>Percentages are relative to the level above</li>
2134<li>'Branch Count' is the total number of branches for that function and all functions that it calls
2135</ul>
2136<h3>Find</h3>
2137Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
2138The pattern matching symbols are ? for any character and * for zero or more characters.
2139<h2 id=allbranches>1.2 All branches</h2>
2140The All branches report displays all branches in chronological order.
2141Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
2142<h3>Disassembly</h3>
2143Open a branch to display disassembly. This only works if:
2144<ol>
2145<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
2146<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
2147The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
2148One exception is kcore where the DSO long name is used (refer dsos_view on the Tables menu),
2149or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
2150</ol>
2151<h4 id=xed>Intel XED Setup</h4>
2152To use Intel XED, libxed.so must be present. To build and install libxed.so:
2153<pre>
2154git clone https://github.com/intelxed/mbuild.git mbuild
2155git clone https://github.com/intelxed/xed
2156cd xed
2157./mfile.py --share
2158sudo ./mfile.py --prefix=/usr/local install
2159sudo ldconfig
2160</pre>
2161<h3>Find</h3>
2162Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
2163Refer to Python documentation for the regular expression syntax.
2164All columns are searched, but only currently fetched rows are searched.
2165<h2 id=selectedbranches>1.3 Selected branches</h2>
2166This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
2167by various selection criteria. A dialog box displays available criteria which are AND'ed together.
2168<h3>1.3.1 Time ranges</h3>
2169The time ranges hint text shows the total time range. Relative time ranges can also be entered in
2170ms, us or ns. Also, negative values are relative to the end of trace. Examples:
2171<pre>
2172 81073085947329-81073085958238 From 81073085947329 to 81073085958238
2173 100us-200us From 100us to 200us
2174 10ms- From 10ms to the end
2175 -100ns The first 100ns
2176 -10ms- The last 10ms
2177</pre>
2178N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
2179<h1 id=tables>2. Tables</h1>
2180The Tables menu shows all tables and views in the database. Most tables have an associated view
2181which displays the information in a more friendly way. Not all data for large tables is fetched
2182immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
2183but that can be slow for large tables.
2184<p>There are also tables of database meta-information.
2185For SQLite3 databases, the sqlite_master table is included.
2186For PostgreSQL databases, information_schema.tables/views/columns are included.
2187<h3>Find</h3>
2188Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
2189Refer to Python documentation for the regular expression syntax.
2190All columns are searched, but only currently fetched rows are searched.
2191<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
2192will go to the next/previous result in id order, instead of display order.
2193"""
2194
2195# Help window
2196
2197class HelpWindow(QMdiSubWindow):
2198
2199 def __init__(self, glb, parent=None):
2200 super(HelpWindow, self).__init__(parent)
2201
2202 self.text = QTextBrowser()
2203 self.text.setHtml(glb_help_text)
2204 self.text.setReadOnly(True)
2205 self.text.setOpenExternalLinks(True)
2206
2207 self.setWidget(self.text)
2208
2209 AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
2210
2211# Main window that only displays the help text
2212
2213class HelpOnlyWindow(QMainWindow):
2214
2215 def __init__(self, parent=None):
2216 super(HelpOnlyWindow, self).__init__(parent)
2217
2218 self.setMinimumSize(200, 100)
2219 self.resize(800, 600)
2220 self.setWindowTitle("Exported SQL Viewer Help")
2221 self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
2222
2223 self.text = QTextBrowser()
2224 self.text.setHtml(glb_help_text)
2225 self.text.setReadOnly(True)
2226 self.text.setOpenExternalLinks(True)
2227
2228 self.setCentralWidget(self.text)
2229
1768# Font resize 2230# Font resize
1769 2231
1770def ResizeFont(widget, diff): 2232def ResizeFont(widget, diff):
@@ -1851,6 +2313,9 @@ class MainWindow(QMainWindow):
1851 2313
1852 self.window_menu = WindowMenu(self.mdi_area, menu) 2314 self.window_menu = WindowMenu(self.mdi_area, menu)
1853 2315
2316 help_menu = menu.addMenu("&Help")
2317 help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
2318
1854 def Find(self): 2319 def Find(self):
1855 win = self.mdi_area.activeSubWindow() 2320 win = self.mdi_area.activeSubWindow()
1856 if win: 2321 if win:
@@ -1888,6 +2353,8 @@ class MainWindow(QMainWindow):
1888 if event == "branches": 2353 if event == "branches":
1889 label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")" 2354 label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
1890 reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self)) 2355 reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
2356 label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
2357 reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
1891 2358
1892 def TableMenu(self, tables, menu): 2359 def TableMenu(self, tables, menu):
1893 table_menu = menu.addMenu("&Tables") 2360 table_menu = menu.addMenu("&Tables")
@@ -1900,9 +2367,18 @@ class MainWindow(QMainWindow):
1900 def NewBranchView(self, event_id): 2367 def NewBranchView(self, event_id):
1901 BranchWindow(self.glb, event_id, "", "", self) 2368 BranchWindow(self.glb, event_id, "", "", self)
1902 2369
2370 def NewSelectedBranchView(self, event_id):
2371 dialog = SelectedBranchDialog(self.glb, self)
2372 ret = dialog.exec_()
2373 if ret:
2374 BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
2375
1903 def NewTableView(self, table_name): 2376 def NewTableView(self, table_name):
1904 TableWindow(self.glb, table_name, self) 2377 TableWindow(self.glb, table_name, self)
1905 2378
2379 def Help(self):
2380 HelpWindow(self.glb, self)
2381
1906# XED Disassembler 2382# XED Disassembler
1907 2383
1908class xed_state_t(Structure): 2384class xed_state_t(Structure):
@@ -1929,7 +2405,12 @@ class XEDInstruction():
1929class LibXED(): 2405class LibXED():
1930 2406
1931 def __init__(self): 2407 def __init__(self):
1932 self.libxed = CDLL("libxed.so") 2408 try:
2409 self.libxed = CDLL("libxed.so")
2410 except:
2411 self.libxed = None
2412 if not self.libxed:
2413 self.libxed = CDLL("/usr/local/lib/libxed.so")
1933 2414
1934 self.xed_tables_init = self.libxed.xed_tables_init 2415 self.xed_tables_init = self.libxed.xed_tables_init
1935 self.xed_tables_init.restype = None 2416 self.xed_tables_init.restype = None
@@ -2097,10 +2578,16 @@ class DBRef():
2097 2578
2098def Main(): 2579def Main():
2099 if (len(sys.argv) < 2): 2580 if (len(sys.argv) < 2):
2100 print >> sys.stderr, "Usage is: exported-sql-viewer.py <database name>" 2581 print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
2101 raise Exception("Too few arguments") 2582 raise Exception("Too few arguments")
2102 2583
2103 dbname = sys.argv[1] 2584 dbname = sys.argv[1]
2585 if dbname == "--help-only":
2586 app = QApplication(sys.argv)
2587 mainwindow = HelpOnlyWindow()
2588 mainwindow.show()
2589 err = app.exec_()
2590 sys.exit(err)
2104 2591
2105 is_sqlite3 = False 2592 is_sqlite3 = False
2106 try: 2593 try:
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 37940665f736..efd0157b9d22 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -9,7 +9,7 @@ size=112
9config=0 9config=0
10sample_period=* 10sample_period=*
11sample_type=263 11sample_type=263
12read_format=0 12read_format=0|4
13disabled=1 13disabled=1
14inherit=1 14inherit=1
15pinned=0 15pinned=0
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index 8a33ca4f9e1f..f0729c454f16 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -37,4 +37,3 @@ sample_freq=0
37sample_period=0 37sample_period=0
38freq=0 38freq=0
39write_backward=0 39write_backward=0
40sample_id_all=0
diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
index 5d2a7fd8d407..eae59ad15ce3 100644
--- a/tools/perf/trace/beauty/ioctl.c
+++ b/tools/perf/trace/beauty/ioctl.c
@@ -31,6 +31,7 @@ static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
31 "TCSETSW2", "TCSETSF2", "TIOCGRS48", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK", 31 "TCSETSW2", "TCSETSF2", "TIOCGRS48", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
32 "TIOCGDEV", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG", "TIOCVHANGUP", "TIOCGPKT", 32 "TIOCGDEV", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG", "TIOCVHANGUP", "TIOCGPKT",
33 "TIOCGPTLCK", [_IOC_NR(TIOCGEXCL)] = "TIOCGEXCL", "TIOCGPTPEER", 33 "TIOCGPTLCK", [_IOC_NR(TIOCGEXCL)] = "TIOCGEXCL", "TIOCGPTPEER",
34 "TIOCGISO7816", "TIOCSISO7816",
34 [_IOC_NR(FIONCLEX)] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG", 35 [_IOC_NR(FIONCLEX)] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
35 "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS", 36 "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
36 "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI", 37 "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index ecd9f9ceda77..b7bf201fe8a8 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -10,6 +10,7 @@ libperf-y += evlist.o
10libperf-y += evsel.o 10libperf-y += evsel.o
11libperf-y += evsel_fprintf.o 11libperf-y += evsel_fprintf.o
12libperf-y += find_bit.o 12libperf-y += find_bit.o
13libperf-y += get_current_dir_name.o
13libperf-y += kallsyms.o 14libperf-y += kallsyms.o
14libperf-y += levenshtein.o 15libperf-y += levenshtein.o
15libperf-y += llvm-utils.o 16libperf-y += llvm-utils.o
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e88e6f9b1463..668d2a9ef0f4 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1810,3 +1810,30 @@ void perf_evlist__force_leader(struct perf_evlist *evlist)
1810 leader->forced_leader = true; 1810 leader->forced_leader = true;
1811 } 1811 }
1812} 1812}
1813
1814struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
1815 struct perf_evsel *evsel)
1816{
1817 struct perf_evsel *c2, *leader;
1818 bool is_open = true;
1819
1820 leader = evsel->leader;
1821 pr_debug("Weak group for %s/%d failed\n",
1822 leader->name, leader->nr_members);
1823
1824 /*
1825 * for_each_group_member doesn't work here because it doesn't
1826 * include the first entry.
1827 */
1828 evlist__for_each_entry(evsel_list, c2) {
1829 if (c2 == evsel)
1830 is_open = false;
1831 if (c2->leader == leader) {
1832 if (is_open)
1833 perf_evsel__close(c2);
1834 c2->leader = c2;
1835 c2->nr_members = 0;
1836 }
1837 }
1838 return leader;
1839}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index dc66436add98..9919eed6d15b 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -312,4 +312,7 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
312 312
313void perf_evlist__force_leader(struct perf_evlist *evlist); 313void perf_evlist__force_leader(struct perf_evlist *evlist);
314 314
315struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
316 struct perf_evsel *evsel);
317
315#endif /* __PERF_EVLIST_H */ 318#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 6d187059a373..dbc0466db368 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -956,7 +956,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
956 attr->sample_freq = 0; 956 attr->sample_freq = 0;
957 attr->sample_period = 0; 957 attr->sample_period = 0;
958 attr->write_backward = 0; 958 attr->write_backward = 0;
959 attr->sample_id_all = 0;
960 } 959 }
961 960
962 if (opts->no_samples) 961 if (opts->no_samples)
@@ -1093,7 +1092,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
1093 attr->exclude_user = 1; 1092 attr->exclude_user = 1;
1094 } 1093 }
1095 1094
1096 if (evsel->own_cpus) 1095 if (evsel->own_cpus || evsel->unit)
1097 evsel->attr.read_format |= PERF_FORMAT_ID; 1096 evsel->attr.read_format |= PERF_FORMAT_ID;
1098 1097
1099 /* 1098 /*
diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c
new file mode 100644
index 000000000000..267aa609a582
--- /dev/null
+++ b/tools/perf/util/get_current_dir_name.c
@@ -0,0 +1,18 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3//
4#ifndef HAVE_GET_CURRENT_DIR_NAME
5#include "util.h"
6#include <unistd.h>
7#include <stdlib.h>
8#include <stdlib.h>
9
10/* Android's 'bionic' library, for one, doesn't have this */
11
12char *get_current_dir_name(void)
13{
14 char pwd[PATH_MAX];
15
16 return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
17}
18#endif // HAVE_GET_CURRENT_DIR_NAME
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 58f6a9ceb590..4503f3ca45ab 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1474,6 +1474,8 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
1474 decoder->have_calc_cyc_to_tsc = false; 1474 decoder->have_calc_cyc_to_tsc = false;
1475 intel_pt_calc_cyc_to_tsc(decoder, true); 1475 intel_pt_calc_cyc_to_tsc(decoder, true);
1476 } 1476 }
1477
1478 intel_pt_log_to("Setting timestamp", decoder->timestamp);
1477} 1479}
1478 1480
1479static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder) 1481static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
@@ -1514,6 +1516,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
1514 decoder->timestamp = timestamp; 1516 decoder->timestamp = timestamp;
1515 1517
1516 decoder->timestamp_insn_cnt = 0; 1518 decoder->timestamp_insn_cnt = 0;
1519
1520 intel_pt_log_to("Setting timestamp", decoder->timestamp);
1517} 1521}
1518 1522
1519/* Walk PSB+ packets when already in sync. */ 1523/* Walk PSB+ packets when already in sync. */
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
index e02bc7b166a0..5e64da270f97 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
@@ -31,6 +31,11 @@ static FILE *f;
31static char log_name[MAX_LOG_NAME]; 31static char log_name[MAX_LOG_NAME];
32bool intel_pt_enable_logging; 32bool intel_pt_enable_logging;
33 33
34void *intel_pt_log_fp(void)
35{
36 return f;
37}
38
34void intel_pt_log_enable(void) 39void intel_pt_log_enable(void)
35{ 40{
36 intel_pt_enable_logging = true; 41 intel_pt_enable_logging = true;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
index 45b64f93f358..cc084937f701 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -22,6 +22,7 @@
22 22
23struct intel_pt_pkt; 23struct intel_pt_pkt;
24 24
25void *intel_pt_log_fp(void);
25void intel_pt_log_enable(void); 26void intel_pt_log_enable(void);
26void intel_pt_log_disable(void); 27void intel_pt_log_disable(void);
27void intel_pt_log_set_name(const char *name); 28void intel_pt_log_set_name(const char *name);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 86cc9a64e982..149ff361ca78 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -206,6 +206,16 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
206 intel_pt_dump(pt, buf, len); 206 intel_pt_dump(pt, buf, len);
207} 207}
208 208
209static void intel_pt_log_event(union perf_event *event)
210{
211 FILE *f = intel_pt_log_fp();
212
213 if (!intel_pt_enable_logging || !f)
214 return;
215
216 perf_event__fprintf(event, f);
217}
218
209static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, 219static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
210 struct auxtrace_buffer *b) 220 struct auxtrace_buffer *b)
211{ 221{
@@ -2010,9 +2020,9 @@ static int intel_pt_process_event(struct perf_session *session,
2010 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) 2020 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2011 err = intel_pt_context_switch(pt, event, sample); 2021 err = intel_pt_context_switch(pt, event, sample);
2012 2022
2013 intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n", 2023 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
2014 perf_event__name(event->header.type), event->header.type, 2024 event->header.type, sample->cpu, sample->time, timestamp);
2015 sample->cpu, sample->time, timestamp); 2025 intel_pt_log_event(event);
2016 2026
2017 return err; 2027 return err;
2018} 2028}
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index cf8bd123cf73..aed170bd4384 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -18,6 +18,7 @@
18#include <stdio.h> 18#include <stdio.h>
19#include <string.h> 19#include <string.h>
20#include <unistd.h> 20#include <unistd.h>
21#include <asm/bug.h>
21 22
22struct namespaces *namespaces__new(struct namespaces_event *event) 23struct namespaces *namespaces__new(struct namespaces_event *event)
23{ 24{
@@ -186,6 +187,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
186 char curpath[PATH_MAX]; 187 char curpath[PATH_MAX];
187 int oldns = -1; 188 int oldns = -1;
188 int newns = -1; 189 int newns = -1;
190 char *oldcwd = NULL;
189 191
190 if (nc == NULL) 192 if (nc == NULL)
191 return; 193 return;
@@ -199,9 +201,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
199 if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX) 201 if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
200 return; 202 return;
201 203
204 oldcwd = get_current_dir_name();
205 if (!oldcwd)
206 return;
207
202 oldns = open(curpath, O_RDONLY); 208 oldns = open(curpath, O_RDONLY);
203 if (oldns < 0) 209 if (oldns < 0)
204 return; 210 goto errout;
205 211
206 newns = open(nsi->mntns_path, O_RDONLY); 212 newns = open(nsi->mntns_path, O_RDONLY);
207 if (newns < 0) 213 if (newns < 0)
@@ -210,11 +216,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
210 if (setns(newns, CLONE_NEWNS) < 0) 216 if (setns(newns, CLONE_NEWNS) < 0)
211 goto errout; 217 goto errout;
212 218
219 nc->oldcwd = oldcwd;
213 nc->oldns = oldns; 220 nc->oldns = oldns;
214 nc->newns = newns; 221 nc->newns = newns;
215 return; 222 return;
216 223
217errout: 224errout:
225 free(oldcwd);
218 if (oldns > -1) 226 if (oldns > -1)
219 close(oldns); 227 close(oldns);
220 if (newns > -1) 228 if (newns > -1)
@@ -223,11 +231,16 @@ errout:
223 231
224void nsinfo__mountns_exit(struct nscookie *nc) 232void nsinfo__mountns_exit(struct nscookie *nc)
225{ 233{
226 if (nc == NULL || nc->oldns == -1 || nc->newns == -1) 234 if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd)
227 return; 235 return;
228 236
229 setns(nc->oldns, CLONE_NEWNS); 237 setns(nc->oldns, CLONE_NEWNS);
230 238
239 if (nc->oldcwd) {
240 WARN_ON_ONCE(chdir(nc->oldcwd));
241 zfree(&nc->oldcwd);
242 }
243
231 if (nc->oldns > -1) { 244 if (nc->oldns > -1) {
232 close(nc->oldns); 245 close(nc->oldns);
233 nc->oldns = -1; 246 nc->oldns = -1;
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index cae1a9a39722..d5f46c09ea31 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -38,6 +38,7 @@ struct nsinfo {
38struct nscookie { 38struct nscookie {
39 int oldns; 39 int oldns;
40 int newns; 40 int newns;
41 char *oldcwd;
41}; 42};
42 43
43int nsinfo__init(struct nsinfo *nsi); 44int nsinfo__init(struct nsinfo *nsi);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7799788f662f..7e49baad304d 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -773,7 +773,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
773 773
774 if (!is_arm_pmu_core(name)) { 774 if (!is_arm_pmu_core(name)) {
775 pname = pe->pmu ? pe->pmu : "cpu"; 775 pname = pe->pmu ? pe->pmu : "cpu";
776 if (strncmp(pname, name, strlen(pname))) 776 if (strcmp(pname, name))
777 continue; 777 continue;
778 } 778 }
779 779
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 14508ee7707a..ece040b799f6 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -59,6 +59,10 @@ int fetch_kernel_version(unsigned int *puint,
59 59
60const char *perf_tip(const char *dirpath); 60const char *perf_tip(const char *dirpath);
61 61
62#ifndef HAVE_GET_CURRENT_DIR_NAME
63char *get_current_dir_name(void);
64#endif
65
62#ifndef HAVE_SCHED_GETCPU_SUPPORT 66#ifndef HAVE_SCHED_GETCPU_SUPPORT
63int sched_getcpu(void); 67int sched_getcpu(void);
64#endif 68#endif
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 1dd5f4fcffd5..db66a952c173 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -129,7 +129,7 @@ WARNINGS += $(call cc-supports,-Wno-pointer-sign)
129WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) 129WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
130WARNINGS += -Wshadow 130WARNINGS += -Wshadow
131 131
132CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \ 132override CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
133 -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE 133 -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE
134 134
135UTIL_OBJS = utils/helpers/amd.o utils/helpers/msr.o \ 135UTIL_OBJS = utils/helpers/amd.o utils/helpers/msr.o \
@@ -156,12 +156,12 @@ LIB_SRC = lib/cpufreq.c lib/cpupower.c lib/cpuidle.c
156LIB_OBJS = lib/cpufreq.o lib/cpupower.o lib/cpuidle.o 156LIB_OBJS = lib/cpufreq.o lib/cpupower.o lib/cpuidle.o
157LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS)) 157LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS))
158 158
159CFLAGS += -pipe 159override CFLAGS += -pipe
160 160
161ifeq ($(strip $(NLS)),true) 161ifeq ($(strip $(NLS)),true)
162 INSTALL_NLS += install-gmo 162 INSTALL_NLS += install-gmo
163 COMPILE_NLS += create-gmo 163 COMPILE_NLS += create-gmo
164 CFLAGS += -DNLS 164 override CFLAGS += -DNLS
165endif 165endif
166 166
167ifeq ($(strip $(CPUFREQ_BENCH)),true) 167ifeq ($(strip $(CPUFREQ_BENCH)),true)
@@ -175,7 +175,7 @@ ifeq ($(strip $(STATIC)),true)
175 UTIL_SRC += $(LIB_SRC) 175 UTIL_SRC += $(LIB_SRC)
176endif 176endif
177 177
178CFLAGS += $(WARNINGS) 178override CFLAGS += $(WARNINGS)
179 179
180ifeq ($(strip $(V)),false) 180ifeq ($(strip $(V)),false)
181 QUIET=@ 181 QUIET=@
@@ -188,10 +188,10 @@ export QUIET ECHO
188 188
189# if DEBUG is enabled, then we do not strip or optimize 189# if DEBUG is enabled, then we do not strip or optimize
190ifeq ($(strip $(DEBUG)),true) 190ifeq ($(strip $(DEBUG)),true)
191 CFLAGS += -O1 -g -DDEBUG 191 override CFLAGS += -O1 -g -DDEBUG
192 STRIPCMD = /bin/true -Since_we_are_debugging 192 STRIPCMD = /bin/true -Since_we_are_debugging
193else 193else
194 CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer 194 override CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
195 STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment 195 STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
196endif 196endif
197 197
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index d79ab161cc75..f68b4bc55273 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -9,7 +9,7 @@ endif
9ifeq ($(strip $(STATIC)),true) 9ifeq ($(strip $(STATIC)),true)
10LIBS = -L../ -L$(OUTPUT) -lm 10LIBS = -L../ -L$(OUTPUT) -lm
11OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \ 11OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \
12 $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o 12 $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o
13else 13else
14LIBS = -L../ -L$(OUTPUT) -lm -lcpupower 14LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
15OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o 15OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile
index 59af84b8ef45..b1b6c43644e7 100644
--- a/tools/power/cpupower/debug/x86_64/Makefile
+++ b/tools/power/cpupower/debug/x86_64/Makefile
@@ -13,10 +13,10 @@ INSTALL = /usr/bin/install
13default: all 13default: all
14 14
15$(OUTPUT)centrino-decode: ../i386/centrino-decode.c 15$(OUTPUT)centrino-decode: ../i386/centrino-decode.c
16 $(CC) $(CFLAGS) -o $@ $< 16 $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
17 17
18$(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c 18$(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c
19 $(CC) $(CFLAGS) -o $@ $< 19 $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
20 20
21all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode 21all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
22 22
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
index 1b993fe1ce23..0c0f3e3f0d80 100644
--- a/tools/power/cpupower/lib/cpufreq.c
+++ b/tools/power/cpupower/lib/cpufreq.c
@@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
28 28
29 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", 29 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
30 cpu, fname); 30 cpu, fname);
31 return sysfs_read_file(path, buf, buflen); 31 return cpupower_read_sysfs(path, buf, buflen);
32} 32}
33 33
34/* helper function to write a new value to a /sys file */ 34/* helper function to write a new value to a /sys file */
diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
index 9bd4c7655fdb..852d25462388 100644
--- a/tools/power/cpupower/lib/cpuidle.c
+++ b/tools/power/cpupower/lib/cpuidle.c
@@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
319 319
320 snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); 320 snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
321 321
322 return sysfs_read_file(path, buf, buflen); 322 return cpupower_read_sysfs(path, buf, buflen);
323} 323}
324 324
325 325
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
index 9c395ec924de..9711d628b0f4 100644
--- a/tools/power/cpupower/lib/cpupower.c
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -15,7 +15,7 @@
15#include "cpupower.h" 15#include "cpupower.h"
16#include "cpupower_intern.h" 16#include "cpupower_intern.h"
17 17
18unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) 18unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen)
19{ 19{
20 int fd; 20 int fd;
21 ssize_t numread; 21 ssize_t numread;
@@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re
95 95
96 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s", 96 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
97 cpu, fname); 97 cpu, fname);
98 if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) 98 if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0)
99 return -1; 99 return -1;
100 *result = strtol(linebuf, &endp, 0); 100 *result = strtol(linebuf, &endp, 0);
101 if (endp == linebuf || errno == ERANGE) 101 if (endp == linebuf || errno == ERANGE)
diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h
index 92affdfbe417..4887c76d23f8 100644
--- a/tools/power/cpupower/lib/cpupower_intern.h
+++ b/tools/power/cpupower/lib/cpupower_intern.h
@@ -3,4 +3,4 @@
3#define MAX_LINE_LEN 4096 3#define MAX_LINE_LEN 4096
4#define SYSFS_PATH_MAX 255 4#define SYSFS_PATH_MAX 255
5 5
6unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); 6unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 9527d47a1070..01ec04bf91b5 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -140,8 +140,8 @@ static u32 handle[] = {
140 [6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1), 140 [6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
141}; 141};
142 142
143static unsigned long dimm_fail_cmd_flags[NUM_DCR]; 143static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
144static int dimm_fail_cmd_code[NUM_DCR]; 144static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
145 145
146static const struct nd_intel_smart smart_def = { 146static const struct nd_intel_smart smart_def = {
147 .flags = ND_INTEL_SMART_HEALTH_VALID 147 .flags = ND_INTEL_SMART_HEALTH_VALID
@@ -205,7 +205,7 @@ struct nfit_test {
205 unsigned long deadline; 205 unsigned long deadline;
206 spinlock_t lock; 206 spinlock_t lock;
207 } ars_state; 207 } ars_state;
208 struct device *dimm_dev[NUM_DCR]; 208 struct device *dimm_dev[ARRAY_SIZE(handle)];
209 struct nd_intel_smart *smart; 209 struct nd_intel_smart *smart;
210 struct nd_intel_smart_threshold *smart_threshold; 210 struct nd_intel_smart_threshold *smart_threshold;
211 struct badrange badrange; 211 struct badrange badrange;
@@ -2680,7 +2680,7 @@ static int nfit_test_probe(struct platform_device *pdev)
2680 u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle; 2680 u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
2681 int i; 2681 int i;
2682 2682
2683 for (i = 0; i < NUM_DCR; i++) 2683 for (i = 0; i < ARRAY_SIZE(handle); i++)
2684 if (nfit_handle == handle[i]) 2684 if (nfit_handle == handle[i])
2685 dev_set_drvdata(nfit_test->dimm_dev[i], 2685 dev_set_drvdata(nfit_test->dimm_dev[i],
2686 nfit_mem); 2686 nfit_mem);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index f1fe492c8e17..f0017c831e57 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -24,6 +24,7 @@ TARGETS += memory-hotplug
24TARGETS += mount 24TARGETS += mount
25TARGETS += mqueue 25TARGETS += mqueue
26TARGETS += net 26TARGETS += net
27TARGETS += netfilter
27TARGETS += nsfs 28TARGETS += nsfs
28TARGETS += powerpc 29TARGETS += powerpc
29TARGETS += proc 30TARGETS += proc
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
index 7887df693399..44ed7f29f8ab 100644
--- a/tools/testing/selftests/bpf/test_netcnt.c
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -81,7 +81,10 @@ int main(int argc, char **argv)
81 goto err; 81 goto err;
82 } 82 }
83 83
84 assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0); 84 if (system("which ping6 &>/dev/null") == 0)
85 assert(!system("ping6 localhost -c 10000 -f -q > /dev/null"));
86 else
87 assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null"));
85 88
86 if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL, 89 if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
87 &prog_cnt)) { 90 &prog_cnt)) {
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 6f61df62f690..550b7e46bf4a 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -13896,6 +13896,25 @@ static struct bpf_test tests[] = {
13896 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 13896 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13897 .result = ACCEPT, 13897 .result = ACCEPT,
13898 }, 13898 },
13899 {
13900 "calls: ctx read at start of subprog",
13901 .insns = {
13902 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13903 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13904 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
13905 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13906 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13908 BPF_EXIT_INSN(),
13909 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
13910 BPF_MOV64_IMM(BPF_REG_0, 0),
13911 BPF_EXIT_INSN(),
13912 },
13913 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
13914 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
13915 .result_unpriv = REJECT,
13916 .result = ACCEPT,
13917 },
13899}; 13918};
13900 13919
13901static int probe_filter_length(const struct bpf_insn *fp) 13920static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
new file mode 100644
index 000000000000..47ed6cef93fb
--- /dev/null
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -0,0 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0
2# Makefile for netfilter selftests
3
4TEST_PROGS := nft_trans_stress.sh
5
6include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
new file mode 100644
index 000000000000..1017313e41a8
--- /dev/null
+++ b/tools/testing/selftests/netfilter/config
@@ -0,0 +1,2 @@
1CONFIG_NET_NS=y
2NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
new file mode 100755
index 000000000000..f1affd12c4b1
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
@@ -0,0 +1,78 @@
1#!/bin/bash
2#
3# This test is for stress-testing the nf_tables config plane path vs.
4# packet path processing: Make sure we never release rules that are
5# still visible to other cpus.
6#
7# set -e
8
9# Kselftest framework requirement - SKIP code is 4.
10ksft_skip=4
11
12testns=testns1
13tables="foo bar baz quux"
14
15nft --version > /dev/null 2>&1
16if [ $? -ne 0 ];then
17 echo "SKIP: Could not run test without nft tool"
18 exit $ksft_skip
19fi
20
21ip -Version > /dev/null 2>&1
22if [ $? -ne 0 ];then
23 echo "SKIP: Could not run test without ip tool"
24 exit $ksft_skip
25fi
26
27tmp=$(mktemp)
28
29for table in $tables; do
30 echo add table inet "$table" >> "$tmp"
31 echo flush table inet "$table" >> "$tmp"
32
33 echo "add chain inet $table INPUT { type filter hook input priority 0; }" >> "$tmp"
34 echo "add chain inet $table OUTPUT { type filter hook output priority 0; }" >> "$tmp"
35 for c in $(seq 1 400); do
36 chain=$(printf "chain%03u" "$c")
37 echo "add chain inet $table $chain" >> "$tmp"
38 done
39
40 for c in $(seq 1 400); do
41 chain=$(printf "chain%03u" "$c")
42 for BASE in INPUT OUTPUT; do
43 echo "add rule inet $table $BASE counter jump $chain" >> "$tmp"
44 done
45 echo "add rule inet $table $chain counter return" >> "$tmp"
46 done
47done
48
49ip netns add "$testns"
50ip -netns "$testns" link set lo up
51
52lscpu | grep ^CPU\(s\): | ( read cpu cpunum ;
53cpunum=$((cpunum-1))
54for i in $(seq 0 $cpunum);do
55 mask=$(printf 0x%x $((1<<$i)))
56 ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
57 ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
58done)
59
60sleep 1
61
62for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
63
64for table in $tables;do
65 randsleep=$((RANDOM%10))
66 sleep $randsleep
67 ip netns exec "$testns" nft delete table inet $table 2>/dev/null
68done
69
70randsleep=$((RANDOM%10))
71sleep $randsleep
72
73pkill -9 ping
74
75wait
76
77rm -f "$tmp"
78ip netns del "$testns"
diff --git a/tools/testing/selftests/powerpc/mm/wild_bctr.c b/tools/testing/selftests/powerpc/mm/wild_bctr.c
index 1b0e9e9a2ddc..f2fa101c5a6a 100644
--- a/tools/testing/selftests/powerpc/mm/wild_bctr.c
+++ b/tools/testing/selftests/powerpc/mm/wild_bctr.c
@@ -47,8 +47,9 @@ static int ok(void)
47 return 0; 47 return 0;
48} 48}
49 49
50#define REG_POISON 0x5a5aUL 50#define REG_POISON 0x5a5a
51#define POISONED_REG(n) ((REG_POISON << 48) | ((n) << 32) | (REG_POISON << 16) | (n)) 51#define POISONED_REG(n) ((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \
52 (((unsigned long)REG_POISON) << 16) | (n))
52 53
53static inline void poison_regs(void) 54static inline void poison_regs(void)
54{ 55{
@@ -105,6 +106,20 @@ static void dump_regs(void)
105 } 106 }
106} 107}
107 108
109#ifdef _CALL_AIXDESC
110struct opd {
111 unsigned long ip;
112 unsigned long toc;
113 unsigned long env;
114};
115static struct opd bad_opd = {
116 .ip = BAD_NIP,
117};
118#define BAD_FUNC (&bad_opd)
119#else
120#define BAD_FUNC BAD_NIP
121#endif
122
108int test_wild_bctr(void) 123int test_wild_bctr(void)
109{ 124{
110 int (*func_ptr)(void); 125 int (*func_ptr)(void);
@@ -133,7 +148,7 @@ int test_wild_bctr(void)
133 148
134 poison_regs(); 149 poison_regs();
135 150
136 func_ptr = (int (*)(void))BAD_NIP; 151 func_ptr = (int (*)(void))BAD_FUNC;
137 func_ptr(); 152 func_ptr();
138 153
139 FAIL_IF(1); /* we didn't segv? */ 154 FAIL_IF(1); /* we didn't segv? */
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 6f1f4a6e1ecb..85744425b08d 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -13,7 +13,7 @@
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16/* Test readlink /proc/self/map_files/... with address 0. */ 16/* Test readlink /proc/self/map_files/... with minimum address. */
17#include <errno.h> 17#include <errno.h>
18#include <sys/types.h> 18#include <sys/types.h>
19#include <sys/stat.h> 19#include <sys/stat.h>
@@ -47,6 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
47int main(void) 47int main(void)
48{ 48{
49 const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE); 49 const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
50#ifdef __arm__
51 unsigned long va = 2 * PAGE_SIZE;
52#else
53 unsigned long va = 0;
54#endif
50 void *p; 55 void *p;
51 int fd; 56 int fd;
52 unsigned long a, b; 57 unsigned long a, b;
@@ -55,7 +60,7 @@ int main(void)
55 if (fd == -1) 60 if (fd == -1)
56 return 1; 61 return 1;
57 62
58 p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0); 63 p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
59 if (p == MAP_FAILED) { 64 if (p == MAP_FAILED) {
60 if (errno == EPERM) 65 if (errno == EPERM)
61 return 2; 66 return 2;
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index 87a04a8a5945..7607ba3e3cbe 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -134,9 +134,9 @@ def exec_cmd(args, pm, stage, command):
134 (rawout, serr) = proc.communicate() 134 (rawout, serr) = proc.communicate()
135 135
136 if proc.returncode != 0 and len(serr) > 0: 136 if proc.returncode != 0 and len(serr) > 0:
137 foutput = serr.decode("utf-8") 137 foutput = serr.decode("utf-8", errors="ignore")
138 else: 138 else:
139 foutput = rawout.decode("utf-8") 139 foutput = rawout.decode("utf-8", errors="ignore")
140 140
141 proc.stdout.close() 141 proc.stdout.close()
142 proc.stderr.close() 142 proc.stderr.close()
@@ -169,6 +169,8 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
169 file=sys.stderr) 169 file=sys.stderr)
170 print("\n{} *** Error message: \"{}\"".format(prefix, foutput), 170 print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
171 file=sys.stderr) 171 file=sys.stderr)
172 print("returncode {}; expected {}".format(proc.returncode,
173 exit_codes))
172 print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr) 174 print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
173 print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr) 175 print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
174 print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr) 176 print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
@@ -195,12 +197,18 @@ def run_one_test(pm, args, index, tidx):
195 print('-----> execute stage') 197 print('-----> execute stage')
196 pm.call_pre_execute() 198 pm.call_pre_execute()
197 (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"]) 199 (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
198 exit_code = p.returncode 200 if p:
201 exit_code = p.returncode
202 else:
203 exit_code = None
204
199 pm.call_post_execute() 205 pm.call_post_execute()
200 206
201 if (exit_code != int(tidx["expExitCode"])): 207 if (exit_code is None or exit_code != int(tidx["expExitCode"])):
202 result = False 208 result = False
203 print("exit:", exit_code, int(tidx["expExitCode"])) 209 print("exit: {!r}".format(exit_code))
210 print("exit: {}".format(int(tidx["expExitCode"])))
211 #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
204 print(procout) 212 print(procout)
205 else: 213 else:
206 if args.verbose > 0: 214 if args.verbose > 0: